diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000..f8c4036159 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,19 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +indent_style = space +indent_size = 4 + +[*.{js,mjs,json}] +indent_size = 4 + +[*.{md,yml,yaml}] +indent_size = 2 +trim_trailing_whitespace = false + +[package.json] +indent_size = 2 diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 6553e014e9..636b0e4395 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -9,7 +9,7 @@ the **[Zenko Forum](http://forum.zenko.io/)**. > Questions opened as GitHub issues will systematically be closed, and moved to > the [Zenko Forum](http://forum.zenko.io/). --------------------------------------------------------------------------------- +--- ## Avoiding duplicates @@ -21,7 +21,7 @@ any duplicates already open: - if there is a duplicate, please do not open your issue, and add a comment to the existing issue instead. --------------------------------------------------------------------------------- +--- ## Bug report information @@ -52,7 +52,7 @@ Describe the results you expected - distribution/OS, - optional: anything else you deem helpful to us. --------------------------------------------------------------------------------- +--- ## Feature Request @@ -78,10 +78,10 @@ Please provide use cases for changing the current behavior ### Additional information - Is this request for your company? Y/N - - If Y: Company name: - - Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N + - If Y: Company name: + - Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N - Are you willing to contribute this feature yourself? - Position/Title: - How did you hear about us? 
--------------------------------------------------------------------------------- +--- diff --git a/.github/docker/vault-config.json b/.github/docker/vault-config.json index a5936ca019..3ff7706186 100644 --- a/.github/docker/vault-config.json +++ b/.github/docker/vault-config.json @@ -68,7 +68,7 @@ "kmsAWS": { "noAwsArn": true, "providerName": "local", - "region": "us-east-1", + "region": "us-east-1", "endpoint": "http://0:8080", "ak": "456", "sk": "123" diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 36c702b832..a39121dbc0 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -95,10 +95,10 @@ jobs: cache: pip - name: Install python deps run: pip install flake8 + - name: Check code formatting + run: yarn run --silent check-format - name: Lint Javascript run: yarn run --silent lint -- --max-warnings 0 - - name: Lint Markdown - run: yarn run --silent lint_md - name: Lint python run: flake8 $(git ls-files "*.py") - name: Lint Yaml diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000000..76c8a4f2ae --- /dev/null +++ b/.prettierignore @@ -0,0 +1,32 @@ +# Build outputs +node_modules/ +dist/ +build/ +coverage/ + +# Configuration files that should maintain their format +package-lock.json +yarn.lock +*.log + +# Data directories +localData/ +localMetadata/ + +# Test artifacts +artifacts/ +**/junit/ + +# Binary files +*.png +*.jpg +*.jpeg +*.gif +*.ico +*.svg +*.pdf + +# Generated or vendor files +*.min.js +*.min.css +*.map \ No newline at end of file diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 0000000000..96ce45fb9e --- /dev/null +++ b/.prettierrc @@ -0,0 +1,13 @@ +{ + "singleQuote": true, + "tabWidth": 4, + "useTabs": false, + "semi": true, + "trailingComma": "es5", + "bracketSpacing": true, + "arrowParens": "avoid", + "printWidth": 120, + "endOfLine": "lf", + "bracketSameLine": false, + "singleAttributePerLine": false +} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 786c141215..20f07ad2d3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,9 @@ # Contributing rules Please follow the -[Contributing Guidelines]( -https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md). +[Contributing Guidelines](https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md). + +## Development Setup + +For development setup, code style guidelines, and formatting instructions, +see the **Development** section in the [README.md](README.md#development). diff --git a/Healthchecks.md b/Healthchecks.md index 3950c8bf29..95372bf23e 100644 --- a/Healthchecks.md +++ b/Healthchecks.md @@ -6,19 +6,19 @@ response with HTTP code - 200 OK - Server is up and running +Server is up and running - 500 Internal Server error - Server is experiencing an Internal Error +Server is experiencing an Internal Error - 400 Bad Request - Bad Request due to unsupported HTTP methods +Bad Request due to unsupported HTTP methods - 403 Forbidden - Request is not allowed due to IP restriction +Request is not allowed due to IP restriction ## Stats @@ -53,12 +53,12 @@ returned. This is accomplished by retrieving the 6 keys that represent the 6 five-second intervals. 
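For illustration only (this helper is hypothetical and not part of the CloudServer or Utapi code), the interval keys described above might be derived roughly as follows; the bullet points below spell out the same derivation:

```js
// Hypothetical sketch of the 5-second interval key scheme described in this section.
// Key names and the metric argument are assumptions for illustration, not the real schema.
const INTERVAL = 5; // seconds per interval
const TTL = 30; // seconds of history kept in Redis

function buildIntervalKeys(metric, nowInSeconds) {
    // Normalize the timestamp to the start of its 5-second interval.
    const start = nowInSeconds - (nowInSeconds % INTERVAL);
    const keys = [];
    // TTL / interval = 30 / 5 = 6 keys per metric.
    for (let i = 0; i < TTL / INTERVAL; i += 1) {
        keys.push(`${metric}:${start - i * INTERVAL}`);
    }
    return keys;
}

// Example: buildIntervalKeys('requests', 1700000030)
// -> ['requests:1700000030', 'requests:1700000025', ..., 'requests:1700000005'] (6 keys)
```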
As Redis does not have a performant RANGE query, the list of keys are built manually as follows -* Take current timestamp +- Take current timestamp -* Build each key by subtracting the interval from the timestamp (5 seconds) +- Build each key by subtracting the interval from the timestamp (5 seconds) -* Total keys for each metric (total requests, 500s etc.) is TTL / interval - 30/5 = 6 +- Total keys for each metric (total requests, 500s etc.) is TTL / interval + 30/5 = 6 Note: When Redis is queried, results from non-existent keys are set to 0. diff --git a/README.md b/README.md index 741dd1a0bb..6fcc933e2a 100644 --- a/README.md +++ b/README.md @@ -35,8 +35,7 @@ application on the go. ## Contributing In order to contribute, please follow the -[Contributing Guidelines]( -https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md). +[Contributing Guidelines](https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md). ## Installation @@ -69,6 +68,54 @@ If you get an error regarding level-down bindings, try clearing your yarn cache: yarn cache clean ``` +## Development + +### Code Style and Formatting + +This project uses ESLint for code quality and Prettier for consistent code formatting. + +#### Linting + +Check code quality with ESLint: + +```shell +yarn run lint +``` + +#### Formatting + +Format all code with Prettier: + +```shell +yarn run format +``` + +Check if code is properly formatted: + +```shell +yarn run check-format +``` + +#### Pre-commit Guidelines + +Before submitting a pull request, ensure that: + +1. **Code is properly formatted**: Run `yarn run check-format` to verify +2. **Code passes linting**: Run `yarn run lint` to check for issues +3. **Tests pass**: Run `yarn test` for unit tests + +The CI pipeline will automatically check formatting and linting, so make sure these pass locally first. + +#### Code Style Rules + +- **Quotes**: Use single quotes (`'`) instead of double quotes (`"`) +- **Indentation**: Use 4 spaces (no tabs) +- **Line Length**: Maximum 120 characters per line +- **Semicolons**: Always use semicolons +- **Trailing Commas**: Use trailing commas in ES5+ contexts + +Prettier will automatically enforce most of these rules when you run `yarn run format`. + ## Run it with a file backend ```shell @@ -85,8 +132,8 @@ a secret key of verySecretKey1. By default the metadata files will be saved in the localMetadata directory and the data files will be saved in the localData directory within the ./S3 directory on your -machine. These directories have been pre-created within the -repository. If you would like to save the data or metadata in +machine. These directories have been pre-created within the +repository. If you would like to save the data or metadata in different locations of your choice, you must specify them with absolute paths. 
So, when starting the server: diff --git a/TESTING.md b/TESTING.md index 68b8ff7a08..94f48527e0 100644 --- a/TESTING.md +++ b/TESTING.md @@ -10,26 +10,26 @@ ### Features tested - Authentication - - Building signature - - Checking timestamp - - Canonicalization - - Error Handling + - Building signature + - Checking timestamp + - Canonicalization + - Error Handling - Bucket Metadata API - - GET, PUT, DELETE Bucket Metadata + - GET, PUT, DELETE Bucket Metadata - s3 API - - GET Service - - GET, PUT, DELETE, HEAD Object - - GET, PUT, DELETE, HEAD Bucket - - ACL's - - Bucket Policies - - Lifecycle - - Range requests - - Multi-part upload + - GET Service + - GET, PUT, DELETE, HEAD Object + - GET, PUT, DELETE, HEAD Bucket + - ACL's + - Bucket Policies + - Lifecycle + - Range requests + - Multi-part upload - Routes - - GET, PUT, PUTRAW, DELETE, HEAD for objects and buckets + - GET, PUT, PUTRAW, DELETE, HEAD for objects and buckets ## Functional Tests diff --git a/bin/metrics_server.js b/bin/metrics_server.js index 752eb911f8..a44cf517fc 100755 --- a/bin/metrics_server.js +++ b/bin/metrics_server.js @@ -1,18 +1,11 @@ #!/usr/bin/env node 'use strict'; -const { - startWSManagementClient, - startPushConnectionHealthCheckServer, -} = require('../lib/management/push'); +const { startWSManagementClient, startPushConnectionHealthCheckServer } = require('../lib/management/push'); const logger = require('../lib/utilities/logger'); -const { - PUSH_ENDPOINT: pushEndpoint, - INSTANCE_ID: instanceId, - MANAGEMENT_TOKEN: managementToken, -} = process.env; +const { PUSH_ENDPOINT: pushEndpoint, INSTANCE_ID: instanceId, MANAGEMENT_TOKEN: managementToken } = process.env; if (!pushEndpoint) { logger.error('missing push endpoint env var'); diff --git a/bin/search_bucket.js b/bin/search_bucket.js index a8964f27e2..e2feb10457 100755 --- a/bin/search_bucket.js +++ b/bin/search_bucket.js @@ -9,15 +9,7 @@ const http = require('http'); const https = require('https'); const logger = require('../lib/utilities/logger'); -function _performSearch(host, - port, - bucketName, - query, - listVersions, - accessKey, - secretKey, - sessionToken, - verbose, ssl) { +function _performSearch(host, port, bucketName, query, listVersions, accessKey, secretKey, sessionToken, verbose, ssl) { const escapedSearch = encodeURIComponent(query); const options = { host, @@ -88,12 +80,13 @@ function searchBucket() { .option('-h, --host ', 'Host of the server') .option('-p, --port ', 'Port of the server') .option('-s', '--ssl', 'Enable ssl') - .option('-l, --list-versions', 'List all versions of the objects that meet the search query, ' + - 'otherwise only list the latest version') + .option( + '-l, --list-versions', + 'List all versions of the objects that meet the search query, ' + 'otherwise only list the latest version' + ) .option('-v, --verbose') .parse(process.argv); - const { host, port, accessKey, secretKey, sessionToken, bucket, query, listVersions, verbose, ssl } = - commander; + const { host, port, accessKey, secretKey, sessionToken, bucket, query, listVersions, verbose, ssl } = commander; if (!host || !port || !accessKey || !secretKey || !bucket || !query) { logger.error('missing parameter'); @@ -101,8 +94,7 @@ function searchBucket() { process.exit(1); } - _performSearch(host, port, bucket, query, listVersions, accessKey, secretKey, sessionToken, verbose, - ssl); + _performSearch(host, port, bucket, query, listVersions, accessKey, secretKey, sessionToken, verbose, ssl); } searchBucket(); diff --git a/bin/secure_channel_proxy.js 
b/bin/secure_channel_proxy.js index 165bf94c74..8b235f7fe0 100755 --- a/bin/secure_channel_proxy.js +++ b/bin/secure_channel_proxy.js @@ -1,18 +1,11 @@ #!/usr/bin/env node 'use strict'; -const { - startWSManagementClient, - startPushConnectionHealthCheckServer, -} = require('../lib/management/push'); +const { startWSManagementClient, startPushConnectionHealthCheckServer } = require('../lib/management/push'); const logger = require('../lib/utilities/logger'); -const { - PUSH_ENDPOINT: pushEndpoint, - INSTANCE_ID: instanceId, - MANAGEMENT_TOKEN: managementToken, -} = process.env; +const { PUSH_ENDPOINT: pushEndpoint, INSTANCE_ID: instanceId, MANAGEMENT_TOKEN: managementToken } = process.env; if (!pushEndpoint) { logger.error('missing push endpoint env var'); diff --git a/conf/authdata.json b/conf/authdata.json index 8cd34421c3..5f58ebde09 100644 --- a/conf/authdata.json +++ b/conf/authdata.json @@ -1,56 +1,69 @@ { - "accounts": [{ - "name": "Bart", - "email": "sampleaccount1@sampling.com", - "arn": "arn:aws:iam::123456789012:root", - "canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be", - "shortid": "123456789012", - "keys": [{ - "access": "accessKey1", - "secret": "verySecretKey1" - }] - }, { - "name": "Lisa", - "email": "sampleaccount2@sampling.com", - "arn": "arn:aws:iam::123456789013:root", - "canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf", - "shortid": "123456789013", - "keys": [{ - "access": "accessKey2", - "secret": "verySecretKey2" - }] - }, - { - "name": "Clueso", - "email": "inspector@clueso.info", - "arn": "arn:aws:iam::123456789014:root", - "canonicalID": "http://acs.zenko.io/accounts/service/clueso", - "shortid": "123456789014", - "keys": [{ - "access": "cluesoKey1", - "secret": "cluesoSecretKey1" - }] - }, - { - "name": "Replication", - "email": "inspector@replication.info", - "arn": "arn:aws:iam::123456789015:root", - "canonicalID": "http://acs.zenko.io/accounts/service/replication", - "shortid": "123456789015", - "keys": [{ - "access": "replicationKey1", - "secret": "replicationSecretKey1" - }] - }, - { - "name": "Lifecycle", - "email": "inspector@lifecycle.info", - "arn": "arn:aws:iam::123456789016:root", - "canonicalID": "http://acs.zenko.io/accounts/service/lifecycle", - "shortid": "123456789016", - "keys": [{ - "access": "lifecycleKey1", - "secret": "lifecycleSecretKey1" - }] - }] + "accounts": [ + { + "name": "Bart", + "email": "sampleaccount1@sampling.com", + "arn": "arn:aws:iam::123456789012:root", + "canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be", + "shortid": "123456789012", + "keys": [ + { + "access": "accessKey1", + "secret": "verySecretKey1" + } + ] + }, + { + "name": "Lisa", + "email": "sampleaccount2@sampling.com", + "arn": "arn:aws:iam::123456789013:root", + "canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf", + "shortid": "123456789013", + "keys": [ + { + "access": "accessKey2", + "secret": "verySecretKey2" + } + ] + }, + { + "name": "Clueso", + "email": "inspector@clueso.info", + "arn": "arn:aws:iam::123456789014:root", + "canonicalID": "http://acs.zenko.io/accounts/service/clueso", + "shortid": "123456789014", + "keys": [ + { + "access": "cluesoKey1", + "secret": "cluesoSecretKey1" + } + ] + }, + { + "name": "Replication", + "email": "inspector@replication.info", + "arn": "arn:aws:iam::123456789015:root", + "canonicalID": "http://acs.zenko.io/accounts/service/replication", + "shortid": "123456789015", + "keys": [ + { + "access": 
"replicationKey1", + "secret": "replicationSecretKey1" + } + ] + }, + { + "name": "Lifecycle", + "email": "inspector@lifecycle.info", + "arn": "arn:aws:iam::123456789016:root", + "canonicalID": "http://acs.zenko.io/accounts/service/lifecycle", + "shortid": "123456789016", + "keys": [ + { + "access": "lifecycleKey1", + "secret": "lifecycleSecretKey1" + } + ] + } + ] } diff --git a/config.json b/config.json index 185d752d87..ddb6e5cf36 100644 --- a/config.json +++ b/config.json @@ -14,29 +14,34 @@ "zenko-cloudserver-replicator": "us-east-1", "lb": "us-east-1" }, - "websiteEndpoints": ["s3-website-us-east-1.amazonaws.com", - "s3-website.us-east-2.amazonaws.com", - "s3-website-us-west-1.amazonaws.com", - "s3-website-us-west-2.amazonaws.com", - "s3-website.ap-south-1.amazonaws.com", - "s3-website.ap-northeast-2.amazonaws.com", - "s3-website-ap-southeast-1.amazonaws.com", - "s3-website-ap-southeast-2.amazonaws.com", - "s3-website-ap-northeast-1.amazonaws.com", - "s3-website.eu-central-1.amazonaws.com", - "s3-website-eu-west-1.amazonaws.com", - "s3-website-sa-east-1.amazonaws.com", - "s3-website.localhost", - "s3-website.scality.test", - "zenkoazuretest.blob.core.windows.net"], - "replicationEndpoints": [{ - "site": "zenko", - "servers": ["127.0.0.1:8000"], - "default": true - }, { - "site": "us-east-2", - "type": "aws_s3" - }], + "websiteEndpoints": [ + "s3-website-us-east-1.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ap-northeast-2.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + "s3-website.localhost", + "s3-website.scality.test", + "zenkoazuretest.blob.core.windows.net" + ], + "replicationEndpoints": [ + { + "site": "zenko", + "servers": ["127.0.0.1:8000"], + "default": true + }, + { + "site": "us-east-2", + "type": "aws_s3" + } + ], "backbeat": { "host": "localhost", "port": 8900 @@ -135,7 +140,7 @@ "kmsHideScalityArn": false, "kmsAWS": { "providerName": "aws", - "region": "us-east-1", + "region": "us-east-1", "endpoint": "http://127.0.0.1:8080", "ak": "tbd", "sk": "tbd" diff --git a/constants.js b/constants.js index 3d83d33f22..8e2e49a74a 100644 --- a/constants.js +++ b/constants.js @@ -46,8 +46,7 @@ const constants = { // only public resources publicId: 'http://acs.amazonaws.com/groups/global/AllUsers', // All Authenticated Users is an ACL group. - allAuthedUsersId: 'http://acs.amazonaws.com/groups/' + - 'global/AuthenticatedUsers', + allAuthedUsersId: 'http://acs.amazonaws.com/groups/' + 'global/AuthenticatedUsers', // LogId is used for the AWS logger to write the logs // to the destination bucket. This style of logging is // to be implemented later but the logId is used in the @@ -74,8 +73,7 @@ const constants = { // Max size on put part or copy part is 5GB. For functional // testing use 110 MB as max - maximumAllowedPartSize: process.env.MPU_TESTING === 'yes' ? 110100480 : - 5368709120, + maximumAllowedPartSize: process.env.MPU_TESTING === 'yes' ? 
110100480 : 5368709120, // Max size allowed in a single put object request is 5GB // https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html @@ -90,8 +88,7 @@ const constants = { maxHttpHeadersSize: 14122, // hex digest of sha256 hash of empty string: - emptyStringHash: crypto.createHash('sha256') - .update('', 'binary').digest('hex'), + emptyStringHash: crypto.createHash('sha256').update('', 'binary').digest('hex'), // Queries supported by AWS that we do not currently support. // Non-bucket queries @@ -147,8 +144,7 @@ const constants = { /* eslint-enable camelcase */ mpuMDStoredOnS3Backend: { azure: true }, azureAccountNameRegex: /^[a-z0-9]{3,24}$/, - base64Regex: new RegExp('^(?:[A-Za-z0-9+/]{4})*' + - '(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$'), + base64Regex: new RegExp('^(?:[A-Za-z0-9+/]{4})*' + '(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$'), productName: 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko', // location constraint delimiter zenkoSeparator: ':', @@ -184,32 +180,15 @@ const constants = { invalidObjectUserMetadataHeader: 'x-amz-missing-meta', // Bucket specific queries supported by AWS that we do not currently support // these queries may or may not be supported at object level - unsupportedBucketQueries: [ - ], - suppressedUtapiEventFields: [ - 'object', - 'location', - 'versionId', - ], - allowedUtapiEventFilterFields: [ - 'operationId', - 'location', - 'account', - 'user', - 'bucket', - ], - arrayOfAllowed: [ - 'objectPutTagging', - 'objectPutLegalHold', - 'objectPutRetention', - ], + unsupportedBucketQueries: [], + suppressedUtapiEventFields: ['object', 'location', 'versionId'], + allowedUtapiEventFilterFields: ['operationId', 'location', 'account', 'user', 'bucket'], + arrayOfAllowed: ['objectPutTagging', 'objectPutLegalHold', 'objectPutRetention'], allowedUtapiEventFilterStates: ['allow', 'deny'], allowedRestoreObjectRequestTierValues: ['Standard'], // Only STANDARD class is supported, but keep the option to override supported values for now. // This should be removed in CLDSRV-639. - validStorageClasses: process.env.VALID_STORAGE_CLASSES?.split(',') || [ - 'STANDARD', - ], + validStorageClasses: process.env.VALID_STORAGE_CLASSES?.split(',') || ['STANDARD'], lifecycleListing: { CURRENT_TYPE: 'current', NON_CURRENT_TYPE: 'noncurrent', @@ -242,11 +221,7 @@ const constants = { assumedRoleArnResourceType: 'assumed-role', // Session name of the backbeat lifecycle assumed role session. 
backbeatLifecycleSessionName: 'backbeat-lifecycle', - actionsToConsiderAsObjectPut: [ - 'initiateMultipartUpload', - 'objectPutPart', - 'completeMultipartUpload', - ], + actionsToConsiderAsObjectPut: ['initiateMultipartUpload', 'objectPutPart', 'completeMultipartUpload'], // if requester is not bucket owner, bucket policy actions should be denied with // MethodNotAllowed error onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'], diff --git a/dataserver.js b/dataserver.js index f1321422fb..f6bcabe481 100644 --- a/dataserver.js +++ b/dataserver.js @@ -14,9 +14,10 @@ process.on('uncaughtException', err => { process.exit(1); }); -if (config.backends.data === 'file' || - (config.backends.data === 'multiple' && - config.backends.metadata !== 'scality')) { +if ( + config.backends.data === 'file' || + (config.backends.data === 'multiple' && config.backends.metadata !== 'scality') +) { const dataServer = new arsenal.network.rest.RESTServer({ bindAddress: config.dataDaemon.bindAddress, port: config.dataDaemon.port, @@ -30,8 +31,7 @@ if (config.backends.data === 'file' || }); dataServer.setup(err => { if (err) { - logger.error('Error initializing REST data server', - { error: err }); + logger.error('Error initializing REST data server', { error: err }); return; } dataServer.start(); diff --git a/docs/OBJECT_LOCK_TEST_PLAN.md b/docs/OBJECT_LOCK_TEST_PLAN.md index 2c97ae5666..5072451d08 100644 --- a/docs/OBJECT_LOCK_TEST_PLAN.md +++ b/docs/OBJECT_LOCK_TEST_PLAN.md @@ -21,7 +21,7 @@ the new API actions. ### putBucket tests - passing option to enable object lock updates bucket metadata and enables - bucket versioning + bucket versioning ### putBucketVersioning tests @@ -43,17 +43,17 @@ the new API actions. ### initiateMultipartUpload tests - mpu object initiated with retention information should include retention - information + information ### putObjectLockConfiguration tests - putting configuration as non-bucket-owner user returns AccessDenied error - disabling object lock on bucket created with object lock returns error - enabling object lock on bucket created without object lock returns - InvalidBucketState error + InvalidBucketState error - enabling object lock with token on bucket created without object lock succeeds - putting valid object lock configuration when bucket does not have object - lock enabled returns error (InvalidRequest?) + lock enabled returns error (InvalidRequest?) - putting valid object lock configuration updates bucket metadata - putting invalid object lock configuration returns error - ObjectLockEnabled !== "Enabled" @@ -66,35 +66,35 @@ the new API actions. 
- getting configuration as non-bucket-owner user returns AccessDenied error - getting configuration when none is set returns - ObjectLockConfigurationNotFoundError error + ObjectLockConfigurationNotFoundError error - getting configuration returns correct object lock configuration for bucket ### putObjectRetention - putting retention as non-bucket-owner user returns AccessDenied error - putting retention on object in bucket without object lock enabled returns - InvalidRequest error + InvalidRequest error - putting valid retention period updates object metadata ### getObjectRetention - getting retention as non-bucket-owner user returns AccessDenied error - getting retention when none is set returns NoSuchObjectLockConfiguration - error + error - getting retention returns correct object retention period ### putObjectLegalHold - putting legal hold as non-bucket-owner user returns AccessDenied error - putting legal hold on object in bucket without object lock enabled returns - InvalidRequest error + InvalidRequest error - putting valid legal hold updates object metadata ### getObjectLegalHold - getting legal hold as non-bucket-owner user returns AccessDenied error - getting legal hold when none is set returns NoSuchObjectLockConfiguration - error + error - getting legal hold returns correct object legal hold ## End to End Tests @@ -102,22 +102,22 @@ the new API actions. ### Scenarios - Create bucket with object lock enabled. Put object. Put object lock - configuration. Put another object. + configuration. Put another object. - Ensure object put before configuration does not have retention period set - Ensure object put after configuration does have retention period set - Create bucket without object lock. Put object. Enable object lock with token - and put object lock configuration. Put another object. + and put object lock configuration. Put another object. - Ensure object put before configuration does not have retention period set - Ensure object put after configuration does have retention period set - Create bucket with object lock enabled and put configuration with COMPLIANCE - mode. Put object. + mode. Put object. - Ensure object cannot be deleted (returns AccessDenied error). - Ensure object cannot be overwritten. - Create bucket with object lock enabled and put configuration with GOVERNANCE - mode. Put object. + mode. Put object. - Ensure user without permission cannot delete object - Ensure user without permission cannot overwrite object - Ensure user with permission can delete object @@ -126,29 +126,29 @@ the new API actions. - Ensure user with permission cannot shorten retention period - Create bucket with object lock enabled and put configuration. Edit bucket - metadata so retention period is expired. Put object. + metadata so retention period is expired. Put object. - Ensure object can be deleted. - Ensure object can be overwritten. - Create bucket with object lock enabled and put configuration. Edit bucket - metadata so retention period is expired. Put object. Put new retention - period on object. + metadata so retention period is expired. Put object. Put new retention + period on object. - Ensure object cannot be deleted. - Ensure object cannot be overwritten. - Create bucket with object locked enabled and put configuration. Put object. - Edit object metadata so retention period is past expiration. + Edit object metadata so retention period is past expiration. - Ensure object can be deleted. - Ensure object can be overwritten. 
- Create bucket with object lock enabled and put configuration. Edit bucket - metadata so retention period is expired. Put object. Put legal hold - on object. + metadata so retention period is expired. Put object. Put legal hold + on object. - Ensure object cannot be deleted. - Ensure object cannot be overwritten. - Create bucket with object lock enabled and put configuration. Put object. - Check object retention. Change bucket object lock configuration. + Check object retention. Change bucket object lock configuration. - Ensure object retention period has not changed with bucket configuration. - Create bucket with object lock enabled. Put object with legal hold. @@ -156,6 +156,6 @@ the new API actions. - Ensure object cannot be overwritten. - Create bucket with object lock enabled. Put object with legal hold. Remove - legal hold. + legal hold. - Ensure object can be deleted. - Ensure object can be overwritten. diff --git a/docs/RELEASE.md b/docs/RELEASE.md index 53c1c80732..7240fa8ff1 100644 --- a/docs/RELEASE.md +++ b/docs/RELEASE.md @@ -5,8 +5,8 @@ Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages). CloudServer has a few images there: -* Cloudserver container image: ghcr.io/scality/cloudserver -* Dashboard oras image: ghcr.io/scality/cloudserver/cloudserver-dashboards +- Cloudserver container image: ghcr.io/scality/cloudserver +- Dashboard oras image: ghcr.io/scality/cloudserver/cloudserver-dashboards With every CI build, the CI will push images, tagging the content with the developer branch's short SHA-1 commit hash. @@ -26,7 +26,7 @@ docker pull ghcr.io/scality/cloudserver: To release a production image: -* Create a PR to bump the package version +- Create a PR to bump the package version Update Cloudserver's `package.json` by bumping it to the relevant next version in a new PR. For example, if the last released version was `8.4.7`, the next version would be `8.4.8`. @@ -39,34 +39,31 @@ To release a production image: } ``` -* Review & merge the PR - -* Create the release on GitHub - - * Go the Release tab (https://github.com/scality/cloudserver/releases); - * Click on the `Draft new release button`; - * In the `tag` field, type the name of the release (`8.4.8`), and confirm - to create the tag on publish; - * Click on `Generate release notes` button to fill the fields; - * Rename the release to `Release x.y.z` (e.g. `Release 8.4.8` in this case); - * Click to `Publish the release` to create the GitHub release and git tag - - Notes: - * the Git tag will be created automatically. - * this should be done as soon as the PR is merged, so that the tag - is put on the "version bump" commit. - -* With the following parameters, [force a build here](https://eve.devsca.com/github/scality/cloudserver/#/builders/3/force/force) - - * Branch Name: The one used for the tag earlier.
In this example `development/8.4` - * Override Stage: 'release' - * Extra properties: - * name: `'tag'`, value: `[release version]`, in this example`'8.4.8'` - -* Release the release version on Jira - - * Go to the [CloudServer release page](https://scality.atlassian.net/projects/CLDSRV?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page) - * Create a next version - * Name: `[next version]`, in this example `8.4.9` - * Click `...` and select `Release` on the recently released version (`8.4.8`) - * Fill in the field to move incomplete version to the next one +- Review & merge the PR + +- Create the release on GitHub + - Go the Release tab (https://github.com/scality/cloudserver/releases); + - Click on the `Draft new release button`; + - In the `tag` field, type the name of the release (`8.4.8`), and confirm + to create the tag on publish; + - Click on `Generate release notes` button to fill the fields; + - Rename the release to `Release x.y.z` (e.g. `Release 8.4.8` in this case); + - Click to `Publish the release` to create the GitHub release and git tag + + Notes: + - the Git tag will be created automatically. + - this should be done as soon as the PR is merged, so that the tag + is put on the "version bump" commit. + +- With the following parameters, [force a build here](https://eve.devsca.com/github/scality/cloudserver/#/builders/3/force/force) + - Branch Name: The one used for the tag earlier. In this example `development/8.4` + - Override Stage: 'release' + - Extra properties: + - name: `'tag'`, value: `[release version]`, in this example`'8.4.8'` + +- Release the release version on Jira + - Go to the [CloudServer release page](https://scality.atlassian.net/projects/CLDSRV?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page) + - Create a next version + - Name: `[next version]`, in this example `8.4.9` + - Click `...` and select `Release` on the recently released version (`8.4.8`) + - Fill in the field to move incomplete version to the next one diff --git a/eslint.config.mjs b/eslint.config.mjs index 7d2efd9cb0..df91c3f04c 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -1,71 +1,34 @@ -import mocha from "eslint-plugin-mocha"; -import path from "node:path"; -import { fileURLToPath } from "node:url"; -import js from "@eslint/js"; -import { FlatCompat } from "@eslint/eslintrc"; +import mocha from 'eslint-plugin-mocha'; +import path from 'node:path'; +import { fileURLToPath } from 'node:url'; +import js from '@eslint/js'; +import { FlatCompat } from '@eslint/eslintrc'; const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); const compat = new FlatCompat({ baseDirectory: __dirname, recommendedConfig: js.configs.recommended, - allConfig: js.configs.all + allConfig: js.configs.all, }); -export default [...compat.extends('@scality/scality'), { - plugins: { - mocha, - }, +export default [ + ...compat.extends('@scality/scality'), + ...compat.extends('prettier'), + { + plugins: { + mocha, + }, - languageOptions: { - ecmaVersion: 2021, - sourceType: "script", - }, + languageOptions: { + ecmaVersion: 2021, + sourceType: 'script', + }, - rules: { - "import/extensions": "off", - "lines-around-directive": "off", - "no-underscore-dangle": "off", - "indent": "off", - "object-curly-newline": "off", - "operator-linebreak": "off", - "function-paren-newline": "off", - "import/newline-after-import": "off", - "prefer-destructuring": "off", - "implicit-arrow-linebreak": "off", - "no-bitwise": "off", - "dot-location": "off", - 
"comma-dangle": "off", - "no-undef-init": "off", - "global-require": "off", - "import/no-dynamic-require": "off", - "class-methods-use-this": "off", - "no-plusplus": "off", - "no-else-return": "off", - "object-property-newline": "off", - "import/order": "off", - "no-continue": "off", - "no-tabs": "off", - "lines-between-class-members": "off", - "prefer-spread": "off", - "no-lonely-if": "off", - "no-useless-escape": "off", - "no-restricted-globals": "off", - "no-buffer-constructor": "off", - "import/no-extraneous-dependencies": "off", - "space-unary-ops": "off", - "no-useless-return": "off", - "no-unexpected-multiline": "off", - "no-mixed-operators": "off", - "newline-per-chained-call": "off", - "operator-assignment": "off", - "spaced-comment": "off", - "comma-style": "off", - "no-restricted-properties": "off", - "new-parens": "off", - "no-multi-spaces": "off", - "quote-props": "off", - "mocha/no-exclusive-tests": "error", - "no-redeclare": ["error", { "builtinGlobals": false }], + rules: { + 'no-useless-escape': 'off', + 'mocha/no-exclusive-tests': 'error', + 'no-redeclare': ['error', { builtinGlobals: false }], + }, }, -}]; +]; diff --git a/examples/node-md-search.js b/examples/node-md-search.js index a3f29bb12b..21368354cf 100644 --- a/examples/node-md-search.js +++ b/examples/node-md-search.js @@ -11,8 +11,7 @@ const config = { }; const s3Client = new S3(config); -const encodedSearch = - encodeURIComponent('x-amz-meta-color="blue"'); +const encodedSearch = encodeURIComponent('x-amz-meta-color="blue"'); const req = s3Client.listObjects({ Bucket: 'bucketname' }); // the build event diff --git a/index.js b/index.js index f5fa36c2e2..1b31eafa78 100644 --- a/index.js +++ b/index.js @@ -4,7 +4,7 @@ require('werelogs').stderrUtils.catchAndTimestampStderr( undefined, // Do not exit as workers have their own listener that will exit // But primary don't have another listener - require('cluster').isPrimary ? 1 : null, + require('cluster').isPrimary ? 1 : null ); require('./lib/server.js')(); diff --git a/lib/Config.js b/lib/Config.js index ee06af81ae..dfe03826aa 100644 --- a/lib/Config.js +++ b/lib/Config.js @@ -33,10 +33,7 @@ const { } = require('arsenal/build/lib/network/KMSInterface'); // config paths -const configSearchPaths = [ - path.join(__dirname, '../conf'), - path.join(__dirname, '..'), -]; +const configSearchPaths = [path.join(__dirname, '../conf'), path.join(__dirname, '..')]; function findConfigFile(fileName) { if (fileName[0] === '/') { @@ -47,8 +44,10 @@ function findConfigFile(fileName) { return fs.existsSync(testFilePath); }); if (!containingPath) { - throw new Error(`Unable to find the configuration file "${fileName}" ` + - `under the paths: ${JSON.stringify(configSearchPaths)}`); + throw new Error( + `Unable to find the configuration file "${fileName}" ` + + `under the paths: ${JSON.stringify(configSearchPaths)}` + ); } return path.join(containingPath, fileName); } @@ -83,26 +82,26 @@ function assertCertPaths(key, cert, ca, basePath) { certObj.certs = {}; if (key) { const keypath = key.startsWith('/') ? key : `${basePath}/${key}`; - assert.doesNotThrow(() => - fs.accessSync(keypath, fs.F_OK | fs.R_OK), - `File not found or unreachable: ${keypath}`); + assert.doesNotThrow( + () => fs.accessSync(keypath, fs.F_OK | fs.R_OK), + `File not found or unreachable: ${keypath}` + ); certObj.paths.key = keypath; certObj.certs.key = fs.readFileSync(keypath, 'ascii'); } if (cert) { const certpath = cert.startsWith('/') ? 
cert : `${basePath}/${cert}`; - assert.doesNotThrow(() => - fs.accessSync(certpath, fs.F_OK | fs.R_OK), - `File not found or unreachable: ${certpath}`); + assert.doesNotThrow( + () => fs.accessSync(certpath, fs.F_OK | fs.R_OK), + `File not found or unreachable: ${certpath}` + ); certObj.paths.cert = certpath; certObj.certs.cert = fs.readFileSync(certpath, 'ascii'); } if (ca) { const capath = ca.startsWith('/') ? ca : `${basePath}/${ca}`; - assert.doesNotThrow(() => - fs.accessSync(capath, fs.F_OK | fs.R_OK), - `File not found or unreachable: ${capath}`); + assert.doesNotThrow(() => fs.accessSync(capath, fs.F_OK | fs.R_OK), `File not found or unreachable: ${capath}`); certObj.paths.ca = capath; certObj.certs.ca = fs.readFileSync(capath, 'ascii'); } @@ -119,105 +118,99 @@ function parseSproxydConfig(configSproxyd) { } function parseRedisConfig(redisConfig) { - const joiSchema = joi.object({ - password: joi.string().allow(''), - host: joi.string(), - port: joi.number(), - retry: joi.object({ - connectBackoff: joi.object({ - min: joi.number().required(), - max: joi.number().required(), - jitter: joi.number().required(), - factor: joi.number().required(), - deadline: joi.number().required(), + const joiSchema = joi + .object({ + password: joi.string().allow(''), + host: joi.string(), + port: joi.number(), + retry: joi.object({ + connectBackoff: joi.object({ + min: joi.number().required(), + max: joi.number().required(), + jitter: joi.number().required(), + factor: joi.number().required(), + deadline: joi.number().required(), + }), }), - }), - // sentinel config - sentinels: joi.alternatives().try( - joi.string() - .pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/) - .custom(hosts => hosts.split(',').map(item => { - const [host, port] = item.split(':'); - return { host, port: Number.parseInt(port, 10) }; - })), - joi.array().items( - joi.object({ - host: joi.string().required(), - port: joi.number().required(), - }) - ).min(1), - ), - name: joi.string(), - sentinelPassword: joi.string().allow(''), - }) - .and('host', 'port') - .and('sentinels', 'name') - .xor('host', 'sentinels') - .without('sentinels', ['host', 'port']) - .without('host', ['sentinels', 'sentinelPassword']); + // sentinel config + sentinels: joi.alternatives().try( + joi + .string() + .pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/) + .custom(hosts => + hosts.split(',').map(item => { + const [host, port] = item.split(':'); + return { host, port: Number.parseInt(port, 10) }; + }) + ), + joi + .array() + .items( + joi.object({ + host: joi.string().required(), + port: joi.number().required(), + }) + ) + .min(1) + ), + name: joi.string(), + sentinelPassword: joi.string().allow(''), + }) + .and('host', 'port') + .and('sentinels', 'name') + .xor('host', 'sentinels') + .without('sentinels', ['host', 'port']) + .without('host', ['sentinels', 'sentinelPassword']); return joi.attempt(redisConfig, joiSchema, 'bad config'); } function parseSupportedLifecycleRules(supportedLifecycleRulesConfig) { - const supportedLifecycleRulesSchema = joi.array() + const supportedLifecycleRulesSchema = joi + .array() .items(joi.string().valid(...supportedLifecycleRules)) .default(supportedLifecycleRules) .min(1); return joi.attempt( supportedLifecycleRulesConfig, supportedLifecycleRulesSchema, - 'bad supported lifecycle rules config', + 'bad supported lifecycle rules config' ); } function restEndpointsAssert(restEndpoints, locationConstraints) { - assert(typeof restEndpoints === 'object', - 'bad config: restEndpoints must be an object 
of endpoints'); - assert(Object.keys(restEndpoints).every( - r => typeof restEndpoints[r] === 'string'), - 'bad config: each endpoint must be a string'); - assert(Object.keys(restEndpoints).every( - r => typeof locationConstraints[restEndpoints[r]] === 'object'), - 'bad config: rest endpoint target not in locationConstraints'); + assert(typeof restEndpoints === 'object', 'bad config: restEndpoints must be an object of endpoints'); + assert( + Object.keys(restEndpoints).every(r => typeof restEndpoints[r] === 'string'), + 'bad config: each endpoint must be a string' + ); + assert( + Object.keys(restEndpoints).every(r => typeof locationConstraints[restEndpoints[r]] === 'object'), + 'bad config: rest endpoint target not in locationConstraints' + ); } function gcpLocationConstraintAssert(location, locationObj) { - const { - gcpEndpoint, - bucketName, - mpuBucketName, - } = locationObj.details; - const stringFields = [ - gcpEndpoint, - bucketName, - mpuBucketName, - ]; + const { gcpEndpoint, bucketName, mpuBucketName } = locationObj.details; + const stringFields = [gcpEndpoint, bucketName, mpuBucketName]; stringFields.forEach(field => { if (field !== undefined) { - assert(typeof field === 'string', - `bad config: ${field} must be a string`); + assert(typeof field === 'string', `bad config: ${field} must be a string`); } }); } function azureGetStorageAccountName(location, locationDetails) { const { azureStorageAccountName } = locationDetails; - const storageAccountNameFromEnv = - process.env[`${location}_AZURE_STORAGE_ACCOUNT_NAME`]; + const storageAccountNameFromEnv = process.env[`${location}_AZURE_STORAGE_ACCOUNT_NAME`]; return storageAccountNameFromEnv || azureStorageAccountName; } function azureGetLocationCredentials(location, locationDetails) { const storageAccessKey = - process.env[`${location}_AZURE_STORAGE_ACCESS_KEY`] || - locationDetails.azureStorageAccessKey; - const sasToken = - process.env[`${location}_AZURE_SAS_TOKEN`] || - locationDetails.sasToken; - const clientKey = - process.env[`${location}_AZURE_CLIENT_KEY`] || - locationDetails.clientKey; + process.env[`${location}_AZURE_STORAGE_ACCESS_KEY`] || locationDetails.azureStorageAccessKey; + const sasToken = process.env[`${location}_AZURE_SAS_TOKEN`] || locationDetails.sasToken; + const clientKey = process.env[`${location}_AZURE_CLIENT_KEY`] || locationDetails.clientKey; const authMethod = process.env[`${location}_AZURE_AUTH_METHOD`] || @@ -228,32 +221,27 @@ function azureGetLocationCredentials(location, locationDetails) { 'shared-key'; switch (authMethod) { - case 'shared-key': - default: - return { - authMethod, - storageAccountName: - azureGetStorageAccountName(location, locationDetails), - storageAccessKey, - }; + case 'shared-key': + default: + return { + authMethod, + storageAccountName: azureGetStorageAccountName(location, locationDetails), + storageAccessKey, + }; - case 'shared-access-signature': - return { - authMethod, - sasToken, - }; + case 'shared-access-signature': + return { + authMethod, + sasToken, + }; - case 'client-secret': - return { - authMethod, - tenantId: - process.env[`${location}_AZURE_TENANT_ID`] || - locationDetails.tenantId, - clientId: - process.env[`${location}_AZURE_CLIENT_ID`] || - locationDetails.clientId, - clientKey, - }; + case 'client-secret': + return { + authMethod, + tenantId: process.env[`${location}_AZURE_TENANT_ID`] || locationDetails.tenantId, + clientId: process.env[`${location}_AZURE_CLIENT_ID`] || locationDetails.clientId, + clientKey, + }; } } @@ -261,120 +249,125 @@ function 
azureLocationConstraintAssert(location, locationObj) { const locationParams = { ...azureGetLocationCredentials(location, locationObj.details), azureStorageEndpoint: - process.env[`${location}_AZURE_STORAGE_ENDPOINT`] || - locationObj.details.azureStorageEndpoint, + process.env[`${location}_AZURE_STORAGE_ENDPOINT`] || locationObj.details.azureStorageEndpoint, azureContainerName: locationObj.details.azureContainerName, }; Object.keys(locationParams).forEach(param => { const value = locationParams[param]; - assert.notEqual(value, undefined, - `bad location constraint: "${location}" ${param} ` + - 'must be set in locationConfig or environment variable'); - assert.strictEqual(typeof value, 'string', - `bad location constraint: "${location}" ${param} ` + - `"${value}" must be a string`); + assert.notEqual( + value, + undefined, + `bad location constraint: "${location}" ${param} ` + 'must be set in locationConfig or environment variable' + ); + assert.strictEqual( + typeof value, + 'string', + `bad location constraint: "${location}" ${param} ` + `"${value}" must be a string` + ); }); if (locationParams.authMethod === 'shared-key') { - assert(azureAccountNameRegex.test(locationParams.storageAccountName), + assert( + azureAccountNameRegex.test(locationParams.storageAccountName), `bad location constraint: "${location}" azureStorageAccountName ` + - `"${locationParams.storageAccountName}" is an invalid value`); - assert(base64Regex.test(locationParams.storageAccessKey), - `bad location constraint: "${location}" ` + - 'azureStorageAccessKey is not a valid base64 string'); + `"${locationParams.storageAccountName}" is an invalid value` + ); + assert( + base64Regex.test(locationParams.storageAccessKey), + `bad location constraint: "${location}" ` + 'azureStorageAccessKey is not a valid base64 string' + ); } - assert(isValidBucketName(locationParams.azureContainerName, []), - `bad location constraint: "${location}" ` + - 'azureContainerName is an invalid container name'); + assert( + isValidBucketName(locationParams.azureContainerName, []), + `bad location constraint: "${location}" ` + 'azureContainerName is an invalid container name' + ); } function hdClientLocationConstraintAssert(configHd) { const hdclientFields = []; if (configHd.bootstrap !== undefined) { - assert(Array.isArray(configHd.bootstrap) - && configHd.bootstrap - .every(e => typeof e === 'string'), - 'bad config: hdclient.bootstrap must be an array of strings'); - assert(configHd.bootstrap.length > 0, - 'bad config: hdclient bootstrap list is empty'); + assert( + Array.isArray(configHd.bootstrap) && configHd.bootstrap.every(e => typeof e === 'string'), + 'bad config: hdclient.bootstrap must be an array of strings' + ); + assert(configHd.bootstrap.length > 0, 'bad config: hdclient bootstrap list is empty'); hdclientFields.push('bootstrap'); } return hdclientFields; } function locationConstraintAssert(locationConstraints) { - const supportedBackends = [ - 'mem', 'file', 'scality', 'mongodb', 'tlp', - ].concat(Object.keys(validExternalBackends)); - assert(typeof locationConstraints === 'object', - 'bad config: locationConstraints must be an object'); + const supportedBackends = ['mem', 'file', 'scality', 'mongodb', 'tlp'].concat(Object.keys(validExternalBackends)); + assert(typeof locationConstraints === 'object', 'bad config: locationConstraints must be an object'); Object.keys(locationConstraints).forEach(l => { - assert(typeof locationConstraints[l] === 'object', - 'bad config: locationConstraints[region] must be an object'); - 
assert(typeof locationConstraints[l].type === 'string', - 'bad config: locationConstraints[region].type is ' + - 'mandatory and must be a string'); - assert(supportedBackends.indexOf(locationConstraints[l].type) > -1, - 'bad config: locationConstraints[region].type must ' + - `be one of ${supportedBackends}`); - assert(typeof locationConstraints[l].objectId === 'string', - 'bad config: locationConstraints[region].objectId is ' + - 'mandatory and must be a unique string across locations'); - assert(Object.keys(locationConstraints) - .filter(loc => (locationConstraints[loc].objectId === - locationConstraints[l].objectId)) - .length === 1, - 'bad config: location constraint objectId ' + - `"${locationConstraints[l].objectId}" is not unique across ` + - 'configured locations'); - assert(typeof locationConstraints[l].legacyAwsBehavior - === 'boolean', - 'bad config: locationConstraints[region]' + - '.legacyAwsBehavior is mandatory and must be a boolean'); - assert(['undefined', 'boolean'].includes( - typeof locationConstraints[l].isTransient), - 'bad config: locationConstraints[region]' + - '.isTransient must be a boolean'); + assert(typeof locationConstraints[l] === 'object', 'bad config: locationConstraints[region] must be an object'); + assert( + typeof locationConstraints[l].type === 'string', + 'bad config: locationConstraints[region].type is ' + 'mandatory and must be a string' + ); + assert( + supportedBackends.indexOf(locationConstraints[l].type) > -1, + 'bad config: locationConstraints[region].type must ' + `be one of ${supportedBackends}` + ); + assert( + typeof locationConstraints[l].objectId === 'string', + 'bad config: locationConstraints[region].objectId is ' + + 'mandatory and must be a unique string across locations' + ); + assert( + Object.keys(locationConstraints).filter( + loc => locationConstraints[loc].objectId === locationConstraints[l].objectId + ).length === 1, + 'bad config: location constraint objectId ' + + `"${locationConstraints[l].objectId}" is not unique across ` + + 'configured locations' + ); + assert( + typeof locationConstraints[l].legacyAwsBehavior === 'boolean', + 'bad config: locationConstraints[region]' + '.legacyAwsBehavior is mandatory and must be a boolean' + ); + assert( + ['undefined', 'boolean'].includes(typeof locationConstraints[l].isTransient), + 'bad config: locationConstraints[region]' + '.isTransient must be a boolean' + ); if (locationConstraints[l].sizeLimitGB !== undefined) { - assert(typeof locationConstraints[l].sizeLimitGB === 'number' || - locationConstraints[l].sizeLimitGB === null, - 'bad config: locationConstraints[region].sizeLimitGB ' + - 'must be a number (in gigabytes)'); + assert( + typeof locationConstraints[l].sizeLimitGB === 'number' || locationConstraints[l].sizeLimitGB === null, + 'bad config: locationConstraints[region].sizeLimitGB ' + 'must be a number (in gigabytes)' + ); } const details = locationConstraints[l].details; - assert(typeof details === 'object', - 'bad config: locationConstraints[region].details is ' + - 'mandatory and must be an object'); + assert( + typeof details === 'object', + 'bad config: locationConstraints[region].details is ' + 'mandatory and must be an object' + ); if (details.serverSideEncryption !== undefined) { - assert(typeof details.serverSideEncryption === 'boolean', - 'bad config: locationConstraints[region]' + - '.details.serverSideEncryption must be a boolean'); - } - const stringFields = [ - 'awsEndpoint', - 'bucketName', - 'credentialsProfile', - 'region', - ]; + assert( + typeof 
details.serverSideEncryption === 'boolean', + 'bad config: locationConstraints[region]' + '.details.serverSideEncryption must be a boolean' + ); + } + const stringFields = ['awsEndpoint', 'bucketName', 'credentialsProfile', 'region']; stringFields.forEach(field => { if (details[field] !== undefined) { - assert(typeof details[field] === 'string', - `bad config: ${field} must be a string`); + assert(typeof details[field] === 'string', `bad config: ${field} must be a string`); } }); if (details.bucketMatch !== undefined) { - assert(typeof details.bucketMatch === 'boolean', - 'bad config: details.bucketMatch must be a boolean'); + assert(typeof details.bucketMatch === 'boolean', 'bad config: details.bucketMatch must be a boolean'); } if (details.credentials !== undefined) { - assert(typeof details.credentials === 'object', - 'bad config: details.credentials must be an object'); - assert(typeof details.credentials.accessKey === 'string', - 'bad config: credentials must include accessKey as string'); - assert(typeof details.credentials.secretKey === 'string', - 'bad config: credentials must include secretKey as string'); + assert(typeof details.credentials === 'object', 'bad config: details.credentials must be an object'); + assert( + typeof details.credentials.accessKey === 'string', + 'bad config: credentials must include accessKey as string' + ); + assert( + typeof details.credentials.secretKey === 'string', + 'bad config: credentials must include secretKey as string' + ); } if (locationConstraints[l].type === 'tlp') { @@ -389,16 +382,20 @@ function locationConstraintAssert(locationConstraints) { // eslint-disable-next-line no-param-reassign locationConstraints[l].details.https = false; } else if (details.https !== undefined) { - assert(typeof details.https === 'boolean', 'bad config: ' + - 'locationConstraints[region].details https must be a boolean'); + assert( + typeof details.https === 'boolean', + 'bad config: ' + 'locationConstraints[region].details https must be a boolean' + ); } else { // eslint-disable-next-line no-param-reassign locationConstraints[l].details.https = true; } if (details.pathStyle !== undefined) { - assert(typeof details.pathStyle === 'boolean', 'bad config: ' + - 'locationConstraints[region].pathStyle must be a boolean'); + assert( + typeof details.pathStyle === 'boolean', + 'bad config: ' + 'locationConstraints[region].pathStyle must be a boolean' + ); } else if (process.env.CI_CEPH === 'true') { // eslint-disable-next-line no-param-reassign locationConstraints[l].details.pathStyle = true; @@ -408,9 +405,10 @@ function locationConstraintAssert(locationConstraints) { } if (details.supportsVersioning !== undefined) { - assert(typeof details.supportsVersioning === 'boolean', - 'bad config: locationConstraints[region].supportsVersioning' + - 'must be a boolean'); + assert( + typeof details.supportsVersioning === 'boolean', + 'bad config: locationConstraints[region].supportsVersioning' + 'must be a boolean' + ); } else { // default to true // eslint-disable-next-line no-param-reassign @@ -424,52 +422,45 @@ function locationConstraintAssert(locationConstraints) { gcpLocationConstraintAssert(l, locationConstraints[l]); } if (locationConstraints[l].type === 'pfs') { - assert(typeof details.pfsDaemonEndpoint === 'object', - 'bad config: pfsDaemonEndpoint is mandatory and must be an object'); + assert( + typeof details.pfsDaemonEndpoint === 'object', + 'bad config: pfsDaemonEndpoint is mandatory and must be an object' + ); } - if (locationConstraints[l].type === 'scality' && 
+ if ( + locationConstraints[l].type === 'scality' && locationConstraints[l].details.connector !== undefined && - locationConstraints[l].details.connector.hdclient !== undefined) { - hdClientLocationConstraintAssert( - locationConstraints[l].details.connector.hdclient); + locationConstraints[l].details.connector.hdclient !== undefined + ) { + hdClientLocationConstraintAssert(locationConstraints[l].details.connector.hdclient); } }); - assert(Object.keys(locationConstraints) - .includes('us-east-1'), 'bad locationConfig: must ' + - 'include us-east-1 as a locationConstraint'); + assert( + Object.keys(locationConstraints).includes('us-east-1'), + 'bad locationConfig: must ' + 'include us-east-1 as a locationConstraint' + ); } function parseUtapiReindex(config) { - const { - enabled, - schedule, - redis, - bucketd, - onlyCountLatestWhenObjectLocked, - } = config; - assert(typeof enabled === 'boolean', - 'bad config: utapi.reindex.enabled must be a boolean'); + const { enabled, schedule, redis, bucketd, onlyCountLatestWhenObjectLocked } = config; + assert(typeof enabled === 'boolean', 'bad config: utapi.reindex.enabled must be a boolean'); const parsedRedis = parseRedisConfig(redis); - assert(Array.isArray(parsedRedis.sentinels), - 'bad config: utapi reindex redis config requires a list of sentinels'); - - assert(typeof bucketd === 'object', - 'bad config: utapi.reindex.bucketd must be an object'); - assert(typeof bucketd.port === 'number', - 'bad config: utapi.reindex.bucketd.port must be a number'); - assert(typeof schedule === 'string', - 'bad config: utapi.reindex.schedule must be a string'); + assert(Array.isArray(parsedRedis.sentinels), 'bad config: utapi reindex redis config requires a list of sentinels'); + + assert(typeof bucketd === 'object', 'bad config: utapi.reindex.bucketd must be an object'); + assert(typeof bucketd.port === 'number', 'bad config: utapi.reindex.bucketd.port must be a number'); + assert(typeof schedule === 'string', 'bad config: utapi.reindex.schedule must be a string'); if (onlyCountLatestWhenObjectLocked !== undefined) { - assert(typeof onlyCountLatestWhenObjectLocked === 'boolean', - 'bad config: utapi.reindex.onlyCountLatestWhenObjectLocked must be a boolean'); + assert( + typeof onlyCountLatestWhenObjectLocked === 'boolean', + 'bad config: utapi.reindex.onlyCountLatestWhenObjectLocked must be a boolean' + ); } try { cronParser.parseExpression(schedule); } catch (e) { - assert(false, - 'bad config: utapi.reindex.schedule must be a valid ' + - `cron schedule. ${e.message}.`); + assert(false, 'bad config: utapi.reindex.schedule must be a valid ' + `cron schedule. ${e.message}.`); } return { enabled, @@ -482,52 +473,52 @@ function parseUtapiReindex(config) { function requestsConfigAssert(requestsConfig) { if (requestsConfig.viaProxy !== undefined) { - assert(typeof requestsConfig.viaProxy === 'boolean', - 'config: invalid requests configuration. viaProxy must be a ' + - 'boolean'); + assert( + typeof requestsConfig.viaProxy === 'boolean', + 'config: invalid requests configuration. viaProxy must be a ' + 'boolean' + ); if (requestsConfig.viaProxy) { - assert(Array.isArray(requestsConfig.trustedProxyCIDRs) && - requestsConfig.trustedProxyCIDRs.length > 0 && - requestsConfig.trustedProxyCIDRs - .every(ip => typeof ip === 'string'), - 'config: invalid requests configuration. 
' + - 'trustedProxyCIDRs must be set if viaProxy is set to true ' + - 'and must be an array'); - - assert(typeof requestsConfig.extractClientIPFromHeader === 'string' - && requestsConfig.extractClientIPFromHeader.length > 0, - 'config: invalid requests configuration. ' + - 'extractClientIPFromHeader must be set if viaProxy is ' + - 'set to true and must be a string'); - - assert(typeof requestsConfig.extractProtocolFromHeader === 'string' - && requestsConfig.extractProtocolFromHeader.length > 0, - 'config: invalid requests configuration. ' + - 'extractProtocolFromHeader must be set if viaProxy is ' + - 'set to true and must be a string'); + assert( + Array.isArray(requestsConfig.trustedProxyCIDRs) && + requestsConfig.trustedProxyCIDRs.length > 0 && + requestsConfig.trustedProxyCIDRs.every(ip => typeof ip === 'string'), + 'config: invalid requests configuration. ' + + 'trustedProxyCIDRs must be set if viaProxy is set to true ' + + 'and must be an array' + ); + + assert( + typeof requestsConfig.extractClientIPFromHeader === 'string' && + requestsConfig.extractClientIPFromHeader.length > 0, + 'config: invalid requests configuration. ' + + 'extractClientIPFromHeader must be set if viaProxy is ' + + 'set to true and must be a string' + ); + + assert( + typeof requestsConfig.extractProtocolFromHeader === 'string' && + requestsConfig.extractProtocolFromHeader.length > 0, + 'config: invalid requests configuration. ' + + 'extractProtocolFromHeader must be set if viaProxy is ' + + 'set to true and must be a string' + ); } } } function bucketNotifAssert(bucketNotifConfig) { - assert(Array.isArray(bucketNotifConfig), - 'bad config: bucket notification configuration must be an array'); + assert(Array.isArray(bucketNotifConfig), 'bad config: bucket notification configuration must be an array'); bucketNotifConfig.forEach(c => { const { resource, type, host, port, auth } = c; - assert(typeof resource === 'string', - 'bad config: bucket notification configuration resource must be a string'); - assert(typeof type === 'string', - 'bad config: bucket notification configuration type must be a string'); - assert(typeof host === 'string' && host !== '', - 'bad config: hostname must be a non-empty string'); + assert(typeof resource === 'string', 'bad config: bucket notification configuration resource must be a string'); + assert(typeof type === 'string', 'bad config: bucket notification configuration type must be a string'); + assert(typeof host === 'string' && host !== '', 'bad config: hostname must be a non-empty string'); if (port) { - assert(Number.isInteger(port, 10) && port > 0, - 'bad config: port must be a positive integer'); + assert(Number.isInteger(port, 10) && port > 0, 'bad config: port must be a positive integer'); } if (auth) { - assert(typeof auth === 'object', - 'bad config: bucket notification auth must be an object'); + assert(typeof auth === 'object', 'bad config: bucket notification auth must be an object'); } }); return bucketNotifConfig; @@ -553,16 +544,13 @@ class Config extends EventEmitter { * the S3_LOCATION_FILE environment var. 
*/ this._basePath = path.join(__dirname, '..'); - this.configPath = findConfigFile(process.env.S3_CONFIG_FILE || - 'config.json'); + this.configPath = findConfigFile(process.env.S3_CONFIG_FILE || 'config.json'); let locationConfigFileName = 'locationConfig.json'; if (process.env.CI === 'true' && !process.env.S3_END_TO_END) { - locationConfigFileName = - 'tests/locationConfig/locationConfigTests.json'; + locationConfigFileName = 'tests/locationConfig/locationConfigTests.json'; } - this.locationConfigPath = findConfigFile(process.env.S3_LOCATION_FILE || - locationConfigFileName); + this.locationConfigPath = findConfigFile(process.env.S3_LOCATION_FILE || locationConfigFileName); if (process.env.S3_REPLICATION_FILE !== undefined) { this.replicationConfigPath = process.env.S3_REPLICATION_FILE; @@ -584,13 +572,17 @@ class Config extends EventEmitter { const { providerName, region, endpoint, ak, sk, tls, noAwsArn } = config.kmsAWS; assert(providerName, 'Configuration Error: providerName must be defined in kmsAWS'); - assert(isValidProvider(providerName), - 'Configuration Error: kmsAWS.providerNamer must be lowercase alphanumeric only'); + assert( + isValidProvider(providerName), + 'Configuration Error: kmsAWS.providerNamer must be lowercase alphanumeric only' + ); assert(endpoint, 'Configuration Error: endpoint must be defined in kmsAWS'); assert(ak, 'Configuration Error: ak must be defined in kmsAWS'); assert(sk, 'Configuration Error: sk must be defined in kmsAWS'); - assert(['undefined', 'boolean'].some(type => type === typeof noAwsArn), - 'Configuration Error:: kmsAWS.noAwsArn must be a boolean or not set'); + assert( + ['undefined', 'boolean'].some(type => type === typeof noAwsArn), + 'Configuration Error:: kmsAWS.noAwsArn must be a boolean or not set' + ); kmsAWS = { providerName, @@ -616,13 +608,11 @@ class Config extends EventEmitter { // min & max TLS: One of 'TLSv1.3', 'TLSv1.2', 'TLSv1.1', or 'TLSv1' // (see https://nodejs.org/api/tls.html#tlscreatesecurecontextoptions) if (tls.minVersion !== undefined) { - assert(typeof tls.minVersion === 'string', - 'bad config: KMS AWS TLS minVersion must be a string'); + assert(typeof tls.minVersion === 'string', 'bad config: KMS AWS TLS minVersion must be a string'); kmsAWS.tls.minVersion = tls.minVersion; } if (tls.maxVersion !== undefined) { - assert(typeof tls.maxVersion === 'string', - 'bad config: KMS AWS TLS maxVersion must be a string'); + assert(typeof tls.maxVersion === 'string', 'bad config: KMS AWS TLS maxVersion must be a string'); kmsAWS.tls.maxVersion = tls.maxVersion; } if (tls.ca !== undefined) { @@ -657,11 +647,8 @@ class Config extends EventEmitter { // for customization per host host: process.env.S3KMIP_HOSTS || process.env.S3KMIP_HOST, key: this._loadTlsFile(process.env.S3KMIP_KEY || undefined), - cert: this._loadTlsFile(process.env.S3KMIP_CERT || - undefined), - ca: (process.env.S3KMIP_CA - ? process.env.S3KMIP_CA.split(',') - : []).map(ca => this._loadTlsFile(ca)), + cert: this._loadTlsFile(process.env.S3KMIP_CERT || undefined), + ca: (process.env.S3KMIP_CA ? 
process.env.S3KMIP_CA.split(',') : []).map(ca => this._loadTlsFile(ca)), }, }; if (transportKmip.pipelineDepth) { @@ -671,17 +658,14 @@ class Config extends EventEmitter { if (transportKmip.tls) { const { host, port, key, cert, ca } = transportKmip.tls; if (!!key !== !!cert) { - throw new Error('bad config: KMIP TLS certificate ' + - 'and key must come along'); + throw new Error('bad config: KMIP TLS certificate ' + 'and key must come along'); } if (port) { - assert(typeof port === 'number', - 'bad config: KMIP TLS Port must be a number'); + assert(typeof port === 'number', 'bad config: KMIP TLS Port must be a number'); transport.tls.port = port; } if (host) { - assert(typeof host === 'string', - 'bad config: KMIP TLS Host must be a string'); + assert(typeof host === 'string', 'bad config: KMIP TLS Host must be a string'); transport.tls.host = host; } if (key) { @@ -709,52 +693,50 @@ class Config extends EventEmitter { * time for `now' instead of client specified activation date * which also targets the present instant. */ - compoundCreateActivate: - (process.env.S3KMIP_COMPOUND_CREATE === 'true') || false, + compoundCreateActivate: process.env.S3KMIP_COMPOUND_CREATE === 'true' || false, /** Set the bucket name attribute name here if the KMIP * server supports storing custom attributes along * with the keys. */ - bucketNameAttributeName: - process.env.S3KMIP_BUCKET_ATTRIBUTE_NAME || '', + bucketNameAttributeName: process.env.S3KMIP_BUCKET_ATTRIBUTE_NAME || '', }, transport: this._parseKmipTransport({}), retries: 0, }; if (config.kmip) { assert(config.kmip.providerName, 'config.kmip.providerName must be defined'); - assert(isValidProvider(config.kmip.providerName), - 'config.kmip.providerName must be lowercase alphanumeric only'); + assert( + isValidProvider(config.kmip.providerName), + 'config.kmip.providerName must be lowercase alphanumeric only' + ); this.kmip.providerName = config.kmip.providerName; if (config.kmip.client) { if (config.kmip.client.compoundCreateActivate) { - assert(typeof config.kmip.client.compoundCreateActivate === - 'boolean'); - this.kmip.client.compoundCreateActivate = - config.kmip.client.compoundCreateActivate; + assert(typeof config.kmip.client.compoundCreateActivate === 'boolean'); + this.kmip.client.compoundCreateActivate = config.kmip.client.compoundCreateActivate; } if (config.kmip.client.bucketNameAttributeName) { - assert(typeof config.kmip.client.bucketNameAttributeName === - 'string'); - this.kmip.client.bucketNameAttributeName = - config.kmip.client.bucketNameAttributeName; + assert(typeof config.kmip.client.bucketNameAttributeName === 'string'); + this.kmip.client.bucketNameAttributeName = config.kmip.client.bucketNameAttributeName; } } if (config.kmip.transport) { if (Array.isArray(config.kmip.transport)) { - this.kmip.transport = config.kmip.transport.map(t => - this._parseKmipTransport(t)); + this.kmip.transport = config.kmip.transport.map(t => this._parseKmipTransport(t)); if (config.kmip.retries) { - assert(typeof config.kmip.retries === 'number', - 'bad config: KMIP Cluster retries must be a number'); - assert(config.kmip.retries <= this.kmip.transport.length - 1, - 'bad config: KMIP Cluster retries must be lower or equal to the number of hosts - 1'); + assert( + typeof config.kmip.retries === 'number', + 'bad config: KMIP Cluster retries must be a number' + ); + assert( + config.kmip.retries <= this.kmip.transport.length - 1, + 'bad config: KMIP Cluster retries must be lower or equal to the number of hosts - 1' + ); } else { this.kmip.retries = 
this.kmip.transport.length - 1; } } else { - this.kmip.transport = - this._parseKmipTransport(config.kmip.transport); + this.kmip.transport = this._parseKmipTransport(config.kmip.transport); } } } @@ -763,8 +745,7 @@ class Config extends EventEmitter { _getLocationConfig() { let locationConfig; try { - const data = fs.readFileSync(this.locationConfigPath, - { encoding: 'utf-8' }); + const data = fs.readFileSync(this.locationConfigPath, { encoding: 'utf-8' }); locationConfig = JSON.parse(data); } catch (err) { throw new Error(`could not parse location config file: @@ -777,12 +758,12 @@ class Config extends EventEmitter { Object.keys(locationConfig).forEach(l => { const details = this.locationConstraints[l].details; if (locationConfig[l].details.connector !== undefined) { - assert(typeof locationConfig[l].details.connector === - 'object', 'bad config: connector must be an object'); - if (locationConfig[l].details.connector.sproxyd !== - undefined) { - details.connector.sproxyd = parseSproxydConfig( - locationConfig[l].details.connector.sproxyd); + assert( + typeof locationConfig[l].details.connector === 'object', + 'bad config: connector must be an object' + ); + if (locationConfig[l].details.connector.sproxyd !== undefined) { + details.connector.sproxyd = parseSproxydConfig(locationConfig[l].details.connector.sproxyd); } } }); @@ -793,18 +774,14 @@ class Config extends EventEmitter { return undefined; } if (typeof tlsFileName !== 'string') { - throw new Error( - 'bad config: TLS file specification must be a string'); + throw new Error('bad config: TLS file specification must be a string'); } - const tlsFilePath = (tlsFileName[0] === '/') - ? tlsFileName - : path.join(this._basePath, tlsFileName); + const tlsFilePath = tlsFileName[0] === '/' ? tlsFileName : path.join(this._basePath, tlsFileName); let tlsFileContent; try { tlsFileContent = fs.readFileSync(tlsFilePath); } catch (err) { - throw new Error(`Could not load tls file '${tlsFileName}':` + - ` ${err.message}`); + throw new Error(`Could not load tls file '${tlsFileName}':` + ` ${err.message}`); } return tlsFileContent; } @@ -831,20 +808,18 @@ class Config extends EventEmitter { _parseEndpoints(listenOn, fieldName) { let result = []; if (listenOn !== undefined) { - assert(Array.isArray(listenOn) - && listenOn.every(e => typeof e === 'string'), - `bad config: ${fieldName} must be a list of strings`); + assert( + Array.isArray(listenOn) && listenOn.every(e => typeof e === 'string'), + `bad config: ${fieldName} must be a list of strings` + ); result = listenOn.map(item => { const lastColon = item.lastIndexOf(':'); // if address is IPv6 format, it includes brackets // that have to be removed from the final IP address - const ipAddress = item.indexOf(']') > 0 ? - item.substr(1, lastColon - 2) : - item.substr(0, lastColon); + const ipAddress = item.indexOf(']') > 0 ? 
item.substr(1, lastColon - 2) : item.substr(0, lastColon); // the port should not include the colon const port = item.substr(lastColon + 1); - assert(Number.parseInt(port, 10), - `bad config: ${fieldName} port must be a positive integer`); + assert(Number.parseInt(port, 10), `bad config: ${fieldName} port must be a positive integer`); return { ip: ipAddress, port }; }); } @@ -854,32 +829,30 @@ class Config extends EventEmitter { _getConfig() { let config; try { - const data = fs.readFileSync(this.configPath, - { encoding: 'utf-8' }); + const data = fs.readFileSync(this.configPath, { encoding: 'utf-8' }); config = JSON.parse(data); } catch (err) { throw new Error(`could not parse config file: ${err.message}`); } if (this.replicationConfigPath) { try { - const repData = fs.readFileSync(this.replicationConfigPath, - { encoding: 'utf-8' }); + const repData = fs.readFileSync(this.replicationConfigPath, { encoding: 'utf-8' }); const replicationEndpoints = JSON.parse(repData); config.replicationEndpoints.push(...replicationEndpoints); } catch (err) { - throw new Error( - `could not parse replication file: ${err.message}`); + throw new Error(`could not parse replication file: ${err.message}`); } } if (config.port !== undefined) { - assert(Number.isInteger(config.port) && config.port > 0, - 'bad config: port must be a positive integer'); + assert(Number.isInteger(config.port) && config.port > 0, 'bad config: port must be a positive integer'); } if (config.internalPort !== undefined) { - assert(Number.isInteger(config.internalPort) && config.internalPort > 0, - 'bad config: internalPort must be a positive integer'); + assert( + Number.isInteger(config.internalPort) && config.internalPort > 0, + 'bad config: internalPort must be a positive integer' + ); } this.port = config.port; @@ -895,16 +868,17 @@ class Config extends EventEmitter { this.metricsPort = 8002; if (config.metricsPort !== undefined) { - assert(Number.isInteger(config.metricsPort) && config.metricsPort > 0, - 'bad config: metricsPort must be a positive integer'); + assert( + Number.isInteger(config.metricsPort) && config.metricsPort > 0, + 'bad config: metricsPort must be a positive integer' + ); this.metricsPort = config.metricsPort; } this.metricsListenOn = this._parseEndpoints(config.metricsListenOn, 'metricsListenOn'); if (config.replicationGroupId) { - assert(typeof config.replicationGroupId === 'string', - 'bad config: replicationGroupId must be a string'); + assert(typeof config.replicationGroupId === 'string', 'bad config: replicationGroupId must be a string'); this.replicationGroupId = config.replicationGroupId; } else { this.replicationGroupId = 'RG001'; @@ -912,12 +886,10 @@ class Config extends EventEmitter { const instanceId = process.env.CLOUDSERVER_INSTANCE_ID || config.instanceId; if (instanceId) { - assert(typeof instanceId === 'string', - 'bad config: instanceId must be a string'); + assert(typeof instanceId === 'string', 'bad config: instanceId must be a string'); // versionID generation code will truncate instanceId to 6 characters // so we enforce this limit here to make the behavior predictable - assert(instanceId.length <= 6, - 'bad config: instanceId must be at most 6 characters long'); + assert(instanceId.length <= 6, 'bad config: instanceId must be at most 6 characters long'); this.instanceId = instanceId; } else if (process.env.HOSTNAME) { // If running in Kubernetes, the pod name is set in the HOSTNAME env var @@ -945,38 +917,59 @@ class Config extends EventEmitter { this.replicationEndpoints = []; if 
(config.replicationEndpoints) { const { replicationEndpoints } = config; - assert(replicationEndpoints instanceof Array, 'bad config: ' + - '`replicationEndpoints` property must be an array'); + assert( + replicationEndpoints instanceof Array, + 'bad config: ' + '`replicationEndpoints` property must be an array' + ); replicationEndpoints.forEach(replicationEndpoint => { - assert.strictEqual(typeof replicationEndpoint, 'object', - 'bad config: `replicationEndpoints` property must be an ' + - 'array of objects'); + assert.strictEqual( + typeof replicationEndpoint, + 'object', + 'bad config: `replicationEndpoints` property must be an ' + 'array of objects' + ); const { site, servers, type } = replicationEndpoint; - assert.notStrictEqual(site, undefined, 'bad config: each ' + - 'object of `replicationEndpoints` array must have a ' + - '`site` property'); - assert.strictEqual(typeof site, 'string', 'bad config: ' + - '`site` property of object in `replicationEndpoints` ' + - 'must be a string'); - assert.notStrictEqual(site, '', 'bad config: `site` property ' + - "of object in `replicationEndpoints` must not be ''"); + assert.notStrictEqual( + site, + undefined, + 'bad config: each ' + 'object of `replicationEndpoints` array must have a ' + '`site` property' + ); + assert.strictEqual( + typeof site, + 'string', + 'bad config: ' + '`site` property of object in `replicationEndpoints` ' + 'must be a string' + ); + assert.notStrictEqual( + site, + '', + 'bad config: `site` property ' + "of object in `replicationEndpoints` must not be ''" + ); if (type !== undefined) { - assert(validExternalBackends[type], 'bad config: `type` ' + - 'property of `replicationEndpoints` object must be ' + - 'a valid external backend (one of: "' + - `${Object.keys(validExternalBackends).join('", "')}")`); + assert( + validExternalBackends[type], + 'bad config: `type` ' + + 'property of `replicationEndpoints` object must be ' + + 'a valid external backend (one of: "' + + `${Object.keys(validExternalBackends).join('", "')}")` + ); } else { - assert.notStrictEqual(servers, undefined, 'bad config: ' + - 'each object of `replicationEndpoints` array that is ' + - 'not an external backend must have `servers` property'); - assert(servers instanceof Array, 'bad config: ' + - '`servers` property of object in ' + - '`replicationEndpoints` must be an array'); + assert.notStrictEqual( + servers, + undefined, + 'bad config: ' + + 'each object of `replicationEndpoints` array that is ' + + 'not an external backend must have `servers` property' + ); + assert( + servers instanceof Array, + 'bad config: ' + '`servers` property of object in ' + '`replicationEndpoints` must be an array' + ); servers.forEach(item => { - assert(typeof item === 'string' && item !== '', + assert( + typeof item === 'string' && item !== '', 'bad config: each item of ' + - '`replicationEndpoints:servers` must be a ' + - 'non-empty string'); + '`replicationEndpoints:servers` must be a ' + + 'non-empty string' + ); }); } }); @@ -985,27 +978,31 @@ class Config extends EventEmitter { if (config.backbeat) { const { backbeat } = config; - assert.strictEqual(typeof backbeat.host, 'string', - 'bad config: backbeat host must be a string'); - assert(Number.isInteger(backbeat.port) && backbeat.port > 0, - 'bad config: backbeat port must be a positive integer'); + assert.strictEqual(typeof backbeat.host, 'string', 'bad config: backbeat host must be a string'); + assert( + Number.isInteger(backbeat.port) && backbeat.port > 0, + 'bad config: backbeat port must be a positive 
integer' + ); this.backbeat = backbeat; } if (config.workflowEngineOperator) { const { workflowEngineOperator } = config; - assert.strictEqual(typeof workflowEngineOperator.host, 'string', - 'bad config: workflowEngineOperator host must be a string'); - assert(Number.isInteger(workflowEngineOperator.port) && - workflowEngineOperator.port > 0, - 'bad config: workflowEngineOperator port not a positive integer'); + assert.strictEqual( + typeof workflowEngineOperator.host, + 'string', + 'bad config: workflowEngineOperator host must be a string' + ); + assert( + Number.isInteger(workflowEngineOperator.port) && workflowEngineOperator.port > 0, + 'bad config: workflowEngineOperator port not a positive integer' + ); this.workflowEngineOperator = workflowEngineOperator; } // legacy if (config.regions !== undefined) { - throw new Error('bad config: regions key is deprecated. ' + - 'Please use restEndpoints and locationConfig'); + throw new Error('bad config: regions key is deprecated. ' + 'Please use restEndpoints and locationConfig'); } if (config.restEndpoints !== undefined) { @@ -1020,54 +1017,53 @@ class Config extends EventEmitter { this.websiteEndpoints = []; if (config.websiteEndpoints !== undefined) { - assert(Array.isArray(config.websiteEndpoints) - && config.websiteEndpoints.every(e => typeof e === 'string'), - 'bad config: websiteEndpoints must be a list of strings'); + assert( + Array.isArray(config.websiteEndpoints) && config.websiteEndpoints.every(e => typeof e === 'string'), + 'bad config: websiteEndpoints must be a list of strings' + ); this.websiteEndpoints = config.websiteEndpoints; } this.clusters = false; if (config.clusters !== undefined) { - assert(Number.isInteger(config.clusters) && config.clusters > 0, - 'bad config: clusters must be a positive integer'); + assert( + Number.isInteger(config.clusters) && config.clusters > 0, + 'bad config: clusters must be a positive integer' + ); this.clusters = config.clusters; } if (config.usEastBehavior !== undefined) { - throw new Error('bad config: usEastBehavior key is deprecated. ' + - 'Please use restEndpoints and locationConfig'); + throw new Error( + 'bad config: usEastBehavior key is deprecated. ' + 'Please use restEndpoints and locationConfig' + ); } // legacy if (config.sproxyd !== undefined) { - throw new Error('bad config: sproxyd key is deprecated. ' + - 'Please use restEndpoints and locationConfig'); + throw new Error('bad config: sproxyd key is deprecated. 
' + 'Please use restEndpoints and locationConfig'); } this.cdmi = {}; if (config.cdmi !== undefined) { if (config.cdmi.host !== undefined) { - assert.strictEqual(typeof config.cdmi.host, 'string', - 'bad config: cdmi host must be a string'); + assert.strictEqual(typeof config.cdmi.host, 'string', 'bad config: cdmi host must be a string'); this.cdmi.host = config.cdmi.host; } if (config.cdmi.port !== undefined) { - assert(Number.isInteger(config.cdmi.port) - && config.cdmi.port > 0, - 'bad config: cdmi port must be a positive integer'); + assert( + Number.isInteger(config.cdmi.port) && config.cdmi.port > 0, + 'bad config: cdmi port must be a positive integer' + ); this.cdmi.port = config.cdmi.port; } if (config.cdmi.path !== undefined) { - assert(typeof config.cdmi.path === 'string', - 'bad config: cdmi.path must be a string'); - assert(config.cdmi.path.length > 0, - 'bad config: cdmi.path is empty'); - assert(config.cdmi.path.charAt(0) === '/', - 'bad config: cdmi.path should start with a "/"'); + assert(typeof config.cdmi.path === 'string', 'bad config: cdmi.path must be a string'); + assert(config.cdmi.path.length > 0, 'bad config: cdmi.path is empty'); + assert(config.cdmi.path.charAt(0) === '/', 'bad config: cdmi.path should start with a "/"'); this.cdmi.path = config.cdmi.path; } if (config.cdmi.readonly !== undefined) { - assert(typeof config.cdmi.readonly === 'boolean', - 'bad config: cdmi.readonly must be a boolean'); + assert(typeof config.cdmi.readonly === 'boolean', 'bad config: cdmi.readonly must be a boolean'); this.cdmi.readonly = config.cdmi.readonly; } else { this.cdmi.readonly = true; @@ -1075,88 +1071,98 @@ class Config extends EventEmitter { } this.bucketd = { bootstrap: [] }; - if (config.bucketd !== undefined - && config.bucketd.bootstrap !== undefined) { - assert(config.bucketd.bootstrap instanceof Array - && config.bucketd.bootstrap.every( - e => typeof e === 'string'), - 'bad config: bucketd.bootstrap must be a list of strings'); + if (config.bucketd !== undefined && config.bucketd.bootstrap !== undefined) { + assert( + config.bucketd.bootstrap instanceof Array && config.bucketd.bootstrap.every(e => typeof e === 'string'), + 'bad config: bucketd.bootstrap must be a list of strings' + ); this.bucketd.bootstrap = config.bucketd.bootstrap; } this.vaultd = {}; if (config.vaultd) { if (config.vaultd.port !== undefined) { - assert(Number.isInteger(config.vaultd.port) - && config.vaultd.port > 0, - 'bad config: vaultd port must be a positive integer'); + assert( + Number.isInteger(config.vaultd.port) && config.vaultd.port > 0, + 'bad config: vaultd port must be a positive integer' + ); this.vaultd.port = config.vaultd.port; } if (config.vaultd.host !== undefined) { - assert.strictEqual(typeof config.vaultd.host, 'string', - 'bad config: vaultd host must be a string'); + assert.strictEqual(typeof config.vaultd.host, 'string', 'bad config: vaultd host must be a string'); this.vaultd.host = config.vaultd.host; } if (process.env.VAULTD_HOST !== undefined) { - assert.strictEqual(typeof process.env.VAULTD_HOST, 'string', - 'bad config: vaultd host must be a string'); + assert.strictEqual( + typeof process.env.VAULTD_HOST, + 'string', + 'bad config: vaultd host must be a string' + ); this.vaultd.host = process.env.VAULTD_HOST; } } if (config.dataClient) { this.dataClient = {}; - assert.strictEqual(typeof config.dataClient.host, 'string', - 'bad config: data client host must be ' + - 'a string'); + assert.strictEqual( + typeof config.dataClient.host, + 'string', + 'bad config: 
data client host must be ' + 'a string' + ); this.dataClient.host = config.dataClient.host; - assert(Number.isInteger(config.dataClient.port) - && config.dataClient.port > 0, - 'bad config: dataClient port must be a positive ' + - 'integer'); + assert( + Number.isInteger(config.dataClient.port) && config.dataClient.port > 0, + 'bad config: dataClient port must be a positive ' + 'integer' + ); this.dataClient.port = config.dataClient.port; } if (config.metadataClient) { this.metadataClient = {}; assert.strictEqual( - typeof config.metadataClient.host, 'string', - 'bad config: metadata client host must be a string'); + typeof config.metadataClient.host, + 'string', + 'bad config: metadata client host must be a string' + ); this.metadataClient.host = config.metadataClient.host; - assert(Number.isInteger(config.metadataClient.port) - && config.metadataClient.port > 0, - 'bad config: metadata client port must be a ' + - 'positive integer'); + assert( + Number.isInteger(config.metadataClient.port) && config.metadataClient.port > 0, + 'bad config: metadata client port must be a ' + 'positive integer' + ); this.metadataClient.port = config.metadataClient.port; } if (config.pfsClient) { this.pfsClient = {}; - assert.strictEqual(typeof config.pfsClient.host, 'string', - 'bad config: pfsClient host must be ' + - 'a string'); + assert.strictEqual( + typeof config.pfsClient.host, + 'string', + 'bad config: pfsClient host must be ' + 'a string' + ); this.pfsClient.host = config.pfsClient.host; - assert(Number.isInteger(config.pfsClient.port) && - config.pfsClient.port > 0, - 'bad config: pfsClient port must be a positive ' + - 'integer'); + assert( + Number.isInteger(config.pfsClient.port) && config.pfsClient.port > 0, + 'bad config: pfsClient port must be a positive ' + 'integer' + ); this.pfsClient.port = config.pfsClient.port; } if (config.dataDaemon) { this.dataDaemon = {}; assert.strictEqual( - typeof config.dataDaemon.bindAddress, 'string', - 'bad config: data daemon bind address must be a string'); + typeof config.dataDaemon.bindAddress, + 'string', + 'bad config: data daemon bind address must be a string' + ); this.dataDaemon.bindAddress = config.dataDaemon.bindAddress; - assert(Number.isInteger(config.dataDaemon.port) - && config.dataDaemon.port > 0, - 'bad config: data daemon port must be a positive ' + - 'integer'); + assert( + Number.isInteger(config.dataDaemon.port) && config.dataDaemon.port > 0, + 'bad config: data daemon port must be a positive ' + 'integer' + ); this.dataDaemon.port = config.dataDaemon.port; /** @@ -1164,9 +1170,7 @@ class Config extends EventEmitter { * backend. If no path provided, uses data at the root of * the S3 project directory. */ - this.dataDaemon.dataPath = - process.env.S3DATAPATH ? - process.env.S3DATAPATH : `${__dirname}/../localData`; + this.dataDaemon.dataPath = process.env.S3DATAPATH ? 
process.env.S3DATAPATH : `${__dirname}/../localData`; this.dataDaemon.noSync = process.env.S3DATA_NOSYNC === 'true'; this.dataDaemon.noCache = process.env.S3DATA_NOCACHE === 'true'; } @@ -1174,35 +1178,37 @@ class Config extends EventEmitter { if (config.pfsDaemon) { this.pfsDaemon = {}; assert.strictEqual( - typeof config.pfsDaemon.bindAddress, 'string', - 'bad config: data daemon bind address must be a string'); + typeof config.pfsDaemon.bindAddress, + 'string', + 'bad config: data daemon bind address must be a string' + ); this.pfsDaemon.bindAddress = config.pfsDaemon.bindAddress; - assert(Number.isInteger(config.pfsDaemon.port) - && config.pfsDaemon.port > 0, - 'bad config: data daemon port must be a positive ' + - 'integer'); + assert( + Number.isInteger(config.pfsDaemon.port) && config.pfsDaemon.port > 0, + 'bad config: data daemon port must be a positive ' + 'integer' + ); this.pfsDaemon.port = config.pfsDaemon.port; - this.pfsDaemon.dataPath = - process.env.PFSD_MOUNT_PATH ? - process.env.PFSD_MOUNT_PATH : `${__dirname}/../localPfs`; + this.pfsDaemon.dataPath = process.env.PFSD_MOUNT_PATH + ? process.env.PFSD_MOUNT_PATH + : `${__dirname}/../localPfs`; this.pfsDaemon.noSync = process.env.PFSD_NOSYNC === 'true'; this.pfsDaemon.noCache = process.env.PFSD_NOCACHE === 'true'; - this.pfsDaemon.isReadOnly = - process.env.PFSD_READONLY === 'true'; + this.pfsDaemon.isReadOnly = process.env.PFSD_READONLY === 'true'; } if (config.metadataDaemon) { this.metadataDaemon = {}; assert.strictEqual( - typeof config.metadataDaemon.bindAddress, 'string', - 'bad config: metadata daemon bind address must be a string'); - this.metadataDaemon.bindAddress = - config.metadataDaemon.bindAddress; - - assert(Number.isInteger(config.metadataDaemon.port) - && config.metadataDaemon.port > 0, - 'bad config: metadata daemon port must be a ' + - 'positive integer'); + typeof config.metadataDaemon.bindAddress, + 'string', + 'bad config: metadata daemon bind address must be a string' + ); + this.metadataDaemon.bindAddress = config.metadataDaemon.bindAddress; + + assert( + Number.isInteger(config.metadataDaemon.port) && config.metadataDaemon.port > 0, + 'bad config: metadata daemon port must be a ' + 'positive integer' + ); this.metadataDaemon.port = config.metadataDaemon.port; /** @@ -1210,12 +1216,11 @@ class Config extends EventEmitter { * backend. If no path provided, uses data and metadata at * the root of the S3 project directory. */ - this.metadataDaemon.metadataPath = - process.env.S3METADATAPATH ? - process.env.S3METADATAPATH : `${__dirname}/../localMetadata`; + this.metadataDaemon.metadataPath = process.env.S3METADATAPATH + ? process.env.S3METADATAPATH + : `${__dirname}/../localMetadata`; - this.metadataDaemon.restEnabled = - config.metadataDaemon.restEnabled; + this.metadataDaemon.restEnabled = config.metadataDaemon.restEnabled; this.metadataDaemon.restPort = config.metadataDaemon.restPort; } @@ -1229,48 +1234,51 @@ class Config extends EventEmitter { this.localCache = defaultLocalCache; } if (config.localCache) { - assert(typeof config.localCache === 'object', - 'config: invalid local cache configuration. localCache must ' + - 'be an object'); + assert( + typeof config.localCache === 'object', + 'config: invalid local cache configuration. 
localCache must ' + 'be an object' + ); if (config.localCache.sentinels) { this.localCache = { sentinels: [], name: null }; - assert(typeof config.localCache.name === 'string', - 'bad config: localCache sentinel name must be a string'); + assert( + typeof config.localCache.name === 'string', + 'bad config: localCache sentinel name must be a string' + ); this.localCache.name = config.localCache.name; - assert(Array.isArray(config.localCache.sentinels) || - typeof config.localCache.sentinels === 'string', - 'bad config: localCache sentinels' + - 'must be an array or string'); + assert( + Array.isArray(config.localCache.sentinels) || typeof config.localCache.sentinels === 'string', + 'bad config: localCache sentinels' + 'must be an array or string' + ); if (typeof config.localCache.sentinels === 'string') { config.localCache.sentinels.split(',').forEach(item => { const [host, port] = item.split(':'); - this.localCache.sentinels.push({ host, - port: Number.parseInt(port, 10) }); + this.localCache.sentinels.push({ host, port: Number.parseInt(port, 10) }); }); } else if (Array.isArray(config.localCache.sentinels)) { config.localCache.sentinels.forEach(item => { const { host, port } = item; - assert(typeof host === 'string', - 'bad config: localCache' + - 'sentinel host must be a string'); - assert(typeof port === 'number', - 'bad config: localCache' + - 'sentinel port must be a number'); + assert(typeof host === 'string', 'bad config: localCache' + 'sentinel host must be a string'); + assert(typeof port === 'number', 'bad config: localCache' + 'sentinel port must be a number'); this.localCache.sentinels.push({ host, port }); }); } } else { - assert(typeof config.localCache.host === 'string', - 'config: bad host for localCache. host must be a string'); - assert(typeof config.localCache.port === 'number', - 'config: bad port for localCache. port must be a number'); + assert( + typeof config.localCache.host === 'string', + 'config: bad host for localCache. host must be a string' + ); + assert( + typeof config.localCache.port === 'number', + 'config: bad port for localCache. port must be a number' + ); if (config.localCache.password !== undefined) { - assert(typeof config.localCache.password === 'string', - 'config: vad password for localCache. password must' + - ' be a string'); + assert( + typeof config.localCache.password === 'string', + 'config: vad password for localCache. 
password must' + ' be a string' + ); } this.localCache = { host: config.localCache.host, @@ -1282,11 +1290,10 @@ class Config extends EventEmitter { if (config.mongodb) { this.mongodb = config.mongodb; - if (process.env.MONGODB_AUTH_USERNAME && - process.env.MONGODB_AUTH_PASSWORD) { + if (process.env.MONGODB_AUTH_USERNAME && process.env.MONGODB_AUTH_PASSWORD) { this.mongodb.authCredentials = { - username: process.env.MONGODB_AUTH_USERNAME, - password: process.env.MONGODB_AUTH_PASSWORD, + username: process.env.MONGODB_AUTH_USERNAME, + password: process.env.MONGODB_AUTH_PASSWORD, }; } } else { @@ -1299,23 +1306,23 @@ class Config extends EventEmitter { if (config.scuba) { this.scuba = {}; if (config.scuba.host) { - assert(typeof config.scuba.host === 'string', - 'bad config: scuba host must be a string'); + assert(typeof config.scuba.host === 'string', 'bad config: scuba host must be a string'); this.scuba.host = config.scuba.host; } if (config.scuba.port) { - assert(Number.isInteger(config.scuba.port) - && config.scuba.port > 0, - 'bad config: scuba port must be a positive integer'); + assert( + Number.isInteger(config.scuba.port) && config.scuba.port > 0, + 'bad config: scuba port must be a positive integer' + ); this.scuba.port = config.scuba.port; } } if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) { - assert(typeof process.env.SCUBA_HOST === 'string', - 'bad config: scuba host must be a string'); - assert(Number.isInteger(Number(process.env.SCUBA_PORT)) - && Number(process.env.SCUBA_PORT) > 0, - 'bad config: scuba port must be a positive integer'); + assert(typeof process.env.SCUBA_HOST === 'string', 'bad config: scuba host must be a string'); + assert( + Number.isInteger(Number(process.env.SCUBA_PORT)) && Number(process.env.SCUBA_PORT) > 0, + 'bad config: scuba port must be a positive integer' + ); this.scuba = { host: process.env.SCUBA_HOST, port: Number(process.env.SCUBA_PORT), @@ -1324,12 +1331,10 @@ class Config extends EventEmitter { if (this.scuba) { this.quotaEnabled = true; } - const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) || - config.quota?.maxStatenessMS || - 24 * 60 * 60 * 1000; + const maxStaleness = + Number(process.env.QUOTA_MAX_STALENESS_MS) || config.quota?.maxStatenessMS || 24 * 60 * 60 * 1000; assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer'); - const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' || - config.quota?.enableInflights || false; + const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' || config.quota?.enableInflights || false; this.quota = { maxStaleness, enableInflights, @@ -1337,30 +1342,29 @@ class Config extends EventEmitter { if (config.utapi) { this.utapi = { component: 's3' }; if (config.utapi.host) { - assert(typeof config.utapi.host === 'string', - 'bad config: utapi host must be a string'); + assert(typeof config.utapi.host === 'string', 'bad config: utapi host must be a string'); this.utapi.host = config.utapi.host; } if (config.utapi.port) { - assert(Number.isInteger(config.utapi.port) - && config.utapi.port > 0, - 'bad config: utapi port must be a positive integer'); + assert( + Number.isInteger(config.utapi.port) && config.utapi.port > 0, + 'bad config: utapi port must be a positive integer' + ); this.utapi.port = config.utapi.port; } if (utapiVersion === 1) { if (config.utapi.workers !== undefined) { - assert(Number.isInteger(config.utapi.workers) - && config.utapi.workers > 0, - 'bad config: utapi workers must be a positive integer'); + assert( + 
Number.isInteger(config.utapi.workers) && config.utapi.workers > 0, + 'bad config: utapi workers must be a positive integer' + ); this.utapi.workers = config.utapi.workers; } // Utapi uses the same localCache config defined for S3 to avoid // config duplication. - assert(config.localCache, 'missing required property of utapi ' + - 'configuration: localCache'); + assert(config.localCache, 'missing required property of utapi ' + 'configuration: localCache'); this.utapi.localCache = this.localCache; - assert(config.utapi.redis, 'missing required property of utapi ' + - 'configuration: redis'); + assert(config.utapi.redis, 'missing required property of utapi ' + 'configuration: redis'); this.utapi.redis = parseRedisConfig(config.utapi.redis); if (this.utapi.redis.retry === undefined) { this.utapi.redis.retry = { @@ -1379,29 +1383,36 @@ class Config extends EventEmitter { this.utapi.enabledOperationCounters = []; if (config.utapi.enabledOperationCounters !== undefined) { const { enabledOperationCounters } = config.utapi; - assert(Array.isArray(enabledOperationCounters), - 'bad config: utapi.enabledOperationCounters must be an ' + - 'array'); - assert(enabledOperationCounters.length > 0, - 'bad config: utapi.enabledOperationCounters cannot be ' + - 'empty'); + assert( + Array.isArray(enabledOperationCounters), + 'bad config: utapi.enabledOperationCounters must be an ' + 'array' + ); + assert( + enabledOperationCounters.length > 0, + 'bad config: utapi.enabledOperationCounters cannot be ' + 'empty' + ); this.utapi.enabledOperationCounters = enabledOperationCounters; } this.utapi.disableOperationCounters = false; if (config.utapi.disableOperationCounters !== undefined) { const { disableOperationCounters } = config.utapi; - assert(typeof disableOperationCounters === 'boolean', - 'bad config: utapi.disableOperationCounters must be a ' + - 'boolean'); + assert( + typeof disableOperationCounters === 'boolean', + 'bad config: utapi.disableOperationCounters must be a ' + 'boolean' + ); this.utapi.disableOperationCounters = disableOperationCounters; } - if (config.utapi.disableOperationCounters !== undefined && - config.utapi.enabledOperationCounters !== undefined) { - assert(config.utapi.disableOperationCounters === false, + if ( + config.utapi.disableOperationCounters !== undefined && + config.utapi.enabledOperationCounters !== undefined + ) { + assert( + config.utapi.disableOperationCounters === false, 'bad config: conflicting rules: ' + - 'utapi.disableOperationCounters and ' + - 'utapi.enabledOperationCounters cannot both be ' + - 'specified'); + 'utapi.disableOperationCounters and ' + + 'utapi.enabledOperationCounters cannot both be ' + + 'specified' + ); } if (config.utapi.component) { this.utapi.component = config.utapi.component; @@ -1409,17 +1420,23 @@ class Config extends EventEmitter { // (optional) The value of the replay schedule should be cron-style // scheduling. For example, every five minutes: '*/5 * * * *'. if (config.utapi.replaySchedule) { - assert(typeof config.utapi.replaySchedule === 'string', 'bad' + - 'config: utapi.replaySchedule must be a string'); + assert( + typeof config.utapi.replaySchedule === 'string', + 'bad' + 'config: utapi.replaySchedule must be a string' + ); this.utapi.replaySchedule = config.utapi.replaySchedule; } // (optional) The number of elements processed by each call to the // Redis local cache during a replay. For example, 50. 
if (config.utapi.batchSize) { - assert(typeof config.utapi.batchSize === 'number', 'bad' + - 'config: utapi.batchSize must be a number'); - assert(config.utapi.batchSize > 0, 'bad config:' + - 'utapi.batchSize must be a number greater than 0'); + assert( + typeof config.utapi.batchSize === 'number', + 'bad' + 'config: utapi.batchSize must be a number' + ); + assert( + config.utapi.batchSize > 0, + 'bad config:' + 'utapi.batchSize must be a number greater than 0' + ); this.utapi.batchSize = config.utapi.batchSize; } @@ -1427,16 +1444,20 @@ class Config extends EventEmitter { // Disabled by default this.utapi.expireMetrics = false; if (config.utapi.expireMetrics !== undefined) { - assert(typeof config.utapi.expireMetrics === 'boolean', 'bad' + - 'config: utapi.expireMetrics must be a boolean'); + assert( + typeof config.utapi.expireMetrics === 'boolean', + 'bad' + 'config: utapi.expireMetrics must be a boolean' + ); this.utapi.expireMetrics = config.utapi.expireMetrics; } // (optional) TTL controlling the expiry for bucket level metrics // keys when expireMetrics is enabled this.utapi.expireMetricsTTL = 0; if (config.utapi.expireMetricsTTL !== undefined) { - assert(typeof config.utapi.expireMetricsTTL === 'number', - 'bad config: utapi.expireMetricsTTL must be a number'); + assert( + typeof config.utapi.expireMetricsTTL === 'number', + 'bad config: utapi.expireMetricsTTL must be a number' + ); this.utapi.expireMetricsTTL = config.utapi.expireMetricsTTL; } @@ -1448,40 +1469,42 @@ class Config extends EventEmitter { if (utapiVersion === 2 && config.utapi.filter) { const { filter: filterConfig } = config.utapi; const utapiResourceFilters = {}; - allowedUtapiEventFilterFields.forEach( - field => allowedUtapiEventFilterStates.forEach( - state => { - const resources = (filterConfig[state] && filterConfig[state][field]) || null; - if (resources) { - assert.strictEqual(utapiResourceFilters[field], undefined, - `bad config: utapi.filter.${state}.${field} can't define an allow and a deny list`); - assert(resources.every(r => typeof r === 'string'), - `bad config: utapi.filter.${state}.${field} must be an array of strings`); - utapiResourceFilters[field] = { [state]: new Set(resources) }; - } + allowedUtapiEventFilterFields.forEach(field => + allowedUtapiEventFilterStates.forEach(state => { + const resources = (filterConfig[state] && filterConfig[state][field]) || null; + if (resources) { + assert.strictEqual( + utapiResourceFilters[field], + undefined, + `bad config: utapi.filter.${state}.${field} can't define an allow and a deny list` + ); + assert( + resources.every(r => typeof r === 'string'), + `bad config: utapi.filter.${state}.${field} must be an array of strings` + ); + utapiResourceFilters[field] = { [state]: new Set(resources) }; } - )); + }) + ); this.utapi.filter = utapiResourceFilters; } } - if (Object.keys(this.locationConstraints).some( - loc => this.locationConstraints[loc].sizeLimitGB)) { - assert(this.utapi && this.utapi.metrics && - this.utapi.metrics.includes('location'), + if (Object.keys(this.locationConstraints).some(loc => this.locationConstraints[loc].sizeLimitGB)) { + assert( + this.utapi && this.utapi.metrics && this.utapi.metrics.includes('location'), 'bad config: if storage size limit set on a location ' + - 'constraint, Utapi must also be configured correctly'); + 'constraint, Utapi must also be configured correctly' + ); } this.log = { logLevel: 'debug', dumpLevel: 'error' }; if (config.log !== undefined) { if (config.log.logLevel !== undefined) { - assert(typeof 
config.log.logLevel === 'string', - 'bad config: log.logLevel must be a string'); + assert(typeof config.log.logLevel === 'string', 'bad config: log.logLevel must be a string'); this.log.logLevel = config.log.logLevel; } if (config.log.dumpLevel !== undefined) { - assert(typeof config.log.dumpLevel === 'string', - 'bad config: log.dumpLevel must be a string'); + assert(typeof config.log.dumpLevel === 'string', 'bad config: log.dumpLevel must be a string'); this.log.dumpLevel = config.log.dumpLevel; } } @@ -1489,8 +1512,10 @@ class Config extends EventEmitter { this.kms = {}; if (config.kms) { assert(config.kms.providerName, 'config.kms.providerName must be provided'); - assert(isValidProvider(config.kms.providerName), - 'config.kms.providerName must be lowercase alphanumeric only'); + assert( + isValidProvider(config.kms.providerName), + 'config.kms.providerName must be lowercase alphanumeric only' + ); assert(typeof config.kms.userName === 'string'); assert(typeof config.kms.password === 'string'); this.kms.providerName = config.kms.providerName; @@ -1515,13 +1540,14 @@ class Config extends EventEmitter { const globalEncryptionEnabled = config.globalEncryptionEnabled; this.globalEncryptionEnabled = globalEncryptionEnabled || false; - assert(typeof this.globalEncryptionEnabled === 'boolean', - 'config.globalEncryptionEnabled must be a boolean'); + assert(typeof this.globalEncryptionEnabled === 'boolean', 'config.globalEncryptionEnabled must be a boolean'); const defaultEncryptionKeyPerAccount = config.defaultEncryptionKeyPerAccount; this.defaultEncryptionKeyPerAccount = defaultEncryptionKeyPerAccount || false; - assert(typeof this.defaultEncryptionKeyPerAccount === 'boolean', - 'config.defaultEncryptionKeyPerAccount must be a boolean'); + assert( + typeof this.defaultEncryptionKeyPerAccount === 'boolean', + 'config.defaultEncryptionKeyPerAccount must be a boolean' + ); this.kmsHideScalityArn = Object.hasOwnProperty.call(config, 'kmsHideScalityArn') ? config.kmsHideScalityArn @@ -1530,36 +1556,35 @@ class Config extends EventEmitter { this.healthChecks = defaultHealthChecks; if (config.healthChecks && config.healthChecks.allowFrom) { - assert(config.healthChecks.allowFrom instanceof Array, - 'config: invalid healthcheck configuration. allowFrom must ' + - 'be an array'); + assert( + config.healthChecks.allowFrom instanceof Array, + 'config: invalid healthcheck configuration. allowFrom must ' + 'be an array' + ); config.healthChecks.allowFrom.forEach(item => { - assert(typeof item === 'string', - 'config: invalid healthcheck configuration. allowFrom IP ' + - 'address must be a string'); + assert( + typeof item === 'string', + 'config: invalid healthcheck configuration. 
allowFrom IP ' + 'address must be a string' + ); }); - this.healthChecks.allowFrom = defaultHealthChecks.allowFrom - .concat(config.healthChecks.allowFrom); + this.healthChecks.allowFrom = defaultHealthChecks.allowFrom.concat(config.healthChecks.allowFrom); } if (config.certFilePaths) { - assert(typeof config.certFilePaths === 'object' && - typeof config.certFilePaths.key === 'string' && - typeof config.certFilePaths.cert === 'string' && (( - config.certFilePaths.ca && - typeof config.certFilePaths.ca === 'string') || - !config.certFilePaths.ca) - ); + assert( + typeof config.certFilePaths === 'object' && + typeof config.certFilePaths.key === 'string' && + typeof config.certFilePaths.cert === 'string' && + ((config.certFilePaths.ca && typeof config.certFilePaths.ca === 'string') || + !config.certFilePaths.ca) + ); } - const { key, cert, ca } = config.certFilePaths ? - config.certFilePaths : {}; + const { key, cert, ca } = config.certFilePaths ? config.certFilePaths : {}; let certObj = undefined; if (key && cert) { certObj = assertCertPaths(key, cert, ca, this._basePath); } else if (key || cert) { - throw new Error('bad config: both certFilePaths.key and ' + - 'certFilePaths.cert must be defined'); + throw new Error('bad config: both certFilePaths.key and ' + 'certFilePaths.cert must be defined'); } if (certObj) { if (Object.keys(certObj.certs).length > 0) { @@ -1571,30 +1596,29 @@ class Config extends EventEmitter { } this.outboundProxy = {}; - const envProxy = process.env.HTTP_PROXY || process.env.HTTPS_PROXY - || process.env.http_proxy || process.env.https_proxy; + const envProxy = + process.env.HTTP_PROXY || process.env.HTTPS_PROXY || process.env.http_proxy || process.env.https_proxy; const p = config.outboundProxy; const proxyUrl = envProxy || (p ? p.url : ''); if (proxyUrl) { - assert(typeof proxyUrl === 'string', - 'bad proxy config: url must be a string'); + assert(typeof proxyUrl === 'string', 'bad proxy config: url must be a string'); const { protocol, hostname, port, auth } = url.parse(proxyUrl); - assert(protocol === 'http:' || protocol === 'https:', - 'bad proxy config: protocol must be http or https'); - assert(typeof hostname === 'string' && hostname !== '', - 'bad proxy config: hostname must be a non-empty string'); + assert(protocol === 'http:' || protocol === 'https:', 'bad proxy config: protocol must be http or https'); + assert( + typeof hostname === 'string' && hostname !== '', + 'bad proxy config: hostname must be a non-empty string' + ); if (port) { const portInt = Number.parseInt(port, 10); - assert(!Number.isNaN(portInt) && portInt > 0, - 'bad proxy config: port must be a number greater than 0'); + assert(!Number.isNaN(portInt) && portInt > 0, 'bad proxy config: port must be a number greater than 0'); } if (auth) { - assert(typeof auth === 'string', - 'bad proxy config: auth must be string'); + assert(typeof auth === 'string', 'bad proxy config: auth must be string'); const authArray = auth.split(':'); - assert(authArray.length === 2 && authArray[0].length > 0 - && authArray[1].length > 0, 'bad proxy config: ' + - 'auth must be of format username:password'); + assert( + authArray.length === 2 && authArray[0].length > 0 && authArray[1].length > 0, + 'bad proxy config: ' + 'auth must be of format username:password' + ); } this.outboundProxy.url = proxyUrl; this.outboundProxy.certs = {}; @@ -1603,23 +1627,18 @@ class Config extends EventEmitter { const cert = p ? p.cert : ''; const caBundle = envCert || (p ? 
p.caBundle : ''); if (p) { - assert(typeof p === 'object', - 'bad config: "proxy" should be an object'); + assert(typeof p === 'object', 'bad config: "proxy" should be an object'); } if (key) { - assert(typeof key === 'string', - 'bad config: proxy.key should be a string'); + assert(typeof key === 'string', 'bad config: proxy.key should be a string'); } if (cert) { - assert(typeof cert === 'string', - 'bad config: proxy.cert should be a string'); + assert(typeof cert === 'string', 'bad config: proxy.cert should be a string'); } if (caBundle) { - assert(typeof caBundle === 'string', - 'bad config: proxy.caBundle should be a string'); + assert(typeof caBundle === 'string', 'bad config: proxy.caBundle should be a string'); } - const certObj = - assertCertPaths(key, cert, caBundle, this._basePath); + const certObj = assertCertPaths(key, cert, caBundle, this._basePath); this.outboundProxy.certs = certObj.certs; } @@ -1628,16 +1647,18 @@ class Config extends EventEmitter { this.managementAgent.host = 'localhost'; if (config.managementAgent !== undefined) { if (config.managementAgent.port !== undefined) { - assert(Number.isInteger(config.managementAgent.port) - && config.managementAgent.port > 0, - 'bad config: managementAgent port must be a positive ' + - 'integer'); + assert( + Number.isInteger(config.managementAgent.port) && config.managementAgent.port > 0, + 'bad config: managementAgent port must be a positive ' + 'integer' + ); this.managementAgent.port = config.managementAgent.port; } if (config.managementAgent.host !== undefined) { - assert.strictEqual(typeof config.managementAgent.host, 'string', - 'bad config: management agent host must ' + - 'be a string'); + assert.strictEqual( + typeof config.managementAgent.host, + 'string', + 'bad config: management agent host must ' + 'be a string' + ); this.managementAgent.host = config.managementAgent.host; } } @@ -1645,10 +1666,7 @@ class Config extends EventEmitter { // Ephemeral token to protect the reporting endpoint: // try inherited from parent first, then hardcoded in conf file, // then create a fresh one as last resort. 
- this.reportToken = - process.env.REPORT_TOKEN || - config.reportToken || - uuidv4(); + this.reportToken = process.env.REPORT_TOKEN || config.reportToken || uuidv4(); // External backends // Currently supports configuring httpAgent(s) for keepAlive @@ -1657,29 +1675,28 @@ class Config extends EventEmitter { const extBackendsConfig = Object.keys(config.externalBackends); extBackendsConfig.forEach(b => { // assert that it's a valid backend - assert(validExternalBackends[b] !== undefined, + assert( + validExternalBackends[b] !== undefined, `bad config: ${b} is not one of valid external backends: ` + - `${Object.keys(validExternalBackends).join(', ')}`); + `${Object.keys(validExternalBackends).join(', ')}` + ); const { httpAgent } = config.externalBackends[b]; - assert(typeof httpAgent === 'object', - `bad config: ${b} must have httpAgent object defined`); - const { keepAlive, keepAliveMsecs, maxFreeSockets, maxSockets } - = httpAgent; - assert(typeof keepAlive === 'boolean', - `bad config: ${b}.httpAgent.keepAlive must be a boolean`); - assert(typeof keepAliveMsecs === 'number' && - httpAgent.keepAliveMsecs > 0, - `bad config: ${b}.httpAgent.keepAliveMsecs must be` + - ' a number > 0'); - assert(typeof maxFreeSockets === 'number' && - httpAgent.maxFreeSockets >= 0, - `bad config: ${b}.httpAgent.maxFreeSockets must be ` + - 'a number >= 0'); - assert((typeof maxSockets === 'number' && maxSockets >= 0) || - maxSockets === null, - `bad config: ${b}.httpAgent.maxFreeSockets must be ` + - 'null or a number >= 0'); + assert(typeof httpAgent === 'object', `bad config: ${b} must have httpAgent object defined`); + const { keepAlive, keepAliveMsecs, maxFreeSockets, maxSockets } = httpAgent; + assert(typeof keepAlive === 'boolean', `bad config: ${b}.httpAgent.keepAlive must be a boolean`); + assert( + typeof keepAliveMsecs === 'number' && httpAgent.keepAliveMsecs > 0, + `bad config: ${b}.httpAgent.keepAliveMsecs must be` + ' a number > 0' + ); + assert( + typeof maxFreeSockets === 'number' && httpAgent.maxFreeSockets >= 0, + `bad config: ${b}.httpAgent.maxFreeSockets must be ` + 'a number >= 0' + ); + assert( + (typeof maxSockets === 'number' && maxSockets >= 0) || maxSockets === null, + `bad config: ${b}.httpAgent.maxFreeSockets must be ` + 'null or a number >= 0' + ); Object.assign(this.externalBackends[b].httpAgent, httpAgent); }); } @@ -1723,9 +1740,11 @@ class Config extends EventEmitter { // maxScannedLifecycleListingEntries > 2 is required as a minimum because we must // scan at least three entries to determine version eligibility. // Two entries representing the master key and the following one representing the non-current version. - assert(Number.isInteger(config.maxScannedLifecycleListingEntries) && - config.maxScannedLifecycleListingEntries > 2, - 'bad config: maxScannedLifecycleListingEntries must be greater than 2'); + assert( + Number.isInteger(config.maxScannedLifecycleListingEntries) && + config.maxScannedLifecycleListingEntries > 2, + 'bad config: maxScannedLifecycleListingEntries must be greater than 2' + ); this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries; } @@ -1745,9 +1764,12 @@ class Config extends EventEmitter { // decreases the weight attributed to a day in order to expedite the lifecycle of objects. 
const timeProgressionFactor = Number.parseInt(process.env.TIME_PROGRESSION_FACTOR, 10) || 1; - const isIncompatible = (expireOneDayEarlier || transitionOneDayEarlier) && (timeProgressionFactor > 1); - assert(!isIncompatible, 'The environment variables "EXPIRE_ONE_DAY_EARLIER" or ' + - '"TRANSITION_ONE_DAY_EARLIER" are not compatible with the "TIME_PROGRESSION_FACTOR" variable.'); + const isIncompatible = (expireOneDayEarlier || transitionOneDayEarlier) && timeProgressionFactor > 1; + assert( + !isIncompatible, + 'The environment variables "EXPIRE_ONE_DAY_EARLIER" or ' + + '"TRANSITION_ONE_DAY_EARLIER" are not compatible with the "TIME_PROGRESSION_FACTOR" variable.' + ); // The scaledMsPerDay value is initially set to the number of milliseconds per day // (24 * 60 * 60 * 1000) as the default value. @@ -1783,9 +1805,9 @@ class Config extends EventEmitter { let quota = 'none'; if (process.env.S3BACKEND) { const validBackends = ['mem', 'file', 'scality', 'cdmi']; - assert(validBackends.indexOf(process.env.S3BACKEND) > -1, - 'bad environment variable: S3BACKEND environment variable ' + - 'should be one of mem/file/scality/cdmi' + assert( + validBackends.indexOf(process.env.S3BACKEND) > -1, + 'bad environment variable: S3BACKEND environment variable ' + 'should be one of mem/file/scality/cdmi' ); auth = process.env.S3BACKEND; data = process.env.S3BACKEND; @@ -1799,11 +1821,11 @@ class Config extends EventEmitter { // Auth only checks for 'mem' since mem === file auth = 'mem'; let authData; - if (process.env.SCALITY_ACCESS_KEY_ID && - process.env.SCALITY_SECRET_ACCESS_KEY) { + if (process.env.SCALITY_ACCESS_KEY_ID && process.env.SCALITY_SECRET_ACCESS_KEY) { authData = buildAuthDataAccount( - process.env.SCALITY_ACCESS_KEY_ID, - process.env.SCALITY_SECRET_ACCESS_KEY); + process.env.SCALITY_ACCESS_KEY_ID, + process.env.SCALITY_SECRET_ACCESS_KEY + ); } else { authData = this._getAuthData(); } @@ -1811,7 +1833,7 @@ class Config extends EventEmitter { throw new Error('bad config: invalid auth config file.'); } this.authData = authData; - } else if (auth === 'multiple') { + } else if (auth === 'multiple') { const authData = this._getAuthData(); if (validateAuthConfig(authData)) { throw new Error('bad config: invalid auth config file.'); @@ -1821,17 +1843,17 @@ class Config extends EventEmitter { if (process.env.S3DATA) { const validData = ['mem', 'file', 'scality', 'multiple']; - assert(validData.indexOf(process.env.S3DATA) > -1, - 'bad environment variable: S3DATA environment variable ' + - 'should be one of mem/file/scality/multiple' + assert( + validData.indexOf(process.env.S3DATA) > -1, + 'bad environment variable: S3DATA environment variable ' + 'should be one of mem/file/scality/multiple' ); data = process.env.S3DATA; } if (data === 'scality' || data === 'multiple') { data = 'multiple'; } - assert(this.locationConstraints !== undefined && - this.restEndpoints !== undefined, + assert( + this.locationConstraints !== undefined && this.restEndpoints !== undefined, 'bad config: locationConstraints and restEndpoints must be set' ); @@ -1855,13 +1877,12 @@ class Config extends EventEmitter { // Mongodb backend does not support null keys, so we must enforce null version compatibility // mode. With other backends (esp. metadata), this is used during migration from v0 to v1 // bucket format. 
- this.nullVersionCompatMode = (metadata === 'mongodb') || - (process.env.ENABLE_NULL_VERSION_COMPAT_MODE === 'true'); + this.nullVersionCompatMode = metadata === 'mongodb' || process.env.ENABLE_NULL_VERSION_COMPAT_MODE === 'true'; // Multi-object delete optimizations is only supported for MongoDB at the moment. It relies // on `getObjectsMD()` to return the objects in a single call, which is not supported by // other backends. - this.multiObjectDeleteEnableOptimizations &&= (metadata === 'mongodb'); + this.multiObjectDeleteEnableOptimizations &&= metadata === 'mongodb'; } _sseMigration(config) { @@ -1874,14 +1895,12 @@ class Config extends EventEmitter { this.sseMigration = {}; const { previousKeyType, previousKeyProtocol, previousKeyProvider } = config.sseMigration; if (!previousKeyType) { - assert.fail( - 'NotImplemented: No dynamic KMS key migration. Set sseMigration.previousKeyType'); + assert.fail('NotImplemented: No dynamic KMS key migration. Set sseMigration.previousKeyType'); } // If previousKeyType is provided it's used as static value to migrate the format of the key // without additional dynamic evaluation if the key provider is unknown. - assert(isValidType(previousKeyType), - 'ssenMigration.previousKeyType must be "internal" or "external"'); + assert(isValidType(previousKeyType), 'ssenMigration.previousKeyType must be "internal" or "external"'); this.sseMigration.previousKeyType = previousKeyType; let expectedProtocol; @@ -1892,25 +1911,28 @@ class Config extends EventEmitter { expectedProtocol = [KmsProtocol.scality, KmsProtocol.mem, KmsProtocol.file]; } else if (previousKeyType === KmsType.external) { // No defaults allowed for external provider - assert(previousKeyProtocol, - 'sseMigration.previousKeyProtocol must be defined for external provider'); + assert(previousKeyProtocol, 'sseMigration.previousKeyProtocol must be defined for external provider'); this.sseMigration.previousKeyProtocol = previousKeyProtocol; - assert(previousKeyProvider, - 'sseMigration.previousKeyProvider must be defined for external provider'); + assert(previousKeyProvider, 'sseMigration.previousKeyProvider must be defined for external provider'); this.sseMigration.previousKeyProvider = previousKeyProvider; expectedProtocol = [KmsProtocol.kmip, KmsProtocol.aws_kms]; } - assert(isValidProtocol(previousKeyType, this.sseMigration.previousKeyProtocol), - `sseMigration.previousKeyProtocol must be one of ${expectedProtocol}`); - assert(isValidProvider(previousKeyProvider), - 'sseMigration.previousKeyProvider must be lowercase alphanumeric only'); + assert( + isValidProtocol(previousKeyType, this.sseMigration.previousKeyProtocol), + `sseMigration.previousKeyProtocol must be one of ${expectedProtocol}` + ); + assert( + isValidProvider(previousKeyProvider), + 'sseMigration.previousKeyProvider must be lowercase alphanumeric only' + ); if (this.sseMigration.previousKeyType === KmsType.external) { if ([KmsProtocol.file, KmsProtocol.mem].includes(this.backends.kms)) { assert.fail( `sseMigration.previousKeyType "external" can't migrate to "internal" KMS provider ${ - this.backends.kms}` + this.backends.kms + }` ); } // We'd have to compare protocol & providerName @@ -1929,10 +1951,7 @@ class Config extends EventEmitter { } getGcpBucketNames(locationConstraint) { - const { - bucketName, - mpuBucketName, - } = this.locationConstraints[locationConstraint].details; + const { bucketName, mpuBucketName } = this.locationConstraints[locationConstraint].details; return { bucketName, mpuBucketName }; } @@ -1958,9 +1977,10 
@@ class Config extends EventEmitter { } setReplicationEndpoints(locationConstraints) { - this.replicationEndpoints = - Object.keys(locationConstraints) - .map(key => ({ site: key, type: locationConstraints[key].type })); + this.replicationEndpoints = Object.keys(locationConstraints).map(key => ({ + site: key, + type: locationConstraints[key].type, + })); } getAzureEndpoint(locationConstraint) { @@ -2005,31 +2025,27 @@ class Config extends EventEmitter { } getAzureStorageCredentials(locationConstraint) { - return azureGetLocationCredentials( - locationConstraint, - this.locationConstraints[locationConstraint].details - ); + return azureGetLocationCredentials(locationConstraint, this.locationConstraints[locationConstraint].details); } getPfsDaemonEndpoint(locationConstraint) { - return process.env[`${locationConstraint}_PFSD_ENDPOINT`] || - this.locationConstraints[locationConstraint].details.pfsDaemonEndpoint; + return ( + process.env[`${locationConstraint}_PFSD_ENDPOINT`] || + this.locationConstraints[locationConstraint].details.pfsDaemonEndpoint + ); } isSameAzureAccount(locationConstraintSrc, locationConstraintDest) { if (!locationConstraintDest) { return true; } - const azureSrcAccount = - this.getAzureStorageAccountName(locationConstraintSrc); - const azureDestAccount = - this.getAzureStorageAccountName(locationConstraintDest); + const azureSrcAccount = this.getAzureStorageAccountName(locationConstraintSrc); + const azureDestAccount = this.getAzureStorageAccountName(locationConstraintDest); return azureSrcAccount === azureDestAccount; } isAWSServerSideEncryption(locationConstraint) { - return this.locationConstraints[locationConstraint].details - .serverSideEncryption === true; + return this.locationConstraints[locationConstraint].details.serverSideEncryption === true; } getPublicInstanceId() { @@ -2037,9 +2053,7 @@ class Config extends EventEmitter { } setPublicInstanceId(instanceId) { - this.publicInstanceId = crypto.createHash('sha256') - .update(instanceId) - .digest('hex'); + this.publicInstanceId = crypto.createHash('sha256').update(instanceId).digest('hex'); } isQuotaEnabled() { diff --git a/lib/api/api.js b/lib/api/api.js index 3d36e3fe6b..173ae71841 100644 --- a/lib/api/api.js +++ b/lib/api/api.js @@ -63,8 +63,7 @@ const objectPutPart = require('./objectPutPart'); const objectPutCopyPart = require('./objectPutCopyPart'); const objectPutRetention = require('./objectPutRetention'); const objectRestore = require('./objectRestore'); -const prepareRequestContexts - = require('./apiUtils/authorization/prepareRequestContexts'); +const prepareRequestContexts = require('./apiUtils/authorization/prepareRequestContexts'); const serviceGet = require('./serviceGet'); const vault = require('../auth/vault'); const website = require('./website'); @@ -89,10 +88,7 @@ const api = { request.finalizerHooks = []; const actionLog = monitoringMap[apiMethod]; - if (!actionLog && - apiMethod !== 'websiteGet' && - apiMethod !== 'websiteHead' && - apiMethod !== 'corsPreflight') { + if (!actionLog && apiMethod !== 'websiteGet' && apiMethod !== 'websiteHead' && apiMethod !== 'corsPreflight') { log.error('callApiMethod(): No actionLog for this api method', { apiMethod, }); @@ -119,14 +115,15 @@ const api = { } // no need to check auth on website or cors preflight requests - if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' || - apiMethod === 'corsPreflight') { + if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' || apiMethod === 'corsPreflight') { request.actionImplicitDenies = 
false; return this[apiMethod](request, log, callback); } - const { sourceBucket, sourceObject, sourceVersionId, parsingError } = - parseCopySource(apiMethod, request.headers['x-amz-copy-source']); + const { sourceBucket, sourceObject, sourceVersionId, parsingError } = parseCopySource( + apiMethod, + request.headers['x-amz-copy-source'] + ); if (parsingError) { log.debug('error parsing copy source', { error: parsingError, @@ -142,8 +139,7 @@ const api = { return process.nextTick(callback, httpHeadersSizeError); } - const requestContexts = prepareRequestContexts(apiMethod, request, - sourceBucket, sourceObject, sourceVersionId); + const requestContexts = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); // Extract all the _apiMethods and store them in an array const apiMethods = requestContexts ? requestContexts.map(context => context._apiMethod) : []; @@ -165,8 +161,7 @@ const api = { isImplicitDeny[authResults[0].action] = authResults[0].isImplicit; // second item checks s3:GetObject(Version)Tagging action if (!authResults[1].isAllowed) { - log.trace('get tagging authorization denial ' + - 'from Vault'); + log.trace('get tagging authorization denial ' + 'from Vault'); returnTagCount = false; } } else { @@ -193,117 +188,142 @@ const api = { return { returnTagCount, isImplicitDeny }; } - return async.waterfall([ - next => auth.server.doAuth( - request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { - if (err) { - // VaultClient returns standard errors, but the route requires - // Arsenal errors - const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError; - log.trace('authentication error', { error: err }); - return next(arsenalError); + return async.waterfall( + [ + next => + auth.server.doAuth( + request, + log, + (err, userInfo, authorizationResults, streamingV4Params, infos) => { + if (err) { + // VaultClient returns standard errors, but the route requires + // Arsenal errors + const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError; + log.trace('authentication error', { error: err }); + return next(arsenalError); + } + return next(null, userInfo, authorizationResults, streamingV4Params, infos); + }, + 's3', + requestContexts + ), + (userInfo, authorizationResults, streamingV4Params, infos, next) => { + const authNames = { accountName: userInfo.getAccountDisplayName() }; + if (userInfo.isRequesterAnIAMUser()) { + authNames.userName = userInfo.getIAMdisplayName(); } - return next(null, userInfo, authorizationResults, streamingV4Params, infos); - }, 's3', requestContexts), - (userInfo, authorizationResults, streamingV4Params, infos, next) => { - const authNames = { accountName: userInfo.getAccountDisplayName() }; - if (userInfo.isRequesterAnIAMUser()) { - authNames.userName = userInfo.getIAMdisplayName(); - } - if (isRequesterASessionUser(userInfo)) { - authNames.sessionName = userInfo.getShortid().split(':')[1]; - } - log.addDefaultFields(authNames); - if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { - return next(null, userInfo, authorizationResults, streamingV4Params, infos); - } - // issue 100 Continue to the client - writeContinue(request, response); - const MAX_POST_LENGTH = request.method === 'POST' ? 
- 1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB - const post = []; - let postLength = 0; - request.on('data', chunk => { - postLength += chunk.length; - // Sanity check on post length - if (postLength <= MAX_POST_LENGTH) { - post.push(chunk); + if (isRequesterASessionUser(userInfo)) { + authNames.sessionName = userInfo.getShortid().split(':')[1]; + } + log.addDefaultFields(authNames); + if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { + return next(null, userInfo, authorizationResults, streamingV4Params, infos); } - }); + // issue 100 Continue to the client + writeContinue(request, response); + const MAX_POST_LENGTH = request.method === 'POST' ? 1024 * 1024 : (1024 * 1024) / 2; // 1 MB or 512 KB + const post = []; + let postLength = 0; + request.on('data', chunk => { + postLength += chunk.length; + // Sanity check on post length + if (postLength <= MAX_POST_LENGTH) { + post.push(chunk); + } + }); - request.on('error', err => { - log.trace('error receiving request', { - error: err, + request.on('error', err => { + log.trace('error receiving request', { + error: err, + }); + return next(errors.InternalError); }); - return next(errors.InternalError); - }); - request.on('end', () => { - if (postLength > MAX_POST_LENGTH) { - log.error('body length is too long for request type', - { postLength }); - return next(errors.InvalidRequest); - } - // Convert array of post buffers into one string - request.post = Buffer.concat(post, postLength).toString(); - return next(null, userInfo, authorizationResults, streamingV4Params, infos); - }); - return undefined; - }, - // Tag condition keys require information from CloudServer for evaluation - (userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth( - authorizationResults, - request, - requestContexts, - apiMethod, - log, - (err, authResultsWithTags) => { - if (err) { - log.trace('tag authentication error', { error: err }); - return next(err); - } - return next(null, userInfo, authResultsWithTags, streamingV4Params, infos); + request.on('end', () => { + if (postLength > MAX_POST_LENGTH) { + log.error('body length is too long for request type', { postLength }); + return next(errors.InvalidRequest); + } + // Convert array of post buffers into one string + request.post = Buffer.concat(post, postLength).toString(); + return next(null, userInfo, authorizationResults, streamingV4Params, infos); + }); + return undefined; }, - ), - ], (err, userInfo, authorizationResults, streamingV4Params, infos) => { - if (err) { - return callback(err); - } - request.accountQuotas = infos?.accountQuota; - if (authorizationResults) { - const checkedResults = checkAuthResults(authorizationResults); - if (checkedResults instanceof Error) { - return callback(checkedResults); + // Tag condition keys require information from CloudServer for evaluation + (userInfo, authorizationResults, streamingV4Params, infos, next) => + tagConditionKeyAuth( + authorizationResults, + request, + requestContexts, + apiMethod, + log, + (err, authResultsWithTags) => { + if (err) { + log.trace('tag authentication error', { error: err }); + return next(err); + } + return next(null, userInfo, authResultsWithTags, streamingV4Params, infos); + } + ), + ], + (err, userInfo, authorizationResults, streamingV4Params, infos) => { + if (err) { + return callback(err); } - returnTagCount = checkedResults.returnTagCount; - request.actionImplicitDenies = checkedResults.isImplicitDeny; - } else { - // create an object of keys apiMethods with all values to false: - // 
for backward compatibility, all apiMethods are allowed by default - // thus it is explicitly allowed, so implicit deny is false - request.actionImplicitDenies = apiMethods.reduce((acc, curr) => { - acc[curr] = false; - return acc; - }, {}); - } - const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5, - (hook, done) => hook(err, done), - () => callback(err, ...results)); + request.accountQuotas = infos?.accountQuota; + if (authorizationResults) { + const checkedResults = checkAuthResults(authorizationResults); + if (checkedResults instanceof Error) { + return callback(checkedResults); + } + returnTagCount = checkedResults.returnTagCount; + request.actionImplicitDenies = checkedResults.isImplicitDeny; + } else { + // create an object of keys apiMethods with all values to false: + // for backward compatibility, all apiMethods are allowed by default + // thus it is explicitly allowed, so implicit deny is false + request.actionImplicitDenies = apiMethods.reduce((acc, curr) => { + acc[curr] = false; + return acc; + }, {}); + } + const methodCallback = (err, ...results) => + async.forEachLimit( + request.finalizerHooks, + 5, + (hook, done) => hook(err, done), + () => callback(err, ...results) + ); - if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { - request._response = response; - return this[apiMethod](userInfo, request, streamingV4Params, - log, methodCallback, authorizationResults); - } - if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') { - return this[apiMethod](userInfo, request, sourceBucket, - sourceObject, sourceVersionId, log, methodCallback); - } - if (apiMethod === 'objectGet') { - return this[apiMethod](userInfo, request, returnTagCount, log, callback); + if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { + request._response = response; + return this[apiMethod]( + userInfo, + request, + streamingV4Params, + log, + methodCallback, + authorizationResults + ); + } + if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') { + return this[apiMethod]( + userInfo, + request, + sourceBucket, + sourceObject, + sourceVersionId, + log, + methodCallback + ); + } + if (apiMethod === 'objectGet') { + return this[apiMethod](userInfo, request, returnTagCount, log, callback); + } + return this[apiMethod](userInfo, request, log, methodCallback); } - return this[apiMethod](userInfo, request, log, methodCallback); - }); + ); }, bucketDelete, bucketDeleteCors, diff --git a/lib/api/apiUtils/authorization/permissionChecks.js b/lib/api/apiUtils/authorization/permissionChecks.js index 955fa75a3d..ea8d3bd7a9 100644 --- a/lib/api/apiUtils/authorization/permissionChecks.js +++ b/lib/api/apiUtils/authorization/permissionChecks.js @@ -16,8 +16,7 @@ const { } = constants; // whitelist buckets to allow public read on objects -const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS - ? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : []; +const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : []; function getServiceAccountProperties(canonicalID) { const canonicalIDArray = canonicalID.split('/'); @@ -51,12 +50,9 @@ function isRequesterNonAccountUser(authInfo) { function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) { // Same logic applies on the Versioned APIs, so let's simplify it. - let requestTypeParsed = requestType.endsWith('Version') ? 
- requestType.slice(0, 'Version'.length * -1) : requestType; - requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ? - 'objectPut' : requestTypeParsed; - const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ? - 'objectPut' : mainApiCall; + let requestTypeParsed = requestType.endsWith('Version') ? requestType.slice(0, 'Version'.length * -1) : requestType; + requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ? 'objectPut' : requestTypeParsed; + const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ? 'objectPut' : mainApiCall; if (bucket.getOwner() === canonicalID) { return true; } @@ -73,64 +69,66 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) { const bucketAcl = bucket.getAcl(); if (requestTypeParsed === 'bucketGet' || requestTypeParsed === 'bucketHead') { - if (bucketAcl.Canned === 'public-read' - || bucketAcl.Canned === 'public-read-write' - || (bucketAcl.Canned === 'authenticated-read' - && canonicalID !== publicId)) { + if ( + bucketAcl.Canned === 'public-read' || + bucketAcl.Canned === 'public-read-write' || + (bucketAcl.Canned === 'authenticated-read' && canonicalID !== publicId) + ) { return true; - } else if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 - || bucketAcl.READ.indexOf(canonicalID) > -1) { + } else if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 || bucketAcl.READ.indexOf(canonicalID) > -1) { return true; - } else if (bucketAcl.READ.indexOf(publicId) > -1 - || (bucketAcl.READ.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) { + } else if ( + bucketAcl.READ.indexOf(publicId) > -1 || + (bucketAcl.READ.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + bucketAcl.FULL_CONTROL.indexOf(publicId) > -1 + ) { return true; } } if (requestTypeParsed === 'bucketGetACL') { - if ((bucketAcl.Canned === 'log-delivery-write' - && canonicalID === logId) - || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 - || bucketAcl.READ_ACP.indexOf(canonicalID) > -1) { + if ( + (bucketAcl.Canned === 'log-delivery-write' && canonicalID === logId) || + bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 || + bucketAcl.READ_ACP.indexOf(canonicalID) > -1 + ) { return true; - } else if (bucketAcl.READ_ACP.indexOf(publicId) > -1 - || (bucketAcl.READ_ACP.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) { + } else if ( + bucketAcl.READ_ACP.indexOf(publicId) > -1 || + (bucketAcl.READ_ACP.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + bucketAcl.FULL_CONTROL.indexOf(publicId) > -1 + ) { return true; } } if (requestTypeParsed === 'bucketPutACL') { - if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 - || bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) { + if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 || bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) { return true; - } else if (bucketAcl.WRITE_ACP.indexOf(publicId) > -1 - || (bucketAcl.WRITE_ACP.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > 
-1 - && canonicalID !== publicId) - || bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) { + } else if ( + bucketAcl.WRITE_ACP.indexOf(publicId) > -1 || + (bucketAcl.WRITE_ACP.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + bucketAcl.FULL_CONTROL.indexOf(publicId) > -1 + ) { return true; } } if (requestTypeParsed === 'objectDelete' || requestTypeParsed === 'objectPut') { - if (bucketAcl.Canned === 'public-read-write' - || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 - || bucketAcl.WRITE.indexOf(canonicalID) > -1) { + if ( + bucketAcl.Canned === 'public-read-write' || + bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 || + bucketAcl.WRITE.indexOf(canonicalID) > -1 + ) { return true; - } else if (bucketAcl.WRITE.indexOf(publicId) > -1 - || (bucketAcl.WRITE.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) { + } else if ( + bucketAcl.WRITE.indexOf(publicId) > -1 || + (bucketAcl.WRITE.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + bucketAcl.FULL_CONTROL.indexOf(publicId) > -1 + ) { return true; } } @@ -140,20 +138,28 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) { // objectPutACL, objectGetACL, objectHead or objectGet, the bucket // authorization check should just return true so can move on to check // rights at the object level. - return (requestTypeParsed === 'objectPutACL' || requestTypeParsed === 'objectGetACL' - || requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead'); + return ( + requestTypeParsed === 'objectPutACL' || + requestTypeParsed === 'objectGetACL' || + requestTypeParsed === 'objectGet' || + requestTypeParsed === 'objectHead' + ); } -function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser, - isUserUnauthenticated, mainApiCall) { +function checkObjectAcls( + bucket, + objectMD, + requestType, + canonicalID, + requesterIsNotUser, + isUserUnauthenticated, + mainApiCall +) { const bucketOwner = bucket.getOwner(); - const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ? - 'objectPut' : requestType; - const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ? - 'objectPut' : mainApiCall; + const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ? 'objectPut' : requestType; + const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ? 
'objectPut' : mainApiCall; // acls don't distinguish between users and accounts, so both should be allowed - if (bucketOwnerActions.includes(requestTypeParsed) - && (bucketOwner === canonicalID)) { + if (bucketOwnerActions.includes(requestTypeParsed) && bucketOwner === canonicalID) { return true; } if (objectMD['owner-id'] === canonicalID) { @@ -162,8 +168,10 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs // Backward compatibility if (parsedMainApiCall === 'objectGet') { - if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id'])) - && requestTypeParsed === 'objectGetTagging') { + if ( + (isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id'])) && + requestTypeParsed === 'objectGetTagging' + ) { return true; } } @@ -173,25 +181,26 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs } if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') { - if (objectMD.acl.Canned === 'public-read' - || objectMD.acl.Canned === 'public-read-write' - || (objectMD.acl.Canned === 'authenticated-read' - && canonicalID !== publicId)) { + if ( + objectMD.acl.Canned === 'public-read' || + objectMD.acl.Canned === 'public-read-write' || + (objectMD.acl.Canned === 'authenticated-read' && canonicalID !== publicId) + ) { return true; - } else if (objectMD.acl.Canned === 'bucket-owner-read' - && bucketOwner === canonicalID) { + } else if (objectMD.acl.Canned === 'bucket-owner-read' && bucketOwner === canonicalID) { return true; - } else if ((objectMD.acl.Canned === 'bucket-owner-full-control' - && bucketOwner === canonicalID) - || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1 - || objectMD.acl.READ.indexOf(canonicalID) > -1) { + } else if ( + (objectMD.acl.Canned === 'bucket-owner-full-control' && bucketOwner === canonicalID) || + objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1 || + objectMD.acl.READ.indexOf(canonicalID) > -1 + ) { return true; - } else if (objectMD.acl.READ.indexOf(publicId) > -1 - || (objectMD.acl.READ.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) { + } else if ( + objectMD.acl.READ.indexOf(publicId) > -1 || + (objectMD.acl.READ.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1 + ) { return true; } } @@ -203,33 +212,35 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs } if (requestTypeParsed === 'objectPutACL') { - if ((objectMD.acl.Canned === 'bucket-owner-full-control' - && bucketOwner === canonicalID) - || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1 - || objectMD.acl.WRITE_ACP.indexOf(canonicalID) > -1) { + if ( + (objectMD.acl.Canned === 'bucket-owner-full-control' && bucketOwner === canonicalID) || + objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1 || + objectMD.acl.WRITE_ACP.indexOf(canonicalID) > -1 + ) { return true; - } else if (objectMD.acl.WRITE_ACP.indexOf(publicId) > -1 - || (objectMD.acl.WRITE_ACP.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) { + } else if ( + objectMD.acl.WRITE_ACP.indexOf(publicId) > -1 
|| + (objectMD.acl.WRITE_ACP.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1 + ) { return true; } } if (requestTypeParsed === 'objectGetACL') { - if ((objectMD.acl.Canned === 'bucket-owner-full-control' - && bucketOwner === canonicalID) - || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1 - || objectMD.acl.READ_ACP.indexOf(canonicalID) > -1) { + if ( + (objectMD.acl.Canned === 'bucket-owner-full-control' && bucketOwner === canonicalID) || + objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1 || + objectMD.acl.READ_ACP.indexOf(canonicalID) > -1 + ) { return true; - } else if (objectMD.acl.READ_ACP.indexOf(publicId) > -1 - || (objectMD.acl.READ_ACP.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 - && canonicalID !== publicId) - || objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) { + } else if ( + objectMD.acl.READ_ACP.indexOf(publicId) > -1 || + (objectMD.acl.READ_ACP.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1 && canonicalID !== publicId) || + objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1 + ) { return true; } } @@ -237,9 +248,10 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs // allow public reads on buckets that are whitelisted for anonymous reads // TODO: remove this after bucket policies are implemented const bucketAcl = bucket.getAcl(); - const allowPublicReads = publicReadBuckets.includes(bucket.getName()) - && bucketAcl.Canned === 'public-read' - && (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead'); + const allowPublicReads = + publicReadBuckets.includes(bucket.getName()) && + bucketAcl.Canned === 'public-read' && + (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead'); if (allowPublicReads) { return true; } @@ -275,7 +287,7 @@ function _getAccountId(arn) { } function _isAccountId(principal) { - return (principal.length === 12 && /^\d+$/.test(principal)); + return principal.length === 12 && /^\d+$/.test(principal); } function _checkPrincipal(requester, principal) { @@ -328,11 +340,30 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l const ip = request ? requestUtils.getClientIp(request, config) : undefined; const isSecure = request ? requestUtils.getHttpProtocolSecurity(request, config) : undefined; - const requestContext = request ? new RequestContext(request.headers, request.query, - request.bucketName, request.objectKey, ip, - isSecure, request.resourceType, 's3', null, null, - null, null, null, null, null, null, null, null, null, - request.objectLockRetentionDays) : undefined; + const requestContext = request + ? 
new RequestContext( + request.headers, + request.query, + request.bucketName, + request.objectKey, + ip, + isSecure, + request.resourceType, + 's3', + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + request.objectLockRetentionDays + ) + : undefined; while (copiedStatement.length > 0) { const s = copiedStatement[0]; @@ -354,15 +385,33 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l return permission; } -function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner, log, - request, aclPermission, results, actionImplicitDenies) { +function processBucketPolicy( + requestType, + bucket, + canonicalID, + arn, + bucketOwner, + log, + request, + aclPermission, + results, + actionImplicitDenies +) { const bucketPolicy = bucket.getBucketPolicy(); let processedResult = results[requestType]; if (!bucketPolicy || request?.bypassUserBucketPolicies) { processedResult = actionImplicitDenies[requestType] === false && aclPermission; } else { - const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, canonicalID, arn, - bucketOwner, log, request, actionImplicitDenies); + const bucketPolicyPermission = checkBucketPolicy( + bucketPolicy, + requestType, + canonicalID, + arn, + bucketOwner, + log, + request, + actionImplicitDenies + ); if (bucketPolicyPermission === 'explicitDeny') { processedResult = false; @@ -375,8 +424,16 @@ function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner, return processedResult; } -function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request, - actionImplicitDeniesInput = {}, isWebsite = false) { +function isBucketAuthorized( + bucket, + requestTypesInput, + canonicalID, + authInfo, + log, + request, + actionImplicitDeniesInput = {}, + isWebsite = false +) { const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput]; const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput; const mainApiCall = requestTypes[0]; @@ -395,7 +452,7 @@ function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, lo arn = authInfo.getArn(); } // if the bucket owner is an account, users should not have default access - if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) { + if ((bucket.getOwner() === canonicalID && requesterIsNotUser) || isServiceAccount(canonicalID)) { results[_requestType] = actionImplicitDenies[_requestType] === false; return results[_requestType]; } @@ -409,13 +466,30 @@ function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, lo _requestType = 'objectGet'; actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false; } - return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log, - request, aclPermission, results, actionImplicitDenies); + return processBucketPolicy( + _requestType, + bucket, + canonicalID, + arn, + bucket.getOwner(), + log, + request, + aclPermission, + results, + actionImplicitDenies + ); }); } -function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, authInfo, actionImplicitDeniesInput = {}, - log, request) { +function evaluateBucketPolicyWithIAM( + bucket, + requestTypesInput, + canonicalID, + authInfo, + actionImplicitDeniesInput = {}, + log, + request +) { const requestTypes = Array.isArray(requestTypesInput) ? 
requestTypesInput : [requestTypesInput]; const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput; const results = {}; @@ -427,13 +501,32 @@ function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, aut if (authInfo) { arn = authInfo.getArn(); } - return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log, - request, true, results, actionImplicitDenies); + return processBucketPolicy( + _requestType, + bucket, + canonicalID, + arn, + bucket.getOwner(), + log, + request, + true, + results, + actionImplicitDenies + ); }); } -function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request, - actionImplicitDeniesInput = {}, isWebsite = false) { +function isObjAuthorized( + bucket, + objectMD, + requestTypesInput, + canonicalID, + authInfo, + log, + request, + actionImplicitDeniesInput = {}, + isWebsite = false +) { const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput]; const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput; const results = {}; @@ -442,8 +535,7 @@ function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authI // By default, all missing actions are defined as allowed from IAM, to be // backward compatible actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false; - const parsedMethodName = _requestType.endsWith('Version') - ? _requestType.slice(0, -7) : _requestType; + const parsedMethodName = _requestType.endsWith('Version') ? _requestType.slice(0, -7) : _requestType; const bucketOwner = bucket.getOwner(); if (!objectMD) { // check bucket has read access @@ -452,12 +544,22 @@ function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authI if (actionsToConsiderAsObjectPut.includes(_requestType)) { permission = 'objectPut'; } - results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request, - actionImplicitDenies, isWebsite); + results[_requestType] = isBucketAuthorized( + bucket, + permission, + canonicalID, + authInfo, + log, + request, + actionImplicitDenies, + isWebsite + ); // User is already authorized on the bucket for FULL_CONTROL or WRITE or // bucket has canned ACL public-read-write - if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete') - && results[_requestType] === false) { + if ( + (parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete') && + results[_requestType] === false + ) { results[_requestType] = actionImplicitDenies[_requestType] === false; } return results[_requestType]; @@ -470,7 +572,7 @@ function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authI arn = authInfo.getArn(); isUserUnauthenticated = arn === undefined; } - if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) { + if ((objectMD['owner-id'] === canonicalID && requesterIsNotUser) || isServiceAccount(canonicalID)) { results[_requestType] = actionImplicitDenies[_requestType] === false; return results[_requestType]; } @@ -478,16 +580,31 @@ function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authI // - requesttype is included in bucketOwnerActions and // - account is the bucket owner // - requester is account, not user - if (bucketOwnerActions.includes(parsedMethodName) - && (bucketOwner === canonicalID) - && requesterIsNotUser) { + if (bucketOwnerActions.includes(parsedMethodName) && 
bucketOwner === canonicalID && requesterIsNotUser) { results[_requestType] = actionImplicitDenies[_requestType] === false; return results[_requestType]; } - const aclPermission = checkObjectAcls(bucket, objectMD, parsedMethodName, - canonicalID, requesterIsNotUser, isUserUnauthenticated, mainApiCall); - return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucketOwner, - log, request, aclPermission, results, actionImplicitDenies); + const aclPermission = checkObjectAcls( + bucket, + objectMD, + parsedMethodName, + canonicalID, + requesterIsNotUser, + isUserUnauthenticated, + mainApiCall + ); + return processBucketPolicy( + _requestType, + bucket, + canonicalID, + arn, + bucketOwner, + log, + request, + aclPermission, + results, + actionImplicitDenies + ); }); } @@ -583,8 +700,8 @@ function validatePolicyConditions(policy) { for (const conditionOperator of conditionOperators) { const conditionKey = Object.keys(s.Condition[conditionOperator])[0]; const conditionValue = s.Condition[conditionOperator][conditionKey]; - const validCondition = validConditions.find(validCondition => - validCondition.conditionKey === conditionKey + const validCondition = validConditions.find( + validCondition => validCondition.conditionKey === conditionKey ); // AWS returns does not return an error if the condition starts with 'aws:' // so we reproduce this behaviour @@ -603,7 +720,6 @@ function validatePolicyConditions(policy) { return null; } - /** isLifecycleSession - check if it is the Lifecycle assumed role session arn. * @param {string} arn - Amazon resource name - example: * arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle @@ -622,9 +738,9 @@ function isLifecycleSession(arn) { const resourceType = resourceNames[0]; const sessionName = resourceNames[resourceNames.length - 1]; - return (service === 'sts' - && resourceType === assumedRoleArnResourceType - && sessionName === backbeatLifecycleSessionName); + return ( + service === 'sts' && resourceType === assumedRoleArnResourceType && sessionName === backbeatLifecycleSessionName + ); } module.exports = { diff --git a/lib/api/apiUtils/authorization/prepareRequestContexts.js b/lib/api/apiUtils/authorization/prepareRequestContexts.js index c33fcc9c82..24c370b9ca 100644 --- a/lib/api/apiUtils/authorization/prepareRequestContexts.js +++ b/lib/api/apiUtils/authorization/prepareRequestContexts.js @@ -18,9 +18,13 @@ const apiMethodWithVersion = { }; function isHeaderAcl(headers) { - return headers['x-amz-grant-read'] || headers['x-amz-grant-read-acp'] || - headers['x-amz-grant-write-acp'] || headers['x-amz-grant-full-control'] || - headers['x-amz-acl']; + return ( + headers['x-amz-grant-read'] || + headers['x-amz-grant-read-acp'] || + headers['x-amz-grant-write-acp'] || + headers['x-amz-grant-full-control'] || + headers['x-amz-acl'] + ); } /** @@ -32,8 +36,7 @@ function isHeaderAcl(headers) { * @param {string} sourceVersionId - value of sourceVersionId if copy request * @return {RequestContext []} array of requestContexts */ -function prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId) { +function prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId) { // if multiObjectDelete request, we want to authenticate // before parsing the post body and creating multiple requestContexts // so send null as requestContexts to Vault to avoid authorization @@ -48,17 +51,23 @@ function prepareRequestContexts(apiMethod, request, sourceBucket, const isSecure = 
requestUtils.getHttpProtocolSecurity(request, config); function generateRequestContext(apiMethod) { - return new RequestContext(request.headers, - request.query, request.bucketName, request.objectKey, - ip, isSecure, apiMethod, 's3'); + return new RequestContext( + request.headers, + request.query, + request.bucketName, + request.objectKey, + ip, + isSecure, + apiMethod, + 's3' + ); } if (apiMethod === 'bucketPut') { return null; } - if (apiMethodWithVersion[apiMethod] && request.query && - request.query.versionId) { + if (apiMethodWithVersion[apiMethod] && request.query && request.query.versionId) { apiMethodAfterVersionCheck = `${apiMethod}Version`; } else { apiMethodAfterVersionCheck = apiMethod; @@ -75,186 +84,157 @@ function prepareRequestContexts(apiMethod, request, sourceBucket, // In the API, we then ignore these authorization results, and we can use // any information returned, e.g., the quota. const requestContextMultiObjectDelete = generateRequestContext('objectDelete'); - requestContexts.push(requestContextMultiObjectDelete); - } else if (apiMethodAfterVersionCheck === 'objectCopy' - || apiMethodAfterVersionCheck === 'objectPutCopyPart') { - const objectGetAction = sourceVersionId ? 'objectGetVersion' : - 'objectGet'; - const reqQuery = Object.assign({}, request.query, - { versionId: sourceVersionId }); - const getRequestContext = new RequestContext(request.headers, - reqQuery, sourceBucket, sourceObject, - ip, isSecure, - objectGetAction, 's3'); + requestContexts.push(requestContextMultiObjectDelete); + } else if (apiMethodAfterVersionCheck === 'objectCopy' || apiMethodAfterVersionCheck === 'objectPutCopyPart') { + const objectGetAction = sourceVersionId ? 'objectGetVersion' : 'objectGet'; + const reqQuery = Object.assign({}, request.query, { versionId: sourceVersionId }); + const getRequestContext = new RequestContext( + request.headers, + reqQuery, + sourceBucket, + sourceObject, + ip, + isSecure, + objectGetAction, + 's3' + ); const putRequestContext = generateRequestContext('objectPut'); requestContexts.push(getRequestContext, putRequestContext); if (apiMethodAfterVersionCheck === 'objectCopy') { // if tagging directive is COPY, "s3:PutObjectTagging" don't need // to be included in the list of permitted actions in IAM policy - if (request.headers['x-amz-tagging'] && - request.headers['x-amz-tagging-directive'] === 'REPLACE') { - const putTaggingRequestContext = - generateRequestContext('objectPutTagging'); + if (request.headers['x-amz-tagging'] && request.headers['x-amz-tagging-directive'] === 'REPLACE') { + const putTaggingRequestContext = generateRequestContext('objectPutTagging'); requestContexts.push(putTaggingRequestContext); } if (isHeaderAcl(request.headers)) { - const putAclRequestContext = - generateRequestContext('objectPutACL'); + const putAclRequestContext = generateRequestContext('objectPutACL'); requestContexts.push(putAclRequestContext); } } - } else if (apiMethodAfterVersionCheck === 'objectGet' - || apiMethodAfterVersionCheck === 'objectGetVersion') { - const objectGetTaggingAction = (request.query && - request.query.versionId) ? 'objectGetTaggingVersion' : - 'objectGetTagging'; + } else if (apiMethodAfterVersionCheck === 'objectGet' || apiMethodAfterVersionCheck === 'objectGetVersion') { + const objectGetTaggingAction = + request.query && request.query.versionId ? 
'objectGetTaggingVersion' : 'objectGetTagging'; if (request.headers['x-amz-version-id']) { const objectGetVersionAction = 'objectGetVersion'; - const getVersionResourceVersion = - generateRequestContext(objectGetVersionAction); + const getVersionResourceVersion = generateRequestContext(objectGetVersionAction); requestContexts.push(getVersionResourceVersion); } - const getRequestContext = - generateRequestContext(apiMethodAfterVersionCheck); - const getTaggingRequestContext = - generateRequestContext(objectGetTaggingAction); + const getRequestContext = generateRequestContext(apiMethodAfterVersionCheck); + const getTaggingRequestContext = generateRequestContext(objectGetTaggingAction); requestContexts.push(getRequestContext, getTaggingRequestContext); } else if (apiMethodAfterVersionCheck === 'objectGetTagging') { const objectGetTaggingAction = 'objectGetTagging'; - const getTaggingResourceVersion = - generateRequestContext(objectGetTaggingAction); + const getTaggingResourceVersion = generateRequestContext(objectGetTaggingAction); requestContexts.push(getTaggingResourceVersion); if (request.headers['x-amz-version-id']) { const objectGetTaggingVersionAction = 'objectGetTaggingVersion'; - const getTaggingVersionResourceVersion = - generateRequestContext(objectGetTaggingVersionAction); + const getTaggingVersionResourceVersion = generateRequestContext(objectGetTaggingVersionAction); requestContexts.push(getTaggingVersionResourceVersion); } } else if (apiMethodAfterVersionCheck === 'objectHead') { const objectHeadAction = 'objectHead'; - const headObjectAction = - generateRequestContext(objectHeadAction); + const headObjectAction = generateRequestContext(objectHeadAction); requestContexts.push(headObjectAction); if (request.headers['x-amz-version-id']) { const objectHeadVersionAction = 'objectGetVersion'; - const headObjectVersion = - generateRequestContext(objectHeadVersionAction); + const headObjectVersion = generateRequestContext(objectHeadVersionAction); requestContexts.push(headObjectVersion); } if (request.headers['x-amz-scal-archive-info']) { - const coldStatus = - generateRequestContext('objectGetArchiveInfo'); + const coldStatus = generateRequestContext('objectGetArchiveInfo'); requestContexts.push(coldStatus); } } else if (apiMethodAfterVersionCheck === 'objectPutTagging') { - const putObjectTaggingRequestContext = - generateRequestContext('objectPutTagging'); + const putObjectTaggingRequestContext = generateRequestContext('objectPutTagging'); requestContexts.push(putObjectTaggingRequestContext); if (request.headers['x-amz-version-id']) { - const putObjectVersionRequestContext = - generateRequestContext('objectPutTaggingVersion'); + const putObjectVersionRequestContext = generateRequestContext('objectPutTaggingVersion'); requestContexts.push(putObjectVersionRequestContext); } } else if (apiMethodAfterVersionCheck === 'objectPut') { // if put object with version - if (request.headers['x-scal-s3-version-id'] || - request.headers['x-scal-s3-version-id'] === '') { - const putVersionRequestContext = - generateRequestContext('objectPutVersion'); + if (request.headers['x-scal-s3-version-id'] || request.headers['x-scal-s3-version-id'] === '') { + const putVersionRequestContext = generateRequestContext('objectPutVersion'); requestContexts.push(putVersionRequestContext); } else { - const putRequestContext = - generateRequestContext(apiMethodAfterVersionCheck); + const putRequestContext = generateRequestContext(apiMethodAfterVersionCheck); requestContexts.push(putRequestContext); // if put object 
(versioning) with tag set if (request.headers['x-amz-tagging']) { - const putTaggingRequestContext = - generateRequestContext('objectPutTagging'); + const putTaggingRequestContext = generateRequestContext('objectPutTagging'); requestContexts.push(putTaggingRequestContext); } if (['ON', 'OFF'].includes(request.headers['x-amz-object-lock-legal-hold-status'])) { - const putLegalHoldStatusAction = - generateRequestContext('objectPutLegalHold'); + const putLegalHoldStatusAction = generateRequestContext('objectPutLegalHold'); requestContexts.push(putLegalHoldStatusAction); } // if put object (versioning) with ACL if (isHeaderAcl(request.headers)) { - const putAclRequestContext = - generateRequestContext('objectPutACL'); + const putAclRequestContext = generateRequestContext('objectPutACL'); requestContexts.push(putAclRequestContext); } if (request.headers['x-amz-object-lock-mode']) { - const putObjectLockRequestContext = - generateRequestContext('objectPutRetention'); + const putObjectLockRequestContext = generateRequestContext('objectPutRetention'); requestContexts.push(putObjectLockRequestContext); if (hasGovernanceBypassHeader(request.headers)) { - const checkUserGovernanceBypassRequestContext = - generateRequestContext('bypassGovernanceRetention'); + const checkUserGovernanceBypassRequestContext = generateRequestContext('bypassGovernanceRetention'); requestContexts.push(checkUserGovernanceBypassRequestContext); } } if (request.headers['x-amz-version-id']) { - const putObjectVersionRequestContext = - generateRequestContext('objectPutTaggingVersion'); + const putObjectVersionRequestContext = generateRequestContext('objectPutTaggingVersion'); requestContexts.push(putObjectVersionRequestContext); } } - } else if (apiMethodAfterVersionCheck === 'objectPutRetention' || - apiMethodAfterVersionCheck === 'objectPutRetentionVersion') { - const putRetentionRequestContext = - generateRequestContext(apiMethodAfterVersionCheck); + } else if ( + apiMethodAfterVersionCheck === 'objectPutRetention' || + apiMethodAfterVersionCheck === 'objectPutRetentionVersion' + ) { + const putRetentionRequestContext = generateRequestContext(apiMethodAfterVersionCheck); requestContexts.push(putRetentionRequestContext); if (hasGovernanceBypassHeader(request.headers)) { - const checkUserGovernanceBypassRequestContext = - generateRequestContext('bypassGovernanceRetention'); + const checkUserGovernanceBypassRequestContext = generateRequestContext('bypassGovernanceRetention'); requestContexts.push(checkUserGovernanceBypassRequestContext); } - } else if (apiMethodAfterVersionCheck === 'initiateMultipartUpload' || - apiMethodAfterVersionCheck === 'objectPutPart' || - apiMethodAfterVersionCheck === 'completeMultipartUpload' - ) { - if (request.headers['x-scal-s3-version-id'] || - request.headers['x-scal-s3-version-id'] === '') { - const putVersionRequestContext = - generateRequestContext('objectPutVersion'); + } else if ( + apiMethodAfterVersionCheck === 'initiateMultipartUpload' || + apiMethodAfterVersionCheck === 'objectPutPart' || + apiMethodAfterVersionCheck === 'completeMultipartUpload' + ) { + if (request.headers['x-scal-s3-version-id'] || request.headers['x-scal-s3-version-id'] === '') { + const putVersionRequestContext = generateRequestContext('objectPutVersion'); requestContexts.push(putVersionRequestContext); } else { - const putRequestContext = - generateRequestContext(apiMethodAfterVersionCheck); + const putRequestContext = generateRequestContext(apiMethodAfterVersionCheck); requestContexts.push(putRequestContext); } // 
if put object (versioning) with ACL if (isHeaderAcl(request.headers)) { - const putAclRequestContext = - generateRequestContext('objectPutACL'); + const putAclRequestContext = generateRequestContext('objectPutACL'); requestContexts.push(putAclRequestContext); } if (request.headers['x-amz-object-lock-mode']) { - const putObjectLockRequestContext = - generateRequestContext('objectPutRetention'); + const putObjectLockRequestContext = generateRequestContext('objectPutRetention'); requestContexts.push(putObjectLockRequestContext); } if (request.headers['x-amz-version-id']) { - const putObjectVersionRequestContext = - generateRequestContext('objectPutTaggingVersion'); + const putObjectVersionRequestContext = generateRequestContext('objectPutTaggingVersion'); requestContexts.push(putObjectVersionRequestContext); } - // AWS only returns an object lock error if a version id - // is specified, else continue to create a delete marker + // AWS only returns an object lock error if a version id + // is specified, else continue to create a delete marker } else if (sourceVersionId && apiMethodAfterVersionCheck === 'objectDeleteVersion') { - const deleteRequestContext = - generateRequestContext(apiMethodAfterVersionCheck); + const deleteRequestContext = generateRequestContext(apiMethodAfterVersionCheck); requestContexts.push(deleteRequestContext); if (hasGovernanceBypassHeader(request.headers)) { - const checkUserGovernanceBypassRequestContext = - generateRequestContext('bypassGovernanceRetention'); + const checkUserGovernanceBypassRequestContext = generateRequestContext('bypassGovernanceRetention'); requestContexts.push(checkUserGovernanceBypassRequestContext); } } else { - const requestContext = - generateRequestContext(apiMethodAfterVersionCheck); + const requestContext = generateRequestContext(apiMethodAfterVersionCheck); requestContexts.push(requestContext); } diff --git a/lib/api/apiUtils/authorization/tagConditionKeys.js b/lib/api/apiUtils/authorization/tagConditionKeys.js index 47c09ce0b3..7bdfaffe81 100644 --- a/lib/api/apiUtils/authorization/tagConditionKeys.js +++ b/lib/api/apiUtils/authorization/tagConditionKeys.js @@ -13,36 +13,36 @@ function makeTagQuery(tags) { } function updateRequestContextsWithTags(request, requestContexts, apiMethod, log, cb) { - async.waterfall([ - next => { - if (request.headers['x-amz-tagging']) { - return next(null, request.headers['x-amz-tagging']); - } - if (request.post && apiMethod === 'objectPutTagging') { - return parseTagXml(request.post, log, (err, tags) => { - if (err) { - log.trace('error parsing request tags'); - return next(err); - } - return next(null, makeTagQuery(tags)); - }); - } - return next(null, null); - }, - (requestTagsQuery, next) => { - const objectKey = request.objectKey; - const bucketName = request.bucketName; - const decodedVidResult = decodeVersionId(request.query); - if (decodedVidResult instanceof Error) { - log.trace('invalid versionId query', { - versionId: request.query.versionId, - error: decodedVidResult, - }); - return next(decodedVidResult); - } - const reqVersionId = decodedVidResult; - return metadata.getObjectMD( - bucketName, objectKey, { versionId: reqVersionId }, log, (err, objMD) => { + async.waterfall( + [ + next => { + if (request.headers['x-amz-tagging']) { + return next(null, request.headers['x-amz-tagging']); + } + if (request.post && apiMethod === 'objectPutTagging') { + return parseTagXml(request.post, log, (err, tags) => { + if (err) { + log.trace('error parsing request tags'); + return next(err); + } + return 
next(null, makeTagQuery(tags)); + }); + } + return next(null, null); + }, + (requestTagsQuery, next) => { + const objectKey = request.objectKey; + const bucketName = request.bucketName; + const decodedVidResult = decodeVersionId(request.query); + if (decodedVidResult instanceof Error) { + log.trace('invalid versionId query', { + versionId: request.query.versionId, + error: decodedVidResult, + }); + return next(decodedVidResult); + } + const reqVersionId = decodedVidResult; + return metadata.getObjectMD(bucketName, objectKey, { versionId: reqVersionId }, log, (err, objMD) => { if (err) { // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver if (err.NoSuchKey) { @@ -54,24 +54,26 @@ function updateRequestContextsWithTags(request, requestContexts, apiMethod, log, const existingTagsQuery = objMD.tags && makeTagQuery(objMD.tags); return next(null, requestTagsQuery, existingTagsQuery); }); - }, - ], (err, requestTagsQuery, existingTagsQuery) => { - if (err) { - log.trace('error processing tag condition key evaluation'); - return cb(err); - } - // FIXME introduced by CLDSRV-256, this syntax should be allowed by the linter - for (const rc of requestContexts) { - rc.setNeedTagEval(true); - if (requestTagsQuery) { - rc.setRequestObjTags(requestTagsQuery); + }, + ], + (err, requestTagsQuery, existingTagsQuery) => { + if (err) { + log.trace('error processing tag condition key evaluation'); + return cb(err); } - if (existingTagsQuery) { - rc.setExistingObjTag(existingTagsQuery); + // FIXME introduced by CLDSRV-256, this syntax should be allowed by the linter + for (const rc of requestContexts) { + rc.setNeedTagEval(true); + if (requestTagsQuery) { + rc.setRequestObjTags(requestTagsQuery); + } + if (existingTagsQuery) { + rc.setExistingObjTag(existingTagsQuery); + } } + return cb(); } - return cb(); - }); + ); } function tagConditionKeyAuth(authorizationResults, request, requestContexts, apiMethod, log, cb) { @@ -86,8 +88,13 @@ function tagConditionKeyAuth(authorizationResults, request, requestContexts, api if (err) { return cb(err); } - return auth.server.doAuth(request, log, - (err, userInfo, authResults) => cb(err, authResults), 's3', requestContexts); + return auth.server.doAuth( + request, + log, + (err, userInfo, authResults) => cb(err, authResults), + 's3', + requestContexts + ); }); } diff --git a/lib/api/apiUtils/bucket/bucketCors.js b/lib/api/apiUtils/bucket/bucketCors.js index e81c3b1199..b76cce7f9b 100644 --- a/lib/api/apiUtils/bucket/bucketCors.js +++ b/lib/api/apiUtils/bucket/bucketCors.js @@ -26,10 +26,8 @@ const escapeForXml = s3middleware.escapeForXml; */ const customizedErrs = { - numberRules: 'The number of CORS rules should not exceed allowed limit ' + - 'of 100 rules.', - originAndMethodExist: 'Each CORSRule must identify at least one origin ' + - 'and one method.', + numberRules: 'The number of CORS rules should not exceed allowed limit ' + 'of 100 rules.', + originAndMethodExist: 'Each CORSRule must identify at least one origin ' + 'and one method.', }; // Helper validation methods @@ -42,21 +40,20 @@ const _validator = { validateNumberWildcards(string) { const firstIndex = string.indexOf('*'); if (firstIndex !== -1) { - return (string.indexOf('*', firstIndex + 1) === -1); + return string.indexOf('*', firstIndex + 1) === -1; } return true; }, /** _validator.validateID - check value of optional ID - * @param {string[]} id - array containing id string - * @return {(Error|true|undefined)} - Arsenal error on failure, true on - * success, undefined if ID does not exist 
- */ + * @param {string[]} id - array containing id string + * @return {(Error|true|undefined)} - Arsenal error on failure, true on + * success, undefined if ID does not exist + */ validateID(id) { if (!id) { return undefined; // to indicate ID does not exist } - if (!Array.isArray(id) || id.length !== 1 - || typeof id[0] !== 'string') { + if (!Array.isArray(id) || id.length !== 1 || typeof id[0] !== 'string') { return errors.MalformedXML; } if (id[0] === '') { @@ -65,10 +62,10 @@ const _validator = { return true; }, /** _validator.validateMaxAgeSeconds - check value of optional MaxAgeSeconds - * @param {string[]} seconds - array containing number string - * @return {(Error|parsedValue|undefined)} - Arsenal error on failure, parsed - * value if valid, undefined if MaxAgeSeconds does not exist - */ + * @param {string[]} seconds - array containing number string + * @return {(Error|parsedValue|undefined)} - Arsenal error on failure, parsed + * value if valid, undefined if MaxAgeSeconds does not exist + */ validateMaxAgeSeconds(seconds) { if (!seconds) { return undefined; @@ -87,36 +84,37 @@ const _validator = { return parsedValue; }, /** _validator.validateNumberRules - return if number of rules exceeds 100 - * @param {number} length - array containing number string - * @return {(Error|true)} - Arsenal error on failure, true on success - */ + * @param {number} length - array containing number string + * @return {(Error|true)} - Arsenal error on failure, true on success + */ validateNumberRules(length) { if (length > 100) { - return errorInstances.InvalidRequest - .customizeDescription(customizedErrs.numberRules); + return errorInstances.InvalidRequest.customizeDescription(customizedErrs.numberRules); } return true; }, /** _validator.validateOriginAndMethodExist - * @param {string[]} allowedMethods - array of AllowedMethod's - * @param {string[]} allowedOrigins - array of AllowedOrigin's - * @return {(Error|true)} - Arsenal error on failure, true on success - */ + * @param {string[]} allowedMethods - array of AllowedMethod's + * @param {string[]} allowedOrigins - array of AllowedOrigin's + * @return {(Error|true)} - Arsenal error on failure, true on success + */ validateOriginAndMethodExist(allowedMethods, allowedOrigins) { - if (allowedOrigins && allowedMethods && - Array.isArray(allowedOrigins) && - Array.isArray(allowedMethods) && - allowedOrigins.length > 0 && - allowedMethods.length > 0) { + if ( + allowedOrigins && + allowedMethods && + Array.isArray(allowedOrigins) && + Array.isArray(allowedMethods) && + allowedOrigins.length > 0 && + allowedMethods.length > 0 + ) { return true; } - return errorInstances.MalformedXML - .customizeDescription(customizedErrs.originAndMethodExist); + return errorInstances.MalformedXML.customizeDescription(customizedErrs.originAndMethodExist); }, /** _validator.validateMethods - check values of AllowedMethod's - * @param {string[]} methods - array of AllowedMethod's - * @return {(Error|true)} - Arsenal error on failure, true on success - */ + * @param {string[]} methods - array of AllowedMethod's + * @return {(Error|true)} - Arsenal error on failure, true on success + */ validateMethods(methods) { let invalidMethod; function isValidMethod(method) { @@ -128,17 +126,17 @@ const _validator = { return false; } if (!methods.every(isValidMethod)) { - const errMsg = 'Found unsupported HTTP method in CORS config. ' + - `Unsupported method is "${invalidMethod}"`; + const errMsg = + 'Found unsupported HTTP method in CORS config. 
' + `Unsupported method is "${invalidMethod}"`; return errorInstances.InvalidRequest.customizeDescription(errMsg); } return true; }, /** _validator.validateAllowedOriginsOrHeaders - check values - * @param {string[]} elementArr - array of elements to check - * @param {string} typeElement - type of element being checked - * @return {(Error|true)} - Arsenal error on failure, true on success - */ + * @param {string[]} elementArr - array of elements to check + * @param {string} typeElement - type of element being checked + * @return {(Error|true)} - Arsenal error on failure, true on success + */ validateAllowedOriginsOrHeaders(elementArr, typeElement) { for (let i = 0; i < elementArr.length; i++) { const element = elementArr[i]; @@ -146,18 +144,17 @@ const _validator = { return errors.MalformedXML; } if (!this.validateNumberWildcards(element)) { - const errMsg = `${typeElement} "${element}" can not have ` + - 'more than one wildcard.'; + const errMsg = `${typeElement} "${element}" can not have ` + 'more than one wildcard.'; return errorInstances.InvalidRequest.customizeDescription(errMsg); } } return true; }, /** _validator.validateAllowedHeaders - check values of AllowedHeader's - * @param {string[]} headers - array of AllowedHeader's - * @return {(Error|true|undefined)} - Arsenal error on failure, true if - * valid, undefined if optional AllowedHeader's do not exist - */ + * @param {string[]} headers - array of AllowedHeader's + * @return {(Error|true|undefined)} - Arsenal error on failure, true if + * valid, undefined if optional AllowedHeader's do not exist + */ validateAllowedHeaders(headers) { if (!headers) { return undefined; // to indicate AllowedHeaders do not exist @@ -165,18 +162,17 @@ const _validator = { if (!Array.isArray(headers) || headers.length === 0) { return errors.MalformedXML; } - const result = - this.validateAllowedOriginsOrHeaders(headers, 'AllowedHeader'); + const result = this.validateAllowedOriginsOrHeaders(headers, 'AllowedHeader'); if (result instanceof Error) { return result; } return true; }, /** _validator.validateExposeHeaders - check values of ExposeHeader's - * @param {string[]} headers - array of ExposeHeader's - * @return {(Error|true|undefined)} - Arsenal error on failure, true if - * valid, undefined if optional ExposeHeader's do not exist - */ + * @param {string[]} headers - array of ExposeHeader's + * @return {(Error|true|undefined)} - Arsenal error on failure, true if + * valid, undefined if optional ExposeHeader's do not exist + */ validateExposeHeaders(headers) { if (!headers) { return undefined; // indicate ExposeHeaders do not exist @@ -190,13 +186,13 @@ const _validator = { return errors.MalformedXML; } if (header.indexOf('*') !== -1) { - const errMsg = `ExposeHeader ${header} contains a wildcard. ` + - 'Wildcards are currently not supported for ExposeHeader.'; + const errMsg = + `ExposeHeader ${header} contains a wildcard. 
` + + 'Wildcards are currently not supported for ExposeHeader.'; return errorInstances.InvalidRequest.customizeDescription(errMsg); } if (!/^[A-Za-z0-9-]*$/.test(header)) { - const errMsg = `ExposeHeader ${header} contains invalid ` + - 'character.'; + const errMsg = `ExposeHeader ${header} contains invalid ` + 'character.'; return errorInstances.InvalidRequest.customizeDescription(errMsg); } } @@ -205,31 +201,29 @@ const _validator = { }; /** _validateCorsXml - Validate XML, returning an error if any part is not valid -* @param {object[]} rules - CORSRule collection parsed from xml to be validated -* @param {string[]} [rules[].ID] - optional id to identify rule -* @param {string[]} rules[].AllowedMethod - methods allowed for CORS -* @param {string[]} rules[].AllowedOrigin - origins allowed for CORS -* @param {string[]} [rules[].AllowedHeader] - headers allowed in an OPTIONS -* request via the Access-Control-Request-Headers header -* @param {string[]} [rules[].MaxAgeSeconds] - seconds browsers should cache -* OPTIONS response -* @param {string[]} [rules[].ExposeHeader] - headers exposed to applications -* @return {(Error|object)} - return cors object on success; error on failure -*/ + * @param {object[]} rules - CORSRule collection parsed from xml to be validated + * @param {string[]} [rules[].ID] - optional id to identify rule + * @param {string[]} rules[].AllowedMethod - methods allowed for CORS + * @param {string[]} rules[].AllowedOrigin - origins allowed for CORS + * @param {string[]} [rules[].AllowedHeader] - headers allowed in an OPTIONS + * request via the Access-Control-Request-Headers header + * @param {string[]} [rules[].MaxAgeSeconds] - seconds browsers should cache + * OPTIONS response + * @param {string[]} [rules[].ExposeHeader] - headers exposed to applications + * @return {(Error|object)} - return cors object on success; error on failure + */ function _validateCorsXml(rules) { const cors = []; let result; if (rules.length > 100) { - return errorInstances.InvalidRequest - .customizeDescription(customizedErrs.numberRules); + return errorInstances.InvalidRequest.customizeDescription(customizedErrs.numberRules); } for (let i = 0; i < rules.length; i++) { const rule = rules[i]; const corsRule = {}; - result = _validator.validateOriginAndMethodExist(rule.AllowedMethod, - rule.AllowedOrigin); + result = _validator.validateOriginAndMethodExist(rule.AllowedMethod, rule.AllowedOrigin); if (result instanceof Error) { return result; } @@ -240,8 +234,7 @@ function _validateCorsXml(rules) { } corsRule.allowedMethods = rule.AllowedMethod; - result = _validator.validateAllowedOriginsOrHeaders(rule.AllowedOrigin, - 'AllowedOrigin'); + result = _validator.validateAllowedOriginsOrHeaders(rule.AllowedOrigin, 'AllowedOrigin'); if (result instanceof Error) { return result; } @@ -281,12 +274,12 @@ function _validateCorsXml(rules) { } /** parseCorsXml - Parse and validate xml body, returning cors object on success -* @param {string} xml - xml body to parse and validate -* @param {object} log - Werelogs logger -* @param {function} cb - callback to server -* @return {undefined} - calls callback with cors object on success, error on -* failure -*/ + * @param {string} xml - xml body to parse and validate + * @param {object} log - Werelogs logger + * @param {function} cb - callback to server + * @return {undefined} - calls callback with cors object on success, error on + * failure + */ function parseCorsXml(xml, log, cb) { parseString(xml, (err, result) => { if (err) { @@ -298,15 +291,17 @@ function 
parseCorsXml(xml, log, cb) { return cb(errors.MalformedXML); } - if (!result || !result.CORSConfiguration || + if ( + !result || + !result.CORSConfiguration || !result.CORSConfiguration.CORSRule || - !Array.isArray(result.CORSConfiguration.CORSRule)) { + !Array.isArray(result.CORSConfiguration.CORSRule) + ) { const errMsg = 'Invalid cors configuration xml'; return cb(errorInstances.MalformedXML.customizeDescription(errMsg)); } - const validationRes = - _validateCorsXml(result.CORSConfiguration.CORSRule); + const validationRes = _validateCorsXml(result.CORSConfiguration.CORSRule); if (validationRes instanceof Error) { log.debug('xml validation failed', { error: validationRes, @@ -322,18 +317,14 @@ function parseCorsXml(xml, log, cb) { function convertToXml(arrayRules) { const xml = []; - xml.push('', - ''); + xml.push('', ''); arrayRules.forEach(rule => { xml.push(''); - ['allowedMethods', 'allowedOrigins', 'allowedHeaders', 'exposeHeaders'] - .forEach(key => { + ['allowedMethods', 'allowedOrigins', 'allowedHeaders', 'exposeHeaders'].forEach(key => { if (rule[key]) { - const element = key.charAt(0).toUpperCase() + - key.slice(1, -1); + const element = key.charAt(0).toUpperCase() + key.slice(1, -1); rule[key].forEach(value => { - xml.push(`<${element}>${escapeForXml(value)}` + - ``); + xml.push(`<${element}>${escapeForXml(value)}` + ``); }); } }); diff --git a/lib/api/apiUtils/bucket/bucketCreation.js b/lib/api/apiUtils/bucket/bucketCreation.js index 458cbd6375..152c7187bb 100644 --- a/lib/api/apiUtils/bucket/bucketCreation.js +++ b/lib/api/apiUtils/bucket/bucketCreation.js @@ -18,7 +18,6 @@ const oldUsersBucket = constants.oldUsersBucket; const zenkoSeparator = constants.zenkoSeparator; const userBucketOwner = 'admin'; - function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) { // BACKWARD: Simplify once do not have to deal with old // usersbucket name and old splitter @@ -28,8 +27,7 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) { if (err && !err.is.NoSuchBucket && !err.is.BucketAlreadyExists) { return cb(err); } - const splitter = usersBucketAttrs ? - constants.splitter : constants.oldSplitter; + const splitter = usersBucketAttrs ? constants.splitter : constants.oldSplitter; let key = createKeyForUserBucket(canonicalID, splitter, bucketName); const omVal = { creationDate: new Date().toJSON(), @@ -38,46 +36,44 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) { // If the new format usersbucket does not exist, try to put the // key in the old usersBucket using the old splitter. // Otherwise put the key in the new format usersBucket - const usersBucketBeingCalled = usersBucketAttrs ? - usersBucket : oldUsersBucket; - return metadata.putObjectMD(usersBucketBeingCalled, key, - omVal, {}, log, err => { - if (err?.is?.NoSuchBucket) { - // There must be no usersBucket so createBucket - // one using the new format - log.trace('users bucket does not exist, ' + - 'creating users bucket'); - key = `${canonicalID}${constants.splitter}` + - `${bucketName}`; - const creationDate = new Date().toJSON(); - const freshBucket = new BucketInfo(usersBucket, - userBucketOwner, userBucketOwner, creationDate, - BucketInfo.currentModelVersion()); - return metadata.createBucket(usersBucket, - freshBucket, log, err => { - // Note: In the event that two - // users' requests try to create the - // usersBucket at the same time, - // this will prevent one of the users - // from getting a BucketAlreadyExists - // error with respect - // to the usersBucket. 
- // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver - if (err && !err.BucketAlreadyExists) { - log.error('error from metadata', { - error: err, - }); - return cb(err); - } - log.trace('Users bucket created'); - // Finally put the key in the new format - // usersBucket - return metadata.putObjectMD(usersBucket, - key, omVal, {}, log, cb); + const usersBucketBeingCalled = usersBucketAttrs ? usersBucket : oldUsersBucket; + return metadata.putObjectMD(usersBucketBeingCalled, key, omVal, {}, log, err => { + if (err?.is?.NoSuchBucket) { + // There must be no usersBucket so createBucket + // one using the new format + log.trace('users bucket does not exist, ' + 'creating users bucket'); + key = `${canonicalID}${constants.splitter}` + `${bucketName}`; + const creationDate = new Date().toJSON(); + const freshBucket = new BucketInfo( + usersBucket, + userBucketOwner, + userBucketOwner, + creationDate, + BucketInfo.currentModelVersion() + ); + return metadata.createBucket(usersBucket, freshBucket, log, err => { + // Note: In the event that two + // users' requests try to create the + // usersBucket at the same time, + // this will prevent one of the users + // from getting a BucketAlreadyExists + // error with respect + // to the usersBucket. + // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver + if (err && !err.BucketAlreadyExists) { + log.error('error from metadata', { + error: err, }); - } - return cb(err); - }); + return cb(err); + } + log.trace('Users bucket created'); + // Finally put the key in the new format + // usersBucket + return metadata.putObjectMD(usersBucket, key, omVal, {}, log, cb); + }); + } + return cb(err); + }); }); } @@ -139,16 +135,15 @@ function cleanUpBucket(bucketMD, canonicalID, log, callback) { * @callback called with (err, sseInfo: object) */ function bucketLevelServerSideEncryption(bucket, headers, log, cb) { - kms.bucketLevelEncryption( - bucket, headers, log, (err, sseInfo) => { - if (err) { - log.debug('error getting bucket encryption info', { - error: err, - }); - return cb(err); - } - return cb(null, sseInfo); - }); + kms.bucketLevelEncryption(bucket, headers, log, (err, sseInfo) => { + if (err) { + log.debug('error getting bucket encryption info', { + error: err, + }); + return cb(err); + } + return cb(null, sseInfo); + }); } /** @@ -163,27 +158,43 @@ function bucketLevelServerSideEncryption(bucket, headers, log, cb) { * @param {function} cb - callback to bucketPut * @return {undefined} */ -function createBucket(authInfo, bucketName, headers, - locationConstraint, log, cb) { +function createBucket(authInfo, bucketName, headers, locationConstraint, log, cb) { log.trace('Creating bucket'); assert.strictEqual(typeof bucketName, 'string'); const canonicalID = authInfo.getCanonicalID(); - const ownerDisplayName = - authInfo.getAccountDisplayName(); + const ownerDisplayName = authInfo.getAccountDisplayName(); const creationDate = new Date().toJSON(); const isNFSEnabled = headers['x-scal-nfs-enabled'] === 'true'; const headerObjectLock = headers['x-amz-bucket-object-lock-enabled']; - const objectLockEnabled - = headerObjectLock && headerObjectLock.toLowerCase() === 'true'; - const bucket = new BucketInfo(bucketName, canonicalID, ownerDisplayName, - creationDate, BucketInfo.currentModelVersion(), null, null, null, null, - null, null, null, null, null, null, null, null, null, isNFSEnabled, - null, null, objectLockEnabled); + const objectLockEnabled = headerObjectLock && headerObjectLock.toLowerCase() === 'true'; + const bucket = new 
BucketInfo( + bucketName, + canonicalID, + ownerDisplayName, + creationDate, + BucketInfo.currentModelVersion(), + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + isNFSEnabled, + null, + null, + objectLockEnabled + ); let locationConstraintVal = null; if (locationConstraint) { - const [locationConstraintStr, ingestion] = - locationConstraint.split(zenkoSeparator); + const [locationConstraintStr, ingestion] = locationConstraint.split(zenkoSeparator); if (locationConstraintStr) { locationConstraintVal = locationConstraintStr; bucket.setLocationConstraint(locationConstraintStr); @@ -209,88 +220,91 @@ function createBucket(authInfo, bucketName, headers, acl: bucket.acl, log, }; - async.parallel({ - prepareNewBucketMD: function prepareNewBucketMD(callback) { - acl.parseAclFromHeaders(parseAclParams, (err, parsedACL) => { - if (err) { - log.debug('error parsing acl from headers', { - error: err, - }); - return callback(err); - } - bucket.setFullAcl(parsedACL); - return callback(null, bucket); - }); - }, - getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) { - metadata.getBucket(bucketName, log, (err, data) => { - // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver - if (err && err.NoSuchBucket) { - return callback(null, 'NoBucketYet'); - } - if (err) { - return callback(err); - } - return callback(null, data); - }); + async.parallel( + { + prepareNewBucketMD: function prepareNewBucketMD(callback) { + acl.parseAclFromHeaders(parseAclParams, (err, parsedACL) => { + if (err) { + log.debug('error parsing acl from headers', { + error: err, + }); + return callback(err); + } + bucket.setFullAcl(parsedACL); + return callback(null, bucket); + }); + }, + getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) { + metadata.getBucket(bucketName, log, (err, data) => { + // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver + if (err && err.NoSuchBucket) { + return callback(null, 'NoBucketYet'); + } + if (err) { + return callback(err); + } + return callback(null, data); + }); + }, }, - }, - // Function to run upon finishing both parallel requests - (err, results) => { - if (err) { - return cb(err); - } - const existingBucketMD = results.getAnyExistingBucketInfo; - if (existingBucketMD instanceof BucketInfo && - existingBucketMD.getOwner() !== canonicalID && - !isServiceAccount(canonicalID)) { - // return existingBucketMD to collect cors headers - return cb(errors.BucketAlreadyExists, existingBucketMD); - } - const newBucketMD = results.prepareNewBucketMD; - if (existingBucketMD === 'NoBucketYet') { - const bucketSseConfig = parseBucketEncryptionHeaders(headers); + // Function to run upon finishing both parallel requests + (err, results) => { + if (err) { + return cb(err); + } + const existingBucketMD = results.getAnyExistingBucketInfo; + if ( + existingBucketMD instanceof BucketInfo && + existingBucketMD.getOwner() !== canonicalID && + !isServiceAccount(canonicalID) + ) { + // return existingBucketMD to collect cors headers + return cb(errors.BucketAlreadyExists, existingBucketMD); + } + const newBucketMD = results.prepareNewBucketMD; + if (existingBucketMD === 'NoBucketYet') { + const bucketSseConfig = parseBucketEncryptionHeaders(headers); - // Apply global SSE configuration when global encryption is enabled - // and no SSE settings were specified during bucket creation. - // Bucket-specific SSE headers override the default encryption. 
- const sseConfig = config.globalEncryptionEnabled && !bucketSseConfig.algorithm - ? { - algorithm: 'AES256', - mandatory: true, - } : bucketSseConfig; + // Apply global SSE configuration when global encryption is enabled + // and no SSE settings were specified during bucket creation. + // Bucket-specific SSE headers override the default encryption. + const sseConfig = + config.globalEncryptionEnabled && !bucketSseConfig.algorithm + ? { + algorithm: 'AES256', + mandatory: true, + } + : bucketSseConfig; - return bucketLevelServerSideEncryption( - bucket, sseConfig, log, - (err, sseInfo) => { + return bucketLevelServerSideEncryption(bucket, sseConfig, log, (err, sseInfo) => { if (err) { return cb(err); } newBucketMD.setServerSideEncryption(sseInfo); - log.trace( - 'new bucket without flags; adding transient label'); + log.trace('new bucket without flags; adding transient label'); newBucketMD.addTransientFlag(); - return freshStartCreateBucket(newBucketMD, canonicalID, - log, cb); + return freshStartCreateBucket(newBucketMD, canonicalID, log, cb); }); + } + if (existingBucketMD.hasTransientFlag() || existingBucketMD.hasDeletedFlag()) { + log.trace('bucket has transient flag or deleted flag. cleaning up'); + return cleanUpBucket(newBucketMD, canonicalID, log, cb); + } + // If bucket already exists in non-transient and non-deleted + // state and owned by requester, then return BucketAlreadyOwnedByYou + // error unless old AWS behavior (us-east-1) + // Existing locationConstraint must have legacyAwsBehavior === true + // New locationConstraint should have legacyAwsBehavior === true + if ( + isLegacyAWSBehavior(locationConstraintVal) && + isLegacyAWSBehavior(existingBucketMD.getLocationConstraint()) + ) { + log.trace('returning 200 instead of 409 to mirror us-east-1'); + return cb(null, existingBucketMD); + } + return cb(errors.BucketAlreadyOwnedByYou, existingBucketMD); } - if (existingBucketMD.hasTransientFlag() || - existingBucketMD.hasDeletedFlag()) { - log.trace('bucket has transient flag or deleted flag. 
cleaning up'); - return cleanUpBucket(newBucketMD, canonicalID, log, cb); - } - // If bucket already exists in non-transient and non-deleted - // state and owned by requester, then return BucketAlreadyOwnedByYou - // error unless old AWS behavior (us-east-1) - // Existing locationConstraint must have legacyAwsBehavior === true - // New locationConstraint should have legacyAwsBehavior === true - if (isLegacyAWSBehavior(locationConstraintVal) && - isLegacyAWSBehavior(existingBucketMD.getLocationConstraint())) { - log.trace('returning 200 instead of 409 to mirror us-east-1'); - return cb(null, existingBucketMD); - } - return cb(errors.BucketAlreadyOwnedByYou, existingBucketMD); - }); + ); } module.exports = { diff --git a/lib/api/apiUtils/bucket/bucketDeletion.js b/lib/api/apiUtils/bucket/bucketDeletion.js index 764727e2d2..036441c6a3 100644 --- a/lib/api/apiUtils/bucket/bucketDeletion.js +++ b/lib/api/apiUtils/bucket/bucketDeletion.js @@ -5,15 +5,13 @@ const { errors } = require('arsenal'); const abortMultipartUpload = require('../object/abortMultipartUpload'); const { pushMetric } = require('../../../utapi/utilities'); -const { splitter, oldSplitter, mpuBucketPrefix } = - require('../../../../constants'); +const { splitter, oldSplitter, mpuBucketPrefix } = require('../../../../constants'); const metadata = require('../../../metadata/wrapper'); const kms = require('../../../kms/wrapper'); const deleteUserBucketEntry = require('./deleteUserBucketEntry'); function _deleteMPUbucket(destinationBucketName, log, cb) { - const mpuBucketName = - `${mpuBucketPrefix}${destinationBucketName}`; + const mpuBucketName = `${mpuBucketPrefix}${destinationBucketName}`; return metadata.deleteBucket(mpuBucketName, log, err => { // If the mpu bucket does not exist, just move on // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver @@ -25,23 +23,34 @@ function _deleteMPUbucket(destinationBucketName, log, cb) { } function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log, cb) { - async.mapLimit(mpus, 1, (mpu, next) => { - const splitterChar = mpu.key.includes(oldSplitter) ? - oldSplitter : splitter; - // `overview${splitter}${objectKey}${splitter}${uploadId} - const [, objectKey, uploadId] = mpu.key.split(splitterChar); - abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, - (err, destBucket, partSizeSum) => { - pushMetric('abortMultipartUpload', log, { - authInfo, - canonicalID: bucketMD.getOwner(), - bucket: bucketName, - keys: [objectKey], - byteLength: partSizeSum, - }); - next(err); - }, request); - }, cb); + async.mapLimit( + mpus, + 1, + (mpu, next) => { + const splitterChar = mpu.key.includes(oldSplitter) ? 
oldSplitter : splitter; + // `overview${splitter}${objectKey}${splitter}${uploadId} + const [, objectKey, uploadId] = mpu.key.split(splitterChar); + abortMultipartUpload( + authInfo, + bucketName, + objectKey, + uploadId, + log, + (err, destBucket, partSizeSum) => { + pushMetric('abortMultipartUpload', log, { + authInfo, + canonicalID: bucketMD.getOwner(), + bucket: bucketName, + keys: [objectKey], + byteLength: partSizeSum, + }); + next(err); + }, + request + ); + }, + cb + ); } /** * deleteBucket - Delete bucket from namespace @@ -60,37 +69,36 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, assert.strictEqual(typeof bucketName, 'string'); assert.strictEqual(typeof canonicalID, 'string'); - return async.waterfall([ - function checkForObjectsStep(next) { - const params = { maxKeys: 1, listingType: 'DelimiterVersions' }; - // We list all the versions as we want to return BucketNotEmpty - // error if there are any versions or delete markers in the bucket. - // Works for non-versioned buckets as well since listing versions - // includes null (non-versioned) objects in the result. - return metadata.listObject(bucketName, params, log, - (err, list) => { + return async.waterfall( + [ + function checkForObjectsStep(next) { + const params = { maxKeys: 1, listingType: 'DelimiterVersions' }; + // We list all the versions as we want to return BucketNotEmpty + // error if there are any versions or delete markers in the bucket. + // Works for non-versioned buckets as well since listing versions + // includes null (non-versioned) objects in the result. + return metadata.listObject(bucketName, params, log, (err, list) => { if (err) { log.error('error from metadata', { error: err }); return next(err); } - const length = (list.Versions ? list.Versions.length : 0) + + const length = + (list.Versions ? list.Versions.length : 0) + (list.DeleteMarkers ? 
list.DeleteMarkers.length : 0); log.debug('listing result', { length }); if (length) { - log.debug('bucket delete failed', - { error: errors.BucketNotEmpty }); + log.debug('bucket delete failed', { error: errors.BucketNotEmpty }); return next(errors.BucketNotEmpty); } return next(); }); - }, + }, - function deleteMPUbucketStep(next) { - const MPUBucketName = `${mpuBucketPrefix}${bucketName}`; - // check to see if there are any mpu overview objects (so ignore - // any orphaned part objects) - return metadata.listObject(MPUBucketName, { prefix: 'overview' }, - log, (err, objectsListRes) => { + function deleteMPUbucketStep(next) { + const MPUBucketName = `${mpuBucketPrefix}${bucketName}`; + // check to see if there are any mpu overview objects (so ignore + // any orphaned part objects) + return metadata.listObject(MPUBucketName, { prefix: 'overview' }, log, (err, objectsListRes) => { // If no shadow bucket ever created, no ongoing MPU's, so // continue with deletion if (err?.is?.NoSuchBucket) { @@ -101,58 +109,67 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, return next(err); } if (objectsListRes.Contents.length) { - return _deleteOngoingMPUs(authInfo, bucketName, - bucketMD, objectsListRes.Contents, request, log, err => { + return _deleteOngoingMPUs( + authInfo, + bucketName, + bucketMD, + objectsListRes.Contents, + request, + log, + err => { if (err) { return next(err); } log.trace('deleting shadow MPU bucket'); return _deleteMPUbucket(bucketName, log, next); - }); + } + ); } log.trace('deleting shadow MPU bucket'); return _deleteMPUbucket(bucketName, log, next); }); - }, - function addDeleteFlagStep(next) { - log.trace('adding deleted attribute to bucket attributes'); - // Remove transient flag if any so never have both transient - // and deleted flags. - bucketMD.removeTransientFlag(); - bucketMD.addDeletedFlag(); - return metadata.updateBucket(bucketName, bucketMD, log, next); - }, - function deleteUserBucketEntryStep(next) { - log.trace('deleting bucket name from user bucket'); - return deleteUserBucketEntry(bucketName, canonicalID, log, next); - }, - ], - // eslint-disable-next-line prefer-arrow-callback - function actualDeletionStep(err) { - if (err) { - return cb(err); - } - return metadata.deleteBucket(bucketName, log, err => { - log.trace('deleting bucket from metadata'); + }, + function addDeleteFlagStep(next) { + log.trace('adding deleted attribute to bucket attributes'); + // Remove transient flag if any so never have both transient + // and deleted flags. 
+ bucketMD.removeTransientFlag(); + bucketMD.addDeletedFlag(); + return metadata.updateBucket(bucketName, bucketMD, log, next); + }, + function deleteUserBucketEntryStep(next) { + log.trace('deleting bucket name from user bucket'); + return deleteUserBucketEntry(bucketName, canonicalID, log, next); + }, + ], + // eslint-disable-next-line prefer-arrow-callback + function actualDeletionStep(err) { if (err) { return cb(err); } - const serverSideEncryption = bucketMD.getServerSideEncryption(); - const isScalityManagedEncryptionKey = serverSideEncryption && serverSideEncryption.algorithm === 'AES256'; - const isAccountEncryptionEnabled = bucketMD.isAccountEncryptionEnabled(); + return metadata.deleteBucket(bucketName, log, err => { + log.trace('deleting bucket from metadata'); + if (err) { + return cb(err); + } + const serverSideEncryption = bucketMD.getServerSideEncryption(); + const isScalityManagedEncryptionKey = + serverSideEncryption && serverSideEncryption.algorithm === 'AES256'; + const isAccountEncryptionEnabled = bucketMD.isAccountEncryptionEnabled(); - /** - * If all of the following conditions are met, delete the master encryption key: - * - The encryption key is managed by Scality (not externally managed). - * - The encryption is bucket-specific (to prevent deleting default account encryption key). - */ - if (isScalityManagedEncryptionKey && !isAccountEncryptionEnabled) { - const masterKeyId = serverSideEncryption.masterKeyId; - return kms.destroyBucketKey(masterKeyId, log, cb); - } - return cb(); - }); - }); + /** + * If all of the following conditions are met, delete the master encryption key: + * - The encryption key is managed by Scality (not externally managed). + * - The encryption is bucket-specific (to prevent deleting default account encryption key). + */ + if (isScalityManagedEncryptionKey && !isAccountEncryptionEnabled) { + const masterKeyId = serverSideEncryption.masterKeyId; + return kms.destroyBucketKey(masterKeyId, log, cb); + } + return cb(); + }); + } + ); } module.exports = deleteBucket; diff --git a/lib/api/apiUtils/bucket/bucketEncryption.js b/lib/api/apiUtils/bucket/bucketEncryption.js index 8916e96f1b..d8a68d482a 100644 --- a/lib/api/apiUtils/bucket/bucketEncryption.js +++ b/lib/api/apiUtils/bucket/bucketEncryption.js @@ -11,7 +11,7 @@ const { isScalityKmsArn } = require('arsenal/build/lib/network/KMSInterface'); * @property {string} masterKeyId - Key id for the kms key used to encrypt data keys. * @property {string} configuredMasterKeyId - User configured master key id. * @property {boolean} mandatory - Whether a default encryption policy has been enabled. 
-*/ + */ /** * @callback ServerSideEncryptionInfo~callback @@ -37,9 +37,7 @@ function parseEncryptionXml(xml, log, cb) { return cb(errors.MalformedXML); } - if (!parsed - || !parsed.ServerSideEncryptionConfiguration - || !parsed.ServerSideEncryptionConfiguration.Rule) { + if (!parsed || !parsed.ServerSideEncryptionConfiguration || !parsed.ServerSideEncryptionConfiguration.Rule) { log.trace('error in sse config, invalid ServerSideEncryptionConfiguration section', { method: 'parseEncryptionXml', }); @@ -48,11 +46,13 @@ function parseEncryptionXml(xml, log, cb) { const { Rule } = parsed.ServerSideEncryptionConfiguration; - if (!Array.isArray(Rule) - || Rule.length > 1 - || !Rule[0] - || !Rule[0].ApplyServerSideEncryptionByDefault - || !Rule[0].ApplyServerSideEncryptionByDefault[0]) { + if ( + !Array.isArray(Rule) || + Rule.length > 1 || + !Rule[0] || + !Rule[0].ApplyServerSideEncryptionByDefault || + !Rule[0].ApplyServerSideEncryptionByDefault[0] + ) { log.trace('error in sse config, invalid ApplyServerSideEncryptionByDefault section', { method: 'parseEncryptionXml', }); @@ -84,8 +84,11 @@ function parseEncryptionXml(xml, log, cb) { log.trace('error in sse config, can not specify KMSMasterKeyID when using AES256', { method: 'parseEncryptionXml', }); - return cb(errorInstances.InvalidArgument.customizeDescription( - 'a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms')); + return cb( + errorInstances.InvalidArgument.customizeDescription( + 'a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms' + ) + ); } if (!encConfig.KMSMasterKeyID[0] || typeof encConfig.KMSMasterKeyID[0] !== 'string') { @@ -163,15 +166,17 @@ function parseObjectEncryptionHeaders(headers) { if (sseAlgorithm && sseAlgorithm !== 'AES256' && sseAlgorithm !== 'aws:kms') { return { - error: errorInstances.InvalidArgument - .customizeDescription('The encryption method specified is not supported'), + error: errorInstances.InvalidArgument.customizeDescription( + 'The encryption method specified is not supported' + ), }; } if (sseAlgorithm !== 'aws:kms' && configuredMasterKeyId) { return { error: errorInstances.InvalidArgument.customizeDescription( - 'a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms'), + 'a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms' + ), }; } return { objectSSE: hydrateEncryptionConfig(sseAlgorithm, configuredMasterKeyId) }; @@ -185,17 +190,13 @@ function parseObjectEncryptionHeaders(headers) { * @returns {undefined} */ function createDefaultBucketEncryptionMetadata(bucket, log, cb) { - return kms.bucketLevelEncryption( - bucket, - { algorithm: 'AES256', mandatory: false }, - log, - (error, sseConfig) => { - if (error) { - return cb(error); - } - bucket.setServerSideEncryption(sseConfig); - return metadata.updateBucket(bucket.getName(), bucket, log, err => cb(err, sseConfig)); - }); + return kms.bucketLevelEncryption(bucket, { algorithm: 'AES256', mandatory: false }, log, (error, sseConfig) => { + if (error) { + return cb(error); + } + bucket.setServerSideEncryption(sseConfig); + return metadata.updateBucket(bucket.getName(), bucket, log, err => cb(err, sseConfig)); + }); } /** diff --git a/lib/api/apiUtils/bucket/bucketShield.js b/lib/api/apiUtils/bucket/bucketShield.js index 483a092ee6..fe07b9eada 100644 --- a/lib/api/apiUtils/bucket/bucketShield.js +++ b/lib/api/apiUtils/bucket/bucketShield.js @@ -9,32 +9,32 @@ const constants = require('../../../../constants'); * @return {boolean} true if the 
bucket should be shielded, false otherwise */ function bucketShield(bucket, requestType) { - const invisiblyDeleteRequests = constants.bucketOwnerActions.concat( - [ - 'bucketGet', - 'bucketHead', - 'bucketGetACL', - 'objectGet', - 'objectGetACL', - 'objectHead', - 'objectPutACL', - 'objectDelete', - ]); - if (invisiblyDeleteRequests.indexOf(requestType) > -1 && - bucket.hasDeletedFlag()) { + const invisiblyDeleteRequests = constants.bucketOwnerActions.concat([ + 'bucketGet', + 'bucketHead', + 'bucketGetACL', + 'objectGet', + 'objectGetACL', + 'objectHead', + 'objectPutACL', + 'objectDelete', + ]); + if (invisiblyDeleteRequests.indexOf(requestType) > -1 && bucket.hasDeletedFlag()) { invisiblyDelete(bucket.getName(), bucket.getOwner()); return true; } - // If request is initiateMultipartUpload (requestType objectPut), - // objectPut, bucketPutACL or bucketDelete, proceed with request. - // Otherwise return an error to the client - if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) && - (requestType !== 'objectPut' && + // If request is initiateMultipartUpload (requestType objectPut), + // objectPut, bucketPutACL or bucketDelete, proceed with request. + // Otherwise return an error to the client + if ( + (bucket.hasDeletedFlag() || bucket.hasTransientFlag()) && + requestType !== 'objectPut' && requestType !== 'initiateMultipartUpload' && requestType !== 'objectPutPart' && requestType !== 'completeMultipartUpload' && requestType !== 'bucketPutACL' && - requestType !== 'bucketDelete')) { + requestType !== 'bucketDelete' + ) { return true; } return false; diff --git a/lib/api/apiUtils/bucket/bucketWebsite.js b/lib/api/apiUtils/bucket/bucketWebsite.js index e723e2387a..4fff5dbe88 100644 --- a/lib/api/apiUtils/bucket/bucketWebsite.js +++ b/lib/api/apiUtils/bucket/bucketWebsite.js @@ -2,8 +2,7 @@ const { parseString } = require('xml2js'); const { errors, errorInstances, s3middleware } = require('arsenal'); const escapeForXml = s3middleware.escapeForXml; -const { WebsiteConfiguration } = - require('arsenal').models.WebsiteConfiguration; +const { WebsiteConfiguration } = require('arsenal').models.WebsiteConfiguration; /* Format of xml request: @@ -30,31 +29,29 @@ const { WebsiteConfiguration } = */ - // Key names of redirect object values to check if are valid strings -const redirectValuesToCheck = ['HostName', 'ReplaceKeyPrefixWith', - 'ReplaceKeyWith']; +const redirectValuesToCheck = ['HostName', 'ReplaceKeyPrefixWith', 'ReplaceKeyWith']; /** Helper function for validating format of parsed xml element -* @param {array} elem - element to check -* @return {boolean} true / false - elem meets expected format -*/ + * @param {array} elem - element to check + * @return {boolean} true / false - elem meets expected format + */ function _isValidElem(elem) { - return (Array.isArray(elem) && elem.length === 1); + return Array.isArray(elem) && elem.length === 1; } /** Check if parsed xml element contains a specified child element -* @param {array} parent - represents xml element to check for child element -* @param {(string|string[])} requiredElem - name of child element(s) -* @param {object} [options] - specify additional options -* @param {boolean} [isList] - indicates if parent is list of children elements, -* used only in conjunction a singular requiredElem argument -* @param {boolean} [checkForAll] - return true only if parent element contains -* all children elements specified in requiredElem; by default, returns true if -* parent element contains at least one -* @param {boolean} [validateParent] 
- validate format of parent element -* @return {boolean} true / false - if parsed xml element contains child -*/ + * @param {array} parent - represents xml element to check for child element + * @param {(string|string[])} requiredElem - name of child element(s) + * @param {object} [options] - specify additional options + * @param {boolean} [isList] - indicates if parent is list of children elements, + * used only in conjunction a singular requiredElem argument + * @param {boolean} [checkForAll] - return true only if parent element contains + * all children elements specified in requiredElem; by default, returns true if + * parent element contains at least one + * @param {boolean} [validateParent] - validate format of parent element + * @return {boolean} true / false - if parsed xml element contains child + */ function xmlContainsElem(parent, requiredElem, options) { // Non-top level xml is parsed into object in the following manner. @@ -75,8 +72,7 @@ function xmlContainsElem(parent, requiredElem, options) { const checkForAll = options ? options.checkForAll : false; // true by default, validateParent only designated as false when // parent was validated in previous check - const validateParent = (options && options.validateParent !== undefined) ? - options.validateParent : true; + const validateParent = options && options.validateParent !== undefined ? options.validateParent : true; if (validateParent && !_isValidElem(parent)) { return false; @@ -88,8 +84,7 @@ function xmlContainsElem(parent, requiredElem, options) { return requiredElem.some(elem => _isValidElem(parent[0][elem])); } if (isList) { - if (!Array.isArray(parent[0][requiredElem]) || - parent[0][requiredElem].length === 0) { + if (!Array.isArray(parent[0][requiredElem]) || parent[0][requiredElem].length === 0) { return false; } } else { @@ -99,7 +94,6 @@ function xmlContainsElem(parent, requiredElem, options) { return true; } - /** Validate XML, returning an error if any part is not valid * @param {object} parsingResult - object parsed from xml to be validated * @param {object[]} parsingResult.IndexDocument - @@ -146,22 +140,19 @@ function _validateWebsiteConfigXml(parsingResult) { let errMsg; function _isValidString(value) { - return (typeof value === 'string' && value !== ''); + return typeof value === 'string' && value !== ''; } if (!parsingResult.IndexDocument && !parsingResult.RedirectAllRequestsTo) { - errMsg = 'Value for IndexDocument Suffix must be provided if ' + - 'RedirectAllRequestsTo is empty'; + errMsg = 'Value for IndexDocument Suffix must be provided if ' + 'RedirectAllRequestsTo is empty'; return errorInstances.InvalidArgument.customizeDescription(errMsg); } if (parsingResult.RedirectAllRequestsTo) { const parent = parsingResult.RedirectAllRequestsTo; const redirectAllObj = {}; - if (parsingResult.IndexDocument || parsingResult.ErrorDocument || - parsingResult.RoutingRules) { - errMsg = 'RedirectAllRequestsTo cannot be provided in ' + - 'conjunction with other Routing Rules.'; + if (parsingResult.IndexDocument || parsingResult.ErrorDocument || parsingResult.RoutingRules) { + errMsg = 'RedirectAllRequestsTo cannot be provided in ' + 'conjunction with other Routing Rules.'; return errorInstances.InvalidRequest.customizeDescription(errMsg); } if (!xmlContainsElem(parent, 'HostName')) { @@ -174,10 +165,10 @@ function _validateWebsiteConfigXml(parsingResult) { } redirectAllObj.hostName = parent[0].HostName[0]; if (xmlContainsElem(parent, 'Protocol', { validateParent: false })) { - if (parent[0].Protocol[0] !== 'http' 
&& - parent[0].Protocol[0] !== 'https') { - errMsg = 'Invalid protocol, protocol can be http or https. ' + - 'If not defined, the protocol will be selected automatically.'; + if (parent[0].Protocol[0] !== 'http' && parent[0].Protocol[0] !== 'https') { + errMsg = + 'Invalid protocol, protocol can be http or https. ' + + 'If not defined, the protocol will be selected automatically.'; return errorInstances.InvalidRequest.customizeDescription(errMsg); } redirectAllObj.protocol = parent[0].Protocol[0]; @@ -190,8 +181,7 @@ function _validateWebsiteConfigXml(parsingResult) { if (!xmlContainsElem(parent, 'Suffix')) { errMsg = 'IndexDocument is not well-formed'; return errorInstances.MalformedXML.customizeDescription(errMsg); - } else if (!_isValidString(parent[0].Suffix[0]) - || parent[0].Suffix[0].indexOf('/') !== -1) { + } else if (!_isValidString(parent[0].Suffix[0]) || parent[0].Suffix[0].indexOf('/') !== -1) { errMsg = 'IndexDocument Suffix is not well-formed'; return errorInstances.InvalidArgument.customizeDescription(errMsg); } @@ -221,8 +211,7 @@ function _validateWebsiteConfigXml(parsingResult) { const rule = parent[0].RoutingRule[i]; const ruleObj = { redirect: {} }; if (!_isValidElem(rule.Redirect)) { - errMsg = 'RoutingRule requires Redirect, which is ' + - 'missing or not well-formed'; + errMsg = 'RoutingRule requires Redirect, which is ' + 'missing or not well-formed'; return errorInstances.MalformedXML.customizeDescription(errMsg); } // Looks like AWS doesn't actually make this check, but AWS @@ -231,28 +220,35 @@ function _validateWebsiteConfigXml(parsingResult) { // elements to know how to implement a redirect for a rule. // http://docs.aws.amazon.com/AmazonS3/latest/API/ // RESTBucketPUTwebsite.html - if (!xmlContainsElem(rule.Redirect, ['Protocol', 'HostName', - 'ReplaceKeyPrefixWith', 'ReplaceKeyWith', 'HttpRedirectCode'], - { validateParent: false })) { - errMsg = 'Redirect must contain at least one of ' + - 'following: Protocol, HostName, ReplaceKeyPrefixWith, ' + - 'ReplaceKeyWith, or HttpRedirectCode element'; + if ( + !xmlContainsElem( + rule.Redirect, + ['Protocol', 'HostName', 'ReplaceKeyPrefixWith', 'ReplaceKeyWith', 'HttpRedirectCode'], + { validateParent: false } + ) + ) { + errMsg = + 'Redirect must contain at least one of ' + + 'following: Protocol, HostName, ReplaceKeyPrefixWith, ' + + 'ReplaceKeyWith, or HttpRedirectCode element'; return errorInstances.MalformedXML.customizeDescription(errMsg); } if (rule.Redirect[0].Protocol) { - if (!_isValidElem(rule.Redirect[0].Protocol) || - (rule.Redirect[0].Protocol[0] !== 'http' && - rule.Redirect[0].Protocol[0] !== 'https')) { - errMsg = 'Invalid protocol, protocol can be http or ' + - 'https. If not defined, the protocol will be selected ' + - 'automatically.'; + if ( + !_isValidElem(rule.Redirect[0].Protocol) || + (rule.Redirect[0].Protocol[0] !== 'http' && rule.Redirect[0].Protocol[0] !== 'https') + ) { + errMsg = + 'Invalid protocol, protocol can be http or ' + + 'https. If not defined, the protocol will be selected ' + + 'automatically.'; return errorInstances.InvalidRequest.customizeDescription(errMsg); } ruleObj.redirect.protocol = rule.Redirect[0].Protocol[0]; } if (rule.Redirect[0].HttpRedirectCode) { - errMsg = 'The provided HTTP redirect code is not valid. ' + - 'It should be a string containing a number.'; + errMsg = + 'The provided HTTP redirect code is not valid. 
' + 'It should be a string containing a number.'; if (!_isValidElem(rule.Redirect[0].HttpRedirectCode)) { return errorInstances.MalformedXML.customizeDescription(errMsg); } @@ -261,8 +257,8 @@ function _validateWebsiteConfigXml(parsingResult) { return errorInstances.MalformedXML.customizeDescription(errMsg); } if (!(code > 300 && code < 400)) { - errMsg = `The provided HTTP redirect code (${code}) is ` + - 'not valid. Valid codes are 3XX except 300'; + errMsg = + `The provided HTTP redirect code (${code}) is ` + 'not valid. Valid codes are 3XX except 300'; return errorInstances.InvalidRequest.customizeDescription(errMsg); } ruleObj.redirect.httpRedirectCode = code; @@ -273,57 +269,51 @@ function _validateWebsiteConfigXml(parsingResult) { if (elem) { if (!_isValidElem(elem) || !_isValidString(elem[0])) { errMsg = `Redirect ${elem} is not well-formed`; - return errorInstances.InvalidArgument - .customizeDescription(errMsg); + return errorInstances.InvalidArgument.customizeDescription(errMsg); } - ruleObj.redirect[`${elemName.charAt(0).toLowerCase()}` + - `${elemName.slice(1)}`] = elem[0]; + ruleObj.redirect[`${elemName.charAt(0).toLowerCase()}` + `${elemName.slice(1)}`] = elem[0]; } } - if (xmlContainsElem( - rule.Redirect, - ['ReplaceKeyPrefixWith', 'ReplaceKeyWith'], - { validateParent: false, checkForAll: true })) { - errMsg = 'Redirect must not contain both ReplaceKeyWith ' + - 'and ReplaceKeyPrefixWith'; + if ( + xmlContainsElem(rule.Redirect, ['ReplaceKeyPrefixWith', 'ReplaceKeyWith'], { + validateParent: false, + checkForAll: true, + }) + ) { + errMsg = 'Redirect must not contain both ReplaceKeyWith ' + 'and ReplaceKeyPrefixWith'; return errorInstances.InvalidRequest.customizeDescription(errMsg); } if (Array.isArray(rule.Condition) && rule.Condition.length === 1) { ruleObj.condition = {}; - if (!xmlContainsElem(rule.Condition, ['KeyPrefixEquals', - 'HttpErrorCodeReturnedEquals'])) { - errMsg = 'Condition is not well-formed. ' + - 'Condition should contain valid KeyPrefixEquals or ' + - 'HttpErrorCodeReturnEquals element.'; + if (!xmlContainsElem(rule.Condition, ['KeyPrefixEquals', 'HttpErrorCodeReturnedEquals'])) { + errMsg = + 'Condition is not well-formed. ' + + 'Condition should contain valid KeyPrefixEquals or ' + + 'HttpErrorCodeReturnEquals element.'; return errorInstances.InvalidRequest.customizeDescription(errMsg); } if (rule.Condition[0].KeyPrefixEquals) { const keyPrefixEquals = rule.Condition[0].KeyPrefixEquals; - if (!_isValidElem(keyPrefixEquals) || - !_isValidString(keyPrefixEquals[0])) { + if (!_isValidElem(keyPrefixEquals) || !_isValidString(keyPrefixEquals[0])) { errMsg = 'Condition KeyPrefixEquals is not well-formed'; - return errorInstances.InvalidArgument - .customizeDescription(errMsg); + return errorInstances.InvalidArgument.customizeDescription(errMsg); } ruleObj.condition.keyPrefixEquals = keyPrefixEquals[0]; } if (rule.Condition[0].HttpErrorCodeReturnedEquals) { - errMsg = 'The provided HTTP error code is not valid. ' + - 'It should be a string containing a number.'; - if (!_isValidElem(rule.Condition[0] - .HttpErrorCodeReturnedEquals)) { + errMsg = + 'The provided HTTP error code is not valid. 
' + 'It should be a string containing a number.'; + if (!_isValidElem(rule.Condition[0].HttpErrorCodeReturnedEquals)) { return errorInstances.MalformedXML.customizeDescription(errMsg); } - const code = parseInt(rule.Condition[0] - .HttpErrorCodeReturnedEquals[0], 10); + const code = parseInt(rule.Condition[0].HttpErrorCodeReturnedEquals[0], 10); if (Number.isNaN(code)) { return errorInstances.MalformedXML.customizeDescription(errMsg); } if (!(code > 399 && code < 600)) { - errMsg = `The provided HTTP error code (${code}) is ` + - 'not valid. Valid codes are 4XX or 5XX.'; - return errorInstances.InvalidRequest - .customizeDescription(errMsg); + errMsg = + `The provided HTTP error code (${code}) is ` + 'not valid. Valid codes are 4XX or 5XX.'; + return errorInstances.InvalidRequest.customizeDescription(errMsg); } ruleObj.condition.httpErrorCodeReturnedEquals = code; } @@ -350,8 +340,7 @@ function parseWebsiteConfigXml(xml, log, cb) { return cb(errorInstances.MalformedXML.customizeDescription(errMsg)); } - const validationRes = - _validateWebsiteConfigXml(result.WebsiteConfiguration); + const validationRes = _validateWebsiteConfigXml(result.WebsiteConfiguration); if (validationRes instanceof Error) { log.debug('xml validation failed', { error: validationRes, @@ -375,35 +364,27 @@ function convertToXml(config) { function _pushChildren(obj) { Object.keys(obj).forEach(element => { - const xmlElem = `${element.charAt(0).toUpperCase()}` + - `${element.slice(1)}`; + const xmlElem = `${element.charAt(0).toUpperCase()}` + `${element.slice(1)}`; xml.push(`<${xmlElem}>${escapeForXml(obj[element])}`); }); } - xml.push('', - ''); + xml.push( + '', + '' + ); if (indexDocument) { - xml.push('', - `${escapeForXml(indexDocument)}`, - ''); + xml.push('', `${escapeForXml(indexDocument)}`, ''); } if (errorDocument) { - xml.push('', - `${escapeForXml(errorDocument)}`, - ''); + xml.push('', `${escapeForXml(errorDocument)}`, ''); } if (redirectAllRequestsTo) { xml.push(''); if (redirectAllRequestsTo.hostName) { - xml.push('', - `${escapeForXml(redirectAllRequestsTo.hostName)}`, - ''); + xml.push('', `${escapeForXml(redirectAllRequestsTo.hostName)}`, ''); } if (redirectAllRequestsTo.protocol) { - xml.push('', - `${redirectAllRequestsTo.protocol}`, - ''); + xml.push('', `${redirectAllRequestsTo.protocol}`, ''); } xml.push(''); } diff --git a/lib/api/apiUtils/bucket/checkPreferredLocations.js b/lib/api/apiUtils/bucket/checkPreferredLocations.js index c717c079c5..1c384479f0 100644 --- a/lib/api/apiUtils/bucket/checkPreferredLocations.js +++ b/lib/api/apiUtils/bucket/checkPreferredLocations.js @@ -2,10 +2,10 @@ const { errorInstances } = require('arsenal'); function checkPreferredLocations(location, locationConstraints, log) { const retError = loc => { - const errMsg = 'value of the location you are attempting to set - ' + - `${loc} - is not listed in the locationConstraint config`; - log.trace(`locationConstraint is invalid - ${errMsg}`, - { locationConstraint: loc }); + const errMsg = + 'value of the location you are attempting to set - ' + + `${loc} - is not listed in the locationConstraint config`; + log.trace(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: loc }); return errorInstances.InvalidLocationConstraint.customizeDescription(errMsg); }; if (typeof location === 'string' && !locationConstraints[location]) { diff --git a/lib/api/apiUtils/bucket/createKeyForUserBucket.js b/lib/api/apiUtils/bucket/createKeyForUserBucket.js index 36f23c3706..f4d46e8042 100644 --- 
a/lib/api/apiUtils/bucket/createKeyForUserBucket.js +++ b/lib/api/apiUtils/bucket/createKeyForUserBucket.js @@ -1,5 +1,4 @@ -function createKeyForUserBucket(canonicalID, - splitter, bucketName) { +function createKeyForUserBucket(canonicalID, splitter, bucketName) { return `${canonicalID}${splitter}${bucketName}`; } diff --git a/lib/api/apiUtils/bucket/deleteUserBucketEntry.js b/lib/api/apiUtils/bucket/deleteUserBucketEntry.js index ff1846e963..8257a67785 100644 --- a/lib/api/apiUtils/bucket/deleteUserBucketEntry.js +++ b/lib/api/apiUtils/bucket/deleteUserBucketEntry.js @@ -1,42 +1,35 @@ const createKeyForUserBucket = require('./createKeyForUserBucket'); -const { usersBucket, oldUsersBucket, splitter, oldSplitter } = - require('../../../../constants'); +const { usersBucket, oldUsersBucket, splitter, oldSplitter } = require('../../../../constants'); const metadata = require('../../../metadata/wrapper'); function deleteUserBucketEntry(bucketName, canonicalID, log, cb) { - log.trace('deleting bucket name from users bucket', { method: - '_deleteUserBucketEntry' }); - const keyForUserBucket = createKeyForUserBucket(canonicalID, splitter, - bucketName); + log.trace('deleting bucket name from users bucket', { method: '_deleteUserBucketEntry' }); + const keyForUserBucket = createKeyForUserBucket(canonicalID, splitter, bucketName); metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => { // If the object representing the bucket is not in the // users bucket just continue if (error?.is.NoSuchKey) { return cb(null); - // BACKWARDS COMPATIBILITY: Remove this once no longer - // have old user bucket format + // BACKWARDS COMPATIBILITY: Remove this once no longer + // have old user bucket format } else if (error?.is.NoSuchBucket) { - const keyForUserBucket2 = createKeyForUserBucket(canonicalID, - oldSplitter, bucketName); - return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2, - {}, log, error => { - // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver - if (error && !error.NoSuchKey) { - log.error('from metadata while deleting user bucket', - { error }); - return cb(error); - } - log.trace('deleted bucket from user bucket', - { method: '_deleteUserBucketEntry' }); - return cb(null); - }); + const keyForUserBucket2 = createKeyForUserBucket(canonicalID, oldSplitter, bucketName); + return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2, {}, log, error => { + // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver + if (error && !error.NoSuchKey) { + log.error('from metadata while deleting user bucket', { error }); + return cb(error); + } + log.trace('deleted bucket from user bucket', { method: '_deleteUserBucketEntry' }); + return cb(null); + }); } else if (error) { - log.error('from metadata while deleting user bucket', { error, - method: '_deleteUserBucketEntry' }); + log.error('from metadata while deleting user bucket', { error, method: '_deleteUserBucketEntry' }); return cb(error); } log.trace('deleted bucket from user bucket', { - method: '_deleteUserBucketEntry' }); + method: '_deleteUserBucketEntry', + }); return cb(null); }); } diff --git a/lib/api/apiUtils/bucket/getNotificationConfiguration.js b/lib/api/apiUtils/bucket/getNotificationConfiguration.js index 1f7151235f..edd49448b5 100644 --- a/lib/api/apiUtils/bucket/getNotificationConfiguration.js +++ b/lib/api/apiUtils/bucket/getNotificationConfiguration.js @@ -10,8 +10,11 @@ function getNotificationConfiguration(parsedXml) { return notifConfig; } if 
(!config.bucketNotificationDestinations) { - return { error: errorInstances.InvalidArgument.customizeDescription( - 'Unable to validate the following destination configurations') }; + return { + error: errorInstances.InvalidArgument.customizeDescription( + 'Unable to validate the following destination configurations' + ), + }; } const targets = new Set(config.bucketNotificationDestinations.map(t => t.resource)); const notifConfigTargets = notifConfig.queueConfig.map(t => t.queueArn.split(':')[5]); diff --git a/lib/api/apiUtils/bucket/getReplicationConfiguration.js b/lib/api/apiUtils/bucket/getReplicationConfiguration.js index 8d9f2dfcfc..a96818798f 100644 --- a/lib/api/apiUtils/bucket/getReplicationConfiguration.js +++ b/lib/api/apiUtils/bucket/getReplicationConfiguration.js @@ -1,7 +1,6 @@ const config = require('../../../Config').config; const parseXML = require('../../../utilities/parseXML'); -const ReplicationConfiguration = - require('arsenal').models.ReplicationConfiguration; +const ReplicationConfiguration = require('arsenal').models.ReplicationConfiguration; // Handle the steps for returning a valid replication configuration object. function getReplicationConfiguration(xml, log, cb) { diff --git a/lib/api/apiUtils/bucket/invisiblyDelete.js b/lib/api/apiUtils/bucket/invisiblyDelete.js index 5ccf5cd329..2e39cc39a4 100644 --- a/lib/api/apiUtils/bucket/invisiblyDelete.js +++ b/lib/api/apiUtils/bucket/invisiblyDelete.js @@ -15,20 +15,17 @@ function invisiblyDelete(bucketName, canonicalID) { log.trace('deleting bucket with deleted flag invisibly', { bucketName }); return deleteUserBucketEntry(bucketName, canonicalID, log, err => { if (err) { - log.error('error invisibly deleting bucket name from user bucket', - { error: err }); + log.error('error invisibly deleting bucket name from user bucket', { error: err }); return log.end(); } log.trace('deleted bucket name from user bucket'); return metadata.deleteBucket(bucketName, log, error => { - log.trace('deleting bucket from metadata', - { method: 'invisiblyDelete' }); + log.trace('deleting bucket from metadata', { method: 'invisiblyDelete' }); if (error) { log.error('error deleting bucket from metadata', { error }); return log.end(); } - log.trace('invisible deletion of bucket succeeded', - { method: 'invisiblyDelete' }); + log.trace('invisible deletion of bucket succeeded', { method: 'invisiblyDelete' }); return log.end(); }); }); diff --git a/lib/api/apiUtils/bucket/parseWhere.js b/lib/api/apiUtils/bucket/parseWhere.js index 4275f9e1cb..83deccfced 100644 --- a/lib/api/apiUtils/bucket/parseWhere.js +++ b/lib/api/apiUtils/bucket/parseWhere.js @@ -38,7 +38,7 @@ const exprMapper = { '<': '$lt', '>=': '$gte', '<=': '$lte', - 'LIKE': '$regex', + LIKE: '$regex', }; /* @@ -53,18 +53,12 @@ function parseWhere(root) { const e1 = parseWhere(root[operator][0]); const e2 = parseWhere(root[operator][1]); - return { '$and' : [ - e1, - e2, - ] }; + return { $and: [e1, e2] }; } else if (operator === 'OR') { const e1 = parseWhere(root[operator][0]); const e2 = parseWhere(root[operator][1]); - return { '$or' : [ - e1, - e2, - ] }; + return { $or: [e1, e2] }; } const field = root[operator][0]; const value = root[operator][1]; diff --git a/lib/api/apiUtils/bucket/updateEncryption.js b/lib/api/apiUtils/bucket/updateEncryption.js index 5db3152423..515b360dd2 100644 --- a/lib/api/apiUtils/bucket/updateEncryption.js +++ b/lib/api/apiUtils/bucket/updateEncryption.js @@ -87,8 +87,7 @@ function updateObjectEncryption(bucket, objMD, objectKey, log, keyArnPrefix, opt 
if (opts.skipObjectUpdate) { return cb(null, bucket, objMD); } - return metadata.putObjectMD(bucket.getName(), objectKey, objMD, params, - log, err => cb(err, bucket, objMD)); + return metadata.putObjectMD(bucket.getName(), objectKey, objMD, params, log, err => cb(err, bucket, objMD)); } /** diff --git a/lib/api/apiUtils/bucket/validateReplicationConfig.js b/lib/api/apiUtils/bucket/validateReplicationConfig.js index e0e64df2ca..36807ae14b 100644 --- a/lib/api/apiUtils/bucket/validateReplicationConfig.js +++ b/lib/api/apiUtils/bucket/validateReplicationConfig.js @@ -24,8 +24,7 @@ function validateReplicationConfig(repConfig, bucket) { return true; } const storageClasses = rule.storageClass.split(','); - return storageClasses.some( - site => site.endsWith(':preferred_read')); + return storageClasses.some(site => site.endsWith(':preferred_read')); }); } diff --git a/lib/api/apiUtils/bucket/validateSearch.js b/lib/api/apiUtils/bucket/validateSearch.js index 9a2e33f94c..3f811a2796 100644 --- a/lib/api/apiUtils/bucket/validateSearch.js +++ b/lib/api/apiUtils/bucket/validateSearch.js @@ -20,7 +20,7 @@ const sqlConfig = { ], tokenizer: { shouldTokenize: ['(', ')', '=', '!=', '<', '>', '<=', '>=', '<>'], - shouldMatch: ['"', '\'', '`'], + shouldMatch: ['"', "'", '`'], shouldDelimitBy: [' ', '\n', '\r', '\t'], }, }; @@ -39,10 +39,12 @@ function _validateTree(whereClause, possibleAttributes) { _searchTree(node[operator][1]); } else { const field = node[operator][0]; - if (!field.startsWith('tags.') && + if ( + !field.startsWith('tags.') && !possibleAttributes[field] && !field.startsWith('replicationInfo.') && - !field.startsWith('x-amz-meta-')) { + !field.startsWith('x-amz-meta-') + ) { invalidAttribute = field; } } @@ -68,15 +70,14 @@ function validateSearchParams(searchParams) { // allow using 'replicationStatus' as search param to increase // ease of use, pending metadata search rework // eslint-disable-next-line no-param-reassign - searchParams = searchParams.replace( - 'replication-status', 'replicationInfo.status'); + searchParams = searchParams.replace('replication-status', 'replicationInfo.status'); ast = parser.parse(searchParams); } catch (e) { if (e) { return { - error: errorInstances.InvalidArgument - .customizeDescription('Invalid sql where clause ' + - 'sent as search query'), + error: errorInstances.InvalidArgument.customizeDescription( + 'Invalid sql where clause ' + 'sent as search query' + ), }; } } @@ -84,9 +85,10 @@ function validateSearchParams(searchParams) { const invalidAttribute = _validateTree(ast, possibleAttributes); if (invalidAttribute) { return { - error: errorInstances.InvalidArgument - .customizeDescription('Search param ' + - `contains unknown attribute: ${invalidAttribute}`) }; + error: errorInstances.InvalidArgument.customizeDescription( + 'Search param ' + `contains unknown attribute: ${invalidAttribute}` + ), + }; } return { ast, diff --git a/lib/api/apiUtils/object/abortMultipartUpload.js b/lib/api/apiUtils/object/abortMultipartUpload.js index 40e1f00c5f..ad94ab5482 100644 --- a/lib/api/apiUtils/object/abortMultipartUpload.js +++ b/lib/api/apiUtils/object/abortMultipartUpload.js @@ -3,14 +3,12 @@ const async = require('async'); const constants = require('../../../../constants'); const { data } = require('../../../data/wrapper'); const locationConstraintCheck = require('../object/locationConstraintCheck'); -const { standardMetadataValidateBucketAndObj } = - require('../../../metadata/metadataUtils'); +const { standardMetadataValidateBucketAndObj } = 
require('../../../metadata/metadataUtils'); const { validateQuotas } = require('../quotas/quotaUtils'); const services = require('../../../services'); const metadata = require('../../../metadata/wrapper'); -function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, - callback, request) { +function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, callback, request) { const metadataValMPUparams = { authInfo, bucketName, @@ -28,153 +26,176 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, metadataValParams.requestType = 'objectPut'; const authzIdentityResult = request ? request.actionImplicitDenies : false; - async.waterfall([ - function checkDestBucketVal(next) { - standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log, - (err, destinationBucket, objectMD) => { - if (err) { - log.error('error validating request', { error: err }); - return next(err, destinationBucket); - } - if (destinationBucket.policies) { - // TODO: Check bucket policies to see if user is granted - // permission or forbidden permission to take - // given action. - // If permitted, add 'bucketPolicyGoAhead' - // attribute to params for validating at MPU level. - // This is GH Issue#76 - metadataValMPUparams.requestType = - 'bucketPolicyGoAhead'; + async.waterfall( + [ + function checkDestBucketVal(next) { + standardMetadataValidateBucketAndObj( + metadataValParams, + authzIdentityResult, + log, + (err, destinationBucket, objectMD) => { + if (err) { + log.error('error validating request', { error: err }); + return next(err, destinationBucket); + } + if (destinationBucket.policies) { + // TODO: Check bucket policies to see if user is granted + // permission or forbidden permission to take + // given action. + // If permitted, add 'bucketPolicyGoAhead' + // attribute to params for validating at MPU level. 
+ // This is GH Issue#76 + metadataValMPUparams.requestType = 'bucketPolicyGoAhead'; + } + return next(null, destinationBucket, objectMD); } - return next(null, destinationBucket, objectMD); - }); - }, - function checkMPUval(destBucket, objectMD, next) { - metadataValParams.log = log; - services.metadataValidateMultipart(metadataValParams, - (err, mpuBucket, mpuOverviewObj) => { + ); + }, + function checkMPUval(destBucket, objectMD, next) { + metadataValParams.log = log; + services.metadataValidateMultipart(metadataValParams, (err, mpuBucket, mpuOverviewObj) => { if (err) { log.error('error validating multipart', { error: err }); return next(err, destBucket); } return next(err, mpuBucket, mpuOverviewObj, destBucket, objectMD); }); - }, - function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket, objectMD, - next) { - const location = mpuOverviewObj.controllingLocationConstraint; - const originalIdentityAuthzResults = request.actionImplicitDenies; - // eslint-disable-next-line no-param-reassign - delete request.actionImplicitDenies; - return data.abortMPU(objectKey, uploadId, location, bucketName, - request, destBucket, locationConstraintCheck, log, - (err, skipDataDelete) => { + }, + function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket, objectMD, next) { + const location = mpuOverviewObj.controllingLocationConstraint; + const originalIdentityAuthzResults = request.actionImplicitDenies; // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = originalIdentityAuthzResults; - if (err) { - log.error('error aborting MPU', { error: err }); - return next(err, destBucket); - } - // for Azure and GCP we do not need to delete data - // for all other backends, skipDataDelete will be set to false - return next(null, mpuBucket, destBucket, objectMD, skipDataDelete); - }); - }, - function getPartLocations(mpuBucket, destBucket, objectMD, skipDataDelete, - next) { - services.getMPUparts(mpuBucket.getName(), uploadId, log, - (err, result) => { + delete request.actionImplicitDenies; + return data.abortMPU( + objectKey, + uploadId, + location, + bucketName, + request, + destBucket, + locationConstraintCheck, + log, + (err, skipDataDelete) => { + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = originalIdentityAuthzResults; + if (err) { + log.error('error aborting MPU', { error: err }); + return next(err, destBucket); + } + // for Azure and GCP we do not need to delete data + // for all other backends, skipDataDelete will be set to false + return next(null, mpuBucket, destBucket, objectMD, skipDataDelete); + } + ); + }, + function getPartLocations(mpuBucket, destBucket, objectMD, skipDataDelete, next) { + services.getMPUparts(mpuBucket.getName(), uploadId, log, (err, result) => { if (err) { log.error('error getting parts', { error: err }); return next(err, destBucket); } const storedParts = result.Contents; - return next(null, mpuBucket, storedParts, destBucket, objectMD, - skipDataDelete); + return next(null, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete); }); - }, - function deleteObjectMetadata(mpuBucket, storedParts, destBucket, objectMD, skipDataDelete, next) { - if (!objectMD || metadataValMPUparams.uploadId !== objectMD.uploadId) { - return next(null, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete); - } - // In case there has been an error during cleanup after a complete MPU - // (e.g. failure to delete MPU MD in shadow bucket), - // we need to ensure that the MPU metadata is deleted. 
- log.debug('Object has existing metadata, deleting them', { - method: 'abortMultipartUpload', - bucketName, - objectKey, - uploadId, - versionId: objectMD.versionId, - }); - return metadata.deleteObjectMD(bucketName, objectKey, { versionId: objectMD.versionId }, log, err => { - if (err) { - log.error('error deleting object metadata', { error: err }); + }, + function deleteObjectMetadata(mpuBucket, storedParts, destBucket, objectMD, skipDataDelete, next) { + if (!objectMD || metadataValMPUparams.uploadId !== objectMD.uploadId) { + return next(null, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete); } - return next(err, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete); - }); - }, - function deleteData(mpuBucket, storedParts, destBucket, objectMD, - skipDataDelete, next) { - if (skipDataDelete) { - return next(null, mpuBucket, storedParts, destBucket); - } - // The locations were sent to metadata as an array - // under partLocations. Pull the partLocations. - const locations = storedParts.flatMap(item => item.value.partLocations); - if (locations.length === 0) { - return next(null, mpuBucket, storedParts, destBucket); - } - - if (objectMD && objectMD.location && objectMD.uploadId === metadataValMPUparams.uploadId) { - const existingLocations = new Set(locations.map(loc => loc.key)); - const remainingObjectLocations = objectMD.location.filter(loc => !existingLocations.has(loc.key)); - locations.push(...remainingObjectLocations); - } - - return async.eachLimit(locations, 5, (loc, cb) => { - data.delete(loc, log, err => { + // In case there has been an error during cleanup after a complete MPU + // (e.g. failure to delete MPU MD in shadow bucket), + // we need to ensure that the MPU metadata is deleted. + log.debug('Object has existing metadata, deleting them', { + method: 'abortMultipartUpload', + bucketName, + objectKey, + uploadId, + versionId: objectMD.versionId, + }); + return metadata.deleteObjectMD(bucketName, objectKey, { versionId: objectMD.versionId }, log, err => { if (err) { - log.fatal('delete ObjectPart failed', { err }); + log.error('error deleting object metadata', { error: err }); } - cb(); + return next(err, mpuBucket, storedParts, destBucket, objectMD, skipDataDelete); }); - }, () => { - const length = storedParts.reduce((length, loc) => length + loc.value.Size, 0); - return validateQuotas(request, destBucket, request.accountQuotas, - ['objectDelete'], 'objectDelete', -length, false, log, err => { - if (err) { - // Ignore error, as the data has been deleted already: only inflight count - // has not been updated, and will be eventually consistent anyway - log.warn('failed to update inflights', { - method: 'abortMultipartUpload', - locations, - error: err, - }); - } - next(null, mpuBucket, storedParts, destBucket); - }); - }); - }, - function deleteShadowObjectMetadata(mpuBucket, storedParts, destBucket, next) { - let splitter = constants.splitter; - // BACKWARD: Remove to remove the old splitter - if (mpuBucket.getMdBucketModelVersion() < 2) { - splitter = constants.oldSplitter; - } - // Reconstruct mpuOverviewKey - const mpuOverviewKey = - `overview${splitter}${objectKey}${splitter}${uploadId}`; + }, + function deleteData(mpuBucket, storedParts, destBucket, objectMD, skipDataDelete, next) { + if (skipDataDelete) { + return next(null, mpuBucket, storedParts, destBucket); + } + // The locations were sent to metadata as an array + // under partLocations. Pull the partLocations. 
+ const locations = storedParts.flatMap(item => item.value.partLocations); + if (locations.length === 0) { + return next(null, mpuBucket, storedParts, destBucket); + } + + if (objectMD && objectMD.location && objectMD.uploadId === metadataValMPUparams.uploadId) { + const existingLocations = new Set(locations.map(loc => loc.key)); + const remainingObjectLocations = objectMD.location.filter(loc => !existingLocations.has(loc.key)); + locations.push(...remainingObjectLocations); + } + + return async.eachLimit( + locations, + 5, + (loc, cb) => { + data.delete(loc, log, err => { + if (err) { + log.fatal('delete ObjectPart failed', { err }); + } + cb(); + }); + }, + () => { + const length = storedParts.reduce((length, loc) => length + loc.value.Size, 0); + return validateQuotas( + request, + destBucket, + request.accountQuotas, + ['objectDelete'], + 'objectDelete', + -length, + false, + log, + err => { + if (err) { + // Ignore error, as the data has been deleted already: only inflight count + // has not been updated, and will be eventually consistent anyway + log.warn('failed to update inflights', { + method: 'abortMultipartUpload', + locations, + error: err, + }); + } + next(null, mpuBucket, storedParts, destBucket); + } + ); + } + ); + }, + function deleteShadowObjectMetadata(mpuBucket, storedParts, destBucket, next) { + let splitter = constants.splitter; + // BACKWARD: Remove to remove the old splitter + if (mpuBucket.getMdBucketModelVersion() < 2) { + splitter = constants.oldSplitter; + } + // Reconstruct mpuOverviewKey + const mpuOverviewKey = `overview${splitter}${objectKey}${splitter}${uploadId}`; - // Get the sum of all part sizes to include in pushMetric object - const partSizeSum = storedParts.map(item => item.value.Size) - .reduce((currPart, nextPart) => currPart + nextPart, 0); - const keysToDelete = storedParts.map(item => item.key); - keysToDelete.push(mpuOverviewKey); - services.batchDeleteObjectMetadata(mpuBucket.getName(), - keysToDelete, log, err => next(err, destBucket, partSizeSum)); - }, - ], callback); + // Get the sum of all part sizes to include in pushMetric object + const partSizeSum = storedParts + .map(item => item.value.Size) + .reduce((currPart, nextPart) => currPart + nextPart, 0); + const keysToDelete = storedParts.map(item => item.key); + keysToDelete.push(mpuOverviewKey); + services.batchDeleteObjectMetadata(mpuBucket.getName(), keysToDelete, log, err => + next(err, destBucket, partSizeSum) + ); + }, + ], + callback + ); } module.exports = abortMultipartUpload; diff --git a/lib/api/apiUtils/object/applyZenkoUserMD.js b/lib/api/apiUtils/object/applyZenkoUserMD.js index 928b56b9f3..48e661804d 100644 --- a/lib/api/apiUtils/object/applyZenkoUserMD.js +++ b/lib/api/apiUtils/object/applyZenkoUserMD.js @@ -9,8 +9,7 @@ const _config = require('../../../Config').config; * @return {undefined} */ function applyZenkoUserMD(metaHeaders) { - if (process.env.REMOTE_MANAGEMENT_DISABLE === '0' && - !metaHeaders[zenkoIDHeader]) { + if (process.env.REMOTE_MANAGEMENT_DISABLE === '0' && !metaHeaders[zenkoIDHeader]) { // eslint-disable-next-line no-param-reassign metaHeaders[zenkoIDHeader] = _config.getPublicInstanceId(); } diff --git a/lib/api/apiUtils/object/checkHttpHeadersSize.js b/lib/api/apiUtils/object/checkHttpHeadersSize.js index 01cf136d7d..9d653c25e7 100644 --- a/lib/api/apiUtils/object/checkHttpHeadersSize.js +++ b/lib/api/apiUtils/object/checkHttpHeadersSize.js @@ -10,8 +10,7 @@ function checkHttpHeadersSize(requestHeaders) { let httpHeadersSize = 0; 
Object.keys(requestHeaders).forEach(header => { - httpHeadersSize += Buffer.byteLength(header, 'utf8') + - Buffer.byteLength(requestHeaders[header], 'utf8'); + httpHeadersSize += Buffer.byteLength(header, 'utf8') + Buffer.byteLength(requestHeaders[header], 'utf8'); }); if (httpHeadersSize > maxHttpHeadersSize) { diff --git a/lib/api/apiUtils/object/checkReadLocation.js b/lib/api/apiUtils/object/checkReadLocation.js index ce21fee04f..13713385de 100644 --- a/lib/api/apiUtils/object/checkReadLocation.js +++ b/lib/api/apiUtils/object/checkReadLocation.js @@ -11,10 +11,8 @@ function checkReadLocation(config, locationName, objectKey, bucketName) { const readLocation = config.getLocationConstraint(locationName); if (readLocation) { - const bucketMatch = readLocation.details && - readLocation.details.bucketMatch; - const backendKey = bucketMatch ? objectKey : - `${bucketName}/${objectKey}`; + const bucketMatch = readLocation.details && readLocation.details.bucketMatch; + const backendKey = bucketMatch ? objectKey : `${bucketName}/${objectKey}`; return { location: locationName, key: backendKey, diff --git a/lib/api/apiUtils/object/checkUserMetadataSize.js b/lib/api/apiUtils/object/checkUserMetadataSize.js index eb8ac11a9c..28e83e55a7 100644 --- a/lib/api/apiUtils/object/checkUserMetadataSize.js +++ b/lib/api/apiUtils/object/checkUserMetadataSize.js @@ -1,5 +1,4 @@ -const { maximumMetaHeadersSize, - invalidObjectUserMetadataHeader } = require('../../../../constants'); +const { maximumMetaHeadersSize, invalidObjectUserMetadataHeader } = require('../../../../constants'); /** * Checks the size of the user metadata in the object metadata and removes @@ -13,8 +12,7 @@ const { maximumMetaHeadersSize, function checkUserMetadataSize(responseMetadata) { let userMetadataSize = 0; // collect the user metadata keys from the object metadata - const userMetadataHeaders = Object.keys(responseMetadata) - .filter(key => key.startsWith('x-amz-meta-')); + const userMetadataHeaders = Object.keys(responseMetadata).filter(key => key.startsWith('x-amz-meta-')); // compute the size of all user metadata key and its value userMetadataHeaders.forEach(header => { userMetadataSize += header.length + responseMetadata[header].length; diff --git a/lib/api/apiUtils/object/coldStorage.js b/lib/api/apiUtils/object/coldStorage.js index dd33022c99..fa4f3f5111 100644 --- a/lib/api/apiUtils/object/coldStorage.js +++ b/lib/api/apiUtils/object/coldStorage.js @@ -17,12 +17,9 @@ const { scaledMsPerDay } = config.getTimeOptions(); * @returns {string|undefined} x-amz-restore */ function getAmzRestoreResHeader(objMD) { - if (objMD.archive && - objMD.archive.restoreRequestedAt && - !objMD.archive.restoreCompletedAt) { + if (objMD.archive && objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt) { // Avoid race condition by relying on the `archive` MD of the object // and return the right header after a RESTORE request. 
- // eslint-disable-next-line return `ongoing-request="true"`; } if (objMD['x-amz-restore']) { @@ -83,11 +80,10 @@ function _validateStartRestore(objectMD, log) { if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) { // return InvalidObjectState error if the restored object is expired // but restore info md of this object has not yet been cleared - log.debug('The restored object already expired.', - { - archive: objectMD.archive, - method: '_validateStartRestore', - }); + log.debug('The restored object already expired.', { + archive: objectMD.archive, + method: '_validateStartRestore', + }); return errors.InvalidObjectState; } @@ -100,21 +96,19 @@ function _validateStartRestore(objectMD, log) { if (!isLocationCold) { // return InvalidObjectState error if the object is not in cold storage, // not in cold storage means either location cold flag not exists or cold flag is explicit false - log.debug('The bucket of the object is not in a cold storage location.', - { - isLocationCold, - method: '_validateStartRestore', - }); + log.debug('The bucket of the object is not in a cold storage location.', { + isLocationCold, + method: '_validateStartRestore', + }); return errors.InvalidObjectState; } if (objectMD.archive?.restoreRequestedAt) { // return RestoreAlreadyInProgress error if the object is currently being restored // check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists - log.debug('The object is currently being restored.', - { - archive: objectMD.archive, - method: '_validateStartRestore', - }); + log.debug('The object is currently being restored.', { + archive: objectMD.archive, + method: '_validateStartRestore', + }); return errors.RestoreAlreadyInProgress; } return undefined; @@ -142,21 +136,24 @@ function validatePutVersionId(objMD, versionId, log) { const isLocationCold = locationConstraints[objMD.dataStoreName]?.isCold; if (!isLocationCold) { - log.error('The object data is not stored in a cold storage location.', - { - isLocationCold, - dataStoreName: objMD.dataStoreName, - method: 'validatePutVersionId', - }); + log.error('The object data is not stored in a cold storage location.', { + isLocationCold, + dataStoreName: objMD.dataStoreName, + method: 'validatePutVersionId', + }); return errors.InvalidObjectState; } // make sure object archive restoration is in progress // NOTE: we do not use putObjectVersion to update the restoration period. 
- if (!objMD.archive || !objMD.archive.restoreRequestedAt || !objMD.archive.restoreRequestedDays - || objMD.archive.restoreCompletedAt || objMD.archive.restoreWillExpireAt) { - log.error('object archive restoration is not in progress', - { method: 'validatePutVersionId', versionId }); + if ( + !objMD.archive || + !objMD.archive.restoreRequestedAt || + !objMD.archive.restoreRequestedDays || + objMD.archive.restoreCompletedAt || + objMD.archive.restoreWillExpireAt + ) { + log.error('object archive restoration is not in progress', { method: 'validatePutVersionId', versionId }); return errors.InvalidObjectState; } @@ -180,11 +177,11 @@ function _updateObjectExpirationDate(objectMD, log) { const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt; log.debug('The restore status of the object.', { isObjectAlreadyRestored, - method: 'isObjectAlreadyRestored' + method: 'isObjectAlreadyRestored', }); if (isObjectAlreadyRestored) { const expiryDate = new Date(objectMD.archive.restoreRequestedAt); - expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay)); + expiryDate.setTime(expiryDate.getTime() + objectMD.archive.restoreRequestedDays * scaledMsPerDay); /* eslint-disable no-param-reassign */ objectMD.archive.restoreWillExpireAt = expiryDate; @@ -209,9 +206,9 @@ function _updateObjectExpirationDate(objectMD, log) { */ function _updateRestoreInfo(objectMD, restoreParam, log) { if (!objectMD.archive) { - log.debug('objectMD.archive doesn\'t exits', { + log.debug("objectMD.archive doesn't exits", { objectMD, - method: '_updateRestoreInfo' + method: '_updateRestoreInfo', }); return errorInstances.InternalError.customizeDescription('Archive metadata is missing.'); } @@ -223,7 +220,7 @@ function _updateRestoreInfo(objectMD, restoreParam, log) { if (!ObjectMDArchive.isValid(objectMD.archive)) { log.debug('archive is not valid', { archive: objectMD.archive, - method: '_updateRestoreInfo' + method: '_updateRestoreInfo', }); return errorInstances.InternalError.customizeDescription('Invalid archive metadata.'); } @@ -249,7 +246,7 @@ function startRestore(objectMD, restoreParam, log, cb) { if (checkResultError) { log.debug('Restore cannot be done.', { error: checkResultError, - method: 'startRestore' + method: 'startRestore', }); return cb(checkResultError); } @@ -257,12 +254,12 @@ function startRestore(objectMD, restoreParam, log, cb) { if (updateResultError) { log.debug('Failed to update restore information.', { error: updateResultError, - method: 'startRestore' + method: 'startRestore', }); return cb(updateResultError); } log.debug('Validated and updated restore information', { - method: 'startRestore' + method: 'startRestore', }); const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log); return cb(null, isObjectAlreadyRestored); @@ -275,13 +272,16 @@ function startRestore(objectMD, restoreParam, log, cb) { */ function verifyColdObjectAvailable(objMD) { // return error when object is cold - if (objMD.archive && + if ( + objMD.archive && // Object is in cold backend (!objMD.archive.restoreRequestedAt || // Object is being restored - (objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) { - const err = errorInstances.InvalidObjectState - .customizeDescription('The operation is not valid for the object\'s storage class'); + (objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt)) + ) { + const err = errorInstances.InvalidObjectState.customizeDescription( + "The operation is not valid for the object's 
storage class" + ); return err; } return null; diff --git a/lib/api/apiUtils/object/corsResponse.js b/lib/api/apiUtils/object/corsResponse.js index 42dc478c21..bed3cf020a 100644 --- a/lib/api/apiUtils/object/corsResponse.js +++ b/lib/api/apiUtils/object/corsResponse.js @@ -1,9 +1,9 @@ /** _matchesValue - compare two values to determine if they match -* @param {string} allowedValue - an allowed value in a CORS rule; -* may contain wildcards -* @param {string} value - value from CORS request -* @return {boolean} - true/false -*/ + * @param {string} allowedValue - an allowed value in a CORS rule; + * may contain wildcards + * @param {string} value - value from CORS request + * @return {boolean} - true/false + */ function _matchesValue(allowedValue, value) { const wildcardIndex = allowedValue.indexOf('*'); // If no wildcards, simply return whether strings are equal @@ -14,27 +14,28 @@ function _matchesValue(allowedValue, value) { // and after the wildcard const beginValue = allowedValue.substring(0, wildcardIndex); const endValue = allowedValue.substring(wildcardIndex + 1); - return (value.startsWith(beginValue) && value.endsWith(endValue)); + return value.startsWith(beginValue) && value.endsWith(endValue); } /** _matchesOneOf - check if header matches any AllowedHeaders of a rule -* @param {string[]} allowedHeaders - headers allowed in CORS rule -* @param {string} header - header from CORS request -* @return {boolean} - true/false -*/ + * @param {string[]} allowedHeaders - headers allowed in CORS rule + * @param {string} header - header from CORS request + * @return {boolean} - true/false + */ function _matchesOneOf(allowedHeaders, header) { return allowedHeaders.some(allowedHeader => // AllowedHeaders may have been stored with uppercase letters // during putBucketCors; ignore case when searching for match - _matchesValue(allowedHeader.toLowerCase(), header)); + _matchesValue(allowedHeader.toLowerCase(), header) + ); } /** _headersMatchRule - check if headers match AllowedHeaders of rule -* @param {string[]} headers - the value of the 'Access-Control-Request-Headers' -* in an OPTIONS request -* @param {string[]} allowedHeaders - AllowedHeaders of a CORS rule -* @return {boolean} - true/false -*/ + * @param {string[]} headers - the value of the 'Access-Control-Request-Headers' + * in an OPTIONS request + * @param {string[]} allowedHeaders - AllowedHeaders of a CORS rule + * @return {boolean} - true/false + */ function _headersMatchRule(headers, allowedHeaders) { if (!allowedHeaders) { return false; @@ -46,33 +47,31 @@ function _headersMatchRule(headers, allowedHeaders) { } /** _findCorsRule - Return first matching rule in cors rules that permits -* CORS request -* @param {object[]} rules - array of rules -* @param {string} [rules.id] - optional id to identify rule -* @param {string[]} rules[].allowedMethods - methods allowed for CORS -* @param {string[]} rules[].allowedOrigins - origins allowed for CORS -* @param {string[]} [rules[].allowedHeaders] - headers allowed in an -* OPTIONS request via the Access-Control-Request-Headers header -* @param {number} [rules[].maxAgeSeconds] - seconds browsers should cache -* OPTIONS response -* @param {string[]} [rules[].exposeHeaders] - headers to expose to external -* applications -* @param {string} origin - origin of CORS request -* @param {string} method - Access-Control-Request-Method header value in -* an OPTIONS request and the actual method in any other request -* @param {string[]} [headers] - Access-Control-Request-Headers header value -* in a 
preflight CORS request -* @return {(null|object)} - matching rule if found; null if no match -*/ + * CORS request + * @param {object[]} rules - array of rules + * @param {string} [rules.id] - optional id to identify rule + * @param {string[]} rules[].allowedMethods - methods allowed for CORS + * @param {string[]} rules[].allowedOrigins - origins allowed for CORS + * @param {string[]} [rules[].allowedHeaders] - headers allowed in an + * OPTIONS request via the Access-Control-Request-Headers header + * @param {number} [rules[].maxAgeSeconds] - seconds browsers should cache + * OPTIONS response + * @param {string[]} [rules[].exposeHeaders] - headers to expose to external + * applications + * @param {string} origin - origin of CORS request + * @param {string} method - Access-Control-Request-Method header value in + * an OPTIONS request and the actual method in any other request + * @param {string[]} [headers] - Access-Control-Request-Headers header value + * in a preflight CORS request + * @return {(null|object)} - matching rule if found; null if no match + */ function findCorsRule(rules, origin, method, headers) { return rules.find(rule => { if (rule.allowedMethods.indexOf(method) === -1) { return false; - } else if (!rule.allowedOrigins.some(allowedOrigin => - _matchesValue(allowedOrigin, origin))) { + } else if (!rule.allowedOrigins.some(allowedOrigin => _matchesValue(allowedOrigin, origin))) { return false; - } else if (headers && - !_headersMatchRule(headers, rule.allowedHeaders)) { + } else if (headers && !_headersMatchRule(headers, rule.allowedHeaders)) { return false; } return true; @@ -80,32 +79,30 @@ function findCorsRule(rules, origin, method, headers) { } /** _gatherResHeaders - Collect headers to return in response -* @param {object} rule - array of rules -* @param {string} [rule.id] - optional id to identify rule -* @param {string[]} rule[].allowedMethods - methods allowed for CORS -* @param {string[]} rule[].allowedOrigins - origins allowed for CORS -* @param {string[]} [rule[].allowedHeaders] - headers allowed in an -* OPTIONS request via the Access-Control-Request-Headers header -* @param {number} [rule[].maxAgeSeconds] - seconds browsers should cache -* OPTIONS response -* @param {string[]} [rule[].exposeHeaders] - headers to expose to external -* applications -* @param {string} origin - origin of CORS request -* @param {string} method - Access-Control-Request-Method header value in -* an OPTIONS request and the actual method in any other request -* @param {string[]} [headers] - Access-Control-Request-Headers header value -* in a preflight CORS request -* @param {boolean} [isPreflight] - indicates if cors headers are being gathered -* for a CORS preflight request -* @return {object} resHeaders - headers to include in response -*/ -function generateCorsResHeaders(rule, origin, method, headers, -isPreflight) { + * @param {object} rule - array of rules + * @param {string} [rule.id] - optional id to identify rule + * @param {string[]} rule[].allowedMethods - methods allowed for CORS + * @param {string[]} rule[].allowedOrigins - origins allowed for CORS + * @param {string[]} [rule[].allowedHeaders] - headers allowed in an + * OPTIONS request via the Access-Control-Request-Headers header + * @param {number} [rule[].maxAgeSeconds] - seconds browsers should cache + * OPTIONS response + * @param {string[]} [rule[].exposeHeaders] - headers to expose to external + * applications + * @param {string} origin - origin of CORS request + * @param {string} method - 
Access-Control-Request-Method header value in + * an OPTIONS request and the actual method in any other request + * @param {string[]} [headers] - Access-Control-Request-Headers header value + * in a preflight CORS request + * @param {boolean} [isPreflight] - indicates if cors headers are being gathered + * for a CORS preflight request + * @return {object} resHeaders - headers to include in response + */ +function generateCorsResHeaders(rule, origin, method, headers, isPreflight) { const resHeaders = { 'access-control-max-age': rule.maxAgeSeconds, 'access-control-allow-methods': rule.allowedMethods.join(', '), - 'vary': - 'Origin, Access-Control-Request-Headers, Access-Control-Request-Method', + vary: 'Origin, Access-Control-Request-Headers, Access-Control-Request-Method', }; // send back '*' if any origin allowed; otherwise send back // request Origin value @@ -121,8 +118,7 @@ isPreflight) { resHeaders['access-control-allow-headers'] = headers.join(', '); } if (rule.exposeHeaders) { - resHeaders['access-control-expose-headers'] = - rule.exposeHeaders.join(', '); + resHeaders['access-control-expose-headers'] = rule.exposeHeaders.join(', '); } if (isPreflight) { resHeaders['content-length'] = '0'; diff --git a/lib/api/apiUtils/object/createAndStoreObject.js b/lib/api/apiUtils/object/createAndStoreObject.js index f702540696..073de588dd 100644 --- a/lib/api/apiUtils/object/createAndStoreObject.js +++ b/lib/api/apiUtils/object/createAndStoreObject.js @@ -11,30 +11,34 @@ const { versioningPreprocessing, overwritingVersioning, decodeVID } = require('. const removeAWSChunked = require('./removeAWSChunked'); const getReplicationInfo = require('./getReplicationInfo'); const { config } = require('../../../Config'); -const validateWebsiteHeader = require('./websiteServing') - .validateWebsiteHeader; +const validateWebsiteHeader = require('./websiteServing').validateWebsiteHeader; const applyZenkoUserMD = require('./applyZenkoUserMD'); const { externalBackends, versioningNotImplBackends } = constants; -const externalVersioningErrorMessage = 'We do not currently support putting ' + -'a versioned object to a location-constraint of type Azure or GCP.'; +const externalVersioningErrorMessage = + 'We do not currently support putting ' + 'a versioned object to a location-constraint of type Azure or GCP.'; -function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle, - metadataStoreParams, dataToDelete, log, requestMethod, callback) { - services.metadataStoreObject(bucketName, dataGetInfo, - cipherBundle, metadataStoreParams, (err, result) => { - if (err) { - return callback(err); - } - if (dataToDelete) { - const newDataStoreName = Array.isArray(dataGetInfo) ? - dataGetInfo[0].dataStoreName : null; - return data.batchDelete(dataToDelete, requestMethod, - newDataStoreName, log, err => callback(err, result)); - } - return callback(null, result); - }); +function _storeInMDandDeleteData( + bucketName, + dataGetInfo, + cipherBundle, + metadataStoreParams, + dataToDelete, + log, + requestMethod, + callback +) { + services.metadataStoreObject(bucketName, dataGetInfo, cipherBundle, metadataStoreParams, (err, result) => { + if (err) { + return callback(err); + } + if (dataToDelete) { + const newDataStoreName = Array.isArray(dataGetInfo) ? 
dataGetInfo[0].dataStoreName : null; + return data.batchDelete(dataToDelete, requestMethod, newDataStoreName, log, err => callback(err, result)); + } + return callback(null, result); + }); } /** createAndStoreObject - store data, store metadata, and delete old data @@ -59,9 +63,22 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle, * result.contentMD5 - content md5 of new object or version * result.versionId - unencrypted versionId returned by metadata */ -function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, - canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params, - overheadField, log, originOp, callback) { +function createAndStoreObject( + bucketName, + bucketMD, + objectKey, + objMD, + authInfo, + canonicalID, + cipherBundle, + request, + isDeleteMarker, + streamingV4Params, + overheadField, + log, + originOp, + callback +) { const putVersionId = request.headers['x-scal-s3-version-id']; const isPutVersion = putVersionId || putVersionId === ''; @@ -70,12 +87,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, // delete marker, for our purposes we consider this to be a 'PUT' // operation const requestMethod = 'PUT'; - const websiteRedirectHeader = - request.headers['x-amz-website-redirect-location']; + const websiteRedirectHeader = request.headers['x-amz-website-redirect-location']; if (!validateWebsiteHeader(websiteRedirectHeader)) { const err = errors.InvalidRedirectLocation; - log.debug('invalid x-amz-website-redirect-location' + - `value ${websiteRedirectHeader}`, { error: err }); + log.debug('invalid x-amz-website-redirect-location' + `value ${websiteRedirectHeader}`, { error: err }); return callback(err); } @@ -115,8 +130,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, size, headers, isDeleteMarker, - replicationInfo: getReplicationInfo(config, - objectKey, bucketMD, false, size, null, null, authInfo), + replicationInfo: getReplicationInfo(config, objectKey, bucketMD, false, size, null, null, authInfo), overheadField, log, }; @@ -138,18 +152,14 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, if (!isDeleteMarker) { metadataStoreParams.contentType = request.headers['content-type']; metadataStoreParams.cacheControl = request.headers['cache-control']; - metadataStoreParams.contentDisposition = - request.headers['content-disposition']; - metadataStoreParams.contentEncoding = - removeAWSChunked(request.headers['content-encoding']); + metadataStoreParams.contentDisposition = request.headers['content-disposition']; + metadataStoreParams.contentEncoding = removeAWSChunked(request.headers['content-encoding']); metadataStoreParams.expires = request.headers.expires; metadataStoreParams.tagging = request.headers['x-amz-tagging']; metadataStoreParams.originOp = originOp; - const defaultObjectLockConfiguration - = bucketMD.getObjectLockConfiguration(); + const defaultObjectLockConfiguration = bucketMD.getObjectLockConfiguration(); if (defaultObjectLockConfiguration) { - metadataStoreParams.defaultRetention - = defaultObjectLockConfiguration; + metadataStoreParams.defaultRetention = defaultObjectLockConfiguration; } } @@ -157,13 +167,11 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, // the object's location constraint metaheader to determine backend info if (isDeleteMarker && objMD) { // eslint-disable-next-line no-param-reassign - request.headers[constants.objectLocationConstraintHeader] = - 
objMD[constants.objectLocationConstraintHeader]; + request.headers[constants.objectLocationConstraintHeader] = objMD[constants.objectLocationConstraintHeader]; metadataStoreParams.originOp = originOp; } - const backendInfoObj = - locationConstraintCheck(request, null, bucketMD, log); + const backendInfoObj = locationConstraintCheck(request, null, bucketMD, log); if (backendInfoObj.err) { return process.nextTick(() => { callback(backendInfoObj.err); @@ -172,8 +180,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, const backendInfo = backendInfoObj.backendInfo; const location = backendInfo.getControllingLocationConstraint(); - const locationType = backendInfoObj.defaultedToDataBackend ? location : - config.getLocationConstraintType(location); + const locationType = backendInfoObj.defaultedToDataBackend ? location : config.getLocationConstraintType(location); metadataStoreParams.dataStoreName = location; if (versioningNotImplBackends[locationType]) { @@ -181,11 +188,9 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, const isVersionedObj = vcfg && vcfg.Status === 'Enabled'; if (isVersionedObj) { - log.debug(externalVersioningErrorMessage, - { method: 'createAndStoreObject', error: errors.NotImplemented }); + log.debug(externalVersioningErrorMessage, { method: 'createAndStoreObject', error: errors.NotImplemented }); return process.nextTick(() => { - callback(errorInstances.NotImplemented.customizeDescription( - externalVersioningErrorMessage)); + callback(errorInstances.NotImplemented.customizeDescription(externalVersioningErrorMessage)); }); } } @@ -211,119 +216,146 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, const mdOnlyHeader = request.headers['x-amz-meta-mdonly']; const mdOnlySize = request.headers['x-amz-meta-size']; - return async.waterfall([ - function storeData(next) { - if (size === 0) { - if (!dontSkipBackend[locationType]) { - metadataStoreParams.contentMD5 = constants.emptyFileMd5; - return next(null, null, null); - } - // Handle mdOnlyHeader as a metadata only operation. If - // the object in question is actually 0 byte or has a body size - // then handle normally. - if (mdOnlyHeader === 'true' && mdOnlySize > 0) { - log.debug('metadata only operation x-amz-meta-mdonly'); - const md5 = request.headers['x-amz-meta-md5chksum'] - ? 
new Buffer(request.headers['x-amz-meta-md5chksum'], - 'base64').toString('hex') : null; - const numParts = request.headers['x-amz-meta-md5numparts']; - let _md5; - if (numParts === undefined) { - _md5 = md5; - } else { - _md5 = `${md5}-${numParts}`; + return async.waterfall( + [ + function storeData(next) { + if (size === 0) { + if (!dontSkipBackend[locationType]) { + metadataStoreParams.contentMD5 = constants.emptyFileMd5; + return next(null, null, null); } - const versionId = request.headers['x-amz-meta-version-id']; - const dataGetInfo = { - key: objectKey, - dataStoreName: location, - dataStoreType: locationType, - dataStoreVersionId: versionId, - dataStoreMD5: _md5, - }; - return next(null, dataGetInfo, _md5); - } - } - return dataStore(objectKeyContext, cipherBundle, request, size, - streamingV4Params, backendInfo, log, next); - }, - function processDataResult(dataGetInfo, calculatedHash, next) { - if (dataGetInfo === null || dataGetInfo === undefined) { - return next(null, null); - } - // So that data retrieval information for MPU's and - // regular puts are stored in the same data structure, - // place the retrieval info here into a single element array - const { key, dataStoreName, dataStoreType, dataStoreETag, - dataStoreVersionId } = dataGetInfo; - const prefixedDataStoreETag = dataStoreETag - ? `1:${dataStoreETag}` - : `1:${calculatedHash}`; - const dataGetInfoArr = [{ key, size, start: 0, dataStoreName, - dataStoreType, dataStoreETag: prefixedDataStoreETag, - dataStoreVersionId }]; - if (cipherBundle) { - dataGetInfoArr[0].cryptoScheme = cipherBundle.cryptoScheme; - dataGetInfoArr[0].cipheredDataKey = - cipherBundle.cipheredDataKey; - } - if (mdOnlyHeader === 'true') { - metadataStoreParams.size = mdOnlySize; - dataGetInfoArr[0].size = mdOnlySize; - } - metadataStoreParams.contentMD5 = calculatedHash; - return next(null, dataGetInfoArr); - }, - function getVersioningInfo(infoArr, next) { - // if x-scal-s3-version-id header is specified, we overwrite the object/version metadata. - if (isPutVersion) { - const options = overwritingVersioning(objMD, metadataStoreParams); - return process.nextTick(() => next(null, options, infoArr)); - } - if (!bucketMD.isVersioningEnabled() && objMD?.archive?.archiveInfo) { - // Ensure we trigger a "delete" event in the oplog for the previously archived object - metadataStoreParams.needOplogUpdate = 's3:ReplaceArchivedObject'; - } - return versioningPreprocessing(bucketName, bucketMD, - metadataStoreParams.objectKey, objMD, log, (err, options) => { - if (err) { - // TODO: check AWS error when user requested a specific - // version before any versions have been put - const logLvl = err.is.BadRequest ? - 'debug' : 'error'; - log[logLvl]('error getting versioning info', { - error: err, - method: 'versioningPreprocessing', - }); + // Handle mdOnlyHeader as a metadata only operation. If + // the object in question is actually 0 byte or has a body size + // then handle normally. + if (mdOnlyHeader === 'true' && mdOnlySize > 0) { + log.debug('metadata only operation x-amz-meta-mdonly'); + const md5 = request.headers['x-amz-meta-md5chksum'] + ? 
new Buffer(request.headers['x-amz-meta-md5chksum'], 'base64').toString('hex') + : null; + const numParts = request.headers['x-amz-meta-md5numparts']; + let _md5; + if (numParts === undefined) { + _md5 = md5; + } else { + _md5 = `${md5}-${numParts}`; + } + const versionId = request.headers['x-amz-meta-version-id']; + const dataGetInfo = { + key: objectKey, + dataStoreName: location, + dataStoreType: locationType, + dataStoreVersionId: versionId, + dataStoreMD5: _md5, + }; + return next(null, dataGetInfo, _md5); } + } + return dataStore( + objectKeyContext, + cipherBundle, + request, + size, + streamingV4Params, + backendInfo, + log, + next + ); + }, + function processDataResult(dataGetInfo, calculatedHash, next) { + if (dataGetInfo === null || dataGetInfo === undefined) { + return next(null, null); + } + // So that data retrieval information for MPU's and + // regular puts are stored in the same data structure, + // place the retrieval info here into a single element array + const { key, dataStoreName, dataStoreType, dataStoreETag, dataStoreVersionId } = dataGetInfo; + const prefixedDataStoreETag = dataStoreETag ? `1:${dataStoreETag}` : `1:${calculatedHash}`; + const dataGetInfoArr = [ + { + key, + size, + start: 0, + dataStoreName, + dataStoreType, + dataStoreETag: prefixedDataStoreETag, + dataStoreVersionId, + }, + ]; + if (cipherBundle) { + dataGetInfoArr[0].cryptoScheme = cipherBundle.cryptoScheme; + dataGetInfoArr[0].cipheredDataKey = cipherBundle.cipheredDataKey; + } + if (mdOnlyHeader === 'true') { + metadataStoreParams.size = mdOnlySize; + dataGetInfoArr[0].size = mdOnlySize; + } + metadataStoreParams.contentMD5 = calculatedHash; + return next(null, dataGetInfoArr); + }, + function getVersioningInfo(infoArr, next) { + // if x-scal-s3-version-id header is specified, we overwrite the object/version metadata. + if (isPutVersion) { + const options = overwritingVersioning(objMD, metadataStoreParams); + return process.nextTick(() => next(null, options, infoArr)); + } + if (!bucketMD.isVersioningEnabled() && objMD?.archive?.archiveInfo) { + // Ensure we trigger a "delete" event in the oplog for the previously archived object + metadataStoreParams.needOplogUpdate = 's3:ReplaceArchivedObject'; + } + return versioningPreprocessing( + bucketName, + bucketMD, + metadataStoreParams.objectKey, + objMD, + log, + (err, options) => { + if (err) { + // TODO: check AWS error when user requested a specific + // version before any versions have been put + const logLvl = err.is.BadRequest ? 
'debug' : 'error'; + log[logLvl]('error getting versioning info', { + error: err, + method: 'versioningPreprocessing', + }); + } - const location = infoArr?.[0]?.dataStoreName; - if (location === bucketMD.getLocationConstraint() && bucketMD.isIngestionBucket()) { - // If the object is being written to the "ingested" storage location, keep the same - // versionId for consistency and to avoid creating an extra version when it gets - // ingested - const backendVersionId = decodeVID(infoArr[0].dataStoreVersionId); - if (!(backendVersionId instanceof Error)) { - options.versionId = backendVersionId; // eslint-disable-line no-param-reassign + const location = infoArr?.[0]?.dataStoreName; + if (location === bucketMD.getLocationConstraint() && bucketMD.isIngestionBucket()) { + // If the object is being written to the "ingested" storage location, keep the same + // versionId for consistency and to avoid creating an extra version when it gets + // ingested + const backendVersionId = decodeVID(infoArr[0].dataStoreVersionId); + if (!(backendVersionId instanceof Error)) { + options.versionId = backendVersionId; // eslint-disable-line no-param-reassign + } } - } - return next(err, options, infoArr); - }); - }, - function storeMDAndDeleteData(options, infoArr, next) { - metadataStoreParams.versionId = options.versionId; - metadataStoreParams.versioning = options.versioning; - metadataStoreParams.isNull = options.isNull; - metadataStoreParams.deleteNullKey = options.deleteNullKey; - if (options.extraMD) { - Object.assign(metadataStoreParams, options.extraMD); - } - return _storeInMDandDeleteData(bucketName, infoArr, - cipherBundle, metadataStoreParams, - options.dataToDelete, log, requestMethod, next); - }, - ], callback); + return next(err, options, infoArr); + } + ); + }, + function storeMDAndDeleteData(options, infoArr, next) { + metadataStoreParams.versionId = options.versionId; + metadataStoreParams.versioning = options.versioning; + metadataStoreParams.isNull = options.isNull; + metadataStoreParams.deleteNullKey = options.deleteNullKey; + if (options.extraMD) { + Object.assign(metadataStoreParams, options.extraMD); + } + return _storeInMDandDeleteData( + bucketName, + infoArr, + cipherBundle, + metadataStoreParams, + options.dataToDelete, + log, + requestMethod, + next + ); + }, + ], + callback + ); } module.exports = createAndStoreObject; diff --git a/lib/api/apiUtils/object/expirationHeaders.js b/lib/api/apiUtils/object/expirationHeaders.js index 6edcedeb9d..36361f4f95 100644 --- a/lib/api/apiUtils/object/expirationHeaders.js +++ b/lib/api/apiUtils/object/expirationHeaders.js @@ -1,16 +1,8 @@ const { LifecycleConfiguration } = require('arsenal').models; -const { - LifecycleDateTime, - LifecycleUtils, -} = require('arsenal').s3middleware.lifecycleHelpers; +const { LifecycleDateTime, LifecycleUtils } = require('arsenal').s3middleware.lifecycleHelpers; const { config } = require('../../../Config'); -const { - expireOneDayEarlier, - transitionOneDayEarlier, - timeProgressionFactor, - scaledMsPerDay, -} = config.getTimeOptions(); +const { expireOneDayEarlier, transitionOneDayEarlier, timeProgressionFactor, scaledMsPerDay } = config.getTimeOptions(); const lifecycleDateTime = new LifecycleDateTime({ transitionOneDayEarlier, @@ -21,7 +13,7 @@ const lifecycleDateTime = new LifecycleDateTime({ const lifecycleUtils = new LifecycleUtils(config.supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor); function calculateDate(objDate, expDays, datetime) { - return new 
Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay)); + return new Date(datetime.getTimestamp(objDate) + expDays * scaledMsPerDay); } function formatExpirationHeader(date, id) { @@ -35,13 +27,9 @@ const AMZ_ABORT_DATE_HEADER = 'x-amz-abort-date'; // format: x-amz-abort-rule-id: "rule id" const AMZ_ABORT_ID_HEADER = 'x-amz-abort-rule-id'; - function _generateExpHeadersObjects(rules, params, datetime) { const tags = { - TagSet: params.tags - ? Object.keys(params.tags) - .map(key => ({ Key: key, Value: params.tags[key] })) - : [], + TagSet: params.tags ? Object.keys(params.tags).map(key => ({ Key: key, Value: params.tags[key] })) : [], }; const objectInfo = { Key: params.key }; @@ -80,11 +68,7 @@ function _generateExpHeadresMPU(rules, params, datetime) { if (applicable.AbortIncompleteMultipartUpload) { const rule = applicable.AbortIncompleteMultipartUpload; - const date = calculateDate( - params.date, - rule.DaysAfterInitiation, - datetime - ); + const date = calculateDate(params.date, rule.DaysAfterInitiation, datetime); return { [AMZ_ABORT_ID_HEADER]: encodeURIComponent(rule.ID), diff --git a/lib/api/apiUtils/object/getReplicationBackendDataLocator.js b/lib/api/apiUtils/object/getReplicationBackendDataLocator.js index b5ba4956c8..e034ae12d7 100644 --- a/lib/api/apiUtils/object/getReplicationBackendDataLocator.js +++ b/lib/api/apiUtils/object/getReplicationBackendDataLocator.js @@ -26,25 +26,26 @@ const { errorInstances } = require('arsenal'); */ function getReplicationBackendDataLocator(locationObj, replicationInfo) { const repBackendResult = {}; - const locMatch = replicationInfo.backends.find( - backend => backend.site === locationObj.location); + const locMatch = replicationInfo.backends.find(backend => backend.site === locationObj.location); if (!locMatch) { - repBackendResult.error = errorInstances.InvalidLocationConstraint. 
- customizeDescription('Object is not replicated to location ' + - 'passed in location header'); + repBackendResult.error = errorInstances.InvalidLocationConstraint.customizeDescription( + 'Object is not replicated to location ' + 'passed in location header' + ); return repBackendResult; } repBackendResult.status = locMatch.status; if (['PENDING', 'FAILED'].includes(locMatch.status)) { - repBackendResult.reason = - `Object replication to specified backend is ${locMatch.status}`; + repBackendResult.reason = `Object replication to specified backend is ${locMatch.status}`; return repBackendResult; } - repBackendResult.dataLocator = [{ - key: locationObj.key, - dataStoreName: locationObj.location, - dataStoreType: locationObj.locationType, - dataStoreVersionId: locMatch.dataStoreVersionId }]; + repBackendResult.dataLocator = [ + { + key: locationObj.key, + dataStoreName: locationObj.location, + dataStoreType: locationObj.locationType, + dataStoreVersionId: locMatch.dataStoreVersionId, + }, + ]; return repBackendResult; } diff --git a/lib/api/apiUtils/object/getReplicationInfo.js b/lib/api/apiUtils/object/getReplicationInfo.js index b7e4afb25d..f9f8b92403 100644 --- a/lib/api/apiUtils/object/getReplicationInfo.js +++ b/lib/api/apiUtils/object/getReplicationInfo.js @@ -1,5 +1,4 @@ -const { isServiceAccount, getServiceAccountProperties } = - require('../authorization/permissionChecks'); +const { isServiceAccount, getServiceAccountProperties } = require('../authorization/permissionChecks'); const { replicationBackends } = require('arsenal').constants; function _getBackend(objectMD, site) { @@ -23,15 +22,13 @@ function _getStorageClasses(s3config, rule) { const { replicationEndpoints } = s3config; // If no storage class, use the given default endpoint or the sole endpoint if (replicationEndpoints.length > 0) { - const endPoint = - replicationEndpoints.find(endpoint => endpoint.default) || replicationEndpoints[0]; + const endPoint = replicationEndpoints.find(endpoint => endpoint.default) || replicationEndpoints[0]; return [endPoint.site]; } return undefined; } -function _getReplicationInfo(s3config, rule, replicationConfig, content, operationType, - objectMD, bucketMD) { +function _getReplicationInfo(s3config, rule, replicationConfig, content, operationType, objectMD, bucketMD) { const storageTypes = []; const backends = []; const storageClasses = _getStorageClasses(s3config, rule); @@ -39,9 +36,7 @@ function _getReplicationInfo(s3config, rule, replicationConfig, content, operati return undefined; } storageClasses.forEach(storageClass => { - const storageClassName = - storageClass.endsWith(':preferred_read') ? - storageClass.split(':')[0] : storageClass; + const storageClassName = storageClass.endsWith(':preferred_read') ? storageClass.split(':')[0] : storageClass; // TODO CLDSRV-646: for consistency, should we look at replicationEndpoints instead, like // `_getStorageClasses()` ? const location = s3config.locationConstraints[storageClassName]; @@ -80,8 +75,7 @@ function _getReplicationInfo(s3config, rule, replicationConfig, content, operati * @param {AuthInfo} [authInfo] - authentication info of object owner * @return {undefined} */ -function getReplicationInfo( - s3config, objKey, bucketMD, isMD, objSize, operationType, objectMD, authInfo) { +function getReplicationInfo(s3config, objKey, bucketMD, isMD, objSize, operationType, objectMD, authInfo) { const content = isMD || objSize === 0 ? 
['METADATA'] : ['DATA', 'METADATA']; const config = bucketMD.getReplicationConfiguration(); @@ -106,17 +100,14 @@ function getReplicationInfo( if (!authInfo || !isServiceAccount(authInfo.getCanonicalID())) { doReplicate = true; } else { - const serviceAccountProps = getServiceAccountProperties( - authInfo.getCanonicalID()); + const serviceAccountProps = getServiceAccountProperties(authInfo.getCanonicalID()); doReplicate = serviceAccountProps.canReplicate; } if (doReplicate) { - const rule = config.rules.find( - rule => (objKey.startsWith(rule.prefix) && rule.enabled)); + const rule = config.rules.find(rule => objKey.startsWith(rule.prefix) && rule.enabled); if (rule) { // TODO CLDSRV-646 : should "merge" the replicationInfo for different rules - return _getReplicationInfo( - s3config, rule, config, content, operationType, objectMD, bucketMD); + return _getReplicationInfo(s3config, rule, config, content, operationType, objectMD, bucketMD); } } } diff --git a/lib/api/apiUtils/object/lifecycle.js b/lib/api/apiUtils/object/lifecycle.js index c2e0d3de07..7884a9b502 100644 --- a/lib/api/apiUtils/object/lifecycle.js +++ b/lib/api/apiUtils/object/lifecycle.js @@ -7,12 +7,11 @@ const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = lifecycleListing; function _makeTags(tags) { const res = []; Object.entries(tags).forEach(([key, value]) => - res.push( - { - Key: key, - Value: value, - } - )); + res.push({ + Key: key, + Value: value, + }) + ); return res; } @@ -51,8 +50,7 @@ function processCurrents(bucketName, listParams, isBucketVersioned, list) { // NOTE: The current versions listed to be lifecycle should include version id // if the bucket is versioned. if (isBucketVersioned) { - const versionId = (v.IsNull || v.VersionId === undefined) ? - 'null' : versionIdUtils.encode(v.VersionId); + const versionId = v.IsNull || v.VersionId === undefined ? 'null' : versionIdUtils.encode(v.VersionId); content.VersionId = versionId; } @@ -90,8 +88,7 @@ function processNonCurrents(bucketName, listParams, list) { list.Contents.forEach(item => { const v = item.value; - const versionId = (v.IsNull || v.VersionId === undefined) ? - 'null' : versionIdUtils.encode(v.VersionId); + const versionId = v.IsNull || v.VersionId === undefined ? 'null' : versionIdUtils.encode(v.VersionId); const content = { Key: item.key, @@ -131,8 +128,7 @@ function processOrphans(bucketName, listParams, list) { list.Contents.forEach(item => { const v = item.value; - const versionId = (v.IsNull || v.VersionId === undefined) ? - 'null' : versionIdUtils.encode(v.VersionId); + const versionId = v.IsNull || v.VersionId === undefined ? 
'null' : versionIdUtils.encode(v.VersionId); data.Contents.push({ Key: item.key, LastModified: v.LastModified, @@ -150,8 +146,10 @@ function processOrphans(bucketName, listParams, list) { } function getLocationConstraintErrorMessage(locationName) { - return 'value of the location you are attempting to set ' + - `- ${locationName} - is not listed in the locationConstraint config`; + return ( + 'value of the location you are attempting to set ' + + `- ${locationName} - is not listed in the locationConstraint config` + ); } /** @@ -170,8 +168,11 @@ function validateMaxScannedEntries(params, config, min) { if (params['max-scanned-lifecycle-listing-entries']) { const maxEntriesParams = Number.parseInt(params['max-scanned-lifecycle-listing-entries'], 10); - if (Number.isNaN(maxEntriesParams) || maxEntriesParams < min || - maxEntriesParams > maxScannedLifecycleListingEntries) { + if ( + Number.isNaN(maxEntriesParams) || + maxEntriesParams < min || + maxEntriesParams > maxScannedLifecycleListingEntries + ) { return { isValid: false }; } diff --git a/lib/api/apiUtils/object/locationConstraintCheck.js b/lib/api/apiUtils/object/locationConstraintCheck.js index bc87fc249b..ca0dd7e773 100644 --- a/lib/api/apiUtils/object/locationConstraintCheck.js +++ b/lib/api/apiUtils/object/locationConstraintCheck.js @@ -20,28 +20,33 @@ function locationConstraintCheck(request, metaHeaders, bucket, log) { let objectLocationConstraint; if (metaHeaders) { - objectLocationConstraint = - metaHeaders[constants.objectLocationConstraintHeader]; + objectLocationConstraint = metaHeaders[constants.objectLocationConstraintHeader]; } else { - objectLocationConstraint = request - .headers[constants.objectLocationConstraintHeader]; + objectLocationConstraint = request.headers[constants.objectLocationConstraintHeader]; } const bucketLocationConstraint = bucket.getLocationConstraint(); const requestEndpoint = request.parsedHost; - const controllingBackend = BackendInfo.controllingBackendParam(config, - objectLocationConstraint, bucketLocationConstraint, - requestEndpoint, log); + const controllingBackend = BackendInfo.controllingBackendParam( + config, + objectLocationConstraint, + bucketLocationConstraint, + requestEndpoint, + log + ); if (!controllingBackend.isValid) { backendInfoObj = { - err: errorInstances.InvalidArgument.customizeDescription(controllingBackend. 
- description), + err: errorInstances.InvalidArgument.customizeDescription(controllingBackend.description), }; return backendInfoObj; } - const backendInfo = new BackendInfo(config, objectLocationConstraint, - bucketLocationConstraint, requestEndpoint, - controllingBackend.legacyLocationConstraint); + const backendInfo = new BackendInfo( + config, + objectLocationConstraint, + bucketLocationConstraint, + requestEndpoint, + controllingBackend.legacyLocationConstraint + ); backendInfoObj = { err: null, controllingLC: backendInfo.getControllingLocationConstraint(), diff --git a/lib/api/apiUtils/object/locationHeaderCheck.js b/lib/api/apiUtils/object/locationHeaderCheck.js index 6fc3ec24f1..2d68290897 100644 --- a/lib/api/apiUtils/object/locationHeaderCheck.js +++ b/lib/api/apiUtils/object/locationHeaderCheck.js @@ -19,11 +19,11 @@ function locationHeaderCheck(headers, objectKey, bucketName) { const validLocation = config.locationConstraints[location]; if (!validLocation) { return errorInstances.InvalidLocationConstraint.customizeDescription( - 'Invalid location constraint specified in header'); + 'Invalid location constraint specified in header' + ); } const bucketMatch = validLocation.details.bucketMatch; - const backendKey = bucketMatch ? objectKey : - `${bucketName}/${objectKey}`; + const backendKey = bucketMatch ? objectKey : `${bucketName}/${objectKey}`; return { location, key: backendKey, diff --git a/lib/api/apiUtils/object/locationKeysHaveChanged.js b/lib/api/apiUtils/object/locationKeysHaveChanged.js index 41c8560023..0f372cefdd 100644 --- a/lib/api/apiUtils/object/locationKeysHaveChanged.js +++ b/lib/api/apiUtils/object/locationKeysHaveChanged.js @@ -1,18 +1,18 @@ /** -* Check if all keys that exist in the current list which will be used -* in composing object are not present in the old object's list. -* -* This method can be used to check against accidentally removing data -* keys due to instability from the metadata layer, or for replay -* detection in general. -* -* @param {array|string|null} prev - list of keys from the object being -* overwritten -* @param {array|null} curr - list of keys to be used in composing -* current object -* @returns {boolean} true if no key in `curr` is present in `prev`, -* false otherwise -*/ + * Check if all keys that exist in the current list which will be used + * in composing object are not present in the old object's list. + * + * This method can be used to check against accidentally removing data + * keys due to instability from the metadata layer, or for replay + * detection in general. 
+ * + * @param {array|string|null} prev - list of keys from the object being + * overwritten + * @param {array|null} curr - list of keys to be used in composing + * current object + * @returns {boolean} true if no key in `curr` is present in `prev`, + * false otherwise + */ function locationKeysHaveChanged(prev, curr) { if (!prev || prev.length === 0 || !curr) { return true; diff --git a/lib/api/apiUtils/object/locationStorageCheck.js b/lib/api/apiUtils/object/locationStorageCheck.js index 88cbbb18a3..b730d60a34 100644 --- a/lib/api/apiUtils/object/locationStorageCheck.js +++ b/lib/api/apiUtils/object/locationStorageCheck.js @@ -1,8 +1,7 @@ const { errorInstances } = require('arsenal'); const { config } = require('../../../Config'); -const { getLocationMetric, pushLocationMetric } = - require('../../../utapi/utilities'); +const { getLocationMetric, pushLocationMetric } = require('../../../utapi/utilities'); function _gbToBytes(gb) { return gb * 1024 * 1024 * 1024; @@ -37,9 +36,11 @@ function locationStorageCheck(location, updateSize, log, cb) { const newStorageSize = parseInt(bytesStored, 10) + updateSize; const sizeLimitBytes = _gbToBytes(sizeLimitGB); if (sizeLimitBytes < newStorageSize) { - return cb(errorInstances.AccessDenied.customizeDescription( - `The assigned storage space limit for location ${location} ` + - 'will be exceeded')); + return cb( + errorInstances.AccessDenied.customizeDescription( + `The assigned storage space limit for location ${location} ` + 'will be exceeded' + ) + ); } return pushLocationMetric(location, updateSize, log, cb); }); diff --git a/lib/api/apiUtils/object/objectLockHelpers.js b/lib/api/apiUtils/object/objectLockHelpers.js index e2e18fc640..ff12cabeba 100644 --- a/lib/api/apiUtils/object/objectLockHelpers.js +++ b/lib/api/apiUtils/object/objectLockHelpers.js @@ -22,8 +22,7 @@ function calculateRetainUntilDate(retention) { // Calculate the number of days to retain the lock on the object const retainUntilDays = days || years * 365; const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay; - const retainUntilDate - = date.add(retainUntilDaysInMs, 'ms'); + const retainUntilDate = date.add(retainUntilDaysInMs, 'ms'); return retainUntilDate.toISOString(); } /** @@ -40,33 +39,26 @@ function validateHeaders(bucket, headers, log) { const objectLockMode = headers['x-amz-object-lock-mode']; // If retention headers or legal hold header present but // object lock is not enabled on the bucket return error - if ((objectLockDate || objectLockMode || objectLegalHold) - && !bucketObjectLockEnabled) { + if ((objectLockDate || objectLockMode || objectLegalHold) && !bucketObjectLockEnabled) { log.trace('bucket is missing ObjectLockConfiguration'); - return errorInstances.InvalidRequest.customizeDescription( - 'Bucket is missing ObjectLockConfiguration'); + return errorInstances.InvalidRequest.customizeDescription('Bucket is missing ObjectLockConfiguration'); } - if ((objectLockMode || objectLockDate) && - !(objectLockMode && objectLockDate)) { + if ((objectLockMode || objectLockDate) && !(objectLockMode && objectLockDate)) { return errorInstances.InvalidArgument.customizeDescription( - 'x-amz-object-lock-retain-until-date and ' + - 'x-amz-object-lock-mode must both be supplied', + 'x-amz-object-lock-retain-until-date and ' + 'x-amz-object-lock-mode must both be supplied' ); } const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']); if (objectLockMode && !validModes.has(objectLockMode)) { - return errorInstances.InvalidArgument.customizeDescription( - 'Unknown wormMode 
directive'); + return errorInstances.InvalidArgument.customizeDescription('Unknown wormMode directive'); } const validLegalHolds = new Set(['ON', 'OFF']); if (objectLegalHold && !validLegalHolds.has(objectLegalHold)) { - return errorInstances.InvalidArgument.customizeDescription( - 'Legal hold status must be one of "ON", "OFF"'); + return errorInstances.InvalidArgument.customizeDescription('Legal hold status must be one of "ON", "OFF"'); } const currentDate = new Date().toISOString(); if (objectLockMode && objectLockDate <= currentDate) { - return errorInstances.InvalidArgument.customizeDescription( - 'The retain until date must be in the future!'); + return errorInstances.InvalidArgument.customizeDescription('The retain until date must be in the future!'); } return null; } @@ -121,8 +113,7 @@ function compareObjectLockInformation(headers, defaultRetention) { function setObjectLockInformation(headers, md, defaultRetention) { // Stores retention information if object either has its own retention // configuration or default retention configuration from its bucket - const finalObjectLockInfo = - compareObjectLockInformation(headers, defaultRetention); + const finalObjectLockInfo = compareObjectLockInformation(headers, defaultRetention); if (finalObjectLockInfo.retentionInfo) { md.setRetentionMode(finalObjectLockInfo.retentionInfo.mode); md.setRetentionDate(finalObjectLockInfo.retentionInfo.date); @@ -261,7 +252,6 @@ function hasGovernanceBypassHeader(headers) { return bypassHeader.toLowerCase() === 'true'; } - /** * checkUserGovernanceBypass * @@ -276,10 +266,9 @@ function hasGovernanceBypassHeader(headers) { * @returns {undefined} - */ function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, cb) { - log.trace( - 'object in GOVERNANCE mode and is user, checking for attached policies', - { method: 'checkUserPolicyGovernanceBypass' }, - ); + log.trace('object in GOVERNANCE mode and is user, checking for attached policies', { + method: 'checkUserPolicyGovernanceBypass', + }); const authParams = auth.server.extractParams(request, log, 's3', request.query); const ip = policies.requestUtils.getClientIp(request, config); @@ -301,41 +290,41 @@ function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, signatureAge: authParams.params.data.signatureAge, }, }; - return vault.checkPolicies(requestContextParams, - authInfo.getArn(), log, (err, authorizationResults) => { - if (err) { - return cb(err); - } - const explicitDenyExists = authorizationResults.some( - authzResult => authzResult.isAllowed === false && !authzResult.isImplicit); - if (explicitDenyExists) { - log.trace('authorization check failed for user', - { - 'method': 'checkUserPolicyGovernanceBypass', - 's3:BypassGovernanceRetention': false, - }); - return cb(errors.AccessDenied); - } - // Convert authorization results into an easier to handle format - const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => { - const apiMethod = authorizationResults[idx].action; - // eslint-disable-next-line no-param-reassign - acc[apiMethod] = curr.isImplicit; - return acc; - }, {}); + return vault.checkPolicies(requestContextParams, authInfo.getArn(), log, (err, authorizationResults) => { + if (err) { + return cb(err); + } + const explicitDenyExists = authorizationResults.some( + authzResult => authzResult.isAllowed === false && !authzResult.isImplicit + ); + if (explicitDenyExists) { + log.trace('authorization check failed for user', { + method: 'checkUserPolicyGovernanceBypass', + 
's3:BypassGovernanceRetention': false, + }); + return cb(errors.AccessDenied); + } + // Convert authorization results into an easier to handle format + const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => { + const apiMethod = authorizationResults[idx].action; + // eslint-disable-next-line no-param-reassign + acc[apiMethod] = curr.isImplicit; + return acc; + }, {}); - // Evaluate against the bucket policies - const areAllActionsAllowed = evaluateBucketPolicyWithIAM( - bucketMD, - Object.keys(actionImplicitDenies), - authInfo.getCanonicalID(), - authInfo, - actionImplicitDenies, - log, - request); + // Evaluate against the bucket policies + const areAllActionsAllowed = evaluateBucketPolicyWithIAM( + bucketMD, + Object.keys(actionImplicitDenies), + authInfo.getCanonicalID(), + authInfo, + actionImplicitDenies, + log, + request + ); - return cb(areAllActionsAllowed === true ? null : errors.AccessDenied); - }); + return cb(areAllActionsAllowed === true ? null : errors.AccessDenied); + }); } module.exports = { diff --git a/lib/api/apiUtils/object/objectRestore.js b/lib/api/apiUtils/object/objectRestore.js index a893d12dba..f432de00e7 100644 --- a/lib/api/apiUtils/object/objectRestore.js +++ b/lib/api/apiUtils/object/objectRestore.js @@ -42,12 +42,11 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) { const decodedVidResult = decodeVersionId(request.query); if (decodedVidResult instanceof Error) { - log.trace('invalid versionId query', - { - method: METHOD, - versionId: request.query.versionId, - error: decodedVidResult, - }); + log.trace('invalid versionId query', { + method: METHOD, + versionId: request.query.versionId, + error: decodedVidResult, + }); return process.nextTick(() => callback(decodedVidResult)); } @@ -69,36 +68,41 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) { request, }; - return async.waterfall([ + return async.waterfall( + [ // get metadata of bucket and object function validateBucketAndObject(next) { - return mdUtils.standardMetadataValidateBucketAndObj(mdValueParams, request.actionImplicitDenies, - log, (err, bucketMD, objectMD) => { - if (err) { - log.trace('request authorization failed', { method: METHOD, error: err }); - return next(err); - } - // Call back error if object metadata could not be obtained - if (!objectMD) { - const err = decodedVidResult ? errors.NoSuchVersion : errors.NoSuchKey; - log.trace('error no object metadata found', { method: METHOD, error: err }); - return next(err, bucketMD); - } - // If object metadata is delete marker, - // call back NoSuchKey or MethodNotAllowed depending on specifying versionId - if (objectMD.isDeleteMarker) { - let err = errors.NoSuchKey; - if (decodedVidResult) { - err = errors.MethodNotAllowed; + return mdUtils.standardMetadataValidateBucketAndObj( + mdValueParams, + request.actionImplicitDenies, + log, + (err, bucketMD, objectMD) => { + if (err) { + log.trace('request authorization failed', { method: METHOD, error: err }); + return next(err); + } + // Call back error if object metadata could not be obtained + if (!objectMD) { + const err = decodedVidResult ? 
errors.NoSuchVersion : errors.NoSuchKey; + log.trace('error no object metadata found', { method: METHOD, error: err }); + return next(err, bucketMD); + } + // If object metadata is delete marker, + // call back NoSuchKey or MethodNotAllowed depending on specifying versionId + if (objectMD.isDeleteMarker) { + let err = errors.NoSuchKey; + if (decodedVidResult) { + err = errors.MethodNotAllowed; + } + log.trace('version is a delete marker', { method: METHOD, error: err }); + return next(err, bucketMD, objectMD); } - log.trace('version is a delete marker', { method: METHOD, error: err }); - return next(err, bucketMD, objectMD); + log.debug('acquired the object metadata.', { + method: METHOD, + }); + return next(null, bucketMD, objectMD); } - log.debug('acquired the object metadata.', { - 'method': METHOD, - }); - return next(null, bucketMD, objectMD); - }); + ); }, // generate restore param obj from xml of request body and check tier validity @@ -118,39 +122,47 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) { }, // start restore process function startRestore(bucketMD, objectMD, restoreInfo, next) { - return coldStorage.startRestore(objectMD, restoreInfo, log, - (err, _isObjectRestored) => { - isObjectRestored = _isObjectRestored; - return next(err, bucketMD, objectMD); - }); + return coldStorage.startRestore(objectMD, restoreInfo, log, (err, _isObjectRestored) => { + isObjectRestored = _isObjectRestored; + return next(err, bucketMD, objectMD); + }); }, function evaluateQuotas(bucketMD, objectMD, next) { if (isObjectRestored) { return next(null, bucketMD, objectMD); } - const actions = Array.isArray(mdValueParams.requestType) ? - mdValueParams.requestType : [mdValueParams.requestType]; + const actions = Array.isArray(mdValueParams.requestType) + ? mdValueParams.requestType + : [mdValueParams.requestType]; const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD); - return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes, - false, log, err => next(err, bucketMD, objectMD)); + return validateQuotas( + request, + bucketMD, + request.accountQuotas, + actions, + request.apiMethod, + bytes, + false, + log, + err => next(err, bucketMD, objectMD) + ); }, function updateObjectMD(bucketMD, objectMD, next) { const params = objectMD.versionId ? 
{ versionId: objectMD.versionId } : {}; - metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params, - log, err => next(err, bucketMD, objectMD)); + metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params, log, err => + next(err, bucketMD, objectMD) + ); }, ], (err, bucketMD) => { // generate CORS response header const responseHeaders = collectCorsHeaders(request.headers.origin, request.method, bucketMD); if (err) { - log.trace('error processing request', - { - method: METHOD, - error: err, - }); - monitoring.promMetrics( - 'POST', bucketName, err.code, 'restoreObject'); + log.trace('error processing request', { + method: METHOD, + error: err, + }); + monitoring.promMetrics('POST', bucketName, err.code, 'restoreObject'); return callback(err, err.code, responseHeaders); } pushMetric('restoreObject', log, { @@ -158,15 +170,13 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) { bucket: bucketName, }); if (isObjectRestored) { - monitoring.promMetrics( - 'POST', bucketName, '200', 'restoreObject'); + monitoring.promMetrics('POST', bucketName, '200', 'restoreObject'); return callback(null, 200, responseHeaders); } - monitoring.promMetrics( - 'POST', bucketName, '202', 'restoreObject'); + monitoring.promMetrics('POST', bucketName, '202', 'restoreObject'); return callback(null, 202, responseHeaders); - }); + } + ); } - module.exports = objectRestore; diff --git a/lib/api/apiUtils/object/parseCopySource.js b/lib/api/apiUtils/object/parseCopySource.js index a28770ded3..27a262dbc0 100644 --- a/lib/api/apiUtils/object/parseCopySource.js +++ b/lib/api/apiUtils/object/parseCopySource.js @@ -26,8 +26,7 @@ function parseCopySource(apiMethod, copySourceHeader) { // Pull the source bucket and source object separated by / const sourceBucket = source.slice(0, slashSeparator); const sourceObject = source.slice(slashSeparator + 1); - const sourceVersionId = - decodeVersionId(query ? querystring.parse(query) : undefined); + const sourceVersionId = decodeVersionId(query ? querystring.parse(query) : undefined); if (sourceVersionId instanceof Error) { const err = sourceVersionId; return { parsingError: err }; diff --git a/lib/api/apiUtils/object/partInfo.js b/lib/api/apiUtils/object/partInfo.js index c8715d3628..3480bd0090 100644 --- a/lib/api/apiUtils/object/partInfo.js +++ b/lib/api/apiUtils/object/partInfo.js @@ -6,8 +6,7 @@ */ function getPartNumber(query) { if (query && query.partNumber !== undefined) { - return Number.isNaN(query.partNumber) ? - 0 : Number.parseInt(query.partNumber, 10); + return Number.isNaN(query.partNumber) ? 
0 : Number.parseInt(query.partNumber, 10); } return undefined; } @@ -21,14 +20,12 @@ function getPartNumber(query) { function getPartSize(objMD, partNumber) { let size; let locationPartNumber; - if (partNumber && objMD && objMD.location - && objMD.location.length >= partNumber) { + if (partNumber && objMD && objMD.location && objMD.location.length >= partNumber) { const locations = []; for (let i = 0; i < objMD.location.length; i++) { const { dataStoreETag } = objMD.location[i]; if (dataStoreETag) { - locationPartNumber = - Number.parseInt(dataStoreETag.split(':')[0], 10); + locationPartNumber = Number.parseInt(dataStoreETag.split(':')[0], 10); } else { /** * Location objects prior to GA7.1 do not include the diff --git a/lib/api/apiUtils/object/prepareStream.js b/lib/api/apiUtils/object/prepareStream.js index 493e25f185..e1e7d8b49a 100644 --- a/lib/api/apiUtils/object/prepareStream.js +++ b/lib/api/apiUtils/object/prepareStream.js @@ -14,8 +14,7 @@ const TrailingChecksumTransform = require('../../../auth/streamingV4/trailingChe * the type of request requires them */ function prepareStream(stream, streamingV4Params, log, errCb) { - if (stream.headers['x-amz-content-sha256'] === - 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD') { + if (stream.headers['x-amz-content-sha256'] === 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD') { if (typeof streamingV4Params !== 'object') { // this might happen if the user provided a valid V2 // Authentication header, while the chunked upload method diff --git a/lib/api/apiUtils/object/setPartRanges.js b/lib/api/apiUtils/object/setPartRanges.js index 9a2587d1cf..f36a1273c6 100644 --- a/lib/api/apiUtils/object/setPartRanges.js +++ b/lib/api/apiUtils/object/setPartRanges.js @@ -28,8 +28,7 @@ function setPartRanges(dataLocations, outerRange) { // be allowed, so not an issue that size not modified here. if (dataLocations[0].size) { const partSize = parseInt(dataLocations[0].size, 10); - soleLocation.size = - Math.min(partSize, end - begin + 1).toString(); + soleLocation.size = Math.min(partSize, end - begin + 1).toString(); } parsedLocations.push(soleLocation); return parsedLocations; @@ -73,8 +72,7 @@ function setPartRanges(dataLocations, outerRange) { // Use full remaining part if remaining partSize is less // than byte range we need to satisfy. 
Or use byte range // we need to satisfy taking into account any startOffset - const endPart = Math.min(partSize - 1, - max - total + startOffset - 1); + const endPart = Math.min(partSize - 1, max - total + startOffset - 1); partWithRange.range = [startOffset, endPart]; // modify size to be stored for object put part copy partWithRange.size = (endPart - startOffset + 1).toString(); diff --git a/lib/api/apiUtils/object/setUpCopyLocator.js b/lib/api/apiUtils/object/setUpCopyLocator.js index a232fa3620..c44aff6065 100644 --- a/lib/api/apiUtils/object/setUpCopyLocator.js +++ b/lib/api/apiUtils/object/setUpCopyLocator.js @@ -1,8 +1,5 @@ const { errors, errorInstances } = require('arsenal'); -const { - parseRangeSpec, - parseRange, -} = require('arsenal').network.http.utils; +const { parseRangeSpec, parseRange } = require('arsenal').network.http.utils; const constants = require('../../../../constants'); const setPartRanges = require('./setPartRanges'); @@ -15,7 +12,8 @@ const setPartRanges = require('./setPartRanges'); function parseRangeHeader(header) { const { error } = parseRangeSpec(header); if (error) { - const description = 'The x-amz-copy-source-range value must be ' + + const description = + 'The x-amz-copy-source-range value must be ' + 'of the form bytes=first-last where first and last are the ' + 'zero-based offsets of the first and last bytes to copy'; return error.customizeDescription(description); @@ -42,21 +40,17 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) { // To provide for backwards compatibility before // md-model-version 2, need to handle cases where // objMD.location is just a string - dataLocator = Array.isArray(sourceObjMD.location) ? - sourceObjMD.location : [{ key: sourceObjMD.location }]; + dataLocator = Array.isArray(sourceObjMD.location) ? 
sourceObjMD.location : [{ key: sourceObjMD.location }]; } if (sourceObjMD['x-amz-server-side-encryption']) { for (let i = 0; i < dataLocator.length; i++) { - dataLocator[i].masterKeyId = - sourceObjMD['x-amz-server-side-encryption-aws-kms-key-id']; - dataLocator[i].algorithm = - sourceObjMD['x-amz-server-side-encryption']; + dataLocator[i].masterKeyId = sourceObjMD['x-amz-server-side-encryption-aws-kms-key-id']; + dataLocator[i].algorithm = sourceObjMD['x-amz-server-side-encryption']; } } - const sourceSize = - parseInt(sourceObjMD['content-length'], 10); + const sourceSize = parseInt(sourceObjMD['content-length'], 10); let copyObjectSize = sourceSize; if (rangeHeader) { const rangeHeaderError = parseRangeHeader(rangeHeader); @@ -70,15 +64,18 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) { // If have a data model before version 2, cannot // support get range copy (do not have size // stored with data locations) - if ((range && dataLocator.length >= 1) && - (dataLocator[0].start === undefined - || dataLocator[0].size === undefined)) { - log.trace('data model before version 2 so ' + - 'cannot support get range copy part'); - return { error: errorInstances.NotImplemented - .customizeDescription('Stored object ' + - 'has legacy data storage model so does' + - ' not support range headers on copy part'), + if ( + range && + dataLocator.length >= 1 && + (dataLocator[0].start === undefined || dataLocator[0].size === undefined) + ) { + log.trace('data model before version 2 so ' + 'cannot support get range copy part'); + return { + error: errorInstances.NotImplemented.customizeDescription( + 'Stored object ' + + 'has legacy data storage model so does' + + ' not support range headers on copy part' + ), }; } if (range) { @@ -87,8 +84,7 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) { } } if (copyObjectSize > constants.maximumAllowedPartSize) { - log.debug('copy part size too large', { sourceSize, rangeHeader, - copyObjectSize }); + log.debug('copy part size too large', { sourceSize, rangeHeader, copyObjectSize }); return { error: errors.EntityTooLarge }; } return { dataLocator, copyObjectSize }; diff --git a/lib/api/apiUtils/object/sseHeaders.js b/lib/api/apiUtils/object/sseHeaders.js index 8ed85a828e..6fdc60aee0 100644 --- a/lib/api/apiUtils/object/sseHeaders.js +++ b/lib/api/apiUtils/object/sseHeaders.js @@ -7,8 +7,9 @@ function setSSEHeaders(headers, algo, kmsKey) { headers['x-amz-server-side-encryption'] = algo; if (kmsKey && algo === 'aws:kms') { // eslint-disable-next-line no-param-reassign - headers['x-amz-server-side-encryption-aws-kms-key-id'] = - config.kmsHideScalityArn ? getKeyIdFromArn(kmsKey) : kmsKey; + headers['x-amz-server-side-encryption-aws-kms-key-id'] = config.kmsHideScalityArn + ? 
getKeyIdFromArn(kmsKey) + : kmsKey; } } } diff --git a/lib/api/apiUtils/object/storeObject.js b/lib/api/apiUtils/object/storeObject.js index 8beea03ecb..64f4afe2f9 100644 --- a/lib/api/apiUtils/object/storeObject.js +++ b/lib/api/apiUtils/object/storeObject.js @@ -55,8 +55,7 @@ function checkHashMatchMD5(stream, hashedStream, dataRetrievalInfo, log, cb) { * @param {function} cb - callback containing result for the next task * @return {undefined} */ -function dataStore(objectContext, cipherBundle, stream, size, - streamingV4Params, backendInfo, log, cb) { +function dataStore(objectContext, cipherBundle, stream, size, streamingV4Params, backendInfo, log, cb) { const cbOnce = jsutil.once(cb); const dataStreamTmp = prepareStream(stream, streamingV4Params, log, cbOnce); if (!dataStreamTmp) { @@ -64,7 +63,12 @@ function dataStore(objectContext, cipherBundle, stream, size, } const dataStream = stripTrailingChecksumStream(dataStreamTmp, log, cbOnce); return data.put( - cipherBundle, dataStream, size, objectContext, backendInfo, log, + cipherBundle, + dataStream, + size, + objectContext, + backendInfo, + log, (err, dataRetrievalInfo, hashedStream) => { if (err) { log.error('error in datastore', { @@ -81,9 +85,9 @@ function dataStore(objectContext, cipherBundle, stream, size, log.trace('dataStore: backend stored key', { dataRetrievalInfo, }); - return checkHashMatchMD5(stream, hashedStream, - dataRetrievalInfo, log, cbOnce); - }); + return checkHashMatchMD5(stream, hashedStream, dataRetrievalInfo, log, cbOnce); + } + ); } module.exports = { diff --git a/lib/api/apiUtils/object/validateChecksumHeaders.js b/lib/api/apiUtils/object/validateChecksumHeaders.js index d2a50395a3..9c079daaea 100644 --- a/lib/api/apiUtils/object/validateChecksumHeaders.js +++ b/lib/api/apiUtils/object/validateChecksumHeaders.js @@ -5,8 +5,10 @@ const { unsupportedSignatureChecksums, supportedSignatureChecksums } = require(' function validateChecksumHeaders(headers) { // If the x-amz-trailer header is present the request is using one of the // trailing checksum algorithms, which are not supported. 
- if (headers['x-amz-trailer'] !== undefined && - headers['x-amz-content-sha256'] !== 'STREAMING-UNSIGNED-PAYLOAD-TRAILER') { + if ( + headers['x-amz-trailer'] !== undefined && + headers['x-amz-content-sha256'] !== 'STREAMING-UNSIGNED-PAYLOAD-TRAILER' + ) { return errorInstances.BadRequest.customizeDescription('signed trailing checksum is not supported'); } diff --git a/lib/api/apiUtils/object/versioning.js b/lib/api/apiUtils/object/versioning.js index 9747f7f015..ee0c948640 100644 --- a/lib/api/apiUtils/object/versioning.js +++ b/lib/api/apiUtils/object/versioning.js @@ -10,8 +10,7 @@ const { scaledMsPerDay } = config.getTimeOptions(); const versionIdUtils = versioning.VersionID; // Use Arsenal function to generate a version ID used internally by metadata // for null versions that are created before bucket versioning is configured -const nonVersionedObjId = - versionIdUtils.getInfVid(config.replicationGroupId); +const nonVersionedObjId = versionIdUtils.getInfVid(config.replicationGroupId); /** decodeVID - decode the version id * @param {string} versionId - version ID @@ -100,24 +99,23 @@ function _storeNullVersionMD(bucketName, objKey, nullVersionId, objMD, log, cb) } metadata.putObjectMD(bucketName, objKey, nullVersionMD, { versionId }, log, err => { if (err) { - log.debug('error from metadata storing null version as new version', - { error: err }); + log.debug('error from metadata storing null version as new version', { error: err }); } cb(err); }); } /** check existence and get location of null version data for deletion -* @param {string} bucketName - name of bucket -* @param {string} objKey - name of object key -* @param {object} options - metadata options for getting object MD -* @param {string} options.versionId - version to get from metadata -* @param {object} mst - info about the master version -* @param {string} mst.versionId - the master version's version id -* @param {RequestLogger} log - logger instanceof -* @param {function} cb - callback -* @return {undefined} - and call callback with (err, dataToDelete) -*/ + * @param {string} bucketName - name of bucket + * @param {string} objKey - name of object key + * @param {object} options - metadata options for getting object MD + * @param {string} options.versionId - version to get from metadata + * @param {object} mst - info about the master version + * @param {string} mst.versionId - the master version's version id + * @param {RequestLogger} log - logger instanceof + * @param {function} cb - callback + * @return {undefined} - and call callback with (err, dataToDelete) + */ function _prepareNullVersionDeletion(bucketName, objKey, options, mst, log, cb) { const nullOptions = {}; if (!options.deleteData) { @@ -133,38 +131,40 @@ function _prepareNullVersionDeletion(bucketName, objKey, options, mst, log, cb) // PUT via this option nullOptions.deleteNullKey = true; } - return metadata.getObjectMD(bucketName, objKey, options, log, - (err, versionMD) => { - if (err) { - // the null key may not exist, hence it's a normal - // situation to have a NoSuchKey error, in which case - // there is nothing to delete - if (err.is.NoSuchKey) { - log.debug('null version does not exist', { - method: '_prepareNullVersionDeletion', - }); - } else { - log.warn('could not get null version metadata', { - error: err, - method: '_prepareNullVersionDeletion', - }); - } - return cb(err); - } - if (versionMD.location) { - const dataToDelete = Array.isArray(versionMD.location) ? 
- versionMD.location : [versionMD.location]; - nullOptions.dataToDelete = dataToDelete; + return metadata.getObjectMD(bucketName, objKey, options, log, (err, versionMD) => { + if (err) { + // the null key may not exist, hence it's a normal + // situation to have a NoSuchKey error, in which case + // there is nothing to delete + if (err.is.NoSuchKey) { + log.debug('null version does not exist', { + method: '_prepareNullVersionDeletion', + }); + } else { + log.warn('could not get null version metadata', { + error: err, + method: '_prepareNullVersionDeletion', + }); } - return cb(null, nullOptions); - }); + return cb(err); + } + if (versionMD.location) { + const dataToDelete = Array.isArray(versionMD.location) ? versionMD.location : [versionMD.location]; + nullOptions.dataToDelete = dataToDelete; + } + return cb(null, nullOptions); + }); } function _deleteNullVersionMD(bucketName, objKey, options, log, cb) { return metadata.deleteObjectMD(bucketName, objKey, options, log, err => { if (err) { - log.warn('metadata error deleting null versioned key', - { bucketName, objKey, error: err, method: '_deleteNullVersionMD' }); + log.warn('metadata error deleting null versioned key', { + bucketName, + objKey, + error: err, + method: '_deleteNullVersionMD', + }); } return cb(err); }); @@ -191,7 +191,7 @@ function _deleteNullVersionMD(bucketName, objKey, options, log, cb) { version key, if needed */ function processVersioningState(mst, vstat, nullVersionCompatMode) { - const versioningSuspended = (vstat === 'Suspended'); + const versioningSuspended = vstat === 'Suspended'; const masterIsNull = mst.exists && (mst.isNull || !mst.versionId); if (versioningSuspended) { @@ -242,7 +242,7 @@ function processVersioningState(mst, vstat, nullVersionCompatMode) { if (masterIsNull) { // if master is a null version or a non-versioned key, // copy it to a new null key - const nullVersionId = (mst.isNull && mst.versionId) ? mst.versionId : nonVersionedObjId; + const nullVersionId = mst.isNull && mst.versionId ? mst.versionId : nonVersionedObjId; if (nullVersionCompatMode) { options.extraMD = { nullVersionId, @@ -305,8 +305,7 @@ function getMasterState(objMD) { nullUploadId: objMD.nullUploadId, }; if (objMD.location) { - mst.objLocation = Array.isArray(objMD.location) ? - objMD.location : [objMD.location]; + mst.objLocation = Array.isArray(objMD.location) ? 
objMD.location : [objMD.location]; } return mst; } @@ -325,8 +324,7 @@ function getMasterState(objMD) { * options.versioning - (true/undefined) metadata instruction to create new ver * options.isNull - (true/undefined) whether new version is null or not */ -function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD, - log, callback) { +function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD, log, callback) { const mst = getMasterState(objMD); const vCfg = bucketMD.getVersioningConfiguration(); // bucket is not versioning configured @@ -335,47 +333,54 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD, return process.nextTick(callback, null, options); } // bucket is versioning configured - const { options, nullVersionId, delOptions } = - processVersioningState(mst, vCfg.Status, config.nullVersionCompatMode); - return async.series([ - function storeNullVersionMD(next) { - if (!nullVersionId) { - return process.nextTick(next); - } - return _storeNullVersionMD(bucketName, objectKey, nullVersionId, objMD, log, next); - }, - function prepareNullVersionDeletion(next) { - if (!delOptions) { - return process.nextTick(next); - } - return _prepareNullVersionDeletion( - bucketName, objectKey, delOptions, mst, log, - (err, nullOptions) => { + const { options, nullVersionId, delOptions } = processVersioningState( + mst, + vCfg.Status, + config.nullVersionCompatMode + ); + return async.series( + [ + function storeNullVersionMD(next) { + if (!nullVersionId) { + return process.nextTick(next); + } + return _storeNullVersionMD(bucketName, objectKey, nullVersionId, objMD, log, next); + }, + function prepareNullVersionDeletion(next) { + if (!delOptions) { + return process.nextTick(next); + } + return _prepareNullVersionDeletion(bucketName, objectKey, delOptions, mst, log, (err, nullOptions) => { if (err) { return next(err); } Object.assign(options, nullOptions); return next(); }); - }, - function deleteNullVersionMD(next) { - if (delOptions && - delOptions.versionId && - delOptions.versionId !== 'null') { - // backward-compat: delete old null versioned key - return _deleteNullVersionMD( - bucketName, objectKey, { versionId: delOptions.versionId, overheadField }, log, next); + }, + function deleteNullVersionMD(next) { + if (delOptions && delOptions.versionId && delOptions.versionId !== 'null') { + // backward-compat: delete old null versioned key + return _deleteNullVersionMD( + bucketName, + objectKey, + { versionId: delOptions.versionId, overheadField }, + log, + next + ); + } + return process.nextTick(next); + }, + ], + err => { + // it's possible there was a prior request that deleted the + // null version, so proceed with putting a new version + if (err && err.is.NoSuchKey) { + return callback(null, options); } - return process.nextTick(next); - }, - ], err => { - // it's possible there was a prior request that deleted the - // null version, so proceed with putting a new version - if (err && err.is.NoSuchKey) { - return callback(null, options); + return callback(err, options); } - return callback(err, options); - }); + ); } /** Return options to pass to Metadata layer for version-specific @@ -535,7 +540,7 @@ function overwritingVersioning(objMD, metadataStoreParams) { restoreRequestedAt: objMD.archive?.restoreRequestedAt, restoreRequestedDays: objMD.archive?.restoreRequestedDays, restoreCompletedAt: new Date(now), - restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)), + restoreWillExpireAt: new Date(now + days * scaledMsPerDay), }; /* 
eslint-enable no-param-reassign */ diff --git a/lib/api/apiUtils/object/websiteServing.js b/lib/api/apiUtils/object/websiteServing.js index d5f23f15ba..cf5a25f842 100644 --- a/lib/api/apiUtils/object/websiteServing.js +++ b/lib/api/apiUtils/object/websiteServing.js @@ -22,10 +22,8 @@ function findRoutingRule(routingRules, key, errCode) { // no error condition, will have match on first rule even if later // there is more specific rule with error condition. for (let i = 0; i < routingRules.length; i++) { - const prefixFromRule = - routingRules[i].getCondition().keyPrefixEquals; - const errorCodeFromRule = - routingRules[i].getCondition().httpErrorCodeReturnedEquals; + const prefixFromRule = routingRules[i].getCondition().keyPrefixEquals; + const errorCodeFromRule = routingRules[i].getCondition().httpErrorCodeReturnedEquals; if (prefixFromRule !== undefined) { if (!key.startsWith(prefixFromRule)) { // no key match, move on @@ -34,8 +32,7 @@ function findRoutingRule(routingRules, key, errCode) { // add the prefixFromRule to the redirect info // so we can replaceKeyPrefixWith if that is part of redirect // rule - const redirectInfo = Object.assign({ prefixFromRule }, - routingRules[i].getRedirect()); + const redirectInfo = Object.assign({ prefixFromRule }, routingRules[i].getRedirect()); // have key match so check error code match if (errorCodeFromRule !== undefined) { if (errCode === errorCodeFromRule) { @@ -51,8 +48,7 @@ function findRoutingRule(routingRules, key, errCode) { // we have an error code condition but no key condition if (errorCodeFromRule !== undefined) { if (errCode === errorCodeFromRule) { - const redirectInfo = Object.assign({}, - routingRules[i].getRedirect()); + const redirectInfo = Object.assign({}, routingRules[i].getRedirect()); return redirectInfo; } continue; @@ -97,8 +93,7 @@ function extractRedirectInfo(location) { * @return {boolean} true if valid, false if not */ function validateWebsiteHeader(header) { - return (!header || header.startsWith('/') || - header.startsWith('http://') || header.startsWith('https://')); + return !header || header.startsWith('/') || header.startsWith('http://') || header.startsWith('https://'); } /** @@ -115,10 +110,10 @@ function appendWebsiteIndexDocument(request, indexDocumentSuffix, force = false) // find index document if "directory" sent in request if (reqObjectKey.endsWith('/')) { request.objectKey += indexDocumentSuffix; - // find index document if no key provided + // find index document if no key provided } else if (reqObjectKey === '') { request.objectKey = indexDocumentSuffix; - // force for redirect 302 on folder without trailing / that has an index + // force for redirect 302 on folder without trailing / that has an index } else if (force) { request.objectKey += `/${indexDocumentSuffix}`; } diff --git a/lib/api/apiUtils/quotas/quotaUtils.js b/lib/api/apiUtils/quotas/quotaUtils.js index eb38a7befa..d2ea17cc7d 100644 --- a/lib/api/apiUtils/quotas/quotaUtils.js +++ b/lib/api/apiUtils/quotas/quotaUtils.js @@ -1,11 +1,7 @@ const async = require('async'); const { errors } = require('arsenal'); const monitoring = require('../../../utilities/monitoringHandler'); -const { - actionNeedQuotaCheckCopy, - actionNeedQuotaCheck, - actionWithDataDeletion, -} = require('arsenal').policies; +const { actionNeedQuotaCheckCopy, actionNeedQuotaCheck, actionWithDataDeletion } = require('arsenal').policies; const { config } = require('../../../Config'); const QuotaService = require('../../../quotas/quotas'); @@ -44,7 +40,7 @@ function 
processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, // but it also replaces the target, which decreases storage bytes -= getHotContentLength(destObjMD); } - } else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) { + } else if (!bucket.isVersioningEnabled() || (bucket.isVersioningEnabled() && versionId)) { // object is being deleted (non versioned) or hard-deleted (versioned, as indicated by // the `versionId` field) bytes = -getHotContentLength(objMD); @@ -68,8 +64,7 @@ function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, * @returns {boolean} Returns true if the metric is stale, false otherwise. */ function isMetricStale(metric, resourceType, resourceName, action, inflight, log) { - if (metric.date && Date.now() - new Date(metric.date).getTime() > - QuotaService.maxStaleness) { + if (metric.date && Date.now() - new Date(metric.date).getTime() > QuotaService.maxStaleness) { log.warn('Stale metrics from the quota service, allowing the request', { resourceType, resourceName, @@ -105,75 +100,92 @@ function _evaluateQuotas( inflightForCheck, action, log, - callback, + callback ) { let bucketQuotaExceeded = false; let accountQuotaExceeded = false; const creationDate = new Date(bucket.getCreationDate()).getTime(); - return async.parallel({ - bucketQuota: parallelDone => { - if (bucketQuota > 0) { - return QuotaService.getUtilizationMetrics('bucket', - `${bucket.getName()}_${creationDate}`, null, { - action, - inflight, - }, (err, bucketMetrics) => { - if (err || inflight < 0) { - return parallelDone(err); - } - if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) && - BigInt(bucketMetrics.bytesTotal || 0) + BigInt(inflightForCheck || 0) > bucketQuota) { - log.debug('Bucket quota exceeded', { - bucket: bucket.getName(), + return async.parallel( + { + bucketQuota: parallelDone => { + if (bucketQuota > 0) { + return QuotaService.getUtilizationMetrics( + 'bucket', + `${bucket.getName()}_${creationDate}`, + null, + { action, inflight, - quota: bucketQuota, - bytesTotal: bucketMetrics.bytesTotal, - }); - bucketQuotaExceeded = true; - } - return parallelDone(); - }); - } - return parallelDone(); - }, - accountQuota: parallelDone => { - if (accountQuota > 0 && account?.account) { - return QuotaService.getUtilizationMetrics('account', - account.account, null, { - action, - inflight, - }, (err, accountMetrics) => { - if (err || inflight < 0) { - return parallelDone(err); - } - // Metrics are served as BigInt strings - if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) && - BigInt(accountMetrics.bytesTotal || 0) + BigInt(inflightForCheck || 0) > accountQuota) { - log.debug('Account quota exceeded', { - accountId: account.account, + }, + (err, bucketMetrics) => { + if (err || inflight < 0) { + return parallelDone(err); + } + if ( + !isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) && + BigInt(bucketMetrics.bytesTotal || 0) + BigInt(inflightForCheck || 0) > bucketQuota + ) { + log.debug('Bucket quota exceeded', { + bucket: bucket.getName(), + action, + inflight, + quota: bucketQuota, + bytesTotal: bucketMetrics.bytesTotal, + }); + bucketQuotaExceeded = true; + } + return parallelDone(); + } + ); + } + return parallelDone(); + }, + accountQuota: parallelDone => { + if (accountQuota > 0 && account?.account) { + return QuotaService.getUtilizationMetrics( + 'account', + account.account, + null, + { action, inflight, - quota: accountQuota, 
- bytesTotal: accountMetrics.bytesTotal, - }); - accountQuotaExceeded = true; - } - return parallelDone(); + }, + (err, accountMetrics) => { + if (err || inflight < 0) { + return parallelDone(err); + } + // Metrics are served as BigInt strings + if ( + !isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) && + BigInt(accountMetrics.bytesTotal || 0) + BigInt(inflightForCheck || 0) > accountQuota + ) { + log.debug('Account quota exceeded', { + accountId: account.account, + action, + inflight, + quota: accountQuota, + bytesTotal: accountMetrics.bytesTotal, + }); + accountQuotaExceeded = true; + } + return parallelDone(); + } + ); + } + return parallelDone(); + }, + }, + err => { + if (err) { + log.warn('Error evaluating quotas', { + error: err.name, + description: err.message, + isInflightDeletion: inflight < 0, }); } - return parallelDone(); - }, - }, err => { - if (err) { - log.warn('Error evaluating quotas', { - error: err.name, - description: err.message, - isInflightDeletion: inflight < 0, - }); + return callback(err, bucketQuotaExceeded, accountQuotaExceeded); } - return callback(err, bucketQuotaExceeded, accountQuotaExceeded); - }); + ); } /** @@ -186,11 +198,13 @@ function _evaluateQuotas( * @returns {undefined} - Returns nothing. */ function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) { - monitoring.quotaEvaluationDuration.labels({ - action: apiMethod, - type, - code, - }).observe(duration / 1e9); + monitoring.quotaEvaluationDuration + .labels({ + action: apiMethod, + type, + code, + }) + .observe(duration / 1e9); } /** @@ -248,76 +262,103 @@ function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, inflight = 0; } - return async.forEach(apiNames, (apiName, done) => { - // Object copy operations first check the target object, - // meaning the source object, containing the current bytes, - // is checked second. This logic handles these APIs calls by - // ensuring the bytes are positives (i.e., not an object - // replacement). - if (actionNeedQuotaCheckCopy(apiName, apiMethod)) { - // eslint-disable-next-line no-param-reassign - inflight = Math.abs(inflight); - } else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) { - return done(); - } - // When inflights are disabled, the sum of the current utilization metrics - // and the current bytes are compared with the quota. The current bytes - // are not sent to the utilization service. When inflights are enabled, - // the sum of the current utilization metrics only are compared with the - // quota. They include the current inflight bytes sent in the request. - let _inflights = shouldSendInflights ? inflight : undefined; - const inflightForCheck = shouldSendInflights ? 0 : inflight; - return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights, - inflightForCheck, apiName, log, - (err, _bucketQuotaExceeded, _accountQuotaExceeded) => { - if (err) { - return done(err); - } + return async.forEach( + apiNames, + (apiName, done) => { + // Object copy operations first check the target object, + // meaning the source object, containing the current bytes, + // is checked second. This logic handles these APIs calls by + // ensuring the bytes are positives (i.e., not an object + // replacement). 
+ if (actionNeedQuotaCheckCopy(apiName, apiMethod)) { + // eslint-disable-next-line no-param-reassign + inflight = Math.abs(inflight); + } else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) { + return done(); + } + // When inflights are disabled, the sum of the current utilization metrics + // and the current bytes are compared with the quota. The current bytes + // are not sent to the utilization service. When inflights are enabled, + // the sum of the current utilization metrics only are compared with the + // quota. They include the current inflight bytes sent in the request. + let _inflights = shouldSendInflights ? inflight : undefined; + const inflightForCheck = shouldSendInflights ? 0 : inflight; + return _evaluateQuotas( + bucketQuota, + accountQuota, + bucket, + account, + _inflights, + inflightForCheck, + apiName, + log, + (err, _bucketQuotaExceeded, _accountQuotaExceeded) => { + if (err) { + return done(err); + } - bucketQuotaExceeded = _bucketQuotaExceeded; - accountQuotaExceeded = _accountQuotaExceeded; + bucketQuotaExceeded = _bucketQuotaExceeded; + accountQuotaExceeded = _accountQuotaExceeded; - // Inflights are inverted: in case of cleanup, we just re-issue - // the same API call. - if (_inflights) { - _inflights = -_inflights; - } + // Inflights are inverted: in case of cleanup, we just re-issue + // the same API call. + if (_inflights) { + _inflights = -_inflights; + } - request.finalizerHooks.push((errorFromAPI, _done) => { - const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200; - const quotaCleanUpStartTime = process.hrtime.bigint(); - // Quotas are cleaned only in case of error in the API - async.waterfall([ - cb => { - if (errorFromAPI) { - return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights, - null, apiName, log, cb); + request.finalizerHooks.push((errorFromAPI, _done) => { + const code = bucketQuotaExceeded || accountQuotaExceeded ? 
429 : 200; + const quotaCleanUpStartTime = process.hrtime.bigint(); + // Quotas are cleaned only in case of error in the API + async.waterfall( + [ + cb => { + if (errorFromAPI) { + return _evaluateQuotas( + bucketQuota, + accountQuota, + bucket, + account, + _inflights, + null, + apiName, + log, + cb + ); + } + return cb(); + }, + ], + () => { + monitorQuotaEvaluationDuration( + apiMethod, + type, + code, + quotaEvaluationDuration + Number(process.hrtime.bigint() - quotaCleanUpStartTime) + ); + return _done(); } - return cb(); - }, - ], () => { - monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration + - Number(process.hrtime.bigint() - quotaCleanUpStartTime)); - return _done(); + ); }); - }); - return done(); - }); - }, err => { - quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime); - if (err) { - log.warn('Error getting metrics from the quota service, allowing the request', { - error: err.name, - description: err.message, - }); - } - if (!actionWithDataDeletion[apiMethod] && - (bucketQuotaExceeded || accountQuotaExceeded)) { - return callback(errors.QuotaExceeded); + return done(); + } + ); + }, + err => { + quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime); + if (err) { + log.warn('Error getting metrics from the quota service, allowing the request', { + error: err.name, + description: err.message, + }); + } + if (!actionWithDataDeletion[apiMethod] && (bucketQuotaExceeded || accountQuotaExceeded)) { + return callback(errors.QuotaExceeded); + } + return callback(); } - return callback(); - }); + ); } module.exports = { diff --git a/lib/api/backbeat/listLifecycleCurrents.js b/lib/api/backbeat/listLifecycleCurrents.js index 4799418847..d6a93b2cf9 100644 --- a/lib/api/backbeat/listLifecycleCurrents.js +++ b/lib/api/backbeat/listLifecycleCurrents.js @@ -4,12 +4,14 @@ const services = require('../../services'); const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils'); const { pushMetric } = require('../../utapi/utilities'); const monitoring = require('../../utilities/monitoringHandler'); -const { getLocationConstraintErrorMessage, processCurrents, - validateMaxScannedEntries } = require('../apiUtils/object/lifecycle'); +const { + getLocationConstraintErrorMessage, + processCurrents, + validateMaxScannedEntries, +} = require('../apiUtils/object/lifecycle'); const { config } = require('../../Config'); -function handleResult(listParams, requestMaxKeys, authInfo, - bucketName, list, isBucketVersioned, log, callback) { +function handleResult(listParams, requestMaxKeys, authInfo, bucketName, list, isBucketVersioned, log, callback) { // eslint-disable-next-line no-param-reassign listParams.maxKeys = requestMaxKeys; const res = processCurrents(bucketName, listParams, isBucketVersioned, list); @@ -35,18 +37,19 @@ function listLifecycleCurrents(authInfo, locationConstraints, request, log, call const bucketName = request.bucketName; log.debug('processing request', { method: 'listLifecycleCurrents' }); - const requestMaxKeys = params['max-keys'] ? - Number.parseInt(params['max-keys'], 10) : 1000; + const requestMaxKeys = params['max-keys'] ? 
Number.parseInt(params['max-keys'], 10) : 1000; if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) { - monitoring.promMetrics( - 'GET', bucketName, 400, 'listLifecycleCurrents'); + monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents'); return callback(errors.InvalidArgument); } const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys); const minEntriesToBeScanned = 1; - const { isValid, maxScannedLifecycleListingEntries } = - validateMaxScannedEntries(params, config, minEntriesToBeScanned); + const { isValid, maxScannedLifecycleListingEntries } = validateMaxScannedEntries( + params, + config, + minEntriesToBeScanned + ); if (!isValid) { monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents'); return callback(errors.InvalidArgument); @@ -80,8 +83,7 @@ function listLifecycleCurrents(authInfo, locationConstraints, request, log, call return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { if (err) { log.debug('error processing request', { method: 'metadataValidateBucket', error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'listLifecycleCurrents'); + monitoring.promMetrics('GET', bucketName, err.code, 'listLifecycleCurrents'); return callback(err, null); } @@ -93,21 +95,35 @@ function listLifecycleCurrents(authInfo, locationConstraints, request, log, call Contents: [], IsTruncated: false, }; - return handleResult(listParams, requestMaxKeys, authInfo, - bucketName, emptyList, isBucketVersioned, log, callback); + return handleResult( + listParams, + requestMaxKeys, + authInfo, + bucketName, + emptyList, + isBucketVersioned, + log, + callback + ); } - return services.getLifecycleListing(bucketName, listParams, log, - (err, list) => { + return services.getLifecycleListing(bucketName, listParams, log, (err, list) => { if (err) { log.debug('error processing request', { method: 'services.getLifecycleListing', error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'listLifecycleCurrents'); + monitoring.promMetrics('GET', bucketName, err.code, 'listLifecycleCurrents'); return callback(err, null); } - return handleResult(listParams, requestMaxKeys, authInfo, - bucketName, list, isBucketVersioned, log, callback); + return handleResult( + listParams, + requestMaxKeys, + authInfo, + bucketName, + list, + isBucketVersioned, + log, + callback + ); }); }); } diff --git a/lib/api/backbeat/listLifecycleNonCurrents.js b/lib/api/backbeat/listLifecycleNonCurrents.js index e1808be3d8..9a17ba8b72 100644 --- a/lib/api/backbeat/listLifecycleNonCurrents.js +++ b/lib/api/backbeat/listLifecycleNonCurrents.js @@ -5,12 +5,14 @@ const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils const { pushMetric } = require('../../utapi/utilities'); const versionIdUtils = versioning.VersionID; const monitoring = require('../../utilities/monitoringHandler'); -const { getLocationConstraintErrorMessage, processNonCurrents, - validateMaxScannedEntries } = require('../apiUtils/object/lifecycle'); +const { + getLocationConstraintErrorMessage, + processNonCurrents, + validateMaxScannedEntries, +} = require('../apiUtils/object/lifecycle'); const { config } = require('../../Config'); -function handleResult(listParams, requestMaxKeys, authInfo, - bucketName, list, log, callback) { +function handleResult(listParams, requestMaxKeys, authInfo, bucketName, list, log, callback) { // eslint-disable-next-line no-param-reassign listParams.maxKeys = requestMaxKeys; const res = 
processNonCurrents(bucketName, listParams, list); @@ -36,11 +38,9 @@ function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, c const bucketName = request.bucketName; log.debug('processing request', { method: 'listLifecycleNonCurrents' }); - const requestMaxKeys = params['max-keys'] ? - Number.parseInt(params['max-keys'], 10) : 1000; + const requestMaxKeys = params['max-keys'] ? Number.parseInt(params['max-keys'], 10) : 1000; if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) { - monitoring.promMetrics( - 'GET', bucketName, 400, 'listLifecycleNonCurrents'); + monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents'); return callback(errors.InvalidArgument); } const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys); @@ -48,8 +48,11 @@ function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, c // 3 is required as a minimum because we must scan at least three entries to determine version eligibility. // Two entries representing the master key and the following one representing the non-current version. const minEntriesToBeScanned = 3; - const { isValid, maxScannedLifecycleListingEntries } = - validateMaxScannedEntries(params, config, minEntriesToBeScanned); + const { isValid, maxScannedLifecycleListingEntries } = validateMaxScannedEntries( + params, + config, + minEntriesToBeScanned + ); if (!isValid) { monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents'); return callback(errors.InvalidArgument); @@ -80,14 +83,14 @@ function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, c maxScannedLifecycleListingEntries, }; - listParams.versionIdMarker = params['version-id-marker'] ? - versionIdUtils.decode(params['version-id-marker']) : undefined; + listParams.versionIdMarker = params['version-id-marker'] + ? 
versionIdUtils.decode(params['version-id-marker']) + : undefined; return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { if (err) { log.debug('error processing request', { method: 'metadataValidateBucket', error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'listLifecycleNonCurrents'); + monitoring.promMetrics('GET', bucketName, err.code, 'listLifecycleNonCurrents'); return callback(err, null); } @@ -95,8 +98,7 @@ function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, c const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended'); if (!isBucketVersioned) { log.debug('bucket is not versioned'); - return callback(errorInstances.InvalidRequest.customizeDescription( - 'bucket is not versioned'), null); + return callback(errorInstances.InvalidRequest.customizeDescription('bucket is not versioned'), null); } if (!requestMaxKeys) { @@ -104,20 +106,16 @@ function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, c Contents: [], IsTruncated: false, }; - return handleResult(listParams, requestMaxKeys, authInfo, - bucketName, emptyList, log, callback); + return handleResult(listParams, requestMaxKeys, authInfo, bucketName, emptyList, log, callback); } - return services.getLifecycleListing(bucketName, listParams, log, - (err, list) => { + return services.getLifecycleListing(bucketName, listParams, log, (err, list) => { if (err) { log.debug('error processing request', { method: 'services.getLifecycleListing', error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'listLifecycleNonCurrents'); + monitoring.promMetrics('GET', bucketName, err.code, 'listLifecycleNonCurrents'); return callback(err, null); } - return handleResult(listParams, requestMaxKeys, authInfo, - bucketName, list, log, callback); + return handleResult(listParams, requestMaxKeys, authInfo, bucketName, list, log, callback); }); }); } diff --git a/lib/api/backbeat/listLifecycleOrphanDeleteMarkers.js b/lib/api/backbeat/listLifecycleOrphanDeleteMarkers.js index 89cab46088..5c01c57a60 100644 --- a/lib/api/backbeat/listLifecycleOrphanDeleteMarkers.js +++ b/lib/api/backbeat/listLifecycleOrphanDeleteMarkers.js @@ -7,8 +7,7 @@ const monitoring = require('../../utilities/monitoringHandler'); const { processOrphans, validateMaxScannedEntries } = require('../apiUtils/object/lifecycle'); const { config } = require('../../Config'); -function handleResult(listParams, requestMaxKeys, authInfo, - bucketName, list, log, callback) { +function handleResult(listParams, requestMaxKeys, authInfo, bucketName, list, log, callback) { // eslint-disable-next-line no-param-reassign listParams.maxKeys = requestMaxKeys; const res = processOrphans(bucketName, listParams, list); @@ -34,11 +33,9 @@ function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request const bucketName = request.bucketName; log.debug('processing request', { method: 'listLifecycleOrphanDeleteMarkers' }); - const requestMaxKeys = params['max-keys'] ? - Number.parseInt(params['max-keys'], 10) : 1000; + const requestMaxKeys = params['max-keys'] ? 
Number.parseInt(params['max-keys'], 10) : 1000; if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) { - monitoring.promMetrics( - 'GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers'); + monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers'); return callback(errors.InvalidArgument); } const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys); @@ -46,8 +43,11 @@ function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request // 3 is required as a minimum because we must scan at least three entries to determine version eligibility. // Two entries representing the master key and the following one representing the non-current version. const minEntriesToBeScanned = 3; - const { isValid, maxScannedLifecycleListingEntries } = - validateMaxScannedEntries(params, config, minEntriesToBeScanned); + const { isValid, maxScannedLifecycleListingEntries } = validateMaxScannedEntries( + params, + config, + minEntriesToBeScanned + ); if (!isValid) { monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers'); return callback(errors.InvalidArgument); @@ -71,8 +71,7 @@ function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { if (err) { log.debug('error processing request', { method: 'metadataValidateBucket', error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers'); + monitoring.promMetrics('GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers'); return callback(err, null); } @@ -80,8 +79,7 @@ function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended'); if (!isBucketVersioned) { log.debug('bucket is not versioned or suspended'); - return callback(errorInstances.InvalidRequest.customizeDescription( - 'bucket is not versioned'), null); + return callback(errorInstances.InvalidRequest.customizeDescription('bucket is not versioned'), null); } if (!requestMaxKeys) { @@ -89,20 +87,16 @@ function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request Contents: [], IsTruncated: false, }; - return handleResult(listParams, requestMaxKeys, authInfo, - bucketName, emptyList, log, callback); + return handleResult(listParams, requestMaxKeys, authInfo, bucketName, emptyList, log, callback); } - return services.getLifecycleListing(bucketName, listParams, log, - (err, list) => { + return services.getLifecycleListing(bucketName, listParams, log, (err, list) => { if (err) { log.debug('error processing request', { error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers'); + monitoring.promMetrics('GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers'); return callback(err, null); } - return handleResult(listParams, requestMaxKeys, authInfo, - bucketName, list, log, callback); + return handleResult(listParams, requestMaxKeys, authInfo, bucketName, list, log, callback); }); }); } diff --git a/lib/api/bucketDelete.js b/lib/api/bucketDelete.js index 78af94dec4..4f08704692 100644 --- a/lib/api/bucketDelete.js +++ b/lib/api/bucketDelete.js @@ -21,8 +21,7 @@ function bucketDelete(authInfo, request, log, cb) { if (authInfo.isRequesterPublicUser()) { log.debug('operation not available for public user'); - monitoring.promMetrics( - 'DELETE', request.bucketName, 403, 
'deleteBucket'); + monitoring.promMetrics('DELETE', request.bucketName, 403, 'deleteBucket'); return cb(errors.AccessDenied); } const bucketName = request.bucketName; @@ -34,35 +33,27 @@ function bucketDelete(authInfo, request, log, cb) { request, }; - return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, bucketMD) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucketMD); + return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucketMD) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucketMD); + if (err) { + log.debug('error processing request', { method: 'metadataValidateBucket', error: err }); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteBucket'); + return cb(err, corsHeaders); + } + log.trace('passed checks', { method: 'metadataValidateBucket' }); + return deleteBucket(authInfo, bucketMD, bucketName, authInfo.getCanonicalID(), request, log, err => { if (err) { - log.debug('error processing request', - { method: 'metadataValidateBucket', error: err }); - monitoring.promMetrics( - 'DELETE', bucketName, err.code, 'deleteBucket'); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteBucket'); return cb(err, corsHeaders); } - log.trace('passed checks', - { method: 'metadataValidateBucket' }); - return deleteBucket(authInfo, bucketMD, bucketName, - authInfo.getCanonicalID(), request, log, err => { - if (err) { - monitoring.promMetrics( - 'DELETE', bucketName, err.code, 'deleteBucket'); - return cb(err, corsHeaders); - } - pushMetric('deleteBucket', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics( - 'DELETE', bucketName, '204', 'deleteBucket'); - return cb(null, corsHeaders); - }); + pushMetric('deleteBucket', log, { + authInfo, + bucket: bucketName, + }); + monitoring.promMetrics('DELETE', bucketName, '204', 'deleteBucket'); + return cb(null, corsHeaders); }); + }); } module.exports = bucketDelete; diff --git a/lib/api/bucketDeleteCors.js b/lib/api/bucketDeleteCors.js index b08b6c0f13..e6f6f02875 100644 --- a/lib/api/bucketDeleteCors.js +++ b/lib/api/bucketDeleteCors.js @@ -2,8 +2,7 @@ const { errors } = require('arsenal'); const bucketShield = require('./apiUtils/bucket/bucketShield'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); -const { isBucketAuthorized } = - require('./apiUtils/authorization/permissionChecks'); +const { isBucketAuthorized } = require('./apiUtils/authorization/permissionChecks'); const metadata = require('../metadata/wrapper'); const { pushMetric } = require('../utapi/utilities'); const monitoring = require('../utilities/monitoringHandler'); @@ -23,29 +22,34 @@ function bucketDeleteCors(authInfo, request, log, callback) { const canonicalID = authInfo.getCanonicalID(); return metadata.getBucket(bucketName, log, (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); if (err) { log.debug('metadata getbucket failed', { error: err }); - monitoring.promMetrics('DELETE', bucketName, 400, - 'deleteBucketCors'); + monitoring.promMetrics('DELETE', bucketName, 400, 'deleteBucketCors'); return callback(err); } if (bucketShield(bucket, requestType)) { - monitoring.promMetrics('DELETE', bucketName, 400, - 'deleteBucketCors'); + monitoring.promMetrics('DELETE', bucketName, 400, 
'deleteBucketCors'); return callback(errors.NoSuchBucket); } log.trace('found bucket in metadata'); - if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, - authInfo, log, request, request.actionImplicitDenies)) { + if ( + !isBucketAuthorized( + bucket, + request.apiMethods || requestType, + canonicalID, + authInfo, + log, + request, + request.actionImplicitDenies + ) + ) { log.debug('access denied for user on bucket', { requestType, method: 'bucketDeleteCors', }); - monitoring.promMetrics('DELETE', bucketName, 403, - 'deleteBucketCors'); + monitoring.promMetrics('DELETE', bucketName, 403, 'deleteBucketCors'); return callback(errors.AccessDenied, corsHeaders); } @@ -65,16 +69,14 @@ function bucketDeleteCors(authInfo, request, log, callback) { bucket.setCors(null); return metadata.updateBucket(bucketName, bucket, log, err => { if (err) { - monitoring.promMetrics('DELETE', bucketName, 400, - 'deleteBucketCors'); + monitoring.promMetrics('DELETE', bucketName, 400, 'deleteBucketCors'); return callback(err, corsHeaders); } pushMetric('deleteBucketCors', log, { authInfo, bucket: bucketName, }); - monitoring.promMetrics( - 'DELETE', bucketName, '204', 'deleteBucketCors'); + monitoring.promMetrics('DELETE', bucketName, '204', 'deleteBucketCors'); return callback(err, corsHeaders); }); }); diff --git a/lib/api/bucketDeleteEncryption.js b/lib/api/bucketDeleteEncryption.js index 35bf57c6c9..cb34d4a437 100644 --- a/lib/api/bucketDeleteEncryption.js +++ b/lib/api/bucketDeleteEncryption.js @@ -25,53 +25,55 @@ function bucketDeleteEncryption(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next), - (bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)), - (bucket, next) => { - const sseConfig = bucket.getServerSideEncryption(); + return async.waterfall( + [ + next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next), + (bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)), + (bucket, next) => { + const sseConfig = bucket.getServerSideEncryption(); - if (sseConfig === null) { - return next(null, bucket); - } + if (sseConfig === null) { + return next(null, bucket); + } - const { isAccountEncryptionEnabled, masterKeyId, algorithm, cryptoScheme } = sseConfig; + const { isAccountEncryptionEnabled, masterKeyId, algorithm, cryptoScheme } = sseConfig; - let updatedSseConfig = null; + let updatedSseConfig = null; - if (!isAccountEncryptionEnabled && masterKeyId) { - // Keep the encryption configuration as a "cache" to avoid generating a new master key: - // - if the default encryption master key is defined at the bucket level (!isAccountEncryptionEnabled), - // - and if a bucket-level default encryption key is already set. - // This "cache" is implemented by storing the configuration in the bucket metadata - // with mandatory set to false, making sure it remains hidden for `getBucketEncryption` operations. - // There is no need to cache the configuration if the default encryption master key is - // managed at the account level, as the master key id in that case is stored directly in - // the account metadata. 
- updatedSseConfig = { - mandatory: false, - algorithm, - cryptoScheme, - masterKeyId, - }; - } + if (!isAccountEncryptionEnabled && masterKeyId) { + // Keep the encryption configuration as a "cache" to avoid generating a new master key: + // - if the default encryption master key is defined at the bucket level (!isAccountEncryptionEnabled), + // - and if a bucket-level default encryption key is already set. + // This "cache" is implemented by storing the configuration in the bucket metadata + // with mandatory set to false, making sure it remains hidden for `getBucketEncryption` operations. + // There is no need to cache the configuration if the default encryption master key is + // managed at the account level, as the master key id in that case is stored directly in + // the account metadata. + updatedSseConfig = { + mandatory: false, + algorithm, + cryptoScheme, + masterKeyId, + }; + } - bucket.setServerSideEncryption(updatedSseConfig); - return metadata.updateBucket(bucketName, bucket, log, err => next(err, bucket)); - }, - ], - (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, method: 'bucketDeleteEncryption' }); - return callback(err, corsHeaders); + bucket.setServerSideEncryption(updatedSseConfig); + return metadata.updateBucket(bucketName, bucket, log, err => next(err, bucket)); + }, + ], + (err, bucket) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'bucketDeleteEncryption' }); + return callback(err, corsHeaders); + } + pushMetric('deleteBucketEncryption', log, { + authInfo, + bucket: bucketName, + }); + return callback(null, corsHeaders); } - pushMetric('deleteBucketEncryption', log, { - authInfo, - bucket: bucketName, - }); - return callback(null, corsHeaders); - }); + ); } module.exports = bucketDeleteEncryption; diff --git a/lib/api/bucketDeleteLifecycle.js b/lib/api/bucketDeleteLifecycle.js index 31a4148a74..db1b123454 100644 --- a/lib/api/bucketDeleteLifecycle.js +++ b/lib/api/bucketDeleteLifecycle.js @@ -28,8 +28,7 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) { error: err, method: 'bucketDeleteLifecycle', }); - monitoring.promMetrics( - 'DELETE', bucketName, err.code, 'deleteBucketLifecycle'); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteBucketLifecycle'); return callback(err, corsHeaders); } if (!bucket.getLifecycleConfiguration()) { @@ -46,16 +45,14 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) { bucket.setLifecycleConfiguration(null); return metadata.updateBucket(bucketName, bucket, log, err => { if (err) { - monitoring.promMetrics( - 'DELETE', bucketName, err.code, 'deleteBucketLifecycle'); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteBucketLifecycle'); return callback(err, corsHeaders); } pushMetric('deleteBucketLifecycle', log, { authInfo, bucket: bucketName, }); - monitoring.promMetrics( - 'DELETE', bucketName, '200', 'deleteBucketLifecycle'); + monitoring.promMetrics('DELETE', bucketName, '200', 'deleteBucketLifecycle'); return callback(null, corsHeaders); }); }); diff --git a/lib/api/bucketDeleteQuota.js b/lib/api/bucketDeleteQuota.js index 849072a45f..3821240d56 100644 --- a/lib/api/bucketDeleteQuota.js +++ b/lib/api/bucketDeleteQuota.js @@ -25,34 +25,35 @@ function bucketDeleteQuota(authInfo, request, log, callback) { requestType: 
request.apiMethods || requestType, request, }; - return waterfall([ - next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, bucket) => next(err, bucket)), - (bucket, next) => { - bucket.setQuota(0); - metadata.updateBucket(bucket.getName(), bucket, log, err => - next(err, bucket)); - }, - ], (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.debug('error processing request', { - error: err, - method: 'bucketDeleteQuota' + return waterfall( + [ + next => + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => + next(err, bucket) + ), + (bucket, next) => { + bucket.setQuota(0); + metadata.updateBucket(bucket.getName(), bucket, log, err => next(err, bucket)); + }, + ], + (err, bucket) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.debug('error processing request', { + error: err, + method: 'bucketDeleteQuota', + }); + monitoring.promMetrics('DELETE', bucketName, err.code, 'bucketDeleteQuota'); + return callback(err, err.code, corsHeaders); + } + monitoring.promMetrics('DELETE', bucketName, '204', 'bucketDeleteQuota'); + pushMetric('bucketDeleteQuota', log, { + authInfo, + bucket: bucketName, }); - monitoring.promMetrics('DELETE', bucketName, err.code, - 'bucketDeleteQuota'); - return callback(err, err.code, corsHeaders); + return callback(null, 204, corsHeaders); } - monitoring.promMetrics( - 'DELETE', bucketName, '204', 'bucketDeleteQuota'); - pushMetric('bucketDeleteQuota', log, { - authInfo, - bucket: bucketName, - }); - return callback(null, 204, corsHeaders); - }); + ); } module.exports = bucketDeleteQuota; diff --git a/lib/api/bucketDeleteReplication.js b/lib/api/bucketDeleteReplication.js index ff9912171e..220284ffcb 100644 --- a/lib/api/bucketDeleteReplication.js +++ b/lib/api/bucketDeleteReplication.js @@ -28,8 +28,7 @@ function bucketDeleteReplication(authInfo, request, log, callback) { error: err, method: 'bucketDeleteReplication', }); - monitoring.promMetrics( - 'DELETE', bucketName, err.code, 'deleteBucketReplication'); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteBucketReplication'); return callback(err, corsHeaders); } if (!bucket.getReplicationConfiguration()) { @@ -46,16 +45,14 @@ function bucketDeleteReplication(authInfo, request, log, callback) { bucket.setReplicationConfiguration(null); return metadata.updateBucket(bucketName, bucket, log, err => { if (err) { - monitoring.promMetrics( - 'DELETE', bucketName, err.code, 'deleteBucketReplication'); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteBucketReplication'); return callback(err, corsHeaders); } pushMetric('deleteBucketReplication', log, { authInfo, bucket: bucketName, }); - monitoring.promMetrics( - 'DELETE', bucketName, '200', 'deleteBucketReplication'); + monitoring.promMetrics('DELETE', bucketName, '200', 'deleteBucketReplication'); return callback(null, corsHeaders); }); }); diff --git a/lib/api/bucketDeleteTagging.js b/lib/api/bucketDeleteTagging.js index 8997d760ef..cdb6aecaf1 100644 --- a/lib/api/bucketDeleteTagging.js +++ b/lib/api/bucketDeleteTagging.js @@ -25,38 +25,38 @@ function bucketDeleteTagging(authInfo, request, log, callback) { }; let bucket = null; - return waterfall([ - next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, b) => { - if (err) { - return next(err); - } - bucket = b; 
- bucket.setTags([]); - return next(); - }), - next => metadata.updateBucket(bucket.getName(), bucket, log, next), - ], err => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.error('error processing request', { - error: err, - method: 'deleteBucketTagging', - bucketName + return waterfall( + [ + next => + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, b) => { + if (err) { + return next(err); + } + bucket = b; + bucket.setTags([]); + return next(); + }), + next => metadata.updateBucket(bucket.getName(), bucket, log, next), + ], + err => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.error('error processing request', { + error: err, + method: 'deleteBucketTagging', + bucketName, + }); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteBucketTagging'); + return callback(err, corsHeaders); + } + pushMetric('deleteBucketTagging', log, { + authInfo, + bucket: bucketName, }); - monitoring.promMetrics('DELETE', bucketName, err.code, - 'deleteBucketTagging'); + monitoring.promMetrics('DELETE', bucketName, '200', 'deleteBucketTagging'); return callback(err, corsHeaders); } - pushMetric('deleteBucketTagging', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics( - 'DELETE', bucketName, '200', 'deleteBucketTagging'); - return callback(err, corsHeaders); - }); + ); } module.exports = bucketDeleteTagging; diff --git a/lib/api/bucketDeleteWebsite.js b/lib/api/bucketDeleteWebsite.js index 2f83391871..3591578d42 100644 --- a/lib/api/bucketDeleteWebsite.js +++ b/lib/api/bucketDeleteWebsite.js @@ -2,8 +2,7 @@ const { errors } = require('arsenal'); const bucketShield = require('./apiUtils/bucket/bucketShield'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); -const { isBucketAuthorized } = - require('./apiUtils/authorization/permissionChecks'); +const { isBucketAuthorized } = require('./apiUtils/authorization/permissionChecks'); const metadata = require('../metadata/wrapper'); const { pushMetric } = require('../utapi/utilities'); const monitoring = require('../utilities/monitoringHandler'); @@ -15,29 +14,34 @@ function bucketDeleteWebsite(authInfo, request, log, callback) { const canonicalID = authInfo.getCanonicalID(); return metadata.getBucket(bucketName, log, (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); if (err) { log.debug('metadata getbucket failed', { error: err }); - monitoring.promMetrics( - 'DELETE', bucketName, err.code, 'deleteBucketWebsite'); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteBucketWebsite'); return callback(err); } if (bucketShield(bucket, requestType)) { - monitoring.promMetrics( - 'DELETE', bucketName, 404, 'deleteBucketWebsite'); + monitoring.promMetrics('DELETE', bucketName, 404, 'deleteBucketWebsite'); return callback(errors.NoSuchBucket); } log.trace('found bucket in metadata'); - if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, - authInfo, log, request, request.actionImplicitDenies)) { + if ( + !isBucketAuthorized( + bucket, + request.apiMethods || requestType, + canonicalID, + authInfo, + log, + request, + request.actionImplicitDenies + ) + ) { log.debug('access denied for user on bucket', { requestType, method: 'bucketDeleteWebsite', }); - 
monitoring.promMetrics( - 'DELETE', bucketName, 403, 'deleteBucketWebsite'); + monitoring.promMetrics('DELETE', bucketName, 403, 'deleteBucketWebsite'); return callback(errors.AccessDenied, corsHeaders); } @@ -57,16 +61,14 @@ function bucketDeleteWebsite(authInfo, request, log, callback) { bucket.setWebsiteConfiguration(null); return metadata.updateBucket(bucketName, bucket, log, err => { if (err) { - monitoring.promMetrics( - 'DELETE', bucketName, err.code, 'deleteBucketWebsite'); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteBucketWebsite'); return callback(err, corsHeaders); } pushMetric('deleteBucketWebsite', log, { authInfo, bucket: bucketName, }); - monitoring.promMetrics( - 'DELETE', bucketName, '200', 'deleteBucketWebsite'); + monitoring.promMetrics('DELETE', bucketName, '200', 'deleteBucketWebsite'); return callback(null, corsHeaders); }); }); diff --git a/lib/api/bucketGet.js b/lib/api/bucketGet.js index 8eef7694e4..a62d5bda4d 100644 --- a/lib/api/bucketGet.js +++ b/lib/api/bucketGet.js @@ -8,14 +8,10 @@ const escapeForXml = s3middleware.escapeForXml; const { pushMetric } = require('../utapi/utilities'); const versionIdUtils = versioning.VersionID; const monitoring = require('../utilities/monitoringHandler'); -const { generateToken, decryptToken } - = require('../api/apiUtils/object/continueToken'); +const { generateToken, decryptToken } = require('../api/apiUtils/object/continueToken'); // do not url encode the continuation tokens -const skipUrlEncoding = new Set([ - 'ContinuationToken', - 'NextContinuationToken', -]); +const skipUrlEncoding = new Set(['ContinuationToken', 'NextContinuationToken']); /* Sample XML response for GET bucket objects V2: @@ -107,7 +103,9 @@ function processVersions(bucketName, listParams, list) { xml.push( '', '', - '', bucketName, '' + '', + bucketName, + '' ); const isTruncated = list.IsTruncated ? 'true' : 'false'; const xmlParams = [ @@ -122,17 +120,15 @@ function processVersions(bucketName, listParams, list) { { tag: 'IsTruncated', value: isTruncated }, ]; - const escapeXmlFn = listParams.encoding === 'url' ? - querystring.escape : escapeForXml; + const escapeXmlFn = listParams.encoding === 'url' ? querystring.escape : escapeForXml; xmlParams.forEach(p => { if (p.value) { - const val = p.tag !== 'NextVersionIdMarker' || p.value === 'null' ? - p.value : versionIdUtils.encode(p.value); + const val = + p.tag !== 'NextVersionIdMarker' || p.value === 'null' ? p.value : versionIdUtils.encode(p.value); xml.push(`<${p.tag}>${escapeXmlFn(val)}`); } }); - let lastKey = listParams.keyMarker ? - escapeXmlFn(listParams.keyMarker) : undefined; + let lastKey = listParams.keyMarker ? escapeXmlFn(listParams.keyMarker) : undefined; list.Versions.forEach(item => { const v = item.value; const objectKey = escapeXmlFn(item.key); @@ -142,8 +138,7 @@ function processVersions(bucketName, listParams, list) { v.IsDeleteMarker ? '' : '', `${objectKey}`, '', - (v.IsNull || v.VersionId === undefined) ? - 'null' : versionIdUtils.encode(v.VersionId), + v.IsNull || v.VersionId === undefined ? 'null' : versionIdUtils.encode(v.VersionId), '', `${isLatest}`, `${v.LastModified}`, @@ -170,7 +165,9 @@ function processMasterVersions(bucketName, listParams, list) { xml.push( '', '', - '', bucketName, '' + '', + bucketName, + '' ); const isTruncated = list.IsTruncated ? 
'true' : 'false'; const xmlParams = [ @@ -182,10 +179,8 @@ function processMasterVersions(bucketName, listParams, list) { ]; if (listParams.v2) { - xmlParams.push( - { tag: 'StartAfter', value: listParams.startAfter || '' }); - xmlParams.push( - { tag: 'FetchOwner', value: `${listParams.fetchOwner}` }); + xmlParams.push({ tag: 'StartAfter', value: listParams.startAfter || '' }); + xmlParams.push({ tag: 'FetchOwner', value: `${listParams.fetchOwner}` }); xmlParams.push({ tag: 'ContinuationToken', value: generateToken(listParams.continuationToken) || '', @@ -203,18 +198,19 @@ function processMasterVersions(bucketName, listParams, list) { xmlParams.push({ tag: 'NextMarker', value: list.NextMarker }); } - const escapeXmlFn = listParams.encoding === 'url' ? - querystring.escape : escapeForXml; + const escapeXmlFn = listParams.encoding === 'url' ? querystring.escape : escapeForXml; xmlParams.forEach(p => { if (p.value && skipUrlEncoding.has(p.tag)) { xml.push(`<${p.tag}>${p.value}`); } else if (p.value || p.tag === 'KeyCount' || p.tag === 'MaxKeys') { xml.push(`<${p.tag}>${escapeXmlFn(p.value)}`); - } else if (p.tag !== 'NextMarker' && - p.tag !== 'EncodingType' && - p.tag !== 'Delimiter' && - p.tag !== 'StartAfter' && - p.tag !== 'NextContinuationToken') { + } else if ( + p.tag !== 'NextMarker' && + p.tag !== 'EncodingType' && + p.tag !== 'Delimiter' && + p.tag !== 'StartAfter' && + p.tag !== 'NextContinuationToken' + ) { xml.push(`<${p.tag}/>`); } }); @@ -240,10 +236,7 @@ function processMasterVersions(bucketName, listParams, list) { '' ); } - return xml.push( - `${v.StorageClass}`, - '' - ); + return xml.push(`${v.StorageClass}`, ''); }); list.CommonPrefixes.forEach(item => { const val = escapeXmlFn(item); @@ -253,8 +246,7 @@ function processMasterVersions(bucketName, listParams, list) { return xml.join(''); } -function handleResult(listParams, requestMaxKeys, encoding, authInfo, - bucketName, list, corsHeaders, log, callback) { +function handleResult(listParams, requestMaxKeys, encoding, authInfo, bucketName, list, corsHeaders, log, callback) { // eslint-disable-next-line no-param-reassign listParams.maxKeys = requestMaxKeys; // eslint-disable-next-line no-param-reassign @@ -285,8 +277,9 @@ function bucketGet(authInfo, request, log, callback) { const bucketName = request.bucketName; const v2 = params['list-type']; if (v2 !== undefined && Number.parseInt(v2, 10) !== 2) { - return callback(errorInstances.InvalidArgument.customizeDescription('Invalid ' + - 'List Type specified in Request')); + return callback( + errorInstances.InvalidArgument.customizeDescription('Invalid ' + 'List Type specified in Request') + ); } if (v2) { log.addDefaultFields({ @@ -300,16 +293,14 @@ function bucketGet(authInfo, request, log, callback) { log.debug('processing request', { method: 'bucketGet' }); const encoding = params['encoding-type']; if (encoding !== undefined && encoding !== 'url') { - monitoring.promMetrics( - 'GET', bucketName, 400, 'listBucket'); - return callback(errorInstances.InvalidArgument.customizeDescription('Invalid ' + - 'Encoding Method specified in Request')); + monitoring.promMetrics('GET', bucketName, 400, 'listBucket'); + return callback( + errorInstances.InvalidArgument.customizeDescription('Invalid ' + 'Encoding Method specified in Request') + ); } - const requestMaxKeys = params['max-keys'] ? - Number.parseInt(params['max-keys'], 10) : 1000; + const requestMaxKeys = params['max-keys'] ? 
Number.parseInt(params['max-keys'], 10) : 1000; if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) { - monitoring.promMetrics( - 'GET', bucketName, 400, 'listBucket'); + monitoring.promMetrics('GET', bucketName, 400, 'listBucket'); return callback(errors.InvalidArgument); } // AWS only returns 1000 keys even if max keys are greater. @@ -336,28 +327,26 @@ function bucketGet(authInfo, request, log, callback) { if (v2) { listParams.v2 = true; listParams.startAfter = params['start-after']; - listParams.continuationToken = - decryptToken(params['continuation-token']); + listParams.continuationToken = decryptToken(params['continuation-token']); listParams.fetchOwner = params['fetch-owner'] === 'true'; } else { listParams.marker = params.marker; } standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); if (err) { log.debug('error processing request', { error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'listBucket'); + monitoring.promMetrics('GET', bucketName, err.code, 'listBucket'); return callback(err, null, corsHeaders); } if (params.versions !== undefined) { listParams.listingType = 'DelimiterVersions'; delete listParams.marker; listParams.keyMarker = params['key-marker']; - listParams.versionIdMarker = params['version-id-marker'] ? - versionIdUtils.decode(params['version-id-marker']) : undefined; + listParams.versionIdMarker = params['version-id-marker'] + ? versionIdUtils.decode(params['version-id-marker']) + : undefined; } if (!requestMaxKeys) { const emptyList = { @@ -366,19 +355,35 @@ function bucketGet(authInfo, request, log, callback) { Versions: [], IsTruncated: false, }; - return handleResult(listParams, requestMaxKeys, encoding, authInfo, - bucketName, emptyList, corsHeaders, log, callback); + return handleResult( + listParams, + requestMaxKeys, + encoding, + authInfo, + bucketName, + emptyList, + corsHeaders, + log, + callback + ); } - return services.getObjectListing(bucketName, listParams, log, - (err, list) => { + return services.getObjectListing(bucketName, listParams, log, (err, list) => { if (err) { log.debug('error processing request', { error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'listBucket'); + monitoring.promMetrics('GET', bucketName, err.code, 'listBucket'); return callback(err, null, corsHeaders); } - return handleResult(listParams, requestMaxKeys, encoding, authInfo, - bucketName, list, corsHeaders, log, callback); + return handleResult( + listParams, + requestMaxKeys, + encoding, + authInfo, + bucketName, + list, + corsHeaders, + log, + callback + ); }); }); return undefined; diff --git a/lib/api/bucketGetACL.js b/lib/api/bucketGetACL.js index 0726e06549..7eb47ced9a 100644 --- a/lib/api/bucketGetACL.js +++ b/lib/api/bucketGetACL.js @@ -26,7 +26,6 @@ const monitoring = require('../utilities/monitoringHandler'); */ - /** * bucketGetACL - Return ACL's for bucket * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info @@ -56,13 +55,10 @@ function bucketGetACL(authInfo, request, log, callback) { }; standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + const corsHeaders = collectCorsHeaders(request.headers.origin, 
request.method, bucket); if (err) { - log.debug('error processing request', - { method: 'bucketGetACL', error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getBucketAcl'); + log.debug('error processing request', { method: 'bucketGetACL', error: err }); + monitoring.promMetrics('GET', bucketName, err.code, 'getBucketAcl'); return callback(err, null, corsHeaders); } const bucketACL = bucket.getAcl(); @@ -75,8 +71,7 @@ function bucketGetACL(authInfo, request, log, callback) { }; if (bucketACL.Canned !== '') { - const cannedGrants = aclUtils.handleCannedGrant( - bucketACL.Canned, ownerGrant); + const cannedGrants = aclUtils.handleCannedGrant(bucketACL.Canned, ownerGrant); grantInfo.grants = grantInfo.grants.concat(cannedGrants); const xml = aclUtils.convertToXml(grantInfo); pushMetric('getBucketAcl', log, { @@ -86,19 +81,19 @@ function bucketGetACL(authInfo, request, log, callback) { return callback(null, xml, corsHeaders); } /** - * Build array of all canonicalIDs used in ACLs so duplicates - * will be retained (e.g. if an account has both read and write - * privileges, want to display both and not lose the duplicate - * when receive one dictionary entry back from Vault) - */ + * Build array of all canonicalIDs used in ACLs so duplicates + * will be retained (e.g. if an account has both read and write + * privileges, want to display both and not lose the duplicate + * when receive one dictionary entry back from Vault) + */ const canonicalIDs = aclUtils.getCanonicalIDs(bucketACL); // Build array with grants by URI const uriGrantInfo = aclUtils.getUriGrantInfo(bucketACL); if (canonicalIDs.length === 0) { /** - * If no acl's set by account canonicalID, just add URI - * grants (if any) and return - */ + * If no acl's set by account canonicalID, just add URI + * grants (if any) and return + */ grantInfo.grants = grantInfo.grants.concat(uriGrantInfo); const xml = aclUtils.convertToXml(grantInfo); pushMetric('getBucketAcl', log, { @@ -108,22 +103,18 @@ function bucketGetACL(authInfo, request, log, callback) { return callback(null, xml, corsHeaders); } /** - * If acl's set by account canonicalID, get emails from Vault to serve - * as display names - */ + * If acl's set by account canonicalID, get emails from Vault to serve + * as display names + */ return vault.getEmailAddresses(canonicalIDs, log, (err, emails) => { if (err) { - log.debug('error processing request', - { method: 'vault.getEmailAddresses', error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getBucketAcl'); + log.debug('error processing request', { method: 'vault.getEmailAddresses', error: err }); + monitoring.promMetrics('GET', bucketName, err.code, 'getBucketAcl'); return callback(err, null, corsHeaders); } - const individualGrants = - aclUtils.getIndividualGrants(bucketACL, canonicalIDs, emails); + const individualGrants = aclUtils.getIndividualGrants(bucketACL, canonicalIDs, emails); // Add to grantInfo any individual grants and grants by uri - grantInfo.grants = grantInfo.grants - .concat(individualGrants).concat(uriGrantInfo); + grantInfo.grants = grantInfo.grants.concat(individualGrants).concat(uriGrantInfo); // parse info about accounts and owner info to convert to xml const xml = aclUtils.convertToXml(grantInfo); pushMetric('getBucketAcl', log, { diff --git a/lib/api/bucketGetCors.js b/lib/api/bucketGetCors.js index a59b57d451..c0f63b18eb 100644 --- a/lib/api/bucketGetCors.js +++ b/lib/api/bucketGetCors.js @@ -3,8 +3,7 @@ const { errors } = require('arsenal'); const bucketShield = 
require('./apiUtils/bucket/bucketShield'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const { convertToXml } = require('./apiUtils/bucket/bucketCors'); -const { isBucketAuthorized } = - require('./apiUtils/authorization/permissionChecks'); +const { isBucketAuthorized } = require('./apiUtils/authorization/permissionChecks'); const metadata = require('../metadata/wrapper'); const { pushMetric } = require('../utapi/utilities'); const monitoring = require('../utilities/monitoringHandler'); @@ -26,27 +25,32 @@ function bucketGetCors(authInfo, request, log, callback) { metadata.getBucket(bucketName, log, (err, bucket) => { if (err) { log.debug('metadata getbucket failed', { error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getBucketCors'); + monitoring.promMetrics('GET', bucketName, err.code, 'getBucketCors'); return callback(err); } if (bucketShield(bucket, requestType)) { - monitoring.promMetrics( - 'GET', bucketName, 404, 'getBucketCors'); + monitoring.promMetrics('GET', bucketName, 404, 'getBucketCors'); return callback(errors.NoSuchBucket); } log.trace('found bucket in metadata'); - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); - if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, - authInfo, log, request, request.actionImplicitDenies)) { + if ( + !isBucketAuthorized( + bucket, + request.apiMethods || requestType, + canonicalID, + authInfo, + log, + request, + request.actionImplicitDenies + ) + ) { log.debug('access denied for user on bucket', { requestType, method: 'bucketGetCors', }); - monitoring.promMetrics( - 'GET', bucketName, 403, 'getBucketCors'); + monitoring.promMetrics('GET', bucketName, 403, 'getBucketCors'); return callback(errors.AccessDenied, null, corsHeaders); } @@ -55,8 +59,7 @@ function bucketGetCors(authInfo, request, log, callback) { log.debug('cors configuration does not exist', { method: 'bucketGetCors', }); - monitoring.promMetrics( - 'GET', bucketName, 404, 'getBucketCors'); + monitoring.promMetrics('GET', bucketName, 404, 'getBucketCors'); return callback(errors.NoSuchCORSConfiguration, null, corsHeaders); } log.trace('converting cors configuration to xml'); diff --git a/lib/api/bucketGetEncryption.js b/lib/api/bucketGetEncryption.js index db5d31432b..268d89e865 100644 --- a/lib/api/bucketGetEncryption.js +++ b/lib/api/bucketGetEncryption.js @@ -28,61 +28,65 @@ function bucketGetEncryption(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next), - (bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)), - (bucket, next) => { - // If sseInfo is present but the `mandatory` flag is not set - // then this info was not created using bucketPutEncryption - // or by using the x-amz-scal-server-side-encryption header at - // bucket creation and should not be returned - const sseInfo = bucket.getServerSideEncryption(); - if (sseInfo === null || !sseInfo.mandatory) { - log.trace('no server side encryption config found', { - bucket: bucketName, - method: 'bucketGetEncryption', - }); - return next(errors.ServerSideEncryptionConfigurationNotFoundError); + return async.waterfall( + [ + next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next), + (bucket, next) => 
checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)), + (bucket, next) => { + // If sseInfo is present but the `mandatory` flag is not set + // then this info was not created using bucketPutEncryption + // or by using the x-amz-scal-server-side-encryption header at + // bucket creation and should not be returned + const sseInfo = bucket.getServerSideEncryption(); + if (sseInfo === null || !sseInfo.mandatory) { + log.trace('no server side encryption config found', { + bucket: bucketName, + method: 'bucketGetEncryption', + }); + return next(errors.ServerSideEncryptionConfigurationNotFoundError); + } + return next(null, bucket, sseInfo); + }, + ], + (error, bucket, sseInfo) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (error) { + return callback(error, corsHeaders); } - return next(null, bucket, sseInfo); - }, - ], - (error, bucket, sseInfo) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); - if (error) { - return callback(error, corsHeaders); - } - const xml = [ - '', - '', - '', - '', - `${escapeForXml(sseInfo.algorithm)}`, - ]; + const xml = [ + '', + '', + '', + '', + `${escapeForXml(sseInfo.algorithm)}`, + ]; - if (sseInfo.configuredMasterKeyId) { - xml.push(`${escapeForXml( - config.kmsHideScalityArn - ? getKeyIdFromArn(sseInfo.configuredMasterKeyId) - : sseInfo.configuredMasterKeyId - )}`); - } + if (sseInfo.configuredMasterKeyId) { + xml.push( + `${escapeForXml( + config.kmsHideScalityArn + ? getKeyIdFromArn(sseInfo.configuredMasterKeyId) + : sseInfo.configuredMasterKeyId + )}` + ); + } - xml.push( - '', - 'false', - '', - '' - ); + xml.push( + '', + 'false', + '', + '' + ); - pushMetric('getBucketEncryption', log, { - authInfo, - bucket: bucketName, - }); + pushMetric('getBucketEncryption', log, { + authInfo, + bucket: bucketName, + }); - return callback(null, xml.join(''), corsHeaders); - }); + return callback(null, xml.join(''), corsHeaders); + } + ); } module.exports = bucketGetEncryption; diff --git a/lib/api/bucketGetLifecycle.js b/lib/api/bucketGetLifecycle.js index bc2851172c..8a8e5bca72 100644 --- a/lib/api/bucketGetLifecycle.js +++ b/lib/api/bucketGetLifecycle.js @@ -1,6 +1,5 @@ const { errors } = require('arsenal'); -const LifecycleConfiguration = - require('arsenal').models.LifecycleConfiguration; +const LifecycleConfiguration = require('arsenal').models.LifecycleConfiguration; const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); @@ -31,8 +30,7 @@ function bucketGetLifecycle(authInfo, request, log, callback) { error: err, method: 'bucketGetLifecycle', }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getBucketLifecycle'); + monitoring.promMetrics('GET', bucketName, err.code, 'getBucketLifecycle'); return callback(err, null, corsHeaders); } const lifecycleConfig = bucket.getLifecycleConfiguration(); @@ -41,10 +39,8 @@ function bucketGetLifecycle(authInfo, request, log, callback) { error: errors.NoSuchLifecycleConfiguration, method: 'bucketGetLifecycle', }); - monitoring.promMetrics( - 'GET', bucketName, 404, 'getBucketLifecycle'); - return callback(errors.NoSuchLifecycleConfiguration, null, - corsHeaders); + monitoring.promMetrics('GET', bucketName, 404, 'getBucketLifecycle'); + return callback(errors.NoSuchLifecycleConfiguration, null, corsHeaders); } const xml = LifecycleConfiguration.getConfigXml(lifecycleConfig); pushMetric('getBucketLifecycle', log, { 
diff --git a/lib/api/bucketGetLocation.js b/lib/api/bucketGetLocation.js index 75aac4a29b..eee8c1c2e4 100644 --- a/lib/api/bucketGetLocation.js +++ b/lib/api/bucketGetLocation.js @@ -1,8 +1,7 @@ const { errors, s3middleware } = require('arsenal'); const bucketShield = require('./apiUtils/bucket/bucketShield'); -const { isBucketAuthorized } = - require('./apiUtils/authorization/permissionChecks'); +const { isBucketAuthorized } = require('./apiUtils/authorization/permissionChecks'); const metadata = require('../metadata/wrapper'); const { pushMetric } = require('../utapi/utilities'); const escapeForXml = s3middleware.escapeForXml; @@ -27,48 +26,53 @@ function bucketGetLocation(authInfo, request, log, callback) { return metadata.getBucket(bucketName, log, (err, bucket) => { if (err) { log.debug('metadata getbucket failed', { error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getBucketLocation'); + monitoring.promMetrics('GET', bucketName, err.code, 'getBucketLocation'); return callback(err); } if (bucketShield(bucket, requestType)) { - monitoring.promMetrics( - 'GET', bucketName, 404, 'getBucketLocation'); + monitoring.promMetrics('GET', bucketName, 404, 'getBucketLocation'); return callback(errors.NoSuchBucket); } log.trace('found bucket in metadata'); - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); - if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, - authInfo, log, request, request.actionImplicitDenies)) { + if ( + !isBucketAuthorized( + bucket, + request.apiMethods || requestType, + canonicalID, + authInfo, + log, + request, + request.actionImplicitDenies + ) + ) { log.debug('access denied for account on bucket', { requestType, method: 'bucketGetLocation', }); - monitoring.promMetrics( - 'GET', bucketName, 403, 'getBucketLocation'); + monitoring.promMetrics('GET', bucketName, 403, 'getBucketLocation'); return callback(errors.AccessDenied, null, corsHeaders); } let locationConstraint = bucket.getLocationConstraint(); if (!locationConstraint || locationConstraint === 'us-east-1') { - // AWS returns empty string if no region has been - // provided or for us-east-1 - // Note: AWS JS SDK sends a request with locationConstraint us-east-1 - // if no locationConstraint provided. + // AWS returns empty string if no region has been + // provided or for us-east-1 + // Note: AWS JS SDK sends a request with locationConstraint us-east-1 + // if no locationConstraint provided. 
locationConstraint = ''; } - const xml = `<?xml version="1.0" encoding="UTF-8"?> + const xml = + `<?xml version="1.0" encoding="UTF-8"?> <LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">` + `${escapeForXml(locationConstraint)}</LocationConstraint>`; pushMetric('getBucketLocation', log, { authInfo, bucket: bucketName, }); - monitoring.promMetrics( - 'GET', bucketName, '200', 'getBucketLocation'); + monitoring.promMetrics('GET', bucketName, '200', 'getBucketLocation'); return callback(null, xml, corsHeaders); }); } diff --git a/lib/api/bucketGetObjectLock.js b/lib/api/bucketGetObjectLock.js index a96e7cb4e4..e2aab76c56 100644 --- a/lib/api/bucketGetObjectLock.js +++ b/lib/api/bucketGetObjectLock.js @@ -2,8 +2,7 @@ const { errors } = require('arsenal'); const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); -const ObjectLockConfiguration = - require('arsenal').models.ObjectLockConfiguration; +const ObjectLockConfiguration = require('arsenal').models.ObjectLockConfiguration; // Format of the xml response: /** @@ -53,8 +52,7 @@ function bucketGetObjectLock(authInfo, request, log, callback) { error: errors.ObjectLockConfigurationNotFoundError, method: 'bucketGetObjectLock', }); - return callback(errors.ObjectLockConfigurationNotFoundError, null, - corsHeaders); + return callback(errors.ObjectLockConfigurationNotFoundError, null, corsHeaders); } const xml = ObjectLockConfiguration.getConfigXML(objLockConfig); pushMetric('getBucketObjectLock', log, { diff --git a/lib/api/bucketGetPolicy.js b/lib/api/bucketGetPolicy.js index d6f3b6a576..1f521dc1d9 100644 --- a/lib/api/bucketGetPolicy.js +++ b/lib/api/bucketGetPolicy.js @@ -36,8 +36,7 @@ function bucketGetPolicy(authInfo, request, log, callback) { error: errors.NoSuchBucketPolicy, method: 'bucketGetPolicy', }); - return callback(errors.NoSuchBucketPolicy, null, - corsHeaders); + return callback(errors.NoSuchBucketPolicy, null, corsHeaders); } // TODO: implement Utapi metric support // bucketPolicy needs to be JSON stringified on return for proper diff --git a/lib/api/bucketGetQuota.js b/lib/api/bucketGetQuota.js index 9556e31d4d..963eaecacb 100644 --- a/lib/api/bucketGetQuota.js +++ b/lib/api/bucketGetQuota.js @@ -31,21 +31,15 @@ function bucketGetQuota(authInfo, request, log, callback) { }); return callback(err, null, corsHeaders); } - xml.push( - '<?xml version="1.0" encoding="UTF-8"?>', - '', - '<Name>', bucket.getName(), '</Name>', - ); + xml.push('<?xml version="1.0" encoding="UTF-8"?>', '', '<Name>', bucket.getName(), '</Name>'); const bucketQuota = bucket.getQuota(); if (!bucketQuota) { log.debug('bucket has no quota', { method: 'bucketGetQuota', }); - return callback(errors.NoSuchQuota, null, - corsHeaders); + return callback(errors.NoSuchQuota, null, corsHeaders); } - xml.push('<Quota>', bucketQuota, '</Quota>', - ''); + xml.push('<Quota>', bucketQuota, '</Quota>', ''); pushMetric('getBucketQuota', log, { authInfo, diff --git a/lib/api/bucketGetReplication.js b/lib/api/bucketGetReplication.js index 8891f96bc1..e1a75a1e20 100644 --- a/lib/api/bucketGetReplication.js +++ b/lib/api/bucketGetReplication.js @@ -2,8 +2,7 @@ const { errors } = require('arsenal'); const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); -const { getReplicationConfigurationXML } = - require('./apiUtils/bucket/getReplicationConfiguration'); +const { getReplicationConfigurationXML } = require('./apiUtils/bucket/getReplicationConfiguration'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const monitoring = require('../utilities/monitoringHandler'); @@ -31,8 +30,7 @@ function
bucketGetReplication(authInfo, request, log, callback) { error: err, method: 'bucketGetReplication', }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getBucketReplication'); + monitoring.promMetrics('GET', bucketName, err.code, 'getBucketReplication'); return callback(err, null, corsHeaders); } const replicationConfig = bucket.getReplicationConfiguration(); @@ -41,18 +39,15 @@ function bucketGetReplication(authInfo, request, log, callback) { error: errors.ReplicationConfigurationNotFoundError, method: 'bucketGetReplication', }); - monitoring.promMetrics( - 'GET', bucketName, 404, 'getBucketReplication'); - return callback(errors.ReplicationConfigurationNotFoundError, null, - corsHeaders); + monitoring.promMetrics('GET', bucketName, 404, 'getBucketReplication'); + return callback(errors.ReplicationConfigurationNotFoundError, null, corsHeaders); } const xml = getReplicationConfigurationXML(replicationConfig); pushMetric('getBucketReplication', log, { authInfo, bucket: bucketName, }); - monitoring.promMetrics( - 'GET', bucketName, '200', 'getBucketReplication'); + monitoring.promMetrics('GET', bucketName, '200', 'getBucketReplication'); return callback(null, xml, corsHeaders); }); } diff --git a/lib/api/bucketGetTagging.js b/lib/api/bucketGetTagging.js index c31a14a08a..24b2f4580c 100644 --- a/lib/api/bucketGetTagging.js +++ b/lib/api/bucketGetTagging.js @@ -74,44 +74,44 @@ function bucketGetTagging(authInfo, request, log, callback) { let xml = null; let tags = null; - return waterfall([ - next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, b) => { - bucket = b; - return next(err); - }), - next => checkExpectedBucketOwner(headers, bucket, log, next), - next => { - tags = bucket.getTags(); - if (!tags || !tags.length) { - log.debug('bucket TagSet does not exist', { + return waterfall( + [ + next => + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, b) => { + bucket = b; + return next(err); + }), + next => checkExpectedBucketOwner(headers, bucket, log, next), + next => { + tags = bucket.getTags(); + if (!tags || !tags.length) { + log.debug('bucket TagSet does not exist', { + method: 'bucketGetTagging', + }); + return next(errors.NoSuchTagSet); + } + xml = tagsToXml(tags); + return next(); + }, + ], + err => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.debug('error processing request', { + error: err, method: 'bucketGetTagging', }); - return next(errors.NoSuchTagSet); + monitoring.promMetrics('GET', bucketName, err.code, 'getBucketTagging'); + } else { + pushMetric('getBucketTagging', log, { + authInfo, + bucket: bucketName, + }); + monitoring.promMetrics('GET', bucketName, '200', 'getBucketTagging'); } - xml = tagsToXml(tags); - return next(); - } - ], err => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.debug('error processing request', { - error: err, - method: 'bucketGetTagging' - }); - monitoring.promMetrics('GET', bucketName, err.code, - 'getBucketTagging'); - } else { - pushMetric('getBucketTagging', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics( - 'GET', bucketName, '200', 'getBucketTagging'); + return callback(err, xml, corsHeaders); } - return callback(err, xml, corsHeaders); - }); + ); } module.exports = bucketGetTagging; diff --git a/lib/api/bucketGetVersioning.js b/lib/api/bucketGetVersioning.js index f38cc31053..3e53223b20 
100644 --- a/lib/api/bucketGetVersioning.js +++ b/lib/api/bucketGetVersioning.js @@ -19,9 +19,9 @@ const monitoring = require('../utilities/monitoringHandler'); function convertToXml(versioningConfiguration) { const xml = []; - xml.push('<?xml version="1.0" encoding="UTF-8"?>', - '<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' + xml.push( + '<?xml version="1.0" encoding="UTF-8"?>', + '<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' ); if (versioningConfiguration && versioningConfiguration.Status) { @@ -59,13 +59,10 @@ function bucketGetVersioning(authInfo, request, log, callback) { }; standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); if (err) { - log.debug('error processing request', - { method: 'bucketGetVersioning', error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getBucketVersioning'); + log.debug('error processing request', { method: 'bucketGetVersioning', error: err }); + monitoring.promMetrics('GET', bucketName, err.code, 'getBucketVersioning'); return callback(err, null, corsHeaders); } const versioningConfiguration = bucket.getVersioningConfiguration(); @@ -74,8 +71,7 @@ function bucketGetVersioning(authInfo, request, log, callback) { authInfo, bucket: bucketName, }); - monitoring.promMetrics( - 'GET', bucketName, '200', 'getBucketVersioning'); + monitoring.promMetrics('GET', bucketName, '200', 'getBucketVersioning'); return callback(null, xml, corsHeaders); }); } diff --git a/lib/api/bucketGetWebsite.js b/lib/api/bucketGetWebsite.js index 35093fa457..9e2f7277bc 100644 --- a/lib/api/bucketGetWebsite.js +++ b/lib/api/bucketGetWebsite.js @@ -3,8 +3,7 @@ const { errors } = require('arsenal'); const bucketShield = require('./apiUtils/bucket/bucketShield'); const { convertToXml } = require('./apiUtils/bucket/bucketWebsite'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); -const { isBucketAuthorized } = - require('./apiUtils/authorization/permissionChecks'); +const { isBucketAuthorized } = require('./apiUtils/authorization/permissionChecks'); const metadata = require('../metadata/wrapper'); const { pushMetric } = require('../utapi/utilities'); const monitoring = require('../utilities/monitoringHandler'); @@ -26,27 +25,32 @@ function bucketGetWebsite(authInfo, request, log, callback) { metadata.getBucket(bucketName, log, (err, bucket) => { if (err) { log.debug('metadata getbucket failed', { error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getBucketWebsite'); + monitoring.promMetrics('GET', bucketName, err.code, 'getBucketWebsite'); return callback(err); } if (bucketShield(bucket, requestType)) { - monitoring.promMetrics( - 'GET', bucketName, 404, 'getBucketWebsite'); + monitoring.promMetrics('GET', bucketName, 404, 'getBucketWebsite'); return callback(errors.NoSuchBucket); } log.trace('found bucket in metadata'); - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, - authInfo, log, request, request.actionImplicitDenies)) { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if ( + !isBucketAuthorized( + bucket, + request.apiMethods || requestType, + canonicalID, + authInfo, + log, + request, + request.actionImplicitDenies + ) + ) { log.debug('access denied for user on bucket', { requestType, method: 'bucketGetWebsite', }); - monitoring.promMetrics( - 'GET', bucketName, 403,
'getBucketWebsite'); + monitoring.promMetrics('GET', bucketName, 403, 'getBucketWebsite'); return callback(errors.AccessDenied, null, corsHeaders); } @@ -55,10 +59,8 @@ function bucketGetWebsite(authInfo, request, log, callback) { log.debug('bucket website configuration does not exist', { method: 'bucketGetWebsite', }); - monitoring.promMetrics( - 'GET', bucketName, 404, 'getBucketWebsite'); - return callback(errors.NoSuchWebsiteConfiguration, null, - corsHeaders); + monitoring.promMetrics('GET', bucketName, 404, 'getBucketWebsite'); + return callback(errors.NoSuchWebsiteConfiguration, null, corsHeaders); } log.trace('converting website configuration to xml'); const xml = convertToXml(websiteConfig); @@ -67,8 +69,7 @@ function bucketGetWebsite(authInfo, request, log, callback) { authInfo, bucket: bucketName, }); - monitoring.promMetrics( - 'GET', bucketName, '200', 'getBucketWebsite'); + monitoring.promMetrics('GET', bucketName, '200', 'getBucketWebsite'); return callback(null, xml, corsHeaders); }); } diff --git a/lib/api/bucketHead.js b/lib/api/bucketHead.js index bafc52eb2d..506b3f9986 100644 --- a/lib/api/bucketHead.js +++ b/lib/api/bucketHead.js @@ -23,11 +23,9 @@ function bucketHead(authInfo, request, log, callback) { request, }; standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); if (err) { - monitoring.promMetrics( - 'HEAD', bucketName, err.code, 'headBucket'); + monitoring.promMetrics('HEAD', bucketName, err.code, 'headBucket'); return callback(err, corsHeaders); } pushMetric('headBucket', log, { diff --git a/lib/api/bucketPut.js b/lib/api/bucketPut.js index 43bfa41525..b291922ad0 100644 --- a/lib/api/bucketPut.js +++ b/lib/api/bucketPut.js @@ -45,19 +45,17 @@ function checkLocationConstraint(request, locationConstraint, log) { } else if (parsedHost && restEndpoints[parsedHost]) { locationConstraintChecked = restEndpoints[parsedHost]; } else { - log.trace('no location constraint provided on bucket put;' + - 'setting us-east-1'); + log.trace('no location constraint provided on bucket put;' + 'setting us-east-1'); locationConstraintChecked = 'us-east-1'; } if (!locationConstraints[locationConstraintChecked]) { - const errMsg = 'value of the location you are attempting to set - ' + + const errMsg = + 'value of the location you are attempting to set - ' + `${locationConstraintChecked} - is not listed in the ` + 'locationConstraint config'; - log.trace(`locationConstraint is invalid - ${errMsg}`, - { locationConstraint: locationConstraintChecked }); - return { error: errorInstances.InvalidLocationConstraint. 
- customizeDescription(errMsg) }; + log.trace(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: locationConstraintChecked }); + return { error: errorInstances.InvalidLocationConstraint.customizeDescription(errMsg) }; } if (locationConstraints[locationConstraintChecked].isCold) { return { error: errors.InvalidLocationConstraint }; @@ -83,18 +81,18 @@ function checkLocationConstraint(request, locationConstraint, log) { function _parseXML(request, log, cb) { if (request.post) { return parseString(request.post, (err, result) => { - if (err || !result.CreateBucketConfiguration - || !result.CreateBucketConfiguration.LocationConstraint - || !result.CreateBucketConfiguration.LocationConstraint[0]) { + if ( + err || + !result.CreateBucketConfiguration || + !result.CreateBucketConfiguration.LocationConstraint || + !result.CreateBucketConfiguration.LocationConstraint[0] + ) { log.debug('request xml is malformed'); return cb(errors.MalformedXML); } - const locationConstraint = result.CreateBucketConfiguration - .LocationConstraint[0]; - log.trace('location constraint', - { locationConstraint }); - const locationCheck = checkLocationConstraint(request, - locationConstraint, log); + const locationConstraint = result.CreateBucketConfiguration.LocationConstraint[0]; + log.trace('location constraint', { locationConstraint }); + const locationCheck = checkLocationConstraint(request, locationConstraint, log); if (locationCheck.error) { return cb(locationCheck.error); } @@ -102,8 +100,7 @@ function _parseXML(request, log, cb) { }); } return process.nextTick(() => { - const locationCheck = checkLocationConstraint(request, - undefined, log); + const locationCheck = checkLocationConstraint(request, undefined, log); if (locationCheck.error) { return cb(locationCheck.error); } @@ -112,7 +109,15 @@ function _parseXML(request, log, cb) { } function _buildConstantParams({ - request, bucketName, authInfo, authParams, ip, isSecure, locationConstraint, apiMethod }) { + request, + bucketName, + authInfo, + authParams, + ip, + isSecure, + locationConstraint, + apiMethod, +}) { return { constantParams: { headers: request.headers, @@ -139,16 +144,15 @@ function _handleAuthResults(locationConstraint, log, cb) { if (err) { return cb(err); } - if (!authorizationResults.every(res => { - if (Array.isArray(res)) { - return res.every(subRes => subRes.isAllowed); - } - return res.isAllowed; - })) { - log.trace( - 'authorization check failed for user', - { locationConstraint }, - ); + if ( + !authorizationResults.every(res => { + if (Array.isArray(res)) { + return res.every(subRes => subRes.isAllowed); + } + return res.isAllowed; + }) + ) { + log.trace('authorization check failed for user', { locationConstraint }); return cb(errors.AccessDenied); } return cb(null, locationConstraint); @@ -176,30 +180,15 @@ function authBucketPut(authParams, bucketName, locationConstraint, request, auth authInfo, locationConstraint, }; - const requestConstantParams = [Object.assign( - baseParams, - { apiMethod: 'bucketPut' }, - )]; + const requestConstantParams = [Object.assign(baseParams, { apiMethod: 'bucketPut' })]; if (_isObjectLockEnabled(request.headers)) { - requestConstantParams.push(Object.assign( - {}, - baseParams, - { apiMethod: 'bucketPutObjectLock' }, - )); - requestConstantParams.push(Object.assign( - {}, - baseParams, - { apiMethod: 'bucketPutVersioning' }, - )); + requestConstantParams.push(Object.assign({}, baseParams, { apiMethod: 'bucketPutObjectLock' })); + requestConstantParams.push(Object.assign({}, baseParams, 
{ apiMethod: 'bucketPutVersioning' })); } if (_isAclProvided(request.headers)) { - requestConstantParams.push(Object.assign( - {}, - baseParams, - { apiMethod: 'bucketPutACL' }, - )); + requestConstantParams.push(Object.assign({}, baseParams, { apiMethod: 'bucketPutACL' })); } return requestConstantParams; @@ -218,66 +207,70 @@ function bucketPut(authInfo, request, log, callback) { if (authInfo.isRequesterPublicUser()) { log.debug('operation not available for public user'); - monitoring.promMetrics( - 'PUT', request.bucketName, 403, 'createBucket'); + monitoring.promMetrics('PUT', request.bucketName, 403, 'createBucket'); return callback(errors.AccessDenied); } if (!aclUtils.checkGrantHeaderValidity(request.headers)) { log.trace('invalid acl header'); - monitoring.promMetrics( - 'PUT', request.bucketName, 400, 'createBucket'); + monitoring.promMetrics('PUT', request.bucketName, 400, 'createBucket'); return callback(errors.InvalidArgument); } const { bucketName } = request; - - if (request.bucketName === 'METADATA' - // Note: for this to work with Vault, would need way to set - // canonical ID to http://acs.zenko.io/accounts/service/clueso - && !authInfo.isRequesterThisServiceAccount('clueso')) { - monitoring.promMetrics( - 'PUT', bucketName, 403, 'createBucket'); - return callback(errorInstances.AccessDenied - .customizeDescription('The bucket METADATA is used ' + - 'for internal purposes')); + if ( + request.bucketName === 'METADATA' && + // Note: for this to work with Vault, would need way to set + // canonical ID to http://acs.zenko.io/accounts/service/clueso + !authInfo.isRequesterThisServiceAccount('clueso') + ) { + monitoring.promMetrics('PUT', bucketName, 403, 'createBucket'); + return callback( + errorInstances.AccessDenied.customizeDescription('The bucket METADATA is used ' + 'for internal purposes') + ); } - return waterfall([ - next => _parseXML(request, log, next), - (locationConstraint, next) => { - if (!isRequesterNonAccountUser(authInfo)) { - return next(null, locationConstraint); - } + return waterfall( + [ + next => _parseXML(request, log, next), + (locationConstraint, next) => { + if (!isRequesterNonAccountUser(authInfo)) { + return next(null, locationConstraint); + } - const authParams = auth.server.extractParams(request, log, 's3', request.query); - const requestConstantParams = authBucketPut( - authParams, bucketName, locationConstraint, request, authInfo - ); + const authParams = auth.server.extractParams(request, log, 's3', request.query); + const requestConstantParams = authBucketPut( + authParams, + bucketName, + locationConstraint, + request, + authInfo + ); - return vault.checkPolicies( - requestConstantParams.map(_buildConstantParams), - authInfo.getArn(), - log, - _handleAuthResults(locationConstraint, log, next), - ); - }, - (locationConstraint, next) => createBucket(authInfo, bucketName, - request.headers, locationConstraint, log, (err, previousBucket) => { - // if bucket already existed, gather any relevant cors - // headers - const corsHeaders = collectCorsHeaders( - request.headers.origin, request.method, previousBucket); - if (err) { - return next(err, corsHeaders); - } - pushMetric('createBucket', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics('PUT', bucketName, '200', 'createBucket'); - return next(null, corsHeaders); - }), - ], callback); + return vault.checkPolicies( + requestConstantParams.map(_buildConstantParams), + authInfo.getArn(), + log, + _handleAuthResults(locationConstraint, log, next) + ); + }, + 
(locationConstraint, next) => + createBucket(authInfo, bucketName, request.headers, locationConstraint, log, (err, previousBucket) => { + // if bucket already existed, gather any relevant cors + // headers + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, previousBucket); + if (err) { + return next(err, corsHeaders); + } + pushMetric('createBucket', log, { + authInfo, + bucket: bucketName, + }); + monitoring.promMetrics('PUT', bucketName, '200', 'createBucket'); + return next(null, corsHeaders); + }), + ], + callback + ); } module.exports = { diff --git a/lib/api/bucketPutACL.js b/lib/api/bucketPutACL.js index a6e86a4dba..e924d0844d 100644 --- a/lib/api/bucketPutACL.js +++ b/lib/api/bucketPutACL.js @@ -54,18 +54,14 @@ function bucketPutACL(authInfo, request, log, callback) { 'authenticated-read', 'log-delivery-write', ]; - const possibleGroups = [constants.allAuthedUsersId, - constants.publicId, - constants.logId, - ]; + const possibleGroups = [constants.allAuthedUsersId, constants.publicId, constants.logId]; const metadataValParams = { authInfo, bucketName, requestType: request.apiMethods || 'bucketPutACL', request, }; - const possibleGrants = ['FULL_CONTROL', 'WRITE', - 'WRITE_ACP', 'READ', 'READ_ACP']; + const possibleGrants = ['FULL_CONTROL', 'WRITE', 'WRITE_ACP', 'READ', 'READ_ACP']; const addACLParams = { Canned: '', FULL_CONTROL: [], @@ -75,235 +71,226 @@ function bucketPutACL(authInfo, request, log, callback) { READ_ACP: [], }; - const grantReadHeader = - aclUtils.parseGrant(request.headers[ - 'x-amz-grant-read'], 'READ'); - const grantWriteHeader = - aclUtils.parseGrant(request.headers['x-amz-grant-write'], 'WRITE'); - const grantReadACPHeader = - aclUtils.parseGrant(request.headers['x-amz-grant-read-acp'], - 'READ_ACP'); - const grantWriteACPHeader = - aclUtils.parseGrant(request.headers['x-amz-grant-write-acp'], - 'WRITE_ACP'); - const grantFullControlHeader = - aclUtils.parseGrant(request.headers['x-amz-grant-full-control'], - 'FULL_CONTROL'); + const grantReadHeader = aclUtils.parseGrant(request.headers['x-amz-grant-read'], 'READ'); + const grantWriteHeader = aclUtils.parseGrant(request.headers['x-amz-grant-write'], 'WRITE'); + const grantReadACPHeader = aclUtils.parseGrant(request.headers['x-amz-grant-read-acp'], 'READ_ACP'); + const grantWriteACPHeader = aclUtils.parseGrant(request.headers['x-amz-grant-write-acp'], 'WRITE_ACP'); + const grantFullControlHeader = aclUtils.parseGrant(request.headers['x-amz-grant-full-control'], 'FULL_CONTROL'); - return async.waterfall([ - function waterfall1(next) { - standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, bucket) => { - if (err) { - log.trace('request authorization failed', { - error: err, - method: 'metadataValidateBucket', - }); - return next(err, bucket); - } - // if the API call is allowed, ensure that the parameters are valid - if (newCannedACL && possibleCannedACL.indexOf(newCannedACL) === -1) { - log.trace('invalid canned acl argument', { - acl: newCannedACL, - method: 'bucketPutACL', - }); - return next(errors.InvalidArgument); - } - if (!aclUtils.checkGrantHeaderValidity(request.headers)) { - log.trace('invalid acl header'); - return next(errors.InvalidArgument); - } - return next(null, bucket); - }); - }, - function waterfall2(bucket, next) { - // If not setting acl through headers, parse body - if (newCannedACL === undefined - && grantReadHeader === undefined - && grantWriteHeader === undefined - && grantReadACPHeader === undefined - && 
grantWriteACPHeader === undefined - && grantFullControlHeader === undefined) { - if (request.post) { - log.trace('parsing acls from request body'); - return aclUtils.parseAclXml(request.post, log, - (err, jsonGrants) => next(err, bucket, jsonGrants)); - } - // If no ACLs sent with request at all - return next(errors.MalformedXML, bucket); - } - /** - * If acl set in headers (including canned acl) pass bucket and - * undefined to the next function - */ - log.trace('using acls from request headers'); - return next(null, bucket, undefined); - }, - function waterfall3(bucket, jsonGrants, next) { - // If canned ACL just move on and set them - if (newCannedACL) { - log.trace('canned acl', { cannedAcl: newCannedACL }); - addACLParams.Canned = newCannedACL; - return next(null, bucket, addACLParams); - } - let usersIdentifiedByEmail = []; - let usersIdentifiedByGroup = []; - let usersIdentifiedByID = []; - let hasError = false; - /** - * If grants set by xml, loop through the grants - * and separate grant types so parsed in same manner - * as header grants - */ - if (jsonGrants) { - log.trace('parsing acl grants'); - jsonGrants.forEach(grant => { - const grantee = grant.Grantee[0]; - const granteeType = grantee.$['xsi:type']; - const permission = grant.Permission[0]; - let skip = false; - if (possibleGrants.indexOf(permission) < 0) { - skip = true; - } - if (!skip && granteeType === 'AmazonCustomerByEmail') { - usersIdentifiedByEmail.push({ - identifier: grantee.EmailAddress[0], - grantType: permission, - userIDType: 'emailaddress', + return async.waterfall( + [ + function waterfall1(next) { + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { + if (err) { + log.trace('request authorization failed', { + error: err, + method: 'metadataValidateBucket', }); + return next(err, bucket); } - if (!skip && granteeType === 'CanonicalUser') { - usersIdentifiedByID.push({ - identifier: grantee.ID[0], - grantType: permission, - userIDType: 'id', + // if the API call is allowed, ensure that the parameters are valid + if (newCannedACL && possibleCannedACL.indexOf(newCannedACL) === -1) { + log.trace('invalid canned acl argument', { + acl: newCannedACL, + method: 'bucketPutACL', }); + return next(errors.InvalidArgument); } - if (!skip && granteeType === 'Group') { - if (possibleGroups.indexOf(grantee.URI[0]) < 0) { - log.trace('invalid user group', - { userGroup: grantee.URI[0] }); - hasError = true; - return next(errors.InvalidArgument, bucket); - } - return usersIdentifiedByGroup.push({ - identifier: grantee.URI[0], - grantType: permission, - userIDType: 'uri', - }); + if (!aclUtils.checkGrantHeaderValidity(request.headers)) { + log.trace('invalid acl header'); + return next(errors.InvalidArgument); } - return undefined; + return next(null, bucket); }); - if (hasError) { - return undefined; + }, + function waterfall2(bucket, next) { + // If not setting acl through headers, parse body + if ( + newCannedACL === undefined && + grantReadHeader === undefined && + grantWriteHeader === undefined && + grantReadACPHeader === undefined && + grantWriteACPHeader === undefined && + grantFullControlHeader === undefined + ) { + if (request.post) { + log.trace('parsing acls from request body'); + return aclUtils.parseAclXml(request.post, log, (err, jsonGrants) => + next(err, bucket, jsonGrants) + ); + } + // If no ACLs sent with request at all + return next(errors.MalformedXML, bucket); } - } else { - // If no canned ACL and no parsed xml, loop - // through the access headers 
- const allGrantHeaders = - [].concat(grantReadHeader, grantWriteHeader, - grantReadACPHeader, grantWriteACPHeader, - grantFullControlHeader); + /** + * If acl set in headers (including canned acl) pass bucket and + * undefined to the next function + */ + log.trace('using acls from request headers'); + return next(null, bucket, undefined); + }, + function waterfall3(bucket, jsonGrants, next) { + // If canned ACL just move on and set them + if (newCannedACL) { + log.trace('canned acl', { cannedAcl: newCannedACL }); + addACLParams.Canned = newCannedACL; + return next(null, bucket, addACLParams); + } + let usersIdentifiedByEmail = []; + let usersIdentifiedByGroup = []; + let usersIdentifiedByID = []; + let hasError = false; + /** + * If grants set by xml, loop through the grants + * and separate grant types so parsed in same manner + * as header grants + */ + if (jsonGrants) { + log.trace('parsing acl grants'); + jsonGrants.forEach(grant => { + const grantee = grant.Grantee[0]; + const granteeType = grantee.$['xsi:type']; + const permission = grant.Permission[0]; + let skip = false; + if (possibleGrants.indexOf(permission) < 0) { + skip = true; + } + if (!skip && granteeType === 'AmazonCustomerByEmail') { + usersIdentifiedByEmail.push({ + identifier: grantee.EmailAddress[0], + grantType: permission, + userIDType: 'emailaddress', + }); + } + if (!skip && granteeType === 'CanonicalUser') { + usersIdentifiedByID.push({ + identifier: grantee.ID[0], + grantType: permission, + userIDType: 'id', + }); + } + if (!skip && granteeType === 'Group') { + if (possibleGroups.indexOf(grantee.URI[0]) < 0) { + log.trace('invalid user group', { userGroup: grantee.URI[0] }); + hasError = true; + return next(errors.InvalidArgument, bucket); + } + return usersIdentifiedByGroup.push({ + identifier: grantee.URI[0], + grantType: permission, + userIDType: 'uri', + }); + } + return undefined; + }); + if (hasError) { + return undefined; + } + } else { + // If no canned ACL and no parsed xml, loop + // through the access headers + const allGrantHeaders = [].concat( + grantReadHeader, + grantWriteHeader, + grantReadACPHeader, + grantWriteACPHeader, + grantFullControlHeader + ); - usersIdentifiedByEmail = allGrantHeaders.filter(item => - item && item.userIDType.toLowerCase() === 'emailaddress'); + usersIdentifiedByEmail = allGrantHeaders.filter( + item => item && item.userIDType.toLowerCase() === 'emailaddress' + ); - usersIdentifiedByGroup = allGrantHeaders - .filter(itm => itm && itm.userIDType - .toLowerCase() === 'uri'); - for (let i = 0; i < usersIdentifiedByGroup.length; i++) { - const userGroup = usersIdentifiedByGroup[i].identifier; - if (possibleGroups.indexOf(userGroup) < 0) { - log.trace('invalid user group', { userGroup, - method: 'bucketPutACL' }); - return next(errors.InvalidArgument, bucket); + usersIdentifiedByGroup = allGrantHeaders.filter( + itm => itm && itm.userIDType.toLowerCase() === 'uri' + ); + for (let i = 0; i < usersIdentifiedByGroup.length; i++) { + const userGroup = usersIdentifiedByGroup[i].identifier; + if (possibleGroups.indexOf(userGroup) < 0) { + log.trace('invalid user group', { userGroup, method: 'bucketPutACL' }); + return next(errors.InvalidArgument, bucket); + } } + /** TODO: Consider whether want to verify with Vault + * whether canonicalID is associated with existing + * account before adding to ACL */ + usersIdentifiedByID = allGrantHeaders.filter( + item => item && item.userIDType.toLowerCase() === 'id' + ); } - /** TODO: Consider whether want to verify with Vault - * whether 
canonicalID is associated with existing - * account before adding to ACL */ - usersIdentifiedByID = allGrantHeaders - .filter(item => item && item.userIDType - .toLowerCase() === 'id'); - } - // For now, at least make sure ID is 64-char alphanumeric - // string before adding to ACL (check can be removed if - // verifying with Vault for associated accounts first) - for (let i = 0; i < usersIdentifiedByID.length; i++) { - const id = usersIdentifiedByID[i].identifier; - if (!aclUtils.isValidCanonicalId(id)) { - log.trace('invalid user id argument', { - id, - method: 'bucketPutACL', - }); - monitoring.promMetrics('PUT', bucketName, 400, - 'bucketPutACL'); - return callback(errors.InvalidArgument, bucket); + // For now, at least make sure ID is 64-char alphanumeric + // string before adding to ACL (check can be removed if + // verifying with Vault for associated accounts first) + for (let i = 0; i < usersIdentifiedByID.length; i++) { + const id = usersIdentifiedByID[i].identifier; + if (!aclUtils.isValidCanonicalId(id)) { + log.trace('invalid user id argument', { + id, + method: 'bucketPutACL', + }); + monitoring.promMetrics('PUT', bucketName, 400, 'bucketPutACL'); + return callback(errors.InvalidArgument, bucket); + } } - } - const justEmails = usersIdentifiedByEmail - .map(item => item.identifier); - // If have to lookup canonicalID's do that asynchronously - if (justEmails.length > 0) { - return vault.getCanonicalIds(justEmails, log, - (err, results) => { + const justEmails = usersIdentifiedByEmail.map(item => item.identifier); + // If have to lookup canonicalID's do that asynchronously + if (justEmails.length > 0) { + return vault.getCanonicalIds(justEmails, log, (err, results) => { if (err) { log.trace('error looking up canonical ids', { - error: err, method: 'vault.getCanonicalIDs' }); + error: err, + method: 'vault.getCanonicalIDs', + }); return next(err, bucket); } - const reconstructedUsersIdentifiedByEmail = aclUtils - .reconstructUsersIdentifiedByEmail(results, - usersIdentifiedByEmail); + const reconstructedUsersIdentifiedByEmail = aclUtils.reconstructUsersIdentifiedByEmail( + results, + usersIdentifiedByEmail + ); const allUsers = [].concat( reconstructedUsersIdentifiedByEmail, usersIdentifiedByID, - usersIdentifiedByGroup); - const revisedAddACLParams = aclUtils - .sortHeaderGrants(allUsers, addACLParams); + usersIdentifiedByGroup + ); + const revisedAddACLParams = aclUtils.sortHeaderGrants(allUsers, addACLParams); return next(null, bucket, revisedAddACLParams); }); + } + const allUsers = [].concat(usersIdentifiedByID, usersIdentifiedByGroup); + const revisedAddACLParams = aclUtils.sortHeaderGrants(allUsers, addACLParams); + return next(null, bucket, revisedAddACLParams); + }, + function waterfall4(bucket, addACLParams, next) { + if (bucket.hasDeletedFlag() && canonicalID !== bucket.getOwner()) { + log.trace('deleted flag on bucket'); + return next(errors.NoSuchBucket); + } + if (bucket.hasTransientFlag() || bucket.hasDeletedFlag()) { + log.trace('transient or deleted flag so cleaning up bucket'); + bucket.setFullAcl(addACLParams); + return cleanUpBucket(bucket, canonicalID, log, err => next(err, bucket)); + } + // If no bucket flags, just add acl's to bucket metadata + return acl.addACL(bucket, addACLParams, log, err => next(err, bucket)); + }, + ], + (err, bucket) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'bucketPutACL' }); + 
monitoring.promMetrics('PUT', bucketName, err.code, 'bucketPutACL'); + } else { + pushMetric('putBucketAcl', log, { + authInfo, + bucket: bucketName, + }); + monitoring.promMetrics('PUT', bucketName, '200', 'bucketPutACL'); } - const allUsers = [].concat( - usersIdentifiedByID, - usersIdentifiedByGroup); - const revisedAddACLParams = - aclUtils.sortHeaderGrants(allUsers, addACLParams); - return next(null, bucket, revisedAddACLParams); - }, - function waterfall4(bucket, addACLParams, next) { - if (bucket.hasDeletedFlag() && canonicalID !== bucket.getOwner()) { - log.trace('deleted flag on bucket'); - return next(errors.NoSuchBucket); - } - if (bucket.hasTransientFlag() || bucket.hasDeletedFlag()) { - log.trace('transient or deleted flag so cleaning up bucket'); - bucket.setFullAcl(addACLParams); - return cleanUpBucket(bucket, canonicalID, log, err => - next(err, bucket)); - } - // If no bucket flags, just add acl's to bucket metadata - return acl.addACL(bucket, addACLParams, log, err => - next(err, bucket)); - }, - ], (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, - method: 'bucketPutACL' }); - monitoring.promMetrics('PUT', bucketName, err.code, 'bucketPutACL'); - } else { - pushMetric('putBucketAcl', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics('PUT', bucketName, '200', 'bucketPutACL'); + return callback(err, corsHeaders); } - return callback(err, corsHeaders); - }); + ); } module.exports = bucketPutACL; diff --git a/lib/api/bucketPutCors.js b/lib/api/bucketPutCors.js index 44e65df6f5..4754ae232b 100644 --- a/lib/api/bucketPutCors.js +++ b/lib/api/bucketPutCors.js @@ -4,8 +4,7 @@ const { errors, errorInstances } = require('arsenal'); const bucketShield = require('./apiUtils/bucket/bucketShield'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); -const { isBucketAuthorized } = - require('./apiUtils/authorization/permissionChecks'); +const { isBucketAuthorized } = require('./apiUtils/authorization/permissionChecks'); const metadata = require('../metadata/wrapper'); const { parseCorsXml } = require('./apiUtils/bucket/bucketCors'); const { pushMetric } = require('../utapi/utilities'); @@ -27,14 +26,12 @@ function bucketPutCors(authInfo, request, log, callback) { const canonicalID = authInfo.getCanonicalID(); if (!request.post) { - log.debug('CORS xml body is missing', - { error: errors.MissingRequestBodyError }); + log.debug('CORS xml body is missing', { error: errors.MissingRequestBodyError }); monitoring.promMetrics('PUT', bucketName, 400, 'putBucketCors'); return callback(errors.MissingRequestBodyError); } - const md5 = crypto.createHash('md5') - .update(request.post, 'utf8').digest('base64'); + const md5 = crypto.createHash('md5').update(request.post, 'utf8').digest('base64'); if (md5 !== request.headers['content-md5']) { log.debug('bad md5 digest', { error: errors.BadDigest }); monitoring.promMetrics('PUT', bucketName, 400, 'putBucketCors'); @@ -48,57 +45,65 @@ function bucketPutCors(authInfo, request, log, callback) { return callback(errorInstances.MalformedXML.customizeDescription(errMsg)); } - return async.waterfall([ - function parseXmlBody(next) { - log.trace('parsing cors rules'); - return parseCorsXml(request.post, log, next); - }, - function getBucketfromMetadata(rules, next) { - metadata.getBucket(bucketName, log, (err, bucket) => { - if (err) { - log.debug('metadata getbucket failed', { error: err }); 
- return next(err); - } - if (bucketShield(bucket, requestType)) { - return next(errors.NoSuchBucket); + return async.waterfall( + [ + function parseXmlBody(next) { + log.trace('parsing cors rules'); + return parseCorsXml(request.post, log, next); + }, + function getBucketfromMetadata(rules, next) { + metadata.getBucket(bucketName, log, (err, bucket) => { + if (err) { + log.debug('metadata getbucket failed', { error: err }); + return next(err); + } + if (bucketShield(bucket, requestType)) { + return next(errors.NoSuchBucket); + } + log.trace('found bucket in metadata'); + // get corsHeaders before CORSConfiguration is updated + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + return next(null, bucket, rules, corsHeaders); + }); + }, + function validateBucketAuthorization(bucket, rules, corsHeaders, next) { + if ( + !isBucketAuthorized( + bucket, + request.apiMethods || requestType, + canonicalID, + authInfo, + log, + request, + request.actionImplicitDenies + ) + ) { + log.debug('access denied for account on bucket', { + requestType, + }); + return next(errors.AccessDenied, corsHeaders); } - log.trace('found bucket in metadata'); - // get corsHeaders before CORSConfiguration is updated - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); return next(null, bucket, rules, corsHeaders); - }); - }, - function validateBucketAuthorization(bucket, rules, corsHeaders, next) { - if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, - authInfo, log, request, request.actionImplicitDenies)) { - log.debug('access denied for account on bucket', { - requestType, - }); - return next(errors.AccessDenied, corsHeaders); + }, + function updateBucketMetadata(bucket, rules, corsHeaders, next) { + log.trace('updating bucket cors rules in metadata'); + bucket.setCors(rules); + metadata.updateBucket(bucketName, bucket, log, err => next(err, corsHeaders)); + }, + ], + (err, corsHeaders) => { + if (err) { + log.trace('error processing request', { error: err, method: 'bucketPutCors' }); + monitoring.promMetrics('PUT', bucketName, err.code, 'putBucketCors'); } - return next(null, bucket, rules, corsHeaders); - }, - function updateBucketMetadata(bucket, rules, corsHeaders, next) { - log.trace('updating bucket cors rules in metadata'); - bucket.setCors(rules); - metadata.updateBucket(bucketName, bucket, log, err => - next(err, corsHeaders)); - }, - ], (err, corsHeaders) => { - if (err) { - log.trace('error processing request', { error: err, - method: 'bucketPutCors' }); - monitoring.promMetrics('PUT', bucketName, err.code, - 'putBucketCors'); + pushMetric('putBucketCors', log, { + authInfo, + bucket: bucketName, + }); + monitoring.promMetrics('PUT', bucketName, '200', 'putBucketCors'); + return callback(err, corsHeaders); } - pushMetric('putBucketCors', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics('PUT', bucketName, '200', 'putBucketCors'); - return callback(err, corsHeaders); - }); + ); } module.exports = bucketPutCors; diff --git a/lib/api/bucketPutEncryption.js b/lib/api/bucketPutEncryption.js index e214407ed7..e02ccff5e8 100644 --- a/lib/api/bucketPutEncryption.js +++ b/lib/api/bucketPutEncryption.js @@ -27,73 +27,74 @@ function bucketPutEncryption(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next), - (bucket, next) => checkExpectedBucketOwner(request.headers, 
bucket, log, err => next(err, bucket)), - (bucket, next) => { - log.trace('parsing encryption config', { method: 'bucketPutEncryption' }); - return parseEncryptionXml(request.post, log, (err, encryptionConfig) => { - if (err) { - return next(err); - } - return next(null, bucket, encryptionConfig); - }); - }, - (bucket, encryptionConfig, next) => { - const existingConfig = bucket.getServerSideEncryption(); - // Check if encryption is not configured or if a default master key has not been created yet. - if (existingConfig === null || !existingConfig.masterKeyId) { - return kms.bucketLevelEncryption(bucket, encryptionConfig, log, - (err, updatedConfig) => { + return async.waterfall( + [ + next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next), + (bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)), + (bucket, next) => { + log.trace('parsing encryption config', { method: 'bucketPutEncryption' }); + return parseEncryptionXml(request.post, log, (err, encryptionConfig) => { + if (err) { + return next(err); + } + return next(null, bucket, encryptionConfig); + }); + }, + (bucket, encryptionConfig, next) => { + const existingConfig = bucket.getServerSideEncryption(); + // Check if encryption is not configured or if a default master key has not been created yet. + if (existingConfig === null || !existingConfig.masterKeyId) { + return kms.bucketLevelEncryption(bucket, encryptionConfig, log, (err, updatedConfig) => { if (err) { return next(err); } return next(null, bucket, updatedConfig); }); - } + } - // If encryption is already configured and a default master key exists + // If encryption is already configured and a default master key exists - // If the request does not specify a custom key, reuse the existing default master key id - // This ensures that a new default master key is not generated every time - // `putBucketEncryption` is called, avoiding unnecessary key creation - const updatedConfig = { - mandatory: true, - algorithm: encryptionConfig.algorithm, - cryptoScheme: existingConfig.cryptoScheme, - masterKeyId: existingConfig.masterKeyId, - }; + // If the request does not specify a custom key, reuse the existing default master key id + // This ensures that a new default master key is not generated every time + // `putBucketEncryption` is called, avoiding unnecessary key creation + const updatedConfig = { + mandatory: true, + algorithm: encryptionConfig.algorithm, + cryptoScheme: existingConfig.cryptoScheme, + masterKeyId: existingConfig.masterKeyId, + }; - // If the request specifies a custom master key id, store it in the updated configuration - const { configuredMasterKeyId } = encryptionConfig; - if (configuredMasterKeyId) { - updatedConfig.configuredMasterKeyId = configuredMasterKeyId; - } + // If the request specifies a custom master key id, store it in the updated configuration + const { configuredMasterKeyId } = encryptionConfig; + if (configuredMasterKeyId) { + updatedConfig.configuredMasterKeyId = configuredMasterKeyId; + } - const { isAccountEncryptionEnabled } = existingConfig; - if (isAccountEncryptionEnabled) { - updatedConfig.isAccountEncryptionEnabled = isAccountEncryptionEnabled; - } + const { isAccountEncryptionEnabled } = existingConfig; + if (isAccountEncryptionEnabled) { + updatedConfig.isAccountEncryptionEnabled = isAccountEncryptionEnabled; + } - return next(null, bucket, updatedConfig); - }, - (bucket, updatedConfig, next) => { - bucket.setServerSideEncryption(updatedConfig); - 
metadata.updateBucket(bucket.getName(), bucket, log, err => next(err, bucket)); - }, - ], - (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, method: 'bucketPutEncryption' }); - return callback(err, corsHeaders); + return next(null, bucket, updatedConfig); + }, + (bucket, updatedConfig, next) => { + bucket.setServerSideEncryption(updatedConfig); + metadata.updateBucket(bucket.getName(), bucket, log, err => next(err, bucket)); + }, + ], + (err, bucket) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'bucketPutEncryption' }); + return callback(err, corsHeaders); + } + pushMetric('putBucketEncryption', log, { + authInfo, + bucket: bucketName, + }); + return callback(null, corsHeaders); } - pushMetric('putBucketEncryption', log, { - authInfo, - bucket: bucketName, - }); - return callback(null, corsHeaders); - }); + ); } module.exports = bucketPutEncryption; diff --git a/lib/api/bucketPutLifecycle.js b/lib/api/bucketPutLifecycle.js index b1a30e27bf..4552b56982 100644 --- a/lib/api/bucketPutLifecycle.js +++ b/lib/api/bucketPutLifecycle.js @@ -1,7 +1,6 @@ const { waterfall } = require('async'); const uuid = require('uuid').v4; -const LifecycleConfiguration = - require('arsenal').models.LifecycleConfiguration; +const LifecycleConfiguration = require('arsenal').models.LifecycleConfiguration; const config = require('../Config').config; const parseXML = require('../utilities/parseXML'); @@ -30,53 +29,51 @@ function bucketPutLifecycle(authInfo, request, log, callback) { requestType: request.apiMethods || 'bucketPutLifecycle', request, }; - return waterfall([ - next => parseXML(request.post, log, next), - (parsedXml, next) => { - const lcConfigClass = - new LifecycleConfiguration(parsedXml, config); - // if there was an error getting lifecycle configuration, - // returned configObj will contain 'error' key - process.nextTick(() => { - const configObj = lcConfigClass.getLifecycleConfiguration(); - if (configObj.error) { - return next(configObj.error); + return waterfall( + [ + next => parseXML(request.post, log, next), + (parsedXml, next) => { + const lcConfigClass = new LifecycleConfiguration(parsedXml, config); + // if there was an error getting lifecycle configuration, + // returned configObj will contain 'error' key + process.nextTick(() => { + const configObj = lcConfigClass.getLifecycleConfiguration(); + if (configObj.error) { + return next(configObj.error); + } + return next(null, configObj); + }); + }, + (lcConfig, next) => + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { + if (err) { + return next(err, bucket); + } + return next(null, bucket, lcConfig); + }), + (bucket, lcConfig, next) => { + if (!bucket.getUid()) { + bucket.setUid(uuid()); } - return next(null, configObj); - }); - }, - (lcConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, bucket) => { - if (err) { - return next(err, bucket); - } - return next(null, bucket, lcConfig); - }), - (bucket, lcConfig, next) => { - if (!bucket.getUid()) { - bucket.setUid(uuid()); + bucket.setLifecycleConfiguration(lcConfig); + metadata.updateBucket(bucket.getName(), bucket, log, err => next(err, bucket)); + }, + ], + (err, bucket) => { + const corsHeaders = 
collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'bucketPutLifecycle' }); + monitoring.promMetrics('PUT', bucketName, err.code, 'putBucketLifecycle'); + return callback(err, corsHeaders); } - bucket.setLifecycleConfiguration(lcConfig); - metadata.updateBucket(bucket.getName(), bucket, log, err => - next(err, bucket)); - }, - ], (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, - method: 'bucketPutLifecycle' }); - monitoring.promMetrics( - 'PUT', bucketName, err.code, 'putBucketLifecycle'); - return callback(err, corsHeaders); + pushMetric('putBucketLifecycle', log, { + authInfo, + bucket: bucketName, + }); + monitoring.promMetrics('PUT', bucketName, '200', 'putBucketLifecycle'); + return callback(null, corsHeaders); } - pushMetric('putBucketLifecycle', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics('PUT', bucketName, '200', 'putBucketLifecycle'); - return callback(null, corsHeaders); - }); + ); } module.exports = bucketPutLifecycle; diff --git a/lib/api/bucketPutNotification.js b/lib/api/bucketPutNotification.js index ab82b5109a..28129f2c8e 100644 --- a/lib/api/bucketPutNotification.js +++ b/lib/api/bucketPutNotification.js @@ -27,34 +27,36 @@ function bucketPutNotification(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => parseXML(request.post, log, next), - (parsedXml, next) => { - const notificationConfig = getNotificationConfiguration(parsedXml); - const notifConfig = notificationConfig.error ? undefined : notificationConfig; - process.nextTick(() => next(notificationConfig.error, notifConfig)); - }, - (notifConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, bucket) => next(err, bucket, notifConfig)), - (bucket, notifConfig, next) => { - bucket.setNotificationConfiguration(notifConfig); - metadata.updateBucket(bucket.getName(), bucket, log, - err => next(err, bucket)); - }, - ], (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, - method: 'bucketPutNotification' }); - return callback(err, corsHeaders); + return async.waterfall( + [ + next => parseXML(request.post, log, next), + (parsedXml, next) => { + const notificationConfig = getNotificationConfiguration(parsedXml); + const notifConfig = notificationConfig.error ? 
undefined : notificationConfig; + process.nextTick(() => next(notificationConfig.error, notifConfig)); + }, + (notifConfig, next) => + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => + next(err, bucket, notifConfig) + ), + (bucket, notifConfig, next) => { + bucket.setNotificationConfiguration(notifConfig); + metadata.updateBucket(bucket.getName(), bucket, log, err => next(err, bucket)); + }, + ], + (err, bucket) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'bucketPutNotification' }); + return callback(err, corsHeaders); + } + pushMetric('putBucketNotification', log, { + authInfo, + bucket: bucketName, + }); + return callback(null, corsHeaders); } - pushMetric('putBucketNotification', log, { - authInfo, - bucket: bucketName, - }); - return callback(null, corsHeaders); - }); + ); } module.exports = bucketPutNotification; diff --git a/lib/api/bucketPutObjectLock.js b/lib/api/bucketPutObjectLock.js index a3d549789a..eb76256545 100644 --- a/lib/api/bucketPutObjectLock.js +++ b/lib/api/bucketPutObjectLock.js @@ -2,7 +2,9 @@ const { waterfall } = require('async'); const arsenal = require('arsenal'); const { errorInstances } = arsenal; -const { models: { ObjectLockConfiguration } } = arsenal; +const { + models: { ObjectLockConfiguration }, +} = arsenal; const parseXML = require('../utilities/parseXML'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); @@ -29,55 +31,57 @@ function bucketPutObjectLock(authInfo, request, log, callback) { requestType: request.apiMethods || 'bucketPutObjectLock', request, }; - return waterfall([ - next => parseXML(request.post, log, next), - (parsedXml, next) => { - const lockConfigClass = new ObjectLockConfiguration(parsedXml); - // if there was an error getting object lock configuration, - // returned configObj will contain 'error' key - process.nextTick(() => { - const configObj = lockConfigClass. 
- getValidatedObjectLockConfiguration(); - return next(configObj.error || null, configObj); + return waterfall( + [ + next => parseXML(request.post, log, next), + (parsedXml, next) => { + const lockConfigClass = new ObjectLockConfiguration(parsedXml); + // if there was an error getting object lock configuration, + // returned configObj will contain 'error' key + process.nextTick(() => { + const configObj = lockConfigClass.getValidatedObjectLockConfiguration(); + return next(configObj.error || null, configObj); + }); + }, + (objectLockConfig, next) => + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { + if (err) { + return next(err, bucket); + } + return next(null, bucket, objectLockConfig); + }), + (bucket, objectLockConfig, next) => { + const isObjectLockEnabled = bucket.isObjectLockEnabled(); + process.nextTick(() => { + if (!isObjectLockEnabled) { + return next( + errorInstances.InvalidBucketState.customizeDescription( + 'Object Lock configuration cannot be enabled on ' + 'existing buckets' + ), + bucket + ); + } + return next(null, bucket, objectLockConfig); + }); + }, + (bucket, objectLockConfig, next) => { + bucket.setObjectLockConfiguration(objectLockConfig); + metadata.updateBucket(bucket.getName(), bucket, log, err => next(err, bucket)); + }, + ], + (err, bucket) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'bucketPutObjectLock' }); + return callback(err, corsHeaders); + } + pushMetric('putBucketObjectLock', log, { + authInfo, + bucket: bucketName, }); - }, - (objectLockConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, - log, (err, bucket) => { - if (err) { - return next(err, bucket); - } - return next(null, bucket, objectLockConfig); - }), - (bucket, objectLockConfig, next) => { - const isObjectLockEnabled = bucket.isObjectLockEnabled(); - process.nextTick(() => { - if (!isObjectLockEnabled) { - return next(errorInstances.InvalidBucketState.customizeDescription( - 'Object Lock configuration cannot be enabled on ' + - 'existing buckets'), bucket); - } - return next(null, bucket, objectLockConfig); - }); - }, - (bucket, objectLockConfig, next) => { - bucket.setObjectLockConfiguration(objectLockConfig); - metadata.updateBucket(bucket.getName(), bucket, log, err => - next(err, bucket)); - }, - ], (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, - method: 'bucketPutObjectLock' }); - return callback(err, corsHeaders); + return callback(null, corsHeaders); } - pushMetric('putBucketObjectLock', log, { - authInfo, - bucket: bucketName, - }); - return callback(null, corsHeaders); - }); + ); } module.exports = bucketPutObjectLock; diff --git a/lib/api/bucketPutPolicy.js b/lib/api/bucketPutPolicy.js index 56f48e2a7f..822cf84d05 100644 --- a/lib/api/bucketPutPolicy.js +++ b/lib/api/bucketPutPolicy.js @@ -3,8 +3,7 @@ const { errorInstances, models } = require('arsenal'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const metadata = require('../metadata/wrapper'); const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); -const { validatePolicyResource, validatePolicyConditions } = - require('./apiUtils/authorization/permissionChecks'); +const { validatePolicyResource, validatePolicyConditions } = 
require('./apiUtils/authorization/permissionChecks'); const { BucketPolicy } = models; /** @@ -16,8 +15,7 @@ const { BucketPolicy } = models; function _checkNotImplementedPolicy(policyString) { // bucket names and key names cannot include "", so including those // isolates not implemented keys - return policyString.includes('"Service"') - || policyString.includes('"Federated"'); + return policyString.includes('"Service"') || policyString.includes('"Federated"'); } /** @@ -39,58 +37,57 @@ function bucketPutPolicy(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => { - const bucketPolicy = new BucketPolicy(request.post); - // if there was an error getting bucket policy, - // returned policyObj will contain 'error' key - process.nextTick(() => { - const policyObj = bucketPolicy.getBucketPolicy(); - if (_checkNotImplementedPolicy(request.post)) { - const err = errorInstances.NotImplemented.customizeDescription( - 'Bucket policy contains element not yet implemented'); - return next(err); - } - if (policyObj.error) { - const err = errorInstances.MalformedPolicy.customizeDescription( - policyObj.error.description); - return next(err); - } - return next(null, policyObj); - }); - }, - (bucketPolicy, next) => { - process.nextTick(() => { - if (!validatePolicyResource(bucketName, bucketPolicy)) { - return next(errorInstances.MalformedPolicy.customizeDescription( - 'Policy has invalid resource')); - } - return next(validatePolicyConditions(bucketPolicy), bucketPolicy); - }); - }, - (bucketPolicy, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, bucket) => { - if (err) { - return next(err, bucket); - } - return next(null, bucket, bucketPolicy); - }), - (bucket, bucketPolicy, next) => { - bucket.setBucketPolicy(bucketPolicy); - metadata.updateBucket(bucket.getName(), bucket, log, - err => next(err, bucket)); - }, - ], (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', - { error: err, method: 'bucketPutPolicy' }); - return callback(err, corsHeaders); + return async.waterfall( + [ + next => { + const bucketPolicy = new BucketPolicy(request.post); + // if there was an error getting bucket policy, + // returned policyObj will contain 'error' key + process.nextTick(() => { + const policyObj = bucketPolicy.getBucketPolicy(); + if (_checkNotImplementedPolicy(request.post)) { + const err = errorInstances.NotImplemented.customizeDescription( + 'Bucket policy contains element not yet implemented' + ); + return next(err); + } + if (policyObj.error) { + const err = errorInstances.MalformedPolicy.customizeDescription(policyObj.error.description); + return next(err); + } + return next(null, policyObj); + }); + }, + (bucketPolicy, next) => { + process.nextTick(() => { + if (!validatePolicyResource(bucketName, bucketPolicy)) { + return next(errorInstances.MalformedPolicy.customizeDescription('Policy has invalid resource')); + } + return next(validatePolicyConditions(bucketPolicy), bucketPolicy); + }); + }, + (bucketPolicy, next) => + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { + if (err) { + return next(err, bucket); + } + return next(null, bucket, bucketPolicy); + }), + (bucket, bucketPolicy, next) => { + bucket.setBucketPolicy(bucketPolicy); + metadata.updateBucket(bucket.getName(), bucket, log, err => next(err, bucket)); + }, + ], + (err, bucket) => { + const 
corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'bucketPutPolicy' }); + return callback(err, corsHeaders); + } + // TODO: implement Utapi metric support + return callback(null, corsHeaders); } - // TODO: implement Utapi metric support - return callback(null, corsHeaders); - }); + ); } module.exports = bucketPutPolicy; diff --git a/lib/api/bucketPutReplication.js b/lib/api/bucketPutReplication.js index 5fca68aaee..37a9c7e27e 100644 --- a/lib/api/bucketPutReplication.js +++ b/lib/api/bucketPutReplication.js @@ -4,17 +4,15 @@ const { errorInstances } = require('arsenal'); const metadata = require('../metadata/wrapper'); const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); -const { getReplicationConfiguration } = - require('./apiUtils/bucket/getReplicationConfiguration'); -const validateConfiguration = - require('./apiUtils/bucket/validateReplicationConfig'); +const { getReplicationConfiguration } = require('./apiUtils/bucket/getReplicationConfiguration'); +const validateConfiguration = require('./apiUtils/bucket/validateReplicationConfig'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const monitoring = require('../utilities/monitoringHandler'); // The error response when a bucket does not have versioning 'Enabled'. const versioningNotEnabledError = errorInstances.InvalidRequest.customizeDescription( - 'Versioning must be \'Enabled\' on the bucket to apply a replication ' + - 'configuration'); + "Versioning must be 'Enabled' on the bucket to apply a replication " + 'configuration' +); /** * bucketPutReplication - Create or update bucket replication configuration @@ -33,57 +31,55 @@ function bucketPutReplication(authInfo, request, log, callback) { requestType: request.apiMethods || 'bucketPutReplication', request, }; - return waterfall([ - // Validate the request XML and return the replication configuration. - next => getReplicationConfiguration(post, log, next), - // Check bucket user privileges and ensure versioning is 'Enabled'. - (config, next) => - // TODO: Validate that destination bucket exists and has versioning. - standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { - if (err) { - return next(err); + return waterfall( + [ + // Validate the request XML and return the replication configuration. + next => getReplicationConfiguration(post, log, next), + // Check bucket user privileges and ensure versioning is 'Enabled'. + (config, next) => + // TODO: Validate that destination bucket exists and has versioning. + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { + if (err) { + return next(err); + } + // Replication requires that versioning is 'Enabled' unless it + // is an NFS bucket. + if (!bucket.isNFS() && !bucket.isVersioningEnabled(bucket)) { + return next(versioningNotEnabledError); + } + return next(null, config, bucket); + }), + // Set the replication configuration and update the bucket metadata. 
+ (config, bucket, next) => { + // validate there's a preferred read location in case the + // bucket location is a transient source + if (!validateConfiguration(config, bucket)) { + const msg = 'Replication configuration lacks a preferred ' + 'read location'; + log.error(msg, { bucketName: bucket.getName() }); + return next(errorInstances.ValidationError.customizeDescription(msg)); } - // Replication requires that versioning is 'Enabled' unless it - // is an NFS bucket. - if (!bucket.isNFS() && !bucket.isVersioningEnabled(bucket)) { - return next(versioningNotEnabledError); - } - return next(null, config, bucket); - }), - // Set the replication configuration and update the bucket metadata. - (config, bucket, next) => { - // validate there's a preferred read location in case the - // bucket location is a transient source - if (!validateConfiguration(config, bucket)) { - const msg = 'Replication configuration lacks a preferred ' + - 'read location'; - log.error(msg, { bucketName: bucket.getName() }); - return next(errorInstances.ValidationError - .customizeDescription(msg)); + bucket.setReplicationConfiguration(config); + return metadata.updateBucket(bucket.getName(), bucket, log, err => next(err, bucket)); + }, + ], + (err, bucket) => { + const corsHeaders = collectCorsHeaders(headers.origin, method, bucket); + if (err) { + log.trace('error processing request', { + error: err, + method: 'bucketPutReplication', + }); + monitoring.promMetrics('PUT', bucketName, err.code, 'putBucketReplication'); + return callback(err, corsHeaders); } - bucket.setReplicationConfiguration(config); - return metadata.updateBucket(bucket.getName(), bucket, log, err => - next(err, bucket)); - }, - ], (err, bucket) => { - const corsHeaders = collectCorsHeaders(headers.origin, method, bucket); - if (err) { - log.trace('error processing request', { - error: err, - method: 'bucketPutReplication', + pushMetric('putBucketReplication', log, { + authInfo, + bucket: bucketName, }); - monitoring.promMetrics( - 'PUT', bucketName, err.code, 'putBucketReplication'); - return callback(err, corsHeaders); + monitoring.promMetrics('PUT', bucketName, '200', 'putBucketReplication'); + return callback(null, corsHeaders); } - pushMetric('putBucketReplication', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics( - 'PUT', bucketName, '200', 'putBucketReplication'); - return callback(null, corsHeaders); - }); + ); } module.exports = bucketPutReplication; diff --git a/lib/api/bucketPutTagging.js b/lib/api/bucketPutTagging.js index 9023f48504..520721bad9 100644 --- a/lib/api/bucketPutTagging.js +++ b/lib/api/bucketPutTagging.js @@ -1,7 +1,6 @@ const { waterfall } = require('async'); const { s3middleware } = require('arsenal'); - const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const metadata = require('../metadata/wrapper'); @@ -42,43 +41,42 @@ function bucketPutTagging(authInfo, request, log, callback) { request, }; let bucket = null; - return waterfall([ - next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, b) => { - bucket = b; - return next(err); - }), - next => checkExpectedBucketOwner(headers, bucket, log, next), - next => parseTagXml(request.post, log, next), - (tags, next) => { - const tagArray = []; - Object.keys(tags).forEach(key => { - tagArray.push({ Value: tags[key], Key: key }); - }); - bucket.setTags(tagArray); - metadata.updateBucket(bucket.getName(), 
bucket, log, err => - next(err)); - }, - ], err => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.debug('error processing request', { - error: err, - method: 'bucketPutTagging' - }); - monitoring.promMetrics('PUT', bucketName, err.code, - 'putBucketTagging'); - } else { - monitoring.promMetrics( - 'PUT', bucketName, '200', 'putBucketTagging'); - pushMetric('putBucketTagging', log, { - authInfo, - bucket: bucketName, - }); + return waterfall( + [ + next => + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, b) => { + bucket = b; + return next(err); + }), + next => checkExpectedBucketOwner(headers, bucket, log, next), + next => parseTagXml(request.post, log, next), + (tags, next) => { + const tagArray = []; + Object.keys(tags).forEach(key => { + tagArray.push({ Value: tags[key], Key: key }); + }); + bucket.setTags(tagArray); + metadata.updateBucket(bucket.getName(), bucket, log, err => next(err)); + }, + ], + err => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.debug('error processing request', { + error: err, + method: 'bucketPutTagging', + }); + monitoring.promMetrics('PUT', bucketName, err.code, 'putBucketTagging'); + } else { + monitoring.promMetrics('PUT', bucketName, '200', 'putBucketTagging'); + pushMetric('putBucketTagging', log, { + authInfo, + bucket: bucketName, + }); + } + return callback(err, corsHeaders); } - return callback(err, corsHeaders); - }); + ); } module.exports = bucketPutTagging; diff --git a/lib/api/bucketPutVersioning.js b/lib/api/bucketPutVersioning.js index 5f872cc0dd..9c4a5eab96 100644 --- a/lib/api/bucketPutVersioning.js +++ b/lib/api/bucketPutVersioning.js @@ -6,23 +6,23 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const metadata = require('../metadata/wrapper'); const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); -const versioningNotImplBackends = - require('../../constants').versioningNotImplBackends; +const versioningNotImplBackends = require('../../constants').versioningNotImplBackends; const { config } = require('../Config'); const monitoring = require('../utilities/monitoringHandler'); -const externalVersioningErrorMessage = 'We do not currently support putting ' + -'a versioned object to a location-constraint of type Azure or GCP.'; +const externalVersioningErrorMessage = + 'We do not currently support putting ' + 'a versioned object to a location-constraint of type Azure or GCP.'; -const replicationVersioningErrorMessage = 'A replication configuration is ' + -'present on this bucket, so you cannot change the versioning state. To ' + -'change the versioning state, first delete the replication configuration.'; +const replicationVersioningErrorMessage = + 'A replication configuration is ' + + 'present on this bucket, so you cannot change the versioning state. 
To ' + + 'change the versioning state, first delete the replication configuration.'; -const ingestionVersioningErrorMessage = 'Versioning cannot be suspended for ' -+ 'buckets setup with Out of Band updates from a location'; +const ingestionVersioningErrorMessage = + 'Versioning cannot be suspended for ' + 'buckets set up with Out of Band updates from a location'; -const objectLockErrorMessage = 'An Object Lock configuration is present on ' + - 'this bucket, so the versioning state cannot be changed.'; +const objectLockErrorMessage = + 'An Object Lock configuration is present on ' + 'this bucket, so the versioning state cannot be changed.'; /** * Format of xml request: @@ -47,21 +47,17 @@ function _parseXML(request, log, cb) { return cb(errors.MalformedXML); } const versioningConf = result.VersioningConfiguration; - const status = versioningConf.Status ? - versioningConf.Status[0] : undefined; - const mfaDelete = versioningConf.MfaDelete ? - versioningConf.MfaDelete[0] : undefined; + const status = versioningConf.Status ? versioningConf.Status[0] : undefined; + const mfaDelete = versioningConf.MfaDelete ? versioningConf.MfaDelete[0] : undefined; const validStatuses = ['Enabled', 'Suspended']; const validMfaDeletes = [undefined, 'Enabled', 'Disabled']; - if (validStatuses.indexOf(status) < 0 || - validMfaDeletes.indexOf(mfaDelete) < 0) { + if (validStatuses.indexOf(status) < 0 || validMfaDeletes.indexOf(mfaDelete) < 0) { log.debug('illegal versioning configuration'); return cb(errors.IllegalVersioningConfigurationException); } if (versioningConf && mfaDelete === 'Enabled') { log.debug('mfa deletion is not implemented'); - return cb(errorInstances.NotImplemented - .customizeDescription('MFA Deletion is not supported yet.')); + return cb(errorInstances.NotImplemented.customizeDescription('MFA Deletion is not supported yet.')); } return process.nextTick(() => cb(null)); }); @@ -103,90 +99,89 @@ function bucketPutVersioning(authInfo, request, log, callback) { requestType: request.apiMethods || 'bucketPutVersioning', request, }; - return waterfall([ - next => _parseXML(request, log, next), - next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, bucket) => next(err, bucket)), // ignore extra null object, - (bucket, next) => parseString(request.post, (err, result) => { - // just for linting; there should not be any parsing error here - if (err) { - return next(err, bucket); - } - // prevent enabling versioning on an nfs exported bucket - if (bucket.isNFS()) { - const error = new Error(); - error.code = 'NFSBUCKET'; - return next(error); - } - // _checkBackendVersioningImplemented returns false if versioning - // is not implemented on the bucket backend - if (!_checkBackendVersioningImplemented(bucket)) { - log.debug(externalVersioningErrorMessage, - { method: 'bucketPutVersioning', - error: errors.NotImplemented }); - const error = errorInstances.NotImplemented.customizeDescription( - externalVersioningErrorMessage); - return next(error, bucket); - } - const versioningConfiguration = {}; - if (result.VersioningConfiguration.Status) { - versioningConfiguration.Status = - result.VersioningConfiguration.Status[0]; - } - if (result.VersioningConfiguration.MfaDelete) { - versioningConfiguration.MfaDelete = - result.VersioningConfiguration.MfaDelete[0]; + return waterfall( + [ + next => _parseXML(request, log, next), + next => + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => + next(err, bucket) + ), //
ignore extra null object, + (bucket, next) => + parseString(request.post, (err, result) => { + // just for linting; there should not be any parsing error here + if (err) { + return next(err, bucket); + } + // prevent enabling versioning on an nfs exported bucket + if (bucket.isNFS()) { + const error = new Error(); + error.code = 'NFSBUCKET'; + return next(error); + } + // _checkBackendVersioningImplemented returns false if versioning + // is not implemented on the bucket backend + if (!_checkBackendVersioningImplemented(bucket)) { + log.debug(externalVersioningErrorMessage, { + method: 'bucketPutVersioning', + error: errors.NotImplemented, + }); + const error = + errorInstances.NotImplemented.customizeDescription(externalVersioningErrorMessage); + return next(error, bucket); + } + const versioningConfiguration = {}; + if (result.VersioningConfiguration.Status) { + versioningConfiguration.Status = result.VersioningConfiguration.Status[0]; + } + if (result.VersioningConfiguration.MfaDelete) { + versioningConfiguration.MfaDelete = result.VersioningConfiguration.MfaDelete[0]; + } + // the configuration has been checked before + return next(null, bucket, versioningConfiguration); + }), + (bucket, versioningConfiguration, next) => { + // check if replication is enabled if versioning is being suspended + const replicationConfig = bucket.getReplicationConfiguration(); + const isIngestionBucket = bucket.isIngestionBucket && bucket.isIngestionBucket(); + const invalidAction = + versioningConfiguration.Status === 'Suspended' && + (isIngestionBucket || replicationConfig?.rules?.some(r => r.enabled)); + if (invalidAction) { + const errorMsg = isIngestionBucket + ? ingestionVersioningErrorMessage + : replicationVersioningErrorMessage; + next(errorInstances.InvalidBucketState.customizeDescription(errorMsg)); + return; + } + const objectLockEnabled = bucket.isObjectLockEnabled(); + if (objectLockEnabled) { + next(errorInstances.InvalidBucketState.customizeDescription(objectLockErrorMessage)); + return; + } + bucket.setVersioningConfiguration(versioningConfiguration); + // TODO all metadata updates of bucket should be using CAS + metadata.updateBucket(bucket.getName(), bucket, log, err => next(err, bucket)); + }, + ], + (err, bucket) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err && err.code === 'NFSBUCKET') { + log.trace('skipping versioning for nfs exported bucket'); + return callback(null, corsHeaders); } - // the configuration has been checked before - return next(null, bucket, versioningConfiguration); - }), - (bucket, versioningConfiguration, next) => { - // check if replication is enabled if versioning is being suspended - const replicationConfig = bucket.getReplicationConfiguration(); - const isIngestionBucket = bucket.isIngestionBucket && bucket.isIngestionBucket(); - const invalidAction = - versioningConfiguration.Status === 'Suspended' - && (isIngestionBucket || replicationConfig?.rules?.some(r => r.enabled)); - if (invalidAction) { - const errorMsg = isIngestionBucket ? 
- ingestionVersioningErrorMessage : replicationVersioningErrorMessage; - next(errorInstances.InvalidBucketState - .customizeDescription(errorMsg)); - return; - } - const objectLockEnabled = bucket.isObjectLockEnabled(); - if (objectLockEnabled) { - next(errorInstances.InvalidBucketState - .customizeDescription(objectLockErrorMessage)); - return; + if (err) { + log.trace('error processing request', { error: err, method: 'bucketPutVersioning' }); + monitoring.promMetrics('PUT', bucketName, err.code, 'putBucketVersioning'); + } else { + pushMetric('putBucketVersioning', log, { + authInfo, + bucket: bucketName, + }); + monitoring.promMetrics('PUT', bucketName, '200', 'putBucketVersioning'); } - bucket.setVersioningConfiguration(versioningConfiguration); - // TODO all metadata updates of bucket should be using CAS - metadata.updateBucket(bucket.getName(), bucket, log, err => - next(err, bucket)); - }, - ], (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err && err.code === 'NFSBUCKET') { - log.trace('skipping versioning for nfs exported bucket'); - return callback(null, corsHeaders); + return callback(err, corsHeaders); } - if (err) { - log.trace('error processing request', { error: err, - method: 'bucketPutVersioning' }); - monitoring.promMetrics( - 'PUT', bucketName, err.code, 'putBucketVersioning'); - } else { - pushMetric('putBucketVersioning', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics( - 'PUT', bucketName, '200', 'putBucketVersioning'); - } - return callback(err, corsHeaders); - }); + ); } module.exports = bucketPutVersioning; diff --git a/lib/api/bucketPutWebsite.js b/lib/api/bucketPutWebsite.js index 5b546a743b..b6014fdf65 100644 --- a/lib/api/bucketPutWebsite.js +++ b/lib/api/bucketPutWebsite.js @@ -3,8 +3,7 @@ const { errors } = require('arsenal'); const bucketShield = require('./apiUtils/bucket/bucketShield'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); -const { isBucketAuthorized } = - require('./apiUtils/authorization/permissionChecks'); +const { isBucketAuthorized } = require('./apiUtils/authorization/permissionChecks'); const metadata = require('../metadata/wrapper'); const { parseWebsiteConfigXml } = require('./apiUtils/bucket/bucketWebsite'); const { pushMetric } = require('../utapi/utilities'); @@ -26,64 +25,71 @@ function bucketPutWebsite(authInfo, request, log, callback) { const canonicalID = authInfo.getCanonicalID(); if (!request.post) { - monitoring.promMetrics( - 'PUT', bucketName, 400, 'putBucketWebsite'); + monitoring.promMetrics('PUT', bucketName, 400, 'putBucketWebsite'); return callback(errors.MissingRequestBodyError); } - return async.waterfall([ - function parseXmlBody(next) { - log.trace('parsing website configuration'); - return parseWebsiteConfigXml(request.post, log, next); - }, - function getBucketfromMetadata(config, next) { - metadata.getBucket(bucketName, log, (err, bucket) => { - if (err) { - log.debug('metadata getbucket failed', { error: err }); - return next(err); - } - if (bucketShield(bucket, requestType)) { - return next(errors.NoSuchBucket); + return async.waterfall( + [ + function parseXmlBody(next) { + log.trace('parsing website configuration'); + return parseWebsiteConfigXml(request.post, log, next); + }, + function getBucketfromMetadata(config, next) { + metadata.getBucket(bucketName, log, (err, bucket) => { + if (err) { + log.debug('metadata getbucket failed', { error: err }); + return next(err); + } + if 
(bucketShield(bucket, requestType)) { + return next(errors.NoSuchBucket); + } + log.trace('found bucket in metadata'); + return next(null, bucket, config); + }); + }, + function validateBucketAuthorization(bucket, config, next) { + if ( + !isBucketAuthorized( + bucket, + request.apiMethods || requestType, + canonicalID, + authInfo, + log, + request, + request.actionImplicitDenies + ) + ) { + log.debug('access denied for user on bucket', { + requestType, + method: 'bucketPutWebsite', + }); + return next(errors.AccessDenied, bucket); } - log.trace('found bucket in metadata'); return next(null, bucket, config); - }); - }, - function validateBucketAuthorization(bucket, config, next) { - if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, - authInfo, log, request, request.actionImplicitDenies)) { - log.debug('access denied for user on bucket', { - requestType, - method: 'bucketPutWebsite', + }, + function updateBucketMetadata(bucket, config, next) { + log.trace('updating bucket website configuration in metadata'); + bucket.setWebsiteConfiguration(config); + metadata.updateBucket(bucketName, bucket, log, err => { + next(err, bucket); + }); + }, + ], + (err, bucket) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'bucketPutWebsite' }); + monitoring.promMetrics('PUT', bucketName, err.code, 'putBucketWebsite'); + } else { + pushMetric('putBucketWebsite', log, { + authInfo, + bucket: bucketName, }); - return next(errors.AccessDenied, bucket); + monitoring.promMetrics('PUT', bucketName, '200', 'putBucketWebsite'); } - return next(null, bucket, config); - }, - function updateBucketMetadata(bucket, config, next) { - log.trace('updating bucket website configuration in metadata'); - bucket.setWebsiteConfiguration(config); - metadata.updateBucket(bucketName, bucket, log, err => { - next(err, bucket); - }); - }, - ], (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, - method: 'bucketPutWebsite' }); - monitoring.promMetrics( - 'PUT', bucketName, err.code, 'putBucketWebsite'); - } else { - pushMetric('putBucketWebsite', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics( - 'PUT', bucketName, '200', 'putBucketWebsite'); + return callback(err, corsHeaders); } - return callback(err, corsHeaders); - }); + ); } module.exports = bucketPutWebsite; diff --git a/lib/api/bucketUpdateQuota.js b/lib/api/bucketUpdateQuota.js index 1b319bc5e3..98f88d25ef 100644 --- a/lib/api/bucketUpdateQuota.js +++ b/lib/api/bucketUpdateQuota.js @@ -47,39 +47,39 @@ function bucketUpdateQuota(authInfo, request, log, callback) { request, }; let bucket = null; - return waterfall([ - next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, b) => { - bucket = b; - return next(err, bucket); - }), - (bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)), - (bucket, requestBody, next) => validateBucketQuotaProperty(requestBody, (err, quotaValue) => - next(err, bucket, quotaValue)), - (bucket, quotaValue, next) => { - bucket.setQuota(quotaValue); - return metadata.updateBucket(bucket.getName(), bucket, log, next); - }, - ], (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - 
log.debug('error processing request', { - error: err, - method: 'bucketUpdateQuota' + return waterfall( + [ + next => + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, b) => { + bucket = b; + return next(err, bucket); + }), + (bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)), + (bucket, requestBody, next) => + validateBucketQuotaProperty(requestBody, (err, quotaValue) => next(err, bucket, quotaValue)), + (bucket, quotaValue, next) => { + bucket.setQuota(quotaValue); + return metadata.updateBucket(bucket.getName(), bucket, log, next); + }, + ], + (err, bucket) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.debug('error processing request', { + error: err, + method: 'bucketUpdateQuota', + }); + monitoring.promMetrics('PUT', bucketName, err.code, 'updateBucketQuota'); + return callback(err, err.code, corsHeaders); + } + monitoring.promMetrics('PUT', bucketName, '200', 'updateBucketQuota'); + pushMetric('updateBucketQuota', log, { + authInfo, + bucket: bucketName, }); - monitoring.promMetrics('PUT', bucketName, err.code, - 'updateBucketQuota'); - return callback(err, err.code, corsHeaders); + return callback(null, corsHeaders); } - monitoring.promMetrics( - 'PUT', bucketName, '200', 'updateBucketQuota'); - pushMetric('updateBucketQuota', log, { - authInfo, - bucket: bucketName, - }); - return callback(null, corsHeaders); - }); + ); } module.exports = bucketUpdateQuota; diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js index 19957e5a06..4808b63aa5 100644 --- a/lib/api/completeMultipartUpload.js +++ b/lib/api/completeMultipartUpload.js @@ -10,17 +10,18 @@ const { data } = require('../data/wrapper'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const constants = require('../../constants'); const { config } = require('../Config'); -const { versioningPreprocessing, checkQueryVersionId, decodeVID, overwritingVersioning } - = require('./apiUtils/object/versioning'); +const { + versioningPreprocessing, + checkQueryVersionId, + decodeVID, + overwritingVersioning, +} = require('./apiUtils/object/versioning'); const services = require('../services'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); -const locationConstraintCheck - = require('./apiUtils/object/locationConstraintCheck'); +const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck'); const { skipMpuPartProcessing } = storage.data.external.backendUtils; -const { validateAndFilterMpuParts, generateMpuPartStorageInfo } = - s3middleware.processMpuParts; -const locationKeysHaveChanged - = require('./apiUtils/object/locationKeysHaveChanged'); +const { validateAndFilterMpuParts, generateMpuPartStorageInfo } = s3middleware.processMpuParts; +const locationKeysHaveChanged = require('./apiUtils/object/locationKeysHaveChanged'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); const { validatePutVersionId } = require('./apiUtils/object/coldStorage'); const { validateQuotas } = require('./apiUtils/quotas/quotaUtils'); @@ -49,8 +50,7 @@ const REPLICATION_ACTION = 'MPU'; */ - - /* +/* Format of xml response: { - if (err || !result || !result.CompleteMultipartUpload - || !result.CompleteMultipartUpload.Part) { + if (err || !result || !result.CompleteMultipartUpload || !result.CompleteMultipartUpload.Part) { return next(errors.MalformedXML); } 
const jsonList = result.CompleteMultipartUpload; @@ -126,283 +125,467 @@ function completeMultipartUpload(authInfo, request, log, callback) { }); } - return async.waterfall([ - function validateDestBucket(next) { - const metadataValParams = { - objectKey, - authInfo, - bucketName, - // Required permissions for this action - // at the destinationBucket level are same as objectPut - requestType: request.apiMethods || 'completeMultipartUpload', - versionId, - request, - }; - standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next); - }, - function validateMultipart(destBucket, objMD, next) { - if (objMD) { - oldByteLength = objMD['content-length']; - } - - if (isPutVersion) { - const error = validatePutVersionId(objMD, putVersionId, log); - if (error) { - return next(error, destBucket); + return async.waterfall( + [ + function validateDestBucket(next) { + const metadataValParams = { + objectKey, + authInfo, + bucketName, + // Required permissions for this action + // at the destinationBucket level are same as objectPut + requestType: request.apiMethods || 'completeMultipartUpload', + versionId, + request, + }; + standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next); + }, + function validateMultipart(destBucket, objMD, next) { + if (objMD) { + oldByteLength = objMD['content-length']; } - } - return services.metadataValidateMultipart(metadataValParams, - (err, mpuBucket, mpuOverview, storedMetadata) => { - if (err) { - log.error('error validating request', { error: err }); - return next(err, destBucket); + if (isPutVersion) { + const error = validatePutVersionId(objMD, putVersionId, log); + if (error) { + return next(error, destBucket); } - return next(null, destBucket, objMD, mpuBucket, - storedMetadata); - }); - }, - function parsePartsList(destBucket, objMD, mpuBucket, - storedMetadata, next) { - const location = storedMetadata.controllingLocationConstraint; - // BACKWARD: Remove to remove the old splitter - if (mpuBucket.getMdBucketModelVersion() < 2) { - splitter = constants.oldSplitter; - } - // Reconstruct mpuOverviewKey to point to metadata - // originally stored when mpu initiated - const mpuOverviewKey = - `overview${splitter}${objectKey}${splitter}${uploadId}`; - if (request.post) { - return parseXml(request.post, (err, jsonList) => { - if (err) { - log.error('error parsing XML', { error: err }); - return next(err, destBucket); + } + + return services.metadataValidateMultipart( + metadataValParams, + (err, mpuBucket, mpuOverview, storedMetadata) => { + if (err) { + log.error('error validating request', { error: err }); + return next(err, destBucket); + } + return next(null, destBucket, objMD, mpuBucket, storedMetadata); } - return next(null, destBucket, objMD, mpuBucket, - jsonList, storedMetadata, location, mpuOverviewKey); - }); - } - return next(errors.MalformedXML, destBucket); - }, - function markOverviewForCompletion(destBucket, objMD, mpuBucket, jsonList, - storedMetadata, location, mpuOverviewKey, next) { - return services.metadataMarkMPObjectForCompletion({ - bucketName: mpuBucket.getName(), - objectKey, - uploadId, - splitter, + ); + }, + function parsePartsList(destBucket, objMD, mpuBucket, storedMetadata, next) { + const location = storedMetadata.controllingLocationConstraint; + // BACKWARD: Remove to remove the old splitter + if (mpuBucket.getMdBucketModelVersion() < 2) { + splitter = constants.oldSplitter; + } + // Reconstruct mpuOverviewKey to point to metadata + // originally stored 
when mpu initiated + const mpuOverviewKey = `overview${splitter}${objectKey}${splitter}${uploadId}`; + if (request.post) { + return parseXml(request.post, (err, jsonList) => { + if (err) { + log.error('error parsing XML', { error: err }); + return next(err, destBucket); + } + return next( + null, + destBucket, + objMD, + mpuBucket, + jsonList, + storedMetadata, + location, + mpuOverviewKey + ); + }); + } + return next(errors.MalformedXML, destBucket); + }, + function markOverviewForCompletion( + destBucket, + objMD, + mpuBucket, + jsonList, storedMetadata, - }, log, err => { - if (err) { - log.error('error marking MPU object for completion', { + location, + mpuOverviewKey, + next + ) { + return services.metadataMarkMPObjectForCompletion( + { bucketName: mpuBucket.getName(), objectKey, uploadId, - error: err, - }); - return next(err); - } - return next(null, destBucket, objMD, mpuBucket, - jsonList, storedMetadata, location, mpuOverviewKey); - }); - }, - function retrieveParts(destBucket, objMD, mpuBucket, jsonList, - storedMetadata, location, mpuOverviewKey, next) { - return services.getMPUparts(mpuBucket.getName(), uploadId, log, - (err, result) => { + splitter, + storedMetadata, + }, + log, + err => { + if (err) { + log.error('error marking MPU object for completion', { + bucketName: mpuBucket.getName(), + objectKey, + uploadId, + error: err, + }); + return next(err); + } + return next( + null, + destBucket, + objMD, + mpuBucket, + jsonList, + storedMetadata, + location, + mpuOverviewKey + ); + } + ); + }, + function retrieveParts( + destBucket, + objMD, + mpuBucket, + jsonList, + storedMetadata, + location, + mpuOverviewKey, + next + ) { + return services.getMPUparts(mpuBucket.getName(), uploadId, log, (err, result) => { if (err) { log.error('error getting parts', { error: err }); return next(err, destBucket); } const storedParts = result.Contents; const totalMPUSize = storedParts.reduce((acc, part) => acc + part.value.Size, 0); - return next(null, destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, location, mpuOverviewKey, totalMPUSize); + return next( + null, + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + location, + mpuOverviewKey, + totalMPUSize + ); }); - }, - function completeExternalMpu(destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, location, mpuOverviewKey, totalMPUSize, next) { - const mdInfo = { storedParts, mpuOverviewKey, splitter }; - const mpuInfo = - { objectKey, uploadId, jsonList, bucketName, destBucket }; - const originalIdentityImpDenies = request.actionImplicitDenies; - // eslint-disable-next-line no-param-reassign - delete request.actionImplicitDenies; - return data.completeMPU(request, mpuInfo, mdInfo, location, - null, null, null, locationConstraintCheck, log, - (err, completeObjData) => { + }, + function completeExternalMpu( + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + location, + mpuOverviewKey, + totalMPUSize, + next + ) { + const mdInfo = { storedParts, mpuOverviewKey, splitter }; + const mpuInfo = { objectKey, uploadId, jsonList, bucketName, destBucket }; + const originalIdentityImpDenies = request.actionImplicitDenies; // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = originalIdentityImpDenies; - if (err) { - log.error('error completing MPU externally', { error: err }); - return next(err, destBucket); + delete request.actionImplicitDenies; + return data.completeMPU( + request, + mpuInfo, + mdInfo, + location, + 
null, + null, + null, + locationConstraintCheck, + log, + (err, completeObjData) => { + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = originalIdentityImpDenies; + if (err) { + log.error('error completing MPU externally', { error: err }); + return next(err, destBucket); + } + // if mpu not handled externally, completeObjData will be null + return next( + null, + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + completeObjData, + mpuOverviewKey, + totalMPUSize + ); + } + ); + }, + function validateAndFilterParts( + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + completeObjData, + mpuOverviewKey, + totalMPUSize, + next + ) { + if (completeObjData) { + return next( + null, + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + completeObjData, + mpuOverviewKey, + completeObjData.filteredPartsObj, + totalMPUSize + ); + } + const filteredPartsObj = validateAndFilterMpuParts( + storedParts, + jsonList, + mpuOverviewKey, + splitter, + log + ); + if (filteredPartsObj.error) { + return next(filteredPartsObj.error, destBucket); + } + return next( + null, + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + completeObjData, + mpuOverviewKey, + filteredPartsObj, + totalMPUSize + ); + }, + function processParts( + destBucket, + objMD, + mpuBucket, + storedParts, + jsonList, + storedMetadata, + completeObjData, + mpuOverviewKey, + filteredPartsObj, + totalMPUSize, + next + ) { + // if mpu was completed on backend that stored mpu MD externally, + // skip MD processing steps + if (completeObjData && skipMpuPartProcessing(completeObjData)) { + const dataLocations = [ + { + key: completeObjData.key, + size: completeObjData.contentLength, + start: 0, + dataStoreVersionId: completeObjData.dataStoreVersionId, + dataStoreName: storedMetadata.dataStoreName, + dataStoreETag: completeObjData.eTag, + dataStoreType: completeObjData.dataStoreType, + }, + ]; + const calculatedSize = completeObjData.contentLength; + return next( + null, + destBucket, + objMD, + mpuBucket, + storedMetadata, + completeObjData.eTag, + calculatedSize, + dataLocations, + [mpuOverviewKey], + null, + completeObjData, + totalMPUSize + ); } - // if mpu not handled externally, completeObjData will be null - return next(null, destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, completeObjData, mpuOverviewKey, - totalMPUSize); - }); - }, - function validateAndFilterParts(destBucket, objMD, mpuBucket, - storedParts, jsonList, storedMetadata, completeObjData, mpuOverviewKey, - totalMPUSize, next) { - if (completeObjData) { - return next(null, destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, completeObjData, mpuOverviewKey, - completeObjData.filteredPartsObj, totalMPUSize); - } - const filteredPartsObj = validateAndFilterMpuParts(storedParts, - jsonList, mpuOverviewKey, splitter, log); - if (filteredPartsObj.error) { - return next(filteredPartsObj.error, destBucket); - } - return next(null, destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, completeObjData, mpuOverviewKey, - filteredPartsObj, totalMPUSize); - }, - function processParts(destBucket, objMD, mpuBucket, storedParts, - jsonList, storedMetadata, completeObjData, mpuOverviewKey, - filteredPartsObj, totalMPUSize, next) { - // if mpu was completed on backend that stored mpu MD externally, - // skip MD processing steps - if (completeObjData && 
skipMpuPartProcessing(completeObjData)) { - const dataLocations = [ - { - key: completeObjData.key, - size: completeObjData.contentLength, - start: 0, - dataStoreVersionId: completeObjData.dataStoreVersionId, - dataStoreName: storedMetadata.dataStoreName, - dataStoreETag: completeObjData.eTag, - dataStoreType: completeObjData.dataStoreType, - }, - ]; - const calculatedSize = completeObjData.contentLength; - return next(null, destBucket, objMD, mpuBucket, storedMetadata, - completeObjData.eTag, calculatedSize, dataLocations, - [mpuOverviewKey], null, completeObjData, totalMPUSize); - } - const partsInfo = - generateMpuPartStorageInfo(filteredPartsObj.partList); - if (partsInfo.error) { - return next(partsInfo.error, destBucket); - } - const { keysToDelete, extraPartLocations } = filteredPartsObj; - const { aggregateETag, dataLocations, calculatedSize } = partsInfo; + const partsInfo = generateMpuPartStorageInfo(filteredPartsObj.partList); + if (partsInfo.error) { + return next(partsInfo.error, destBucket); + } + const { keysToDelete, extraPartLocations } = filteredPartsObj; + const { aggregateETag, dataLocations, calculatedSize } = partsInfo; - if (completeObjData) { - const dataLocations = [ - { - key: completeObjData.key, - size: calculatedSize, - start: 0, - dataStoreName: storedMetadata.dataStoreName, - dataStoreETag: aggregateETag, - dataStoreType: completeObjData.dataStoreType, - }, + if (completeObjData) { + const dataLocations = [ + { + key: completeObjData.key, + size: calculatedSize, + start: 0, + dataStoreName: storedMetadata.dataStoreName, + dataStoreETag: aggregateETag, + dataStoreType: completeObjData.dataStoreType, + }, + ]; + return next( + null, + destBucket, + objMD, + mpuBucket, + storedMetadata, + aggregateETag, + calculatedSize, + dataLocations, + keysToDelete, + extraPartLocations, + completeObjData, + totalMPUSize + ); + } + return next( + null, + destBucket, + objMD, + mpuBucket, + storedMetadata, + aggregateETag, + calculatedSize, + dataLocations, + keysToDelete, + extraPartLocations, + null, + totalMPUSize + ); + }, + function prepForStoring( + destBucket, + objMD, + mpuBucket, + storedMetadata, + aggregateETag, + calculatedSize, + dataLocations, + keysToDelete, + extraPartLocations, + completeObjData, + totalMPUSize, + next + ) { + const metaHeaders = {}; + const keysNotNeeded = [ + 'initiator', + 'partLocations', + 'key', + 'initiated', + 'uploadId', + 'content-type', + 'expires', + 'eventualStorageBucket', + 'dataStoreName', ]; - return next(null, destBucket, objMD, mpuBucket, storedMetadata, - aggregateETag, calculatedSize, dataLocations, keysToDelete, - extraPartLocations, completeObjData, totalMPUSize); - } - return next(null, destBucket, objMD, mpuBucket, storedMetadata, - aggregateETag, calculatedSize, dataLocations, keysToDelete, - extraPartLocations, null, totalMPUSize); - }, - function prepForStoring(destBucket, objMD, mpuBucket, storedMetadata, - aggregateETag, calculatedSize, dataLocations, keysToDelete, - extraPartLocations, completeObjData, totalMPUSize, next) { - const metaHeaders = {}; - const keysNotNeeded = - ['initiator', 'partLocations', 'key', - 'initiated', 'uploadId', 'content-type', 'expires', - 'eventualStorageBucket', 'dataStoreName']; - const metadataKeysToPull = - Object.keys(storedMetadata).filter(item => - keysNotNeeded.indexOf(item) === -1); - metadataKeysToPull.forEach(item => { - metaHeaders[item] = storedMetadata[item]; - }); + const metadataKeysToPull = Object.keys(storedMetadata).filter( + item => keysNotNeeded.indexOf(item) === 
-1 + ); + metadataKeysToPull.forEach(item => { + metaHeaders[item] = storedMetadata[item]; + }); - const droppedMPUSize = totalMPUSize - calculatedSize; + const droppedMPUSize = totalMPUSize - calculatedSize; - const metaStoreParams = { - authInfo, - objectKey, - metaHeaders, - uploadId, - dataStoreName: storedMetadata.dataStoreName, - contentType: storedMetadata['content-type'], - cacheControl: storedMetadata['cache-control'], - contentDisposition: storedMetadata['content-disposition'], - contentEncoding: storedMetadata['content-encoding'], - expires: storedMetadata.expires, - contentMD5: aggregateETag, - size: calculatedSize, - multipart: true, - isDeleteMarker: false, - replicationInfo: getReplicationInfo(config, - objectKey, destBucket, false, calculatedSize, REPLICATION_ACTION), - originOp: 's3:ObjectCreated:CompleteMultipartUpload', - overheadField: constants.overheadField, - log, - }; - // If key already exists - if (objMD) { - // Re-use creation-time if we can - if (objMD['creation-time']) { - metaStoreParams.creationTime = objMD['creation-time']; - // Otherwise fallback to last-modified + const metaStoreParams = { + authInfo, + objectKey, + metaHeaders, + uploadId, + dataStoreName: storedMetadata.dataStoreName, + contentType: storedMetadata['content-type'], + cacheControl: storedMetadata['cache-control'], + contentDisposition: storedMetadata['content-disposition'], + contentEncoding: storedMetadata['content-encoding'], + expires: storedMetadata.expires, + contentMD5: aggregateETag, + size: calculatedSize, + multipart: true, + isDeleteMarker: false, + replicationInfo: getReplicationInfo( + config, + objectKey, + destBucket, + false, + calculatedSize, + REPLICATION_ACTION + ), + originOp: 's3:ObjectCreated:CompleteMultipartUpload', + overheadField: constants.overheadField, + log, + }; + // If key already exists + if (objMD) { + // Re-use creation-time if we can + if (objMD['creation-time']) { + metaStoreParams.creationTime = objMD['creation-time']; + // Otherwise fallback to last-modified + } else { + metaStoreParams.creationTime = objMD['last-modified']; + } + // If its a new key, create a new timestamp } else { - metaStoreParams.creationTime = objMD['last-modified']; + metaStoreParams.creationTime = new Date().toJSON(); + } + if (storedMetadata['x-amz-tagging']) { + metaStoreParams.tagging = storedMetadata['x-amz-tagging']; + } + if (storedMetadata.retentionMode && storedMetadata.retentionDate) { + metaStoreParams.retentionMode = storedMetadata.retentionMode; + metaStoreParams.retentionDate = storedMetadata.retentionDate; + } + if (storedMetadata.legalHold) { + metaStoreParams.legalHold = storedMetadata.legalHold; + } + const serverSideEncryption = storedMetadata['x-amz-server-side-encryption']; + let pseudoCipherBundle = null; + if (serverSideEncryption) { + const kmsKey = storedMetadata['x-amz-server-side-encryption-aws-kms-key-id']; + pseudoCipherBundle = { + algorithm: serverSideEncryption, + masterKeyId: kmsKey, + }; + setSSEHeaders(responseHeaders, serverSideEncryption, kmsKey); } - // If its a new key, create a new timestamp - } else { - metaStoreParams.creationTime = new Date().toJSON(); - } - if (storedMetadata['x-amz-tagging']) { - metaStoreParams.tagging = storedMetadata['x-amz-tagging']; - } - if (storedMetadata.retentionMode && storedMetadata.retentionDate) { - metaStoreParams.retentionMode = storedMetadata.retentionMode; - metaStoreParams.retentionDate = storedMetadata.retentionDate; - } - if (storedMetadata.legalHold) { - metaStoreParams.legalHold = 
storedMetadata.legalHold; - } - const serverSideEncryption = storedMetadata['x-amz-server-side-encryption']; - let pseudoCipherBundle = null; - if (serverSideEncryption) { - const kmsKey = storedMetadata['x-amz-server-side-encryption-aws-kms-key-id']; - pseudoCipherBundle = { - algorithm: serverSideEncryption, - masterKeyId: kmsKey, - }; - setSSEHeaders(responseHeaders, serverSideEncryption, kmsKey); - } - if (authInfo.getCanonicalID() !== destBucket.getOwner()) { - metaStoreParams.bucketOwnerId = destBucket.getOwner(); - } + if (authInfo.getCanonicalID() !== destBucket.getOwner()) { + metaStoreParams.bucketOwnerId = destBucket.getOwner(); + } - // if x-scal-s3-version-id header is specified, we overwrite the object/version metadata. - if (isPutVersion) { - const options = overwritingVersioning(objMD, metaStoreParams); - return process.nextTick(() => next(null, destBucket, dataLocations, - metaStoreParams, mpuBucket, keysToDelete, aggregateETag, - objMD, extraPartLocations, pseudoCipherBundle, - completeObjData, options, droppedMPUSize)); - } + // if x-scal-s3-version-id header is specified, we overwrite the object/version metadata. + if (isPutVersion) { + const options = overwritingVersioning(objMD, metaStoreParams); + return process.nextTick(() => + next( + null, + destBucket, + dataLocations, + metaStoreParams, + mpuBucket, + keysToDelete, + aggregateETag, + objMD, + extraPartLocations, + pseudoCipherBundle, + completeObjData, + options, + droppedMPUSize + ) + ); + } - if (!destBucket.isVersioningEnabled() && objMD?.archive?.archiveInfo) { - // Ensure we trigger a "delete" event in the oplog for the previously archived object - metaStoreParams.needOplogUpdate = 's3:ReplaceArchivedObject'; - } + if (!destBucket.isVersioningEnabled() && objMD?.archive?.archiveInfo) { + // Ensure we trigger a "delete" event in the oplog for the previously archived object + metaStoreParams.needOplogUpdate = 's3:ReplaceArchivedObject'; + } - return versioningPreprocessing(bucketName, - destBucket, objectKey, objMD, log, (err, options) => { + return versioningPreprocessing(bucketName, destBucket, objectKey, objMD, log, (err, options) => { if (err) { // TODO: check AWS error when user requested a specific // version before any versions have been put @@ -425,177 +608,242 @@ function completeMultipartUpload(authInfo, request, log, callback) { } } - return next(null, destBucket, dataLocations, - metaStoreParams, mpuBucket, keysToDelete, aggregateETag, - objMD, extraPartLocations, pseudoCipherBundle, - completeObjData, options, droppedMPUSize); + return next( + null, + destBucket, + dataLocations, + metaStoreParams, + mpuBucket, + keysToDelete, + aggregateETag, + objMD, + extraPartLocations, + pseudoCipherBundle, + completeObjData, + options, + droppedMPUSize + ); }); - }, - function storeAsNewObj(destinationBucket, dataLocations, - metaStoreParams, mpuBucket, keysToDelete, aggregateETag, objMD, - extraPartLocations, pseudoCipherBundle, - completeObjData, options, droppedMPUSize, next) { - const dataToDelete = options.dataToDelete; - /* eslint-disable no-param-reassign */ - metaStoreParams.versionId = options.versionId; - metaStoreParams.versioning = options.versioning; - metaStoreParams.isNull = options.isNull; - metaStoreParams.deleteNullKey = options.deleteNullKey; - if (options.extraMD) { - Object.assign(metaStoreParams, options.extraMD); - } - /* eslint-enable no-param-reassign */ - - // For external backends (where completeObjData is not - // null), the backend key does not change for new versions - // of 
the same object (or rewrites for nonversioned - // buckets), hence the deduplication sanity check does not - // make sense for external backends. - if (objMD && !completeObjData) { - // An object with the same key already exists, check - // if it has been created by the same MPU upload by - // checking if any of its internal location keys match - // the new keys. In such case, it must be a duplicate - // from a retry of a previous failed completion - // attempt, hence do the following: - // - // - skip writing the new metadata key to avoid - // creating a new version pointing to the same data - // keys - // - // - skip old data locations deletion since the old - // data location keys overlap the new ones (in - // principle they should be fully identical as there - // is no reuse of previous versions' data keys in - // the normal process) - note that the previous - // failed completion attempt may have left orphan - // data keys but we lost track of them so we cannot - // delete them now - // - // - proceed to the deletion of overview and part - // metadata keys, which are likely to have failed in - // the previous MPU completion attempt - // - if (!locationKeysHaveChanged(objMD.location, dataLocations)) { - log.info('MPU complete request replay detected', { - method: 'completeMultipartUpload.storeAsNewObj', - bucketName: destinationBucket.getName(), - objectKey: metaStoreParams.objectKey, - uploadId: metaStoreParams.uploadId, - }); - return next(null, mpuBucket, keysToDelete, aggregateETag, - extraPartLocations, destinationBucket, - // pass the original version ID as generatedVersionId - objMD.versionId, droppedMPUSize); + }, + function storeAsNewObj( + destinationBucket, + dataLocations, + metaStoreParams, + mpuBucket, + keysToDelete, + aggregateETag, + objMD, + extraPartLocations, + pseudoCipherBundle, + completeObjData, + options, + droppedMPUSize, + next + ) { + const dataToDelete = options.dataToDelete; + /* eslint-disable no-param-reassign */ + metaStoreParams.versionId = options.versionId; + metaStoreParams.versioning = options.versioning; + metaStoreParams.isNull = options.isNull; + metaStoreParams.deleteNullKey = options.deleteNullKey; + if (options.extraMD) { + Object.assign(metaStoreParams, options.extraMD); } - } - return services.metadataStoreObject(destinationBucket.getName(), - dataLocations, pseudoCipherBundle, metaStoreParams, - (err, res) => { - if (err) { - log.error('error storing object metadata', { error: err }); - return next(err, destinationBucket); + /* eslint-enable no-param-reassign */ + + // For external backends (where completeObjData is not + // null), the backend key does not change for new versions + // of the same object (or rewrites for nonversioned + // buckets), hence the deduplication sanity check does not + // make sense for external backends. + if (objMD && !completeObjData) { + // An object with the same key already exists, check + // if it has been created by the same MPU upload by + // checking if any of its internal location keys match + // the new keys. 
In such case, it must be a duplicate + // from a retry of a previous failed completion + // attempt, hence do the following: + // + // - skip writing the new metadata key to avoid + // creating a new version pointing to the same data + // keys + // + // - skip old data locations deletion since the old + // data location keys overlap the new ones (in + // principle they should be fully identical as there + // is no reuse of previous versions' data keys in + // the normal process) - note that the previous + // failed completion attempt may have left orphan + // data keys but we lost track of them so we cannot + // delete them now + // + // - proceed to the deletion of overview and part + // metadata keys, which are likely to have failed in + // the previous MPU completion attempt + // + if (!locationKeysHaveChanged(objMD.location, dataLocations)) { + log.info('MPU complete request replay detected', { + method: 'completeMultipartUpload.storeAsNewObj', + bucketName: destinationBucket.getName(), + objectKey: metaStoreParams.objectKey, + uploadId: metaStoreParams.uploadId, + }); + return next( + null, + mpuBucket, + keysToDelete, + aggregateETag, + extraPartLocations, + destinationBucket, + // pass the original version ID as generatedVersionId + objMD.versionId, + droppedMPUSize + ); } + } + return services.metadataStoreObject( + destinationBucket.getName(), + dataLocations, + pseudoCipherBundle, + metaStoreParams, + (err, res) => { + if (err) { + log.error('error storing object metadata', { error: err }); + return next(err, destinationBucket); + } - setExpirationHeaders(responseHeaders, { - lifecycleConfig: destinationBucket.getLifecycleConfiguration(), - objectParams: { - key: objectKey, - date: res.lastModified, - tags: res.tags, - }, - }); + setExpirationHeaders(responseHeaders, { + lifecycleConfig: destinationBucket.getLifecycleConfiguration(), + objectParams: { + key: objectKey, + date: res.lastModified, + tags: res.tags, + }, + }); - const generatedVersionId = res ? res.versionId : undefined; - // in cases where completing mpu overwrites a previous - // null version when versioning is suspended or versioning - // is not enabled, need to delete pre-existing data - // unless the preexisting object and the completed mpu - // are on external backends - if (dataToDelete) { - const newDataStoreName = - Array.isArray(dataLocations) && dataLocations[0] ? - dataLocations[0].dataStoreName : null; - return data.batchDelete(dataToDelete, - request.method, - newDataStoreName, log, err => { + const generatedVersionId = res ? res.versionId : undefined; + // in cases where completing mpu overwrites a previous + // null version when versioning is suspended or versioning + // is not enabled, need to delete pre-existing data + // unless the preexisting object and the completed mpu + // are on external backends + if (dataToDelete) { + const newDataStoreName = + Array.isArray(dataLocations) && dataLocations[0] + ? 
dataLocations[0].dataStoreName + : null; + return data.batchDelete(dataToDelete, request.method, newDataStoreName, log, err => { if (err) { return next(err); } - return next(null, mpuBucket, keysToDelete, - aggregateETag, extraPartLocations, - destinationBucket, generatedVersionId, - droppedMPUSize); + return next( + null, + mpuBucket, + keysToDelete, + aggregateETag, + extraPartLocations, + destinationBucket, + generatedVersionId, + droppedMPUSize + ); }); + } + return next( + null, + mpuBucket, + keysToDelete, + aggregateETag, + extraPartLocations, + destinationBucket, + generatedVersionId, + droppedMPUSize + ); } - return next(null, mpuBucket, keysToDelete, aggregateETag, - extraPartLocations, destinationBucket, - generatedVersionId, droppedMPUSize); - }); - }, - function deletePartsMetadata(mpuBucket, keysToDelete, aggregateETag, - extraPartLocations, destinationBucket, generatedVersionId, droppedMPUSize, next) { - services.batchDeleteObjectMetadata(mpuBucket.getName(), - keysToDelete, log, err => next(err, extraPartLocations, - destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize)); - }, - function batchDeleteExtraParts(extraPartLocations, destinationBucket, - aggregateETag, generatedVersionId, droppedMPUSize, next) { - if (extraPartLocations && extraPartLocations.length > 0) { - return data.batchDelete(extraPartLocations, request.method, null, log, err => { - if (err) { - return next(err); - } + ); + }, + function deletePartsMetadata( + mpuBucket, + keysToDelete, + aggregateETag, + extraPartLocations, + destinationBucket, + generatedVersionId, + droppedMPUSize, + next + ) { + services.batchDeleteObjectMetadata(mpuBucket.getName(), keysToDelete, log, err => + next(err, extraPartLocations, destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize) + ); + }, + function batchDeleteExtraParts( + extraPartLocations, + destinationBucket, + aggregateETag, + generatedVersionId, + droppedMPUSize, + next + ) { + if (extraPartLocations && extraPartLocations.length > 0) { + return data.batchDelete(extraPartLocations, request.method, null, log, err => { + if (err) { + return next(err); + } - return validateQuotas(request, destinationBucket, request.accountQuotas, - ['objectDelete'], 'objectDelete', -droppedMPUSize, false, log, err => { - if (err) { - // Ignore error, as the data has been deleted already: only inflight count - // has not been updated, and will be eventually consistent anyway - log.warn('failed to update inflights', { - method: 'completeMultipartUpload', - extraPartLocations, - error: err, - }); + return validateQuotas( + request, + destinationBucket, + request.accountQuotas, + ['objectDelete'], + 'objectDelete', + -droppedMPUSize, + false, + log, + err => { + if (err) { + // Ignore error, as the data has been deleted already: only inflight count + // has not been updated, and will be eventually consistent anyway + log.warn('failed to update inflights', { + method: 'completeMultipartUpload', + extraPartLocations, + error: err, + }); + } + return next(null, destinationBucket, aggregateETag, generatedVersionId); } - return next(null, destinationBucket, aggregateETag, - generatedVersionId); + ); }); - }); + } + return next(null, destinationBucket, aggregateETag, generatedVersionId); + }, + ], + (err, destinationBucket, aggregateETag, generatedVersionId) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); + if (err) { + return callback(err, null, corsHeaders); } - return next(null, destinationBucket, 
aggregateETag, - generatedVersionId); - }, - ], (err, destinationBucket, aggregateETag, generatedVersionId) => { - const corsHeaders = - collectCorsHeaders(request.headers.origin, request.method, - destinationBucket); - if (err) { - return callback(err, null, corsHeaders); - } - if (generatedVersionId) { - corsHeaders['x-amz-version-id'] = - versionIdUtils.encode(generatedVersionId); - } - Object.assign(responseHeaders, corsHeaders); + if (generatedVersionId) { + corsHeaders['x-amz-version-id'] = versionIdUtils.encode(generatedVersionId); + } + Object.assign(responseHeaders, corsHeaders); - const vcfg = destinationBucket.getVersioningConfiguration(); - const isVersionedObj = vcfg && vcfg.Status === 'Enabled'; + const vcfg = destinationBucket.getVersioningConfiguration(); + const isVersionedObj = vcfg && vcfg.Status === 'Enabled'; - xmlParams.eTag = `"${aggregateETag}"`; - const xml = convertToXml('completeMultipartUpload', xmlParams); - pushMetric('completeMultipartUpload', log, { - oldByteLength: isVersionedObj ? null : oldByteLength, - authInfo, - canonicalID: destinationBucket.getOwner(), - bucket: bucketName, - keys: [objectKey], - versionId: generatedVersionId, - numberOfObjects: !generatedVersionId && oldByteLength !== null ? 0 : 1, - location: destinationBucket.getLocationConstraint(), - }); - return callback(null, xml, responseHeaders); - }); + xmlParams.eTag = `"${aggregateETag}"`; + const xml = convertToXml('completeMultipartUpload', xmlParams); + pushMetric('completeMultipartUpload', log, { + oldByteLength: isVersionedObj ? null : oldByteLength, + authInfo, + canonicalID: destinationBucket.getOwner(), + bucket: bucketName, + keys: [objectKey], + versionId: generatedVersionId, + numberOfObjects: !generatedVersionId && oldByteLength !== null ? 0 : 1, + location: destinationBucket.getLocationConstraint(), + }); + return callback(null, xml, responseHeaders); + } + ); } module.exports = completeMultipartUpload; diff --git a/lib/api/corsPreflight.js b/lib/api/corsPreflight.js index 78ad92caf5..0200b019ea 100644 --- a/lib/api/corsPreflight.js +++ b/lib/api/corsPreflight.js @@ -2,42 +2,45 @@ const { errors, errorInstances } = require('arsenal'); const metadata = require('../metadata/wrapper'); const bucketShield = require('./apiUtils/bucket/bucketShield'); -const { findCorsRule, generateCorsResHeaders } - = require('./apiUtils/object/corsResponse'); +const { findCorsRule, generateCorsResHeaders } = require('./apiUtils/object/corsResponse'); // const { pushMetric } = require('../utapi/utilities'); const requestType = 'objectGet'; const customizedErrs = { corsNotEnabled: 'CORSResponse: CORS is not enabled for this bucket.', - notAllowed: 'CORSResponse: This CORS request is not allowed. ' + - 'This is usually because the evalution of Origin, request method / ' + - 'Access-Control-Request-Method or Access-Control-Request-Headers ' + - 'are not whitelisted by the resource\'s CORS spec.', + notAllowed: + 'CORSResponse: This CORS request is not allowed. 
' + + 'This is usually because the evaluation of Origin, request method / ' + + 'Access-Control-Request-Method or Access-Control-Request-Headers ' + + "are not whitelisted by the resource's CORS spec.", }; /** corsPreflight - handle preflight CORS requests -* @param {object} request - http request object -* @param {function} log - Werelogs request logger -* @param {function} callback - callback to respond to http request -* with either error code or 200 response -* @return {undefined} -*/ + * @param {object} request - http request object + * @param {function} log - Werelogs request logger + * @param {function} callback - callback to respond to http request + * with either error code or 200 response + * @return {undefined} + */ function corsPreflight(request, log, callback) { log.debug('processing request', { method: 'corsPreflight' }); const bucketName = request.bucketName; const corsOrigin = request.headers.origin; const corsMethod = request.headers['access-control-request-method']; - const corsHeaders = request.headers['access-control-request-headers'] ? - request.headers['access-control-request-headers'].replace(/ /g, '') - .split(',').reduce((resultArr, value) => { - // remove empty values and convert values to lowercase - if (value !== '') { - resultArr.push(value.toLowerCase()); - } - return resultArr; - }, []) : null; + const corsHeaders = request.headers['access-control-request-headers'] + ? request.headers['access-control-request-headers'] + .replace(/ /g, '') + .split(',') + .reduce((resultArr, value) => { + // remove empty values and convert values to lowercase + if (value !== '') { + resultArr.push(value.toLowerCase()); + } + return resultArr; + }, []) + : null; return metadata.getBucket(bucketName, log, (err, bucket) => { if (err) { @@ -51,8 +54,7 @@ function corsPreflight(request, log, callback) { const corsRules = bucket.getCors(); if (!corsRules) { - const err = errorInstances.AccessForbidden - .customizeDescription(customizedErrs.corsNotEnabled); + const err = errorInstances.AccessForbidden.customizeDescription(customizedErrs.corsNotEnabled); log.trace('no existing cors configuration', { error: err, method: 'corsPreflight', @@ -61,12 +63,10 @@ function corsPreflight(request, log, callback) { } log.trace('finding cors rule'); - const corsRule = findCorsRule(corsRules, corsOrigin, corsMethod, - corsHeaders); + const corsRule = findCorsRule(corsRules, corsOrigin, corsMethod, corsHeaders); if (!corsRule) { - const err = errorInstances.AccessForbidden - .customizeDescription(customizedErrs.notAllowed); + const err = errorInstances.AccessForbidden.customizeDescription(customizedErrs.notAllowed); log.trace('no matching cors rule', { error: err, method: 'corsPreflight', @@ -74,8 +74,7 @@ function corsPreflight(request, log, callback) { return callback(err); } - const resHeaders = generateCorsResHeaders(corsRule, corsOrigin, - corsMethod, corsHeaders, true); + const resHeaders = generateCorsResHeaders(corsRule, corsOrigin, corsMethod, corsHeaders, true); // TODO: add some level of metrics for non-standard API request: // pushMetric('corsPreflight', log, { bucket: bucketName }); return callback(null, resHeaders); diff --git a/lib/api/initiateMultipartUpload.js b/lib/api/initiateMultipartUpload.js index 00eacda224..cb4afaa9b3 100644 --- a/lib/api/initiateMultipartUpload.js +++ b/lib/api/initiateMultipartUpload.js @@ -10,15 +10,12 @@ const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation'); const constants = require('../../constants'); const services =
require('../services'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); -const locationConstraintCheck - = require('./apiUtils/object/locationConstraintCheck'); -const validateWebsiteHeader = require('./apiUtils/object/websiteServing') - .validateWebsiteHeader; +const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck'); +const validateWebsiteHeader = require('./apiUtils/object/websiteServing').validateWebsiteHeader; const monitoring = require('../utilities/monitoringHandler'); const { data } = require('../data/wrapper'); const applyZenkoUserMD = require('./apiUtils/object/applyZenkoUserMD'); -const { validateHeaders, compareObjectLockInformation } = - require('./apiUtils/object/objectLockHelpers'); +const { validateHeaders, compareObjectLockInformation } = require('./apiUtils/object/objectLockHelpers'); const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); const { setSSEHeaders } = require('./apiUtils/object/sseHeaders'); @@ -54,9 +51,9 @@ function initiateMultipartUpload(authInfo, request, log, callback) { const objectKey = request.objectKey; if (hasNonPrintables(objectKey)) { - return callback(errorInstances.InvalidInput.customizeDescription( - 'object keys cannot contain non-printable characters', - )); + return callback( + errorInstances.InvalidInput.customizeDescription('object keys cannot contain non-printable characters') + ); } // Note that we are using the string set forth in constants.js @@ -65,19 +62,18 @@ function initiateMultipartUpload(authInfo, request, log, callback) { // there is the possiblity that the chosen splitter will occur in the object // name itself. To prevent this, we are restricting the creation of a // multipart upload object with a key containing the splitter. 
- const websiteRedirectHeader = - request.headers['x-amz-website-redirect-location']; - if (request.headers['x-amz-storage-class'] && - !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) { + const websiteRedirectHeader = request.headers['x-amz-website-redirect-location']; + if ( + request.headers['x-amz-storage-class'] && + !constants.validStorageClasses.includes(request.headers['x-amz-storage-class']) + ) { log.trace('invalid storage-class header'); - monitoring.promMetrics('PUT', bucketName, - errorInstances.InvalidStorageClass.code, 'initiateMultipartUpload'); + monitoring.promMetrics('PUT', bucketName, errorInstances.InvalidStorageClass.code, 'initiateMultipartUpload'); return callback(errors.InvalidStorageClass); } if (!validateWebsiteHeader(websiteRedirectHeader)) { const err = errors.InvalidRedirectLocation; - log.debug('invalid x-amz-website-redirect-location' + - `value ${websiteRedirectHeader}`, { error: err }); + log.debug('invalid x-amz-website-redirect-location ' + `value ${websiteRedirectHeader}`, { error: err }); return callback(err); } const metaHeaders = getMetaHeaders(request.headers); @@ -96,12 +92,10 @@ function initiateMultipartUpload(authInfo, request, log, callback) { // but after authentication so that string to sign is not impacted // This is GH Issue#89 // TODO: remove in CLDSRV-639 - const storageClassOptions = - ['standard', 'standard_ia', 'reduced_redundancy']; + const storageClassOptions = ['standard', 'standard_ia', 'reduced_redundancy']; let storageClass = 'STANDARD'; if (storageClassOptions.indexOf(request.headers['x-amz-storage-class']) > -1) { - storageClass = request.headers['x-amz-storage-class'] - .toUpperCase(); + storageClass = request.headers['x-amz-storage-class'].toUpperCase(); } const metadataValParams = { objectKey, @@ -143,8 +137,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) { metadataStoreParams.tagging = tagging; } - function _getMPUBucket(destinationBucket, log, corsHeaders, - uploadId, cipherBundle, locConstraint, callback) { + function _getMPUBucket(destinationBucket, log, corsHeaders, uploadId, cipherBundle, locConstraint, callback) { const xmlParams = { bucketName, objectKey, @@ -153,56 +146,59 @@ function initiateMultipartUpload(authInfo, request, log, callback) { const xml = convertToXml('initiateMultipartUpload', xmlParams); metadataStoreParams.uploadId = uploadId; - services.getMPUBucket(destinationBucket, bucketName, log, - (err, MPUbucket) => { - if (err) { - log.trace('error getting MPUbucket', { - error: err, - }); - return callback(err); - } - // BACKWARD: Remove to remove the old splitter - if (MPUbucket.getMdBucketModelVersion() < 2) { - metadataStoreParams.splitter = constants.oldSplitter; - } - return services.metadataStoreMPObject(MPUbucket.getName(), - cipherBundle, metadataStoreParams, - log, (err, mpuMD) => { - if (err) { - log.trace('error storing multipart object', { - error: err, - }); - monitoring.promMetrics('PUT', bucketName, err.code, - 'initiateMultipartUpload'); - return callback(err, null, corsHeaders); - } - log.addDefaultFields({ uploadId }); - log.trace('successfully initiated mpu'); - pushMetric('initiateMultipartUpload', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - location: locConstraint, + services.getMPUBucket(destinationBucket, bucketName, log, (err, MPUbucket) => { + if (err) { + log.trace('error getting MPUbucket', { + error: err, + }); + return callback(err); + } + // BACKWARD: Remove to remove the old splitter + if
(MPUbucket.getMdBucketModelVersion() < 2) { + metadataStoreParams.splitter = constants.oldSplitter; + } + return services.metadataStoreMPObject( + MPUbucket.getName(), + cipherBundle, + metadataStoreParams, + log, + (err, mpuMD) => { + if (err) { + log.trace('error storing multipart object', { + error: err, }); + monitoring.promMetrics('PUT', bucketName, err.code, 'initiateMultipartUpload'); + return callback(err, null, corsHeaders); + } + log.addDefaultFields({ uploadId }); + log.trace('successfully initiated mpu'); + pushMetric('initiateMultipartUpload', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + location: locConstraint, + }); - // TODO: rename corsHeaders to headers - setExpirationHeaders(corsHeaders, { - lifecycleConfig: destinationBucket.getLifecycleConfiguration(), - mpuParams: { - key: mpuMD.key, - date: mpuMD.initiated, - }, - }); + // TODO: rename corsHeaders to headers + setExpirationHeaders(corsHeaders, { + lifecycleConfig: destinationBucket.getLifecycleConfiguration(), + mpuParams: { + key: mpuMD.key, + date: mpuMD.initiated, + }, + }); - setSSEHeaders(corsHeaders, - mpuMD['x-amz-server-side-encryption'], - mpuMD['x-amz-server-side-encryption-aws-kms-key-id']); + setSSEHeaders( + corsHeaders, + mpuMD['x-amz-server-side-encryption'], + mpuMD['x-amz-server-side-encryption-aws-kms-key-id'] + ); - monitoring.promMetrics('PUT', bucketName, '200', - 'initiateMultipartUpload'); - return callback(null, xml, corsHeaders); - }); - }); + monitoring.promMetrics('PUT', bucketName, '200', 'initiateMultipartUpload'); + return callback(null, xml, corsHeaders); + } + ); + }); } function _storetheMPObject(destinationBucket, corsHeaders, serverSideEncryption) { @@ -217,8 +213,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) { masterKeyId: configuredMasterKeyId || masterKeyId, }; } - const backendInfoObj = locationConstraintCheck(request, null, - destinationBucket, log); + const backendInfoObj = locationConstraintCheck(request, null, destinationBucket, log); if (backendInfoObj.err) { return process.nextTick(() => { callback(backendInfoObj.err); @@ -229,21 +224,17 @@ function initiateMultipartUpload(authInfo, request, log, callback) { metadataStoreParams.dataStoreName = locConstraint; if (request.headers) { - const objectLockValError = - validateHeaders(destinationBucket, request.headers, log); + const objectLockValError = validateHeaders(destinationBucket, request.headers, log); if (objectLockValError) { return callback(objectLockValError); } } const defaultRetention = destinationBucket.getObjectLockConfiguration(); - const finalObjectLockInfo = - compareObjectLockInformation(request.headers, defaultRetention); + const finalObjectLockInfo = compareObjectLockInformation(request.headers, defaultRetention); if (finalObjectLockInfo.retentionInfo) { - metadataStoreParams.retentionMode = - finalObjectLockInfo.retentionInfo.mode; - metadataStoreParams.retentionDate = - finalObjectLockInfo.retentionInfo.date; + metadataStoreParams.retentionMode = finalObjectLockInfo.retentionInfo.mode; + metadataStoreParams.retentionDate = finalObjectLockInfo.retentionInfo.date; } if (finalObjectLockInfo.legalHold) { metadataStoreParams.legalHold = finalObjectLockInfo.legalHold; @@ -261,9 +252,11 @@ function initiateMultipartUpload(authInfo, request, log, callback) { const putVersionId = request.headers['x-scal-s3-version-id']; const isPutVersion = putVersionId || putVersionId === ''; - if (isPutVersion && + if ( + isPutVersion && locConstraint === 
destinationBucket.getLocationConstraint() && - destinationBucket.isIngestionBucket()) { + destinationBucket.isIngestionBucket() + ) { // When restoring to OOB bucket, we cannot force the versionId of the object written to the // backend, and it is thus not match the versionId of the ingested object. Thus we add extra // user metadata to allow OOB to allow ingestion processor to "match" the (new) restored @@ -271,18 +264,15 @@ function initiateMultipartUpload(authInfo, request, log, callback) { mpuInfo.metaHeaders['x-amz-meta-scal-version-id'] = putVersionId; } - return data.initiateMPU(mpuInfo, websiteRedirectHeader, log, - (err, dataBackendResObj, isVersionedObj) => { + return data.initiateMPU(mpuInfo, websiteRedirectHeader, log, (err, dataBackendResObj, isVersionedObj) => { // will return as true and a custom error if external backend does // not support versioned objects if (isVersionedObj) { - monitoring.promMetrics('PUT', bucketName, 501, - 'initiateMultipartUpload'); + monitoring.promMetrics('PUT', bucketName, 501, 'initiateMultipartUpload'); return callback(err); } if (err) { - monitoring.promMetrics('PUT', bucketName, err.code, - 'initiateMultipartUpload'); + monitoring.promMetrics('PUT', bucketName, err.code, 'initiateMultipartUpload'); return callback(err); } // if mpu not handled externally, dataBackendResObj will be null @@ -292,64 +282,71 @@ function initiateMultipartUpload(authInfo, request, log, callback) { // Generate uniqueID without dashes so routing not messed up uploadId = uuidv4().replace(/-/g, ''); } - return _getMPUBucket(destinationBucket, log, corsHeaders, - uploadId, cipherBundle, locConstraint, callback); + return _getMPUBucket(destinationBucket, log, corsHeaders, uploadId, cipherBundle, locConstraint, callback); }); } - async.waterfall([ - next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, - (error, destinationBucket, destObjMD) => - updateEncryption(error, destinationBucket, destObjMD, objectKey, log, { skipObject: true }, - (error, destinationBucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); - if (error) { - log.debug('error processing request', { - error, - method: 'metadataValidateBucketAndObj', - }); - monitoring.promMetrics('PUT', bucketName, error.code, 'initiateMultipartUpload'); - return next(error, corsHeaders); - } - return next(null, corsHeaders, destinationBucket); - })), - (corsHeaders, destinationBucket, next) => { - if (destinationBucket.hasDeletedFlag() && accountCanonicalID !== destinationBucket.getOwner()) { - log.trace('deleted flag on bucket and request from non-owner account'); - monitoring.promMetrics('PUT', bucketName, 404, 'initiateMultipartUpload'); - return next(errors.NoSuchBucket, corsHeaders); - } - if (destinationBucket.hasTransientFlag() || destinationBucket.hasDeletedFlag()) { - log.trace('transient or deleted flag so cleaning up bucket'); - return cleanUpBucket( - destinationBucket, - accountCanonicalID, - log, - error => { - if (error) { - log.debug('error cleaning up bucket with flag', - { + async.waterfall( + [ + next => + standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (error, destinationBucket, destObjMD) => + updateEncryption( + error, + destinationBucket, + destObjMD, + objectKey, + log, + { skipObject: true }, + (error, destinationBucket) => { + const corsHeaders = collectCorsHeaders( + request.headers.origin, + request.method, + destinationBucket + ); + if 
(error) { + log.debug('error processing request', { error, - transientFlag: destinationBucket.hasTransientFlag(), - deletedFlag: destinationBucket.hasDeletedFlag(), + method: 'metadataValidateBucketAndObj', }); - // To avoid confusing user with error - // from cleaning up - // bucket return InternalError - monitoring.promMetrics('PUT', bucketName, 500, 'initiateMultipartUpload'); - return next(errors.InternalError, corsHeaders); + monitoring.promMetrics('PUT', bucketName, error.code, 'initiateMultipartUpload'); + return next(error, corsHeaders); + } + return next(null, corsHeaders, destinationBucket); } - return next(null, corsHeaders, destinationBucket); - }); - } - return next(null, corsHeaders, destinationBucket); - }, - (corsHeaders, destinationBucket, next) => - getObjectSSEConfiguration( - request.headers, - destinationBucket, - log, - (error, objectSSEConfig) => { + ) + ), + (corsHeaders, destinationBucket, next) => { + if (destinationBucket.hasDeletedFlag() && accountCanonicalID !== destinationBucket.getOwner()) { + log.trace('deleted flag on bucket and request from non-owner account'); + monitoring.promMetrics('PUT', bucketName, 404, 'initiateMultipartUpload'); + return next(errors.NoSuchBucket, corsHeaders); + } + if (destinationBucket.hasTransientFlag() || destinationBucket.hasDeletedFlag()) { + log.trace('transient or deleted flag so cleaning up bucket'); + return cleanUpBucket(destinationBucket, accountCanonicalID, log, error => { + if (error) { + log.debug('error cleaning up bucket with flag', { + error, + transientFlag: destinationBucket.hasTransientFlag(), + deletedFlag: destinationBucket.hasDeletedFlag(), + }); + // To avoid confusing user with error + // from cleaning up + // bucket return InternalError + monitoring.promMetrics('PUT', bucketName, 500, 'initiateMultipartUpload'); + return next(errors.InternalError, corsHeaders); + } + return next(null, corsHeaders, destinationBucket); + }); + } + return next(null, corsHeaders, destinationBucket); + }, + (corsHeaders, destinationBucket, next) => + getObjectSSEConfiguration(request.headers, destinationBucket, log, (error, objectSSEConfig) => { if (error) { log.error('error fetching server-side encryption config', { error, @@ -358,17 +355,17 @@ function initiateMultipartUpload(authInfo, request, log, callback) { return next(error, corsHeaders); } return next(null, corsHeaders, destinationBucket, objectSSEConfig); + }), + // If SSE configured, test kms key encryption access, but ignore cipher bundle + (corsHeaders, destinationBucket, objectSSEConfig, next) => { + if (objectSSEConfig) { + return kms.createCipherBundle(objectSSEConfig, log, err => + next(err, corsHeaders, destinationBucket, objectSSEConfig) + ); } - ), - // If SSE configured, test kms key encryption access, but ignore cipher bundle - (corsHeaders, destinationBucket, objectSSEConfig, next) => { - if (objectSSEConfig) { - return kms.createCipherBundle(objectSSEConfig, log, - err => next(err, corsHeaders, destinationBucket, objectSSEConfig)); - } - return next(null, corsHeaders, destinationBucket, objectSSEConfig); - }, - ], + return next(null, corsHeaders, destinationBucket, objectSSEConfig); + }, + ], (error, corsHeaders, destinationBucket, objectSSEConfig) => { if (error) { return callback(error, null, corsHeaders); diff --git a/lib/api/listMultipartUploads.js b/lib/api/listMultipartUploads.js index 71e428669c..cc43b4679c 100644 --- a/lib/api/listMultipartUploads.js +++ b/lib/api/listMultipartUploads.js @@ -101,72 +101,71 @@ function 
listMultipartUploads(authInfo, request, log, callback) { request, }; - async.waterfall([ - function waterfall1(next) { - // Check final destination bucket for authorization rather - // than multipart upload bucket - standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, - (err, bucket) => next(err, bucket)); - }, - function getMPUBucket(bucket, next) { - services.getMPUBucket(bucket, bucketName, log, - (err, mpuBucket) => next(err, bucket, mpuBucket)); - }, - function waterfall2(bucket, mpuBucket, next) { - let splitter = constants.splitter; - // BACKWARD: Remove to remove the old splitter - if (mpuBucket.getMdBucketModelVersion() < 2) { - splitter = constants.oldSplitter; + async.waterfall( + [ + function waterfall1(next) { + // Check final destination bucket for authorization rather + // than multipart upload bucket + standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => + next(err, bucket) + ); + }, + function getMPUBucket(bucket, next) { + services.getMPUBucket(bucket, bucketName, log, (err, mpuBucket) => next(err, bucket, mpuBucket)); + }, + function waterfall2(bucket, mpuBucket, next) { + let splitter = constants.splitter; + // BACKWARD: Remove to remove the old splitter + if (mpuBucket.getMdBucketModelVersion() < 2) { + splitter = constants.oldSplitter; + } + let maxUploads = query['max-uploads'] !== undefined ? Number.parseInt(query['max-uploads'], 10) : 1000; + if (maxUploads < 0) { + monitoring.promMetrics('GET', bucketName, 400, 'listMultipartUploads'); + return callback(errors.InvalidArgument, bucket); + } + if (maxUploads > constants.listingHardLimit) { + maxUploads = constants.listingHardLimit; + } + const listingParams = { + delimiter: query.delimiter, + keyMarker: query['key-marker'], + uploadIdMarker: query['upload-id-marker'], + maxKeys: maxUploads, + prefix: `overview${splitter}${prefix}`, + queryPrefixLength: prefix.length, + listingType: 'MPU', + splitter, + }; + services.getMultipartUploadListing(mpuBucketName, listingParams, log, (err, list) => + next(err, bucket, list) + ); + return undefined; + }, + ], + (err, bucket, list) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + monitoring.promMetrics('GET', bucketName, err.code, 'listMultipartUploads'); + return callback(err, null, corsHeaders); } - let maxUploads = query['max-uploads'] !== undefined ? 
- Number.parseInt(query['max-uploads'], 10) : 1000; - if (maxUploads < 0) { - monitoring.promMetrics('GET', bucketName, 400, - 'listMultipartUploads'); - return callback(errors.InvalidArgument, bucket); - } - if (maxUploads > constants.listingHardLimit) { - maxUploads = constants.listingHardLimit; - } - const listingParams = { - delimiter: query.delimiter, + const xmlParams = { + bucketName, + encoding, + list, + prefix: query.prefix, keyMarker: query['key-marker'], uploadIdMarker: query['upload-id-marker'], - maxKeys: maxUploads, - prefix: `overview${splitter}${prefix}`, - queryPrefixLength: prefix.length, - listingType: 'MPU', - splitter, }; - services.getMultipartUploadListing(mpuBucketName, listingParams, - log, (err, list) => next(err, bucket, list)); - return undefined; - }, - ], (err, bucket, list) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - monitoring.promMetrics('GET', bucketName, err.code, - 'listMultipartUploads'); - return callback(err, null, corsHeaders); + const xml = convertToXml('listMultipartUploads', xmlParams); + pushMetric('listMultipartUploads', log, { + authInfo, + bucket: bucketName, + }); + monitoring.promMetrics('GET', bucketName, '200', 'listMultipartUploads'); + return callback(null, xml, corsHeaders); } - const xmlParams = { - bucketName, - encoding, - list, - prefix: query.prefix, - keyMarker: query['key-marker'], - uploadIdMarker: query['upload-id-marker'], - }; - const xml = convertToXml('listMultipartUploads', xmlParams); - pushMetric('listMultipartUploads', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics( - 'GET', bucketName, '200', 'listMultipartUploads'); - return callback(null, xml, corsHeaders); - }); + ); } module.exports = listMultipartUploads; diff --git a/lib/api/listParts.js b/lib/api/listParts.js index 9ceec93e6e..5da6616761 100644 --- a/lib/api/listParts.js +++ b/lib/api/listParts.js @@ -5,8 +5,7 @@ const { errors, s3middleware } = require('arsenal'); const constants = require('../../constants'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); -const locationConstraintCheck = - require('./apiUtils/object/locationConstraintCheck'); +const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck'); const services = require('../services'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const escapeForXml = s3middleware.escapeForXml; @@ -57,8 +56,7 @@ function buildXML(xmlParams, xml, encodingFn) { xmlParams.forEach(param => { if (param.value !== undefined) { xml.push(`<${param.tag}>${encodingFn(param.value)}`); - } else if (param.tag !== 'NextPartNumberMarker' && - param.tag !== 'PartNumberMarker') { + } else if (param.tag !== 'NextPartNumberMarker' && param.tag !== 'PartNumberMarker') { xml.push(`<${param.tag}/>`); } }); @@ -79,19 +77,19 @@ function listParts(authInfo, request, log, callback) { const objectKey = request.objectKey; const uploadId = request.query.uploadId; const encoding = request.query['encoding-type']; - let maxParts = Number.parseInt(request.query['max-parts'], 10) ? - Number.parseInt(request.query['max-parts'], 10) : 1000; + let maxParts = Number.parseInt(request.query['max-parts'], 10) + ? 
Number.parseInt(request.query['max-parts'], 10) + : 1000; if (maxParts < 0) { - monitoring.promMetrics('GET', bucketName, 400, - 'listMultipartUploadParts'); + monitoring.promMetrics('GET', bucketName, 400, 'listMultipartUploadParts'); return callback(errors.InvalidArgument); } if (maxParts > constants.listingHardLimit) { maxParts = constants.listingHardLimit; } - const partNumberMarker = - Number.parseInt(request.query['part-number-marker'], 10) ? - Number.parseInt(request.query['part-number-marker'], 10) : 0; + const partNumberMarker = Number.parseInt(request.query['part-number-marker'], 10) + ? Number.parseInt(request.query['part-number-marker'], 10) + : 0; const metadataValMPUparams = { authInfo, bucketName, @@ -112,192 +110,202 @@ function listParts(authInfo, request, log, callback) { let splitter = constants.splitter; const responseHeaders = {}; - async.waterfall([ - function checkDestBucketVal(next) { - standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, - (err, destinationBucket) => { - if (err) { - return next(err, destinationBucket, null); - } - if (destinationBucket.policies) { - // TODO: Check bucket policies to see if user is granted - // permission or forbidden permission to take - // given action. - // If permitted, add 'bucketPolicyGoAhead' - // attribute to params for validating at MPU level. - // This is GH Issue#76 - metadataValMPUparams.requestType = - 'bucketPolicyGoAhead'; + async.waterfall( + [ + function checkDestBucketVal(next) { + standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, destinationBucket) => { + if (err) { + return next(err, destinationBucket, null); + } + if (destinationBucket.policies) { + // TODO: Check bucket policies to see if user is granted + // permission or forbidden permission to take + // given action. + // If permitted, add 'bucketPolicyGoAhead' + // attribute to params for validating at MPU level. 
+ // This is GH Issue#76 + metadataValMPUparams.requestType = 'bucketPolicyGoAhead'; + } + return next(null, destinationBucket); } - return next(null, destinationBucket); - }); - }, - function waterfall2(destBucket, next) { - metadataValMPUparams.log = log; - services.metadataValidateMultipart(metadataValMPUparams, - (err, mpuBucket, mpuOverviewObj) => { + ); + }, + function waterfall2(destBucket, next) { + metadataValMPUparams.log = log; + services.metadataValidateMultipart(metadataValMPUparams, (err, mpuBucket, mpuOverviewObj) => { if (err) { return next(err, destBucket, null); } return next(null, destBucket, mpuBucket, mpuOverviewObj); }); - }, - function waterfall3(destBucket, mpuBucket, mpuOverviewObj, next) { - const mpuInfo = { - objectKey, - uploadId, - bucketName, - partNumberMarker, - maxParts, - mpuOverviewObj, - destBucket, - }; - const originalIdentityImpDenies = request.actionImplicitDenies; - // eslint-disable-next-line no-param-reassign - delete request.actionImplicitDenies; - return data.listParts(mpuInfo, request, locationConstraintCheck, - log, (err, backendPartList) => { + }, + function waterfall3(destBucket, mpuBucket, mpuOverviewObj, next) { + const mpuInfo = { + objectKey, + uploadId, + bucketName, + partNumberMarker, + maxParts, + mpuOverviewObj, + destBucket, + }; + const originalIdentityImpDenies = request.actionImplicitDenies; // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = originalIdentityImpDenies; - if (err) { - return next(err, destBucket); - } - // if external backend doesn't handle mpu, backendPartList - // will be null - return next(null, destBucket, mpuBucket, mpuOverviewObj, - backendPartList); - }); - }, - function waterfall4(destBucket, mpuBucket, mpuOverviewObj, - backendPartList, next) { - // if parts were returned from cloud backend, they were not - // stored in Scality S3 metadata, so this step can be skipped - if (backendPartList) { - return next(null, destBucket, mpuBucket, backendPartList, - mpuOverviewObj); - } - // BACKWARD: Remove to remove the old splitter - if (mpuBucket.getMdBucketModelVersion() < 2) { - splitter = constants.oldSplitter; - } - const getPartsParams = { - uploadId, - mpuBucketName: mpuBucket.getName(), - maxParts, - partNumberMarker, - log, - splitter, - }; - return services.getSomeMPUparts(getPartsParams, - (err, storedParts) => { - if (err) { - return next(err, destBucket, null); + delete request.actionImplicitDenies; + return data.listParts(mpuInfo, request, locationConstraintCheck, log, (err, backendPartList) => { + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = originalIdentityImpDenies; + if (err) { + return next(err, destBucket); + } + // if external backend doesn't handle mpu, backendPartList + // will be null + return next(null, destBucket, mpuBucket, mpuOverviewObj, backendPartList); + }); + }, + function waterfall4(destBucket, mpuBucket, mpuOverviewObj, backendPartList, next) { + // if parts were returned from cloud backend, they were not + // stored in Scality S3 metadata, so this step can be skipped + if (backendPartList) { + return next(null, destBucket, mpuBucket, backendPartList, mpuOverviewObj); } - return next(null, destBucket, mpuBucket, storedParts, - mpuOverviewObj); - }); - }, function waterfall5(destBucket, mpuBucket, storedParts, - mpuOverviewObj, next) { - const encodingFn = encoding === 'url' - ? 
querystring.escape : escapeForXml; - const isTruncated = storedParts.IsTruncated; - const splitterLen = splitter.length; - const partListing = storedParts.Contents.map(item => { - // key form: - // - {uploadId} - // - {splitter} - // - {partNumber} - let partNumber; - if (item.key) { - const index = item.key.lastIndexOf(splitter); - partNumber = - parseInt(item.key.substring(index + splitterLen), 10); - } else { - // if partListing came from real AWS backend, - // item.partNumber is present instead of item.key - partNumber = item.partNumber; + // BACKWARD: Remove to remove the old splitter + if (mpuBucket.getMdBucketModelVersion() < 2) { + splitter = constants.oldSplitter; } - return { - partNumber, - lastModified: item.value.LastModified, - ETag: item.value.ETag, - size: item.value.Size, + const getPartsParams = { + uploadId, + mpuBucketName: mpuBucket.getName(), + maxParts, + partNumberMarker, + log, + splitter, }; - }); - const lastPartShown = partListing.length > 0 ? - partListing[partListing.length - 1].partNumber : undefined; + return services.getSomeMPUparts(getPartsParams, (err, storedParts) => { + if (err) { + return next(err, destBucket, null); + } + return next(null, destBucket, mpuBucket, storedParts, mpuOverviewObj); + }); + }, + function waterfall5(destBucket, mpuBucket, storedParts, mpuOverviewObj, next) { + const encodingFn = encoding === 'url' ? querystring.escape : escapeForXml; + const isTruncated = storedParts.IsTruncated; + const splitterLen = splitter.length; + const partListing = storedParts.Contents.map(item => { + // key form: + // - {uploadId} + // - {splitter} + // - {partNumber} + let partNumber; + if (item.key) { + const index = item.key.lastIndexOf(splitter); + partNumber = parseInt(item.key.substring(index + splitterLen), 10); + } else { + // if partListing came from real AWS backend, + // item.partNumber is present instead of item.key + partNumber = item.partNumber; + } + return { + partNumber, + lastModified: item.value.LastModified, + ETag: item.value.ETag, + size: item.value.Size, + }; + }); + const lastPartShown = + partListing.length > 0 ? partListing[partListing.length - 1].partNumber : undefined; - setExpirationHeaders(responseHeaders, { - lifecycleConfig: destBucket.getLifecycleConfiguration(), - mpuParams: { - key: mpuOverviewObj.key, - date: mpuOverviewObj.initiated, - }, - }); + setExpirationHeaders(responseHeaders, { + lifecycleConfig: destBucket.getLifecycleConfiguration(), + mpuParams: { + key: mpuOverviewObj.key, + date: mpuOverviewObj.initiated, + }, + }); - const xml = []; - xml.push( - '', - '' - ); - buildXML([ - { tag: 'Bucket', value: bucketName }, - { tag: 'Key', value: objectKey }, - { tag: 'UploadId', value: uploadId }, - ], xml, encodingFn); - xml.push(''); - buildXML([ - { tag: 'ID', value: mpuOverviewObj.initiatorID }, - { tag: 'DisplayName', - value: mpuOverviewObj.initiatorDisplayName }, - ], xml, encodingFn); - xml.push(''); - xml.push(''); - buildXML([ - { tag: 'ID', value: mpuOverviewObj.ownerID }, - { tag: 'DisplayName', value: mpuOverviewObj.ownerDisplayName }, - ], xml, encodingFn); - xml.push(''); - buildXML([ - { tag: 'StorageClass', value: mpuOverviewObj.storageClass }, - { tag: 'PartNumberMarker', value: partNumberMarker || - undefined }, - // print only if it's truncated - { tag: 'NextPartNumberMarker', value: isTruncated ? - parseInt(lastPartShown, 10) : undefined }, - { tag: 'MaxParts', value: maxParts }, - { tag: 'IsTruncated', value: isTruncated ? 
'true' : 'false' }, - ], xml, encodingFn); + const xml = []; + xml.push( + '', + '' + ); + buildXML( + [ + { tag: 'Bucket', value: bucketName }, + { tag: 'Key', value: objectKey }, + { tag: 'UploadId', value: uploadId }, + ], + xml, + encodingFn + ); + xml.push(''); + buildXML( + [ + { tag: 'ID', value: mpuOverviewObj.initiatorID }, + { tag: 'DisplayName', value: mpuOverviewObj.initiatorDisplayName }, + ], + xml, + encodingFn + ); + xml.push(''); + xml.push(''); + buildXML( + [ + { tag: 'ID', value: mpuOverviewObj.ownerID }, + { tag: 'DisplayName', value: mpuOverviewObj.ownerDisplayName }, + ], + xml, + encodingFn + ); + xml.push(''); + buildXML( + [ + { tag: 'StorageClass', value: mpuOverviewObj.storageClass }, + { tag: 'PartNumberMarker', value: partNumberMarker || undefined }, + // print only if it's truncated + { tag: 'NextPartNumberMarker', value: isTruncated ? parseInt(lastPartShown, 10) : undefined }, + { tag: 'MaxParts', value: maxParts }, + { tag: 'IsTruncated', value: isTruncated ? 'true' : 'false' }, + ], + xml, + encodingFn + ); - partListing.forEach(part => { - xml.push(''); - buildXML([ - { tag: 'PartNumber', value: part.partNumber }, - { tag: 'LastModified', value: part.lastModified }, - { tag: 'ETag', value: `"${part.ETag}"` }, - { tag: 'Size', value: part.size }, - ], xml, encodingFn); - xml.push(''); - }); - xml.push(''); - pushMetric('listMultipartUploadParts', log, { - authInfo, - bucket: bucketName, - }); - monitoring.promMetrics( - 'GET', bucketName, '200', 'listMultipartUploadParts'); - next(null, destBucket, xml.join('')); - }, - ], (err, destinationBucket, xml) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, destinationBucket); - monitoring.promMetrics('GET', bucketName, 400, - 'listMultipartUploadParts'); - Object.assign(responseHeaders, corsHeaders); + partListing.forEach(part => { + xml.push(''); + buildXML( + [ + { tag: 'PartNumber', value: part.partNumber }, + { tag: 'LastModified', value: part.lastModified }, + { tag: 'ETag', value: `"${part.ETag}"` }, + { tag: 'Size', value: part.size }, + ], + xml, + encodingFn + ); + xml.push(''); + }); + xml.push(''); + pushMetric('listMultipartUploadParts', log, { + authInfo, + bucket: bucketName, + }); + monitoring.promMetrics('GET', bucketName, '200', 'listMultipartUploadParts'); + next(null, destBucket, xml.join('')); + }, + ], + (err, destinationBucket, xml) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); + monitoring.promMetrics('GET', bucketName, 400, 'listMultipartUploadParts'); + Object.assign(responseHeaders, corsHeaders); - return callback(err, xml, responseHeaders); - }); + return callback(err, xml, responseHeaders); + } + ); return undefined; } diff --git a/lib/api/metadataSearch.js b/lib/api/metadataSearch.js index 959068eee0..2ce8ef3602 100644 --- a/lib/api/metadataSearch.js +++ b/lib/api/metadataSearch.js @@ -8,13 +8,10 @@ const validateSearchParams = require('../api/apiUtils/bucket/validateSearch'); const parseWhere = require('../api/apiUtils/bucket/parseWhere'); const versionIdUtils = versioning.VersionID; const monitoring = require('../utilities/monitoringHandler'); -const { decryptToken } - = require('../api/apiUtils/object/continueToken'); +const { decryptToken } = require('../api/apiUtils/object/continueToken'); const { processVersions, processMasterVersions } = require('./bucketGet'); - -function handleResult(listParams, requestMaxKeys, encoding, authInfo, - bucketName, list, corsHeaders, log, callback) 
{ +function handleResult(listParams, requestMaxKeys, encoding, authInfo, bucketName, list, corsHeaders, log, callback) { // eslint-disable-next-line no-param-reassign listParams.maxKeys = requestMaxKeys; // eslint-disable-next-line no-param-reassign @@ -45,22 +42,21 @@ function metadataSearch(authInfo, request, log, callback) { const bucketName = request.bucketName; const v2 = params['list-type']; if (v2 !== undefined && Number.parseInt(v2, 10) !== 2) { - return callback(errorInstances.InvalidArgument.customizeDescription('Invalid ' + - 'List Type specified in Request')); + return callback( + errorInstances.InvalidArgument.customizeDescription('Invalid ' + 'List Type specified in Request') + ); } log.debug('processing request', { method: 'metadataSearch' }); const encoding = params['encoding-type']; if (encoding !== undefined && encoding !== 'url') { - monitoring.promMetrics( - 'GET', bucketName, 400, 'metadataSearch'); - return callback(errorInstances.InvalidArgument.customizeDescription('Invalid ' + - 'Encoding Method specified in Request')); + monitoring.promMetrics('GET', bucketName, 400, 'metadataSearch'); + return callback( + errorInstances.InvalidArgument.customizeDescription('Invalid ' + 'Encoding Method specified in Request') + ); } - const requestMaxKeys = params['max-keys'] ? - Number.parseInt(params['max-keys'], 10) : 1000; + const requestMaxKeys = params['max-keys'] ? Number.parseInt(params['max-keys'], 10) : 1000; if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) { - monitoring.promMetrics( - 'GET', bucketName, 400, 'metadataSearch'); + monitoring.promMetrics('GET', bucketName, 400, 'metadataSearch'); return callback(errors.InvalidArgument); } // AWS only returns 1000 keys even if max keys are greater. @@ -87,37 +83,34 @@ function metadataSearch(authInfo, request, log, callback) { log.debug(err.message, { stack: err.stack, }); - monitoring.promMetrics( - 'GET', bucketName, 400, 'metadataSearch'); - return callback(errorInstances.InvalidArgument - .customizeDescription('Invalid sql where clause ' + - 'sent as search query')); + monitoring.promMetrics('GET', bucketName, 400, 'metadataSearch'); + return callback( + errorInstances.InvalidArgument.customizeDescription('Invalid sql where clause ' + 'sent as search query') + ); } if (v2) { listParams.v2 = true; listParams.startAfter = params['start-after']; - listParams.continuationToken = - decryptToken(params['continuation-token']); + listParams.continuationToken = decryptToken(params['continuation-token']); listParams.fetchOwner = params['fetch-owner'] === 'true'; } else { listParams.marker = params.marker; } standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); if (err) { log.debug('error processing request', { error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'metadataSearch'); + monitoring.promMetrics('GET', bucketName, err.code, 'metadataSearch'); return callback(err, null, corsHeaders); } if (params.versions !== undefined) { listParams.listingType = 'DelimiterVersions'; delete listParams.marker; listParams.keyMarker = params['key-marker']; - listParams.versionIdMarker = params['version-id-marker'] ? - versionIdUtils.decode(params['version-id-marker']) : undefined; + listParams.versionIdMarker = params['version-id-marker'] + ? 
versionIdUtils.decode(params['version-id-marker']) + : undefined; } if (!requestMaxKeys) { const emptyList = { @@ -126,20 +119,36 @@ function metadataSearch(authInfo, request, log, callback) { Versions: [], IsTruncated: false, }; - return handleResult(listParams, requestMaxKeys, encoding, authInfo, - bucketName, emptyList, corsHeaders, log, callback); + return handleResult( + listParams, + requestMaxKeys, + encoding, + authInfo, + bucketName, + emptyList, + corsHeaders, + log, + callback + ); } - return services.getObjectListing(bucketName, listParams, log, - (err, list) => { - if (err) { - log.debug('error processing request', { error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'metadataSearch'); - return callback(err, null, corsHeaders); - } - return handleResult(listParams, requestMaxKeys, encoding, authInfo, - bucketName, list, corsHeaders, log, callback); - }); + return services.getObjectListing(bucketName, listParams, log, (err, list) => { + if (err) { + log.debug('error processing request', { error: err }); + monitoring.promMetrics('GET', bucketName, err.code, 'metadataSearch'); + return callback(err, null, corsHeaders); + } + return handleResult( + listParams, + requestMaxKeys, + encoding, + authInfo, + bucketName, + list, + corsHeaders, + log, + callback + ); + }); }); return undefined; } diff --git a/lib/api/multiObjectDelete.js b/lib/api/multiObjectDelete.js index 27d8dec3ff..ab59db541c 100644 --- a/lib/api/multiObjectDelete.js +++ b/lib/api/multiObjectDelete.js @@ -11,17 +11,18 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const metadata = require('../metadata/wrapper'); const services = require('../services'); const vault = require('../auth/vault'); -const { isBucketAuthorized, evaluateBucketPolicyWithIAM } = - require('./apiUtils/authorization/permissionChecks'); -const { preprocessingVersioningDelete } - = require('./apiUtils/object/versioning'); +const { isBucketAuthorized, evaluateBucketPolicyWithIAM } = require('./apiUtils/authorization/permissionChecks'); +const { preprocessingVersioningDelete } = require('./apiUtils/object/versioning'); const createAndStoreObject = require('./apiUtils/object/createAndStoreObject'); const monitoring = require('../utilities/monitoringHandler'); const metadataUtils = require('../metadata/metadataUtils'); const { config } = require('../Config'); const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks'); -const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo } - = require('./apiUtils/object/objectLockHelpers'); +const { + hasGovernanceBypassHeader, + checkUserGovernanceBypass, + ObjectLockInfo, +} = require('./apiUtils/object/objectLockHelpers'); const requestUtils = policies.requestUtils; const { validObjectKeys } = require('../routes/routeVeeam'); const { deleteVeeamCapabilities } = require('../routes/veeam/delete'); @@ -48,8 +49,7 @@ const { processBytesToWrite, validateQuotas } = require('./apiUtils/quotas/quota */ - - /* +/* Format of xml response: @@ -65,36 +65,35 @@ const { processBytesToWrite, validateQuotas } = require('./apiUtils/quotas/quota */ /** -* formats xml for response -* @param {boolean} quietSetting - true if xml should just include error list -* and false if should include deleted list and error list -* @param {object []} errorResults - list of error result objects with each -* object containing -- entry: { key, versionId }, error: arsenal error -* @param {object []} deleted - list of object deleted, an object has 
the format -* object: { entry, isDeleteMarker, isDeletingDeleteMarker } -* object.entry : above -* object.newDeleteMarker: if deletion resulted in delete marker -* object.isDeletingDeleteMarker: if a delete marker was deleted -* @return {string} xml string -*/ + * formats xml for response + * @param {boolean} quietSetting - true if xml should just include error list + * and false if should include deleted list and error list + * @param {object []} errorResults - list of error result objects with each + * object containing -- entry: { key, versionId }, error: arsenal error + * @param {object []} deleted - list of object deleted, an object has the format + * object: { entry, isDeleteMarker, isDeletingDeleteMarker } + * object.entry : above + * object.newDeleteMarker: if deletion resulted in delete marker + * object.isDeletingDeleteMarker: if a delete marker was deleted + * @return {string} xml string + */ function _formatXML(quietSetting, errorResults, deleted) { let errorXML = []; errorResults.forEach(errorObj => { errorXML.push( - '', - '', escapeForXml(errorObj.entry.key), '', - '', escapeForXml(errorObj.error.message), ''); + '', + '', + escapeForXml(errorObj.entry.key), + '', + '', + escapeForXml(errorObj.error.message), + '' + ); if (errorObj.entry.versionId) { - const version = errorObj.entry.versionId === 'null' ? - 'null' : escapeForXml(errorObj.entry.versionId); + const version = errorObj.entry.versionId === 'null' ? 'null' : escapeForXml(errorObj.entry.versionId); errorXML.push('', version, ''); } - errorXML.push( - '', - escapeForXml(errorObj.error.description), - '', - '' - ); + errorXML.push('', escapeForXml(errorObj.error.description), '', ''); }); errorXML = errorXML.join(''); const xml = [ @@ -115,18 +114,9 @@ function _formatXML(quietSetting, errorResults, deleted) { const isDeleteMarker = version.isDeleteMarker; const deleteMarkerVersionId = version.deleteMarkerVersionId; // if deletion resulted in new delete marker or deleting a delete marker - deletedXML.push( - '', - '', - escapeForXml(version.entry.key), - '' - ); + deletedXML.push('', '', escapeForXml(version.entry.key), ''); if (version.entry.versionId) { - deletedXML.push( - '', - escapeForXml(version.entry.versionId), - '' - ); + deletedXML.push('', escapeForXml(version.entry.versionId), ''); } if (isDeleteMarker) { deletedXML.push( @@ -182,8 +172,7 @@ function _parseXml(xmlToParse, next) { function decodeObjectVersion(entry) { let decodedVersionId; if (entry.versionId) { - decodedVersionId = entry.versionId === 'null' ? - 'null' : versionIdUtils.decode(entry.versionId); + decodedVersionId = entry.versionId === 'null' ? 
'null' : versionIdUtils.decode(entry.versionId); } if (decodedVersionId instanceof Error) { return [errors.NoSuchVersion]; @@ -231,25 +220,35 @@ function initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, } /** -* gets object metadata and deletes object -* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info -* @param {string} canonicalID - canonicalId of requester -* @param {object} request - http request -* @param {string} bucketName - bucketName -* @param {BucketInfo} bucket - bucket -* @param {boolean} quietSetting - true if xml should just include error list -* and false if should include deleted list and error list -* @param {object []} errorResults - list of error result objects with each -* object containing -- key: objectName, error: arsenal error -* @param {string []} inPlay - list of object keys still in play -* @param {object} log - logger object -* @param {function} next - callback to next step in waterfall -* @return {undefined} -* @callback called with (err, quietSetting, errorResults, numOfObjects, -* successfullyDeleted, totalContentLengthDeleted) -*/ -function getObjMetadataAndDelete(authInfo, canonicalID, request, - bucketName, bucket, quietSetting, errorResults, inPlay, log, next) { + * gets object metadata and deletes object + * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info + * @param {string} canonicalID - canonicalId of requester + * @param {object} request - http request + * @param {string} bucketName - bucketName + * @param {BucketInfo} bucket - bucket + * @param {boolean} quietSetting - true if xml should just include error list + * and false if should include deleted list and error list + * @param {object []} errorResults - list of error result objects with each + * object containing -- key: objectName, error: arsenal error + * @param {string []} inPlay - list of object keys still in play + * @param {object} log - logger object + * @param {function} next - callback to next step in waterfall + * @return {undefined} + * @callback called with (err, quietSetting, errorResults, numOfObjects, + * successfullyDeleted, totalContentLengthDeleted) + */ +function getObjMetadataAndDelete( + authInfo, + canonicalID, + request, + bucketName, + bucket, + quietSetting, + errorResults, + inPlay, + log, + next +) { const successfullyDeleted = []; let totalContentLengthDeleted = 0; let numOfObjectsRemoved = 0; @@ -257,200 +256,279 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request, const objectLockedError = new Error('object locked'); let deleteFromStorage = []; - return async.waterfall([ - callback => initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback), - (cache, callback) => async.forEachLimit(inPlay, config.multiObjectDeleteConcurrency, (entry, moveOn) => { - async.waterfall([ - callback => callback(...decodeObjectVersion(entry, bucketName)), - // for obj deletes, no need to check acl's at object level - // (authority is at the bucket level for obj deletes) - (versionId, callback) => metadataUtils.metadataGetObject(bucketName, entry.key, - versionId, cache, log, (err, objMD) => callback(err, objMD, versionId)), - (objMD, versionId, callback) => { - if (!objMD) { - const verCfg = bucket.getVersioningConfiguration(); - // To adhere to AWS behavior, create a delete marker - // if trying to delete an object that does not exist - // when versioning has been configured - if (verCfg && !entry.versionId) { - log.debug('trying to delete specific version ' + - 'that 
does not exist'); - return callback(null, objMD, versionId); - } - // otherwise if particular key does not exist, AWS - // returns success for key so add to successfullyDeleted - // list and move on - successfullyDeleted.push({ entry }); - return callback(skipError); - } - if (versionId && objMD.location && - Array.isArray(objMD.location) && objMD.location[0]) { - // we need this information for data deletes to AWS - // eslint-disable-next-line no-param-reassign - objMD.location[0].deleteVersion = true; - } - return callback(null, objMD, versionId); - }, - (objMD, versionId, callback) => { - // AWS only returns an object lock error if a version id - // is specified, else continue to create a delete marker - if (!versionId || !bucket.isObjectLockEnabled()) { - return callback(null, null, objMD, versionId); - } - const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers); - if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) { - return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => { - if (error && error.is.AccessDenied) { - log.debug('user does not have BypassGovernanceRetention and object is locked', - { error }); - return callback(objectLockedError); - } - if (error) { - return callback(error); - } - return callback(null, hasGovernanceBypass, objMD, versionId); - }); - } - return callback(null, hasGovernanceBypass, objMD, versionId); - }, - (hasGovernanceBypass, objMD, versionId, callback) => { - // AWS only returns an object lock error if a version id - // is specified, else continue to create a delete marker - if (!versionId || !bucket.isObjectLockEnabled()) { - return callback(null, objMD, versionId); - } - const objLockInfo = new ObjectLockInfo({ - mode: objMD.retentionMode, - date: objMD.retentionDate, - legalHold: objMD.legalHold || false, - }); + return async.waterfall( + [ + callback => initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback), + (cache, callback) => + async.forEachLimit( + inPlay, + config.multiObjectDeleteConcurrency, + (entry, moveOn) => { + async.waterfall( + [ + callback => callback(...decodeObjectVersion(entry, bucketName)), + // for obj deletes, no need to check acl's at object level + // (authority is at the bucket level for obj deletes) + (versionId, callback) => + metadataUtils.metadataGetObject( + bucketName, + entry.key, + versionId, + cache, + log, + (err, objMD) => callback(err, objMD, versionId) + ), + (objMD, versionId, callback) => { + if (!objMD) { + const verCfg = bucket.getVersioningConfiguration(); + // To adhere to AWS behavior, create a delete marker + // if trying to delete an object that does not exist + // when versioning has been configured + if (verCfg && !entry.versionId) { + log.debug('trying to delete specific version ' + 'that does not exist'); + return callback(null, objMD, versionId); + } + // otherwise if particular key does not exist, AWS + // returns success for key so add to successfullyDeleted + // list and move on + successfullyDeleted.push({ entry }); + return callback(skipError); + } + if ( + versionId && + objMD.location && + Array.isArray(objMD.location) && + objMD.location[0] + ) { + // we need this information for data deletes to AWS + // eslint-disable-next-line no-param-reassign + objMD.location[0].deleteVersion = true; + } + return callback(null, objMD, versionId); + }, + (objMD, versionId, callback) => { + // AWS only returns an object lock error if a version id + // is specified, else continue to create a delete marker + if (!versionId 
|| !bucket.isObjectLockEnabled()) { + return callback(null, null, objMD, versionId); + } + const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers); + if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) { + return checkUserGovernanceBypass( + request, + authInfo, + bucket, + entry.key, + log, + error => { + if (error && error.is.AccessDenied) { + log.debug( + 'user does not have BypassGovernanceRetention and object is locked', + { error } + ); + return callback(objectLockedError); + } + if (error) { + return callback(error); + } + return callback(null, hasGovernanceBypass, objMD, versionId); + } + ); + } + return callback(null, hasGovernanceBypass, objMD, versionId); + }, + (hasGovernanceBypass, objMD, versionId, callback) => { + // AWS only returns an object lock error if a version id + // is specified, else continue to create a delete marker + if (!versionId || !bucket.isObjectLockEnabled()) { + return callback(null, objMD, versionId); + } + const objLockInfo = new ObjectLockInfo({ + mode: objMD.retentionMode, + date: objMD.retentionDate, + legalHold: objMD.legalHold || false, + }); - // If the object can not be deleted raise an error - if (!objLockInfo.canModifyObject(hasGovernanceBypass)) { - log.debug('trying to delete locked object'); - return callback(objectLockedError); - } + // If the object can not be deleted raise an error + if (!objLockInfo.canModifyObject(hasGovernanceBypass)) { + log.debug('trying to delete locked object'); + return callback(objectLockedError); + } - return callback(null, objMD, versionId); - }, - (objMD, versionId, callback) => { - const bytes = processBytesToWrite('objectDelete', bucket, versionId, 0, objMD); - return validateQuotas(request, bucket, request.accountQuotas, ['objectDelete'], - 'objectDelete', bytes, false, log, err => callback(err, objMD, versionId)); - }, - (objMD, versionId, callback) => { - const options = preprocessingVersioningDelete( - bucketName, bucket, objMD, versionId, config.nullVersionCompatMode); - const deleteInfo = {}; - if (options && options.deleteData) { - options.overheadField = overheadField; - deleteInfo.deleted = true; - if (!_deleteRequiresOplogUpdate(objMD, bucket)) { - options.doesNotNeedOpogUpdate = true; - } - if (objMD.uploadId) { - options.replayId = objMD.uploadId; - } - return services.deleteObject(bucketName, objMD, - entry.key, options, config.multiObjectDeleteEnableOptimizations, log, - 's3:ObjectRemoved:Delete', (err, toDelete) => { - if (err) { - return callback(err); + return callback(null, objMD, versionId); + }, + (objMD, versionId, callback) => { + const bytes = processBytesToWrite('objectDelete', bucket, versionId, 0, objMD); + return validateQuotas( + request, + bucket, + request.accountQuotas, + ['objectDelete'], + 'objectDelete', + bytes, + false, + log, + err => callback(err, objMD, versionId) + ); + }, + (objMD, versionId, callback) => { + const options = preprocessingVersioningDelete( + bucketName, + bucket, + objMD, + versionId, + config.nullVersionCompatMode + ); + const deleteInfo = {}; + if (options && options.deleteData) { + options.overheadField = overheadField; + deleteInfo.deleted = true; + if (!_deleteRequiresOplogUpdate(objMD, bucket)) { + options.doesNotNeedOpogUpdate = true; + } + if (objMD.uploadId) { + options.replayId = objMD.uploadId; + } + return services.deleteObject( + bucketName, + objMD, + entry.key, + options, + config.multiObjectDeleteEnableOptimizations, + log, + 's3:ObjectRemoved:Delete', + (err, toDelete) => { + if (err) { + return 
callback(err); + } + if (toDelete) { + deleteFromStorage = deleteFromStorage.concat(toDelete); + } + return callback(null, objMD, deleteInfo); + } + ); + } + deleteInfo.newDeleteMarker = true; + // This call will create a delete-marker + return createAndStoreObject( + bucketName, + bucket, + entry.key, + objMD, + authInfo, + canonicalID, + null, + request, + deleteInfo.newDeleteMarker, + null, + overheadField, + log, + 's3:ObjectRemoved:DeleteMarkerCreated', + (err, result) => callback(err, objMD, deleteInfo, result.versionId) + ); + }, + ], + (err, objMD, deleteInfo, versionId) => { + if (err === skipError) { + return moveOn(); + } else if (err === objectLockedError) { + errorResults.push({ entry, error: errors.AccessDenied, objectLocked: true }); + return moveOn(); + } else if (err) { + log.error('error deleting object', { error: err, entry }); + errorResults.push({ entry, error: err }); + return moveOn(); } - if (toDelete) { - deleteFromStorage = deleteFromStorage.concat(toDelete); + if (deleteInfo.deleted && objMD['content-length']) { + numOfObjectsRemoved++; + totalContentLengthDeleted += objMD['content-length']; } - return callback(null, objMD, deleteInfo); - }); - } - deleteInfo.newDeleteMarker = true; - // This call will create a delete-marker - return createAndStoreObject(bucketName, bucket, entry.key, - objMD, authInfo, canonicalID, null, request, - deleteInfo.newDeleteMarker, null, overheadField, log, - 's3:ObjectRemoved:DeleteMarkerCreated', (err, result) => - callback(err, objMD, deleteInfo, result.versionId)); - }, - ], (err, objMD, deleteInfo, versionId) => { - if (err === skipError) { - return moveOn(); - } else if (err === objectLockedError) { - errorResults.push({ entry, error: errors.AccessDenied, objectLocked: true }); - return moveOn(); - } else if (err) { - log.error('error deleting object', { error: err, entry }); - errorResults.push({ entry, error: err }); - return moveOn(); - } - if (deleteInfo.deleted && objMD['content-length']) { - numOfObjectsRemoved++; - totalContentLengthDeleted += objMD['content-length']; - } - let isDeleteMarker; - let deleteMarkerVersionId; - // - If trying to delete an object that does not exist (if a new - // delete marker was created) - // - Or if an object exists but no version was specified - // return DeleteMarkerVersionId equals the versionID of the marker - // you just generated and DeleteMarker tag equals true - if (deleteInfo.newDeleteMarker) { - isDeleteMarker = true; - deleteMarkerVersionId = versionIdUtils.encode(versionId); - // In this case we are putting a new object (i.e., the delete - // marker), so we decrement the numOfObjectsRemoved value. - numOfObjectsRemoved--; - // If trying to delete a delete marker, DeleteMarkerVersionId equals - // deleteMarker's versionID and DeleteMarker equals true - } else if (objMD && objMD.isDeleteMarker) { - isDeleteMarker = true; - deleteMarkerVersionId = entry.versionId; - } - successfullyDeleted.push({ - entry, isDeleteMarker, - deleteMarkerVersionId, - }); - return moveOn(); - }); - }, - // end of forEach func - err => { - // Batch delete all objects - const onDone = () => callback(err, quietSetting, errorResults, numOfObjectsRemoved, - successfullyDeleted, totalContentLengthDeleted, bucket); - - if (err && deleteFromStorage.length === 0) { - log.trace('no objects to delete from data backend'); - return onDone(); - } - // If error but we have objects in the list, delete them to ensure - // consistent state. 
- log.trace('deleting objects from data backend'); + let isDeleteMarker; + let deleteMarkerVersionId; + // - If trying to delete an object that does not exist (if a new + // delete marker was created) + // - Or if an object exists but no version was specified + // return DeleteMarkerVersionId equals the versionID of the marker + // you just generated and DeleteMarker tag equals true + if (deleteInfo.newDeleteMarker) { + isDeleteMarker = true; + deleteMarkerVersionId = versionIdUtils.encode(versionId); + // In this case we are putting a new object (i.e., the delete + // marker), so we decrement the numOfObjectsRemoved value. + numOfObjectsRemoved--; + // If trying to delete a delete marker, DeleteMarkerVersionId equals + // deleteMarker's versionID and DeleteMarker equals true + } else if (objMD && objMD.isDeleteMarker) { + isDeleteMarker = true; + deleteMarkerVersionId = entry.versionId; + } + successfullyDeleted.push({ + entry, + isDeleteMarker, + deleteMarkerVersionId, + }); + return moveOn(); + } + ); + }, + // end of forEach func + err => { + // Batch delete all objects + const onDone = () => + callback( + err, + quietSetting, + errorResults, + numOfObjectsRemoved, + successfullyDeleted, + totalContentLengthDeleted, + bucket + ); - // Split the array into chunks - const chunks = []; - while (deleteFromStorage.length > 0) { - chunks.push(deleteFromStorage.splice(0, config.multiObjectDeleteConcurrency)); - } + if (err && deleteFromStorage.length === 0) { + log.trace('no objects to delete from data backend'); + return onDone(); + } + // If error but we have objects in the list, delete them to ensure + // consistent state. + log.trace('deleting objects from data backend'); - return async.each(chunks, (chunk, done) => data.batchDelete(chunk, null, null, - logger.newRequestLoggerFromSerializedUids(log.getSerializedUids()), done), - err => { - if (err) { - log.error('error deleting objects from data backend', { error: err }); - return onDone(err); + // Split the array into chunks + const chunks = []; + while (deleteFromStorage.length > 0) { + chunks.push(deleteFromStorage.splice(0, config.multiObjectDeleteConcurrency)); } - return onDone(); - }); - }), - ], (err, ...results) => { - // if general error from metadata return error - if (err) { - monitoring.promMetrics('DELETE', bucketName, err.code, - 'multiObjectDelete'); - return next(err); + + return async.each( + chunks, + (chunk, done) => + data.batchDelete( + chunk, + null, + null, + logger.newRequestLoggerFromSerializedUids(log.getSerializedUids()), + done + ), + err => { + if (err) { + log.error('error deleting objects from data backend', { error: err }); + return onDone(err); + } + return onDone(); + } + ); + } + ), + ], + (err, ...results) => { + // if general error from metadata return error + if (err) { + monitoring.promMetrics('DELETE', bucketName, err.code, 'multiObjectDelete'); + return next(err); + } + return next(null, ...results); } - return next(null, ...results); - }); + ); } /** @@ -470,15 +548,12 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request, function multiObjectDelete(authInfo, request, log, callback) { log.debug('processing request', { method: 'multiObjectDelete' }); if (!request.post) { - monitoring.promMetrics('DELETE', request.bucketName, 400, - 'multiObjectDelete'); + monitoring.promMetrics('DELETE', request.bucketName, 400, 'multiObjectDelete'); return callback(errors.MissingRequestBodyError); } - const md5 = crypto.createHash('md5') - .update(request.post, 'utf8').digest('base64'); + const md5 = 
crypto.createHash('md5').update(request.post, 'utf8').digest('base64'); if (md5 !== request.headers['content-md5']) { - monitoring.promMetrics('DELETE', request.bucketName, 400, - 'multiObjectDelete'); + monitoring.promMetrics('DELETE', request.bucketName, 400, 'multiObjectDelete'); return callback(errors.BadDigest); } @@ -488,216 +563,240 @@ function multiObjectDelete(authInfo, request, log, callback) { const ip = requestUtils.getClientIp(request, config); const isSecure = requestUtils.getHttpProtocolSecurity(request, config); - return async.waterfall([ - function parseXML(next) { - return _parseXml(request.post, - (err, quietSetting, objects) => { + return async.waterfall( + [ + function parseXML(next) { + return _parseXml(request.post, (err, quietSetting, objects) => { if (err || objects.length < 1 || objects.length > 1000) { return next(errors.MalformedXML); } return next(null, quietSetting, objects); }); - }, - function checkBucketMetadata(quietSetting, objects, next) { - const errorResults = []; - return metadata.getBucket(bucketName, log, (err, bucketMD) => { - if (err) { - log.trace('error retrieving bucket metadata', - { error: err }); - return next(err); - } - // check whether bucket has transient or deleted flag - if (bucketShield(bucketMD, 'objectDelete')) { - return next(errors.NoSuchBucket); - } - // The implicit deny flag is ignored in the DeleteObjects API, as authorization only - // affects the objects. - if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) { - log.trace("access denied due to bucket acl's"); - // if access denied at the bucket level, no access for - // any of the objects so all results will be error results - objects.forEach(entry => { - errorResults.push({ - entry, - error: errors.AccessDenied, - }); - }); - // by sending an empty array as the objects array - // async.forEachLimit below will not actually - // make any calls to metadata or data but will continue on - // to the next step to build xml - return next(null, quietSetting, errorResults, [], bucketMD); - } - return next(null, quietSetting, errorResults, objects, bucketMD); - }); - }, - function checkPolicies(quietSetting, errorResults, objects, bucketMD, next) { - // track keys that are still on track to be deleted - const inPlay = []; - // if request from account, no need to check policies - // all objects are inPlay so send array of object keys - // as inPlay argument - if (!isRequesterNonAccountUser(authInfo)) { - return next(null, quietSetting, errorResults, objects, bucketMD); - } - - // TODO: once arsenal's extractParams is separated from doAuth - // function, refactor so only extract once and send - // params on to this api - const authParams = auth.server.extractParams(request, log, - 's3', request.query); - const requestContextParams = { - constantParams: { - headers: request.headers, - query: request.query, - generalResource: request.bucketName, - requesterIp: ip, - sslEnabled: isSecure, - apiMethod: 'objectDelete', - awsService: 's3', - locationConstraint: null, - requesterInfo: authInfo, - signatureVersion: authParams.params.data.authType, - authType: authParams.params.data.signatureVersion, - signatureAge: authParams.params.data.signatureAge, - }, - parameterize: { - // eslint-disable-next-line - specificResource: objects.map(entry => { - return { - key: entry.key, - versionId: entry.versionId, - }; - }), - }, - }; - return vault.checkPolicies(requestContextParams, authInfo.getArn(), - log, (err, authorizationResults) => { - // there were no policies 
so received a blanket AccessDenied - if (err?.is?.AccessDenied) { - objects.forEach(entry => { - errorResults.push({ - entry, - error: errors.AccessDenied }); - }); - // send empty array for inPlay - return next(null, quietSetting, errorResults, [], bucketMD); - } + }, + function checkBucketMetadata(quietSetting, objects, next) { + const errorResults = []; + return metadata.getBucket(bucketName, log, (err, bucketMD) => { if (err) { - log.trace('error checking policies', { - error: err, - method: 'multiObjectDelete.checkPolicies', - }); + log.trace('error retrieving bucket metadata', { error: err }); return next(err); } - if (objects.length !== authorizationResults.length) { - log.error('vault did not return correct number of ' + - 'authorization results', { - authorizationResultsLength: - authorizationResults.length, - objectsLength: objects.length, - }); - return next(errors.InternalError); + // check whether bucket has transient or deleted flag + if (bucketShield(bucketMD, 'objectDelete')) { + return next(errors.NoSuchBucket); } - // Convert authorization results into an easier to handle format - const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => { - const apiMethod = authorizationResults[idx].action; - // eslint-disable-next-line no-param-reassign - acc[apiMethod] = curr.isImplicit; - return acc; - }, {}); - for (let i = 0; i < authorizationResults.length; i++) { - const result = authorizationResults[i]; - // result is { isAllowed: true, - // arn: arn:aws:s3:::bucket/object, - // versionId: sampleversionId } unless not allowed - // in which case no isAllowed key will be present - const slashIndex = result.arn.indexOf('/'); - if (slashIndex === -1) { - log.error('wrong arn format from vault'); - return next(errors.InternalError); - } - const entry = { - key: result.arn.slice(slashIndex + 1), - versionId: result.versionId, - }; - // Deny immediately if there is an explicit deny - if (!result.isImplicit && !result.isAllowed) { + // The implicit deny flag is ignored in the DeleteObjects API, as authorization only + // affects the objects. 
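For context, a minimal standalone sketch of the short-circuit implemented just below (hypothetical keys, and a plain string standing in for arsenal's errors.AccessDenied so the snippet runs on its own): when the bucket-level check fails, every requested object is mapped to an error result and an empty inPlay array is passed on, so the later metadata/data steps have nothing to iterate while the response XML is still built.

// Standalone sketch -- hypothetical data, not the production helper.
const objects = [{ key: 'a.txt' }, { key: 'b.txt', versionId: 'v1' }];
const errorResults = objects.map(entry => ({ entry, error: 'AccessDenied' }));
const inPlay = []; // nothing left to delete; later steps only build the XML
console.log(errorResults.length, inPlay.length); // 2 0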
+ if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) { + log.trace("access denied due to bucket acl's"); + // if access denied at the bucket level, no access for + // any of the objects so all results will be error results + objects.forEach(entry => { errorResults.push({ entry, error: errors.AccessDenied, }); - continue; + }); + // by sending an empty array as the objects array + // async.forEachLimit below will not actually + // make any calls to metadata or data but will continue on + // to the next step to build xml + return next(null, quietSetting, errorResults, [], bucketMD); + } + return next(null, quietSetting, errorResults, objects, bucketMD); + }); + }, + function checkPolicies(quietSetting, errorResults, objects, bucketMD, next) { + // track keys that are still on track to be deleted + const inPlay = []; + // if request from account, no need to check policies + // all objects are inPlay so send array of object keys + // as inPlay argument + if (!isRequesterNonAccountUser(authInfo)) { + return next(null, quietSetting, errorResults, objects, bucketMD); + } + + // TODO: once arsenal's extractParams is separated from doAuth + // function, refactor so only extract once and send + // params on to this api + const authParams = auth.server.extractParams(request, log, 's3', request.query); + const requestContextParams = { + constantParams: { + headers: request.headers, + query: request.query, + generalResource: request.bucketName, + requesterIp: ip, + sslEnabled: isSecure, + apiMethod: 'objectDelete', + awsService: 's3', + locationConstraint: null, + requesterInfo: authInfo, + signatureVersion: authParams.params.data.authType, + authType: authParams.params.data.signatureVersion, + signatureAge: authParams.params.data.signatureAge, + }, + parameterize: { + // eslint-disable-next-line + specificResource: objects.map(entry => { + return { + key: entry.key, + versionId: entry.versionId, + }; + }), + }, + }; + return vault.checkPolicies( + requestContextParams, + authInfo.getArn(), + log, + (err, authorizationResults) => { + // there were no policies so received a blanket AccessDenied + if (err?.is?.AccessDenied) { + objects.forEach(entry => { + errorResults.push({ + entry, + error: errors.AccessDenied, + }); + }); + // send empty array for inPlay + return next(null, quietSetting, errorResults, [], bucketMD); + } + if (err) { + log.trace('error checking policies', { + error: err, + method: 'multiObjectDelete.checkPolicies', + }); + return next(err); + } + if (objects.length !== authorizationResults.length) { + log.error('vault did not return correct number of ' + 'authorization results', { + authorizationResultsLength: authorizationResults.length, + objectsLength: objects.length, + }); + return next(errors.InternalError); } + // Convert authorization results into an easier to handle format + const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => { + const apiMethod = authorizationResults[idx].action; + // eslint-disable-next-line no-param-reassign + acc[apiMethod] = curr.isImplicit; + return acc; + }, {}); + for (let i = 0; i < authorizationResults.length; i++) { + const result = authorizationResults[i]; + // result is { isAllowed: true, + // arn: arn:aws:s3:::bucket/object, + // versionId: sampleversionId } unless not allowed + // in which case no isAllowed key will be present + const slashIndex = result.arn.indexOf('/'); + if (slashIndex === -1) { + log.error('wrong arn format from vault'); + return next(errors.InternalError); + } + 
const entry = { + key: result.arn.slice(slashIndex + 1), + versionId: result.versionId, + }; + // Deny immediately if there is an explicit deny + if (!result.isImplicit && !result.isAllowed) { + errorResults.push({ + entry, + error: errors.AccessDenied, + }); + continue; + } - // Evaluate against the bucket policies - const areAllActionsAllowed = evaluateBucketPolicyWithIAM( - bucketMD, - Object.keys(actionImplicitDenies), - canonicalID, - authInfo, - actionImplicitDenies, - log, - request); + // Evaluate against the bucket policies + const areAllActionsAllowed = evaluateBucketPolicyWithIAM( + bucketMD, + Object.keys(actionImplicitDenies), + canonicalID, + authInfo, + actionImplicitDenies, + log, + request + ); - if (areAllActionsAllowed) { - if (validObjectKeys.includes(entry.key)) { - inPlayInternal.push(entry.key); + if (areAllActionsAllowed) { + if (validObjectKeys.includes(entry.key)) { + inPlayInternal.push(entry.key); + } else { + inPlay.push(entry); + } } else { - inPlay.push(entry); + errorResults.push({ + entry, + error: errors.AccessDenied, + }); } - } else { - errorResults.push({ - entry, - error: errors.AccessDenied, - }); } + return next(null, quietSetting, errorResults, inPlay, bucketMD); } - return next(null, quietSetting, errorResults, inPlay, bucketMD); - }); - }, - function handleInternalFiles(quietSetting, errorResults, inPlay, bucketMD, next) { - return async.each(inPlayInternal, - (localInPlay, next) => deleteVeeamCapabilities(bucketName, localInPlay, bucketMD, log, next), - err => next(err, quietSetting, errorResults, inPlay, bucketMD)); - }, - function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay, - bucket, next) { - return getObjMetadataAndDelete(authInfo, canonicalID, request, - bucketName, bucket, quietSetting, errorResults, inPlay, - log, next); - }, - ], (err, quietSetting, errorResults, numOfObjectsRemoved, - successfullyDeleted, totalContentLengthDeleted, bucket) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - monitoring.promMetrics('DELETE', bucketName, err.code, - 'multiObjectDelete'); - return callback(err, null, corsHeaders); + ); + }, + function handleInternalFiles(quietSetting, errorResults, inPlay, bucketMD, next) { + return async.each( + inPlayInternal, + (localInPlay, next) => deleteVeeamCapabilities(bucketName, localInPlay, bucketMD, log, next), + err => next(err, quietSetting, errorResults, inPlay, bucketMD) + ); + }, + function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay, bucket, next) { + return getObjMetadataAndDelete( + authInfo, + canonicalID, + request, + bucketName, + bucket, + quietSetting, + errorResults, + inPlay, + log, + next + ); + }, + ], + ( + err, + quietSetting, + errorResults, + numOfObjectsRemoved, + successfullyDeleted, + totalContentLengthDeleted, + bucket + ) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + monitoring.promMetrics('DELETE', bucketName, err.code, 'multiObjectDelete'); + return callback(err, null, corsHeaders); + } + const xml = _formatXML(quietSetting, errorResults, successfullyDeleted); + const deletedKeys = successfullyDeleted.map(item => item.key); + const removedDeleteMarkers = successfullyDeleted.filter( + item => item.isDeleteMarker && item.entry && item.entry.versionId + ).length; + pushMetric('multiObjectDelete', log, { + authInfo, + canonicalID: bucket ? 
bucket.getOwner() : '', + bucket: bucketName, + keys: deletedKeys, + byteLength: Number.parseInt(totalContentLengthDeleted, 10), + numberOfObjects: numOfObjectsRemoved, + removedDeleteMarkers, + isDelete: true, + }); + monitoring.promMetrics( + 'DELETE', + bucketName, + '200', + 'multiObjectDelete', + Number.parseInt(totalContentLengthDeleted, 10), + null, + null, + numOfObjectsRemoved + ); + return callback(null, xml, corsHeaders); } - const xml = _formatXML(quietSetting, errorResults, - successfullyDeleted); - const deletedKeys = successfullyDeleted.map(item => item.key); - const removedDeleteMarkers = successfullyDeleted - .filter(item => item.isDeleteMarker && item.entry && item.entry.versionId) - .length; - pushMetric('multiObjectDelete', log, { - authInfo, - canonicalID: bucket ? bucket.getOwner() : '', - bucket: bucketName, - keys: deletedKeys, - byteLength: Number.parseInt(totalContentLengthDeleted, 10), - numberOfObjects: numOfObjectsRemoved, - removedDeleteMarkers, - isDelete: true, - }); - monitoring.promMetrics('DELETE', bucketName, '200', - 'multiObjectDelete', - Number.parseInt(totalContentLengthDeleted, 10), null, null, - numOfObjectsRemoved); - return callback(null, xml, corsHeaders); - }); + ); } module.exports = { diff --git a/lib/api/multipartDelete.js b/lib/api/multipartDelete.js index 5580573492..bd86e6d123 100644 --- a/lib/api/multipartDelete.js +++ b/lib/api/multipartDelete.js @@ -22,12 +22,15 @@ function multipartDelete(authInfo, request, log, callback) { const objectKey = request.objectKey; const uploadId = request.query.uploadId; - abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, + abortMultipartUpload( + authInfo, + bucketName, + objectKey, + uploadId, + log, (err, destinationBucket, partSizeSum) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, destinationBucket); - const location = destinationBucket ? - destinationBucket.getLocationConstraint() : null; + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); + const location = destinationBucket ? 
destinationBucket.getLocationConstraint() : null; if (err && !err?.is?.NoSuchUpload) { return callback(err, corsHeaders); } @@ -36,12 +39,10 @@ function multipartDelete(authInfo, request, log, callback) { method: 'multipartDelete', uploadId, }); - monitoring.promMetrics('DELETE', bucketName, 400, - 'abortMultipartUpload'); + monitoring.promMetrics('DELETE', bucketName, 400, 'abortMultipartUpload'); return callback(err, corsHeaders); } - monitoring.promMetrics('DELETE', bucketName, 400, - 'abortMultipartUpload'); + monitoring.promMetrics('DELETE', bucketName, 400, 'abortMultipartUpload'); if (!err) { pushMetric('abortMultipartUpload', log, { authInfo, @@ -57,7 +58,9 @@ function multipartDelete(authInfo, request, log, callback) { }); } return callback(null, corsHeaders); - }, request); + }, + request + ); } module.exports = multipartDelete; diff --git a/lib/api/objectCopy.js b/lib/api/objectCopy.js index 66e98db1c4..af2d9640ec 100644 --- a/lib/api/objectCopy.js +++ b/lib/api/objectCopy.js @@ -6,18 +6,15 @@ const validateHeaders = s3middleware.validateConditionalHeaders; const constants = require('../../constants'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); -const locationConstraintCheck - = require('./apiUtils/object/locationConstraintCheck'); -const { checkQueryVersionId, versioningPreprocessing, decodeVID } - = require('./apiUtils/object/versioning'); +const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck'); +const { checkQueryVersionId, versioningPreprocessing, decodeVID } = require('./apiUtils/object/versioning'); const getReplicationInfo = require('./apiUtils/object/getReplicationInfo'); const { data } = require('../data/wrapper'); const services = require('../services'); const { pushMetric } = require('../utapi/utilities'); const removeAWSChunked = require('./apiUtils/object/removeAWSChunked'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); -const validateWebsiteHeader = require('./apiUtils/object/websiteServing') - .validateWebsiteHeader; +const validateWebsiteHeader = require('./apiUtils/object/websiteServing').validateWebsiteHeader; const { config } = require('../Config'); const monitoring = require('../utilities/monitoringHandler'); const applyZenkoUserMD = require('./apiUtils/object/applyZenkoUserMD'); @@ -30,8 +27,8 @@ const { updateEncryption } = require('./apiUtils/bucket/updateEncryption'); const versionIdUtils = versioning.VersionID; const locationHeader = constants.objectLocationConstraintHeader; const versioningNotImplBackends = constants.versioningNotImplBackends; -const externalVersioningErrorMessage = 'We do not currently support putting ' + -'a versioned object to a location-constraint of type AWS or Azure or GCP.'; +const externalVersioningErrorMessage = + 'We do not currently support putting ' + 'a versioned object to a location-constraint of type AWS or Azure or GCP.'; /** * Preps metadata to be saved (based on copy or replace request header) @@ -51,8 +48,18 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' + * - sourceLocationConstraintName {string} - location type of the source * - OR error */ -function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination, - authInfo, objectKey, sourceBucketMD, destBucketMD, sourceVersionId, log) { +function _prepMetadata( + request, + sourceObjMD, + headers, + sourceIsDestination, + authInfo, + objectKey, + sourceBucketMD, + destBucketMD, + sourceVersionId, + log +) { let whichMetadata = 
headers['x-amz-metadata-directive']; // Default is COPY whichMetadata = whichMetadata === undefined ? 'COPY' : whichMetadata; @@ -63,45 +70,47 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination, // Default is COPY whichTagging = whichTagging === undefined ? 'COPY' : whichTagging; if (whichTagging !== 'COPY' && whichTagging !== 'REPLACE') { - return { error: errorInstances.InvalidArgument - .customizeDescription('Unknown tagging directive') }; + return { error: errorInstances.InvalidArgument.customizeDescription('Unknown tagging directive') }; } const overrideMetadata = {}; if (headers['x-amz-server-side-encryption']) { - overrideMetadata['x-amz-server-side-encryption'] = - headers['x-amz-server-side-encryption']; + overrideMetadata['x-amz-server-side-encryption'] = headers['x-amz-server-side-encryption']; } - if (headers['x-amz-storage-class']) { // TODO: remove in CLDSRV-639 - overrideMetadata['x-amz-storage-class'] = - headers['x-amz-storage-class']; + if (headers['x-amz-storage-class']) { + // TODO: remove in CLDSRV-639 + overrideMetadata['x-amz-storage-class'] = headers['x-amz-storage-class']; } if (headers['x-amz-website-redirect-location']) { - overrideMetadata['x-amz-website-redirect-location'] = - headers['x-amz-website-redirect-location']; + overrideMetadata['x-amz-website-redirect-location'] = headers['x-amz-website-redirect-location']; } - const retentionHeaders = headers['x-amz-object-lock-mode'] - && headers['x-amz-object-lock-retain-until-date']; + const retentionHeaders = headers['x-amz-object-lock-mode'] && headers['x-amz-object-lock-retain-until-date']; const legalHoldHeader = headers['x-amz-object-lock-legal-hold']; - if ((retentionHeaders || legalHoldHeader) - && !destBucketMD.isObjectLockEnabled()) { - return { error: errorInstances.InvalidRequest.customizeDescription( - 'Bucket is missing ObjectLockConfiguration') }; + if ((retentionHeaders || legalHoldHeader) && !destBucketMD.isObjectLockEnabled()) { + return { + error: errorInstances.InvalidRequest.customizeDescription('Bucket is missing ObjectLockConfiguration'), + }; } // Cannot copy from same source and destination if no MD // changed and no source version id - if (sourceIsDestination && whichMetadata === 'COPY' && - Object.keys(overrideMetadata).length === 0 && !sourceVersionId) { - return { error: errorInstances.InvalidRequest.customizeDescription('This copy' + - ' request is illegal because it is trying to copy an ' + - 'object to itself without changing the object\'s metadata, ' + - 'storage class, website redirect location or encryption ' + - 'attributes.') }; + if ( + sourceIsDestination && + whichMetadata === 'COPY' && + Object.keys(overrideMetadata).length === 0 && + !sourceVersionId + ) { + return { + error: errorInstances.InvalidRequest.customizeDescription( + 'This copy' + + ' request is illegal because it is trying to copy an ' + + "object to itself without changing the object's metadata, " + + 'storage class, website redirect location or encryption ' + + 'attributes.' + ), + }; } // If COPY, pull all x-amz-meta keys/values from source object // Otherwise, pull all x-amz-meta keys/values from request headers - const userMetadata = whichMetadata === 'COPY' ? - getMetaHeaders(sourceObjMD) : - getMetaHeaders(headers); + const userMetadata = whichMetadata === 'COPY' ? 
getMetaHeaders(sourceObjMD) : getMetaHeaders(headers); if (userMetadata instanceof Error) { log.debug('user metadata validation failed', { error: userMetadata, @@ -115,28 +124,23 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination, // If metadataDirective is: // - 'COPY' and source object has a location constraint in its metadata // we use the bucket destination location constraint - if (whichMetadata === 'COPY' - && userMetadata[locationHeader] - && destBucketMD.getLocationConstraint()) { + if (whichMetadata === 'COPY' && userMetadata[locationHeader] && destBucketMD.getLocationConstraint()) { userMetadata[locationHeader] = destBucketMD.getLocationConstraint(); } - const backendInfoObjSource = locationConstraintCheck(request, - sourceObjMD, sourceBucketMD, log); + const backendInfoObjSource = locationConstraintCheck(request, sourceObjMD, sourceBucketMD, log); if (backendInfoObjSource.err) { return { error: backendInfoObjSource.err }; } const sourceLocationConstraintName = backendInfoObjSource.controllingLC; - const backendInfoObjDest = locationConstraintCheck(request, - userMetadata, destBucketMD, log); + const backendInfoObjDest = locationConstraintCheck(request, userMetadata, destBucketMD, log); if (backendInfoObjDest.err) { return { error: backendInfoObjSource.err }; } const destLocationConstraintName = backendInfoObjDest.controllingLC; // If location constraint header is not included, locations match - const locationMatch = - sourceLocationConstraintName === destLocationConstraintName; + const locationMatch = sourceLocationConstraintName === destLocationConstraintName; // If tagging directive is REPLACE but you don't specify any // tags in the request, the destination object will @@ -153,8 +157,7 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination, // If COPY, pull the necessary headers from source object // Otherwise, pull them from request headers - const headersToStoreSource = whichMetadata === 'COPY' ? - sourceObjMD : headers; + const headersToStoreSource = whichMetadata === 'COPY' ? 
sourceObjMD : headers; const storeMetadataParams = { objectKey, @@ -167,16 +170,14 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination, contentMD5: sourceObjMD['content-md5'], cacheControl: headersToStoreSource['cache-control'], contentDisposition: headersToStoreSource['content-disposition'], - contentEncoding: - removeAWSChunked(headersToStoreSource['content-encoding']), + contentEncoding: removeAWSChunked(headersToStoreSource['content-encoding']), dataStoreName: destLocationConstraintName, expires: headersToStoreSource.expires, overrideMetadata, lastModifiedDate: new Date().toJSON(), tagging, taggingCopy, - replicationInfo: getReplicationInfo(config, - objectKey, destBucketMD, false, sourceObjMD['content-length']), + replicationInfo: getReplicationInfo(config, objectKey, destBucketMD, false, sourceObjMD['content-length']), locationMatch, originOp: 's3:ObjectCreated:Copy', }; @@ -196,8 +197,7 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination, storeMetadataParams.bucketOwnerId = destBucketMD.getOwner(); } - return { storeMetadataParams, sourceLocationConstraintName, - backendInfoDest: backendInfoObjDest.backendInfo }; + return { storeMetadataParams, sourceLocationConstraintName, backendInfoDest: backendInfoObjDest.backendInfo }; } /** @@ -213,13 +213,11 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination, * @param {function} callback - final callback to call with the result * @return {undefined} */ -function objectCopy(authInfo, request, sourceBucket, - sourceObject, sourceVersionId, log, callback) { +function objectCopy(authInfo, request, sourceBucket, sourceObject, sourceVersionId, log, callback) { log.debug('processing request', { method: 'objectCopy' }); const destBucketName = request.bucketName; const destObjectKey = request.objectKey; - const sourceIsDestination = - destBucketName === sourceBucket && destObjectKey === sourceObject; + const sourceIsDestination = destBucketName === sourceBucket && destObjectKey === sourceObject; const valGetParams = { authInfo, bucketName: sourceBucket, @@ -251,398 +249,529 @@ function objectCopy(authInfo, request, sourceBucket, namespace: request.namespace, objectKey: destObjectKey, }; - const websiteRedirectHeader = - request.headers['x-amz-website-redirect-location']; + const websiteRedirectHeader = request.headers['x-amz-website-redirect-location']; const responseHeaders = {}; - if (request.headers['x-amz-storage-class'] && - !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) { + if ( + request.headers['x-amz-storage-class'] && + !constants.validStorageClasses.includes(request.headers['x-amz-storage-class']) + ) { log.trace('invalid storage-class header'); - monitoring.promMetrics('PUT', destBucketName, - errorInstances.InvalidStorageClass.code, 'copyObject'); + monitoring.promMetrics('PUT', destBucketName, errorInstances.InvalidStorageClass.code, 'copyObject'); return callback(errors.InvalidStorageClass); } if (!validateWebsiteHeader(websiteRedirectHeader)) { const err = errors.InvalidRedirectLocation; - log.debug('invalid x-amz-website-redirect-location' + - `value ${websiteRedirectHeader}`, { error: err }); - monitoring.promMetrics( - 'PUT', destBucketName, err.code, 'copyObject'); + log.debug('invalid x-amz-website-redirect-location' + `value ${websiteRedirectHeader}`, { error: err }); + monitoring.promMetrics('PUT', destBucketName, err.code, 'copyObject'); return callback(err); } const queryContainsVersionId = 
checkQueryVersionId(request.query); if (queryContainsVersionId instanceof Error) { return callback(queryContainsVersionId); } - return async.waterfall([ - function checkDestAuth(next) { - return standardMetadataValidateBucketAndObj(valPutParams, request.actionImplicitDenies, log, - (err, destBucketMD, destObjMD) => - updateEncryption(err, destBucketMD, destObjMD, destObjectKey, log, { skipObject: true }, - (err, destBucketMD, destObjMD) => { - if (err) { - log.debug('error validating put part of request', - { error: err }); - return next(err, destBucketMD); - } - const flag = destBucketMD.hasDeletedFlag() - || destBucketMD.hasTransientFlag(); - if (flag) { - log.trace('deleted flag or transient flag ' + - 'on destination bucket', { flag }); - return next(errors.NoSuchBucket); - } - return next(null, destBucketMD, destObjMD); - })); - }, - function checkSourceAuthorization(destBucketMD, destObjMD, next) { - return standardMetadataValidateBucketAndObj({ - ...valGetParams, - destObjMD, - }, request.actionImplicitDenies, log, - (err, sourceBucketMD, sourceObjMD) => { - if (err) { - log.debug('error validating get part of request', - { error: err }); - return next(err, null, destBucketMD); - } - if (!sourceObjMD) { - const err = sourceVersionId ? errors.NoSuchVersion : - errors.NoSuchKey; - log.debug('no source object', { sourceObject }); - return next(err, null, destBucketMD); - } - // check if object data is in a cold storage - const coldErr = verifyColdObjectAvailable(sourceObjMD); - if (coldErr) { - return next(coldErr, null); - } - if (sourceObjMD.isDeleteMarker) { - log.debug('delete marker on source object', - { sourceObject }); - if (sourceVersionId) { - const err = errorInstances.InvalidRequest - .customizeDescription('The source of a copy ' + - 'request may not specifically refer to a delete' + - 'marker by version id.'); - return next(err, destBucketMD); + return async.waterfall( + [ + function checkDestAuth(next) { + return standardMetadataValidateBucketAndObj( + valPutParams, + request.actionImplicitDenies, + log, + (err, destBucketMD, destObjMD) => + updateEncryption( + err, + destBucketMD, + destObjMD, + destObjectKey, + log, + { skipObject: true }, + (err, destBucketMD, destObjMD) => { + if (err) { + log.debug('error validating put part of request', { error: err }); + return next(err, destBucketMD); + } + const flag = destBucketMD.hasDeletedFlag() || destBucketMD.hasTransientFlag(); + if (flag) { + log.trace('deleted flag or transient flag ' + 'on destination bucket', { flag }); + return next(errors.NoSuchBucket); + } + return next(null, destBucketMD, destObjMD); + } + ) + ); + }, + function checkSourceAuthorization(destBucketMD, destObjMD, next) { + return standardMetadataValidateBucketAndObj( + { + ...valGetParams, + destObjMD, + }, + request.actionImplicitDenies, + log, + (err, sourceBucketMD, sourceObjMD) => { + if (err) { + log.debug('error validating get part of request', { error: err }); + return next(err, null, destBucketMD); + } + if (!sourceObjMD) { + const err = sourceVersionId ? 
errors.NoSuchVersion : errors.NoSuchKey; + log.debug('no source object', { sourceObject }); + return next(err, null, destBucketMD); + } + // check if object data is in a cold storage + const coldErr = verifyColdObjectAvailable(sourceObjMD); + if (coldErr) { + return next(coldErr, null); + } + if (sourceObjMD.isDeleteMarker) { + log.debug('delete marker on source object', { sourceObject }); + if (sourceVersionId) { + const err = errorInstances.InvalidRequest.customizeDescription( + 'The source of a copy ' + + 'request may not specifically refer to a delete' + + 'marker by version id.' + ); + return next(err, destBucketMD); + } + // if user specifies a key in a versioned source bucket + // without specifying a version, and the object has + // a delete marker, return NoSuchKey + return next(errors.NoSuchKey, destBucketMD); + } + const headerValResult = validateHeaders( + request.headers, + sourceObjMD['last-modified'], + sourceObjMD['content-md5'] + ); + if (headerValResult.error) { + return next(errors.PreconditionFailed, destBucketMD); + } + const { + storeMetadataParams, + error: metadataError, + sourceLocationConstraintName, + backendInfoDest, + } = _prepMetadata( + request, + sourceObjMD, + request.headers, + sourceIsDestination, + authInfo, + destObjectKey, + sourceBucketMD, + destBucketMD, + sourceVersionId, + log + ); + if (metadataError) { + return next(metadataError, destBucketMD); + } + if (storeMetadataParams.metaHeaders) { + dataStoreContext.metaHeaders = storeMetadataParams.metaHeaders; } - // if user specifies a key in a versioned source bucket - // without specifying a version, and the object has - // a delete marker, return NoSuchKey - return next(errors.NoSuchKey, destBucketMD); - } - const headerValResult = - validateHeaders(request.headers, - sourceObjMD['last-modified'], - sourceObjMD['content-md5']); - if (headerValResult.error) { - return next(errors.PreconditionFailed, destBucketMD); - } - const { storeMetadataParams, error: metadataError, - sourceLocationConstraintName, backendInfoDest } = - _prepMetadata(request, sourceObjMD, request.headers, - sourceIsDestination, authInfo, destObjectKey, - sourceBucketMD, destBucketMD, sourceVersionId, log); - if (metadataError) { - return next(metadataError, destBucketMD); - } - if (storeMetadataParams.metaHeaders) { - dataStoreContext.metaHeaders = - storeMetadataParams.metaHeaders; - } - storeMetadataParams.overheadField = constants.overheadField; + storeMetadataParams.overheadField = constants.overheadField; - let dataLocator; - // If 0 byte object just set dataLocator to empty array - if (!sourceObjMD.location) { - dataLocator = []; - } else { - // To provide for backwards compatibility before - // md-model-version 2, need to handle cases where - // objMD.location is just a string - dataLocator = Array.isArray(sourceObjMD.location) ? - sourceObjMD.location : [{ key: sourceObjMD.location }]; - } + let dataLocator; + // If 0 byte object just set dataLocator to empty array + if (!sourceObjMD.location) { + dataLocator = []; + } else { + // To provide for backwards compatibility before + // md-model-version 2, need to handle cases where + // objMD.location is just a string + dataLocator = Array.isArray(sourceObjMD.location) + ? 
sourceObjMD.location + : [{ key: sourceObjMD.location }]; + } - if (sourceObjMD['x-amz-server-side-encryption']) { - for (let i = 0; i < dataLocator.length; i++) { - dataLocator[i].masterKeyId = sourceObjMD[ - 'x-amz-server-side-encryption-aws-kms-key-id']; - dataLocator[i].algorithm = - sourceObjMD['x-amz-server-side-encryption']; + if (sourceObjMD['x-amz-server-side-encryption']) { + for (let i = 0; i < dataLocator.length; i++) { + dataLocator[i].masterKeyId = sourceObjMD['x-amz-server-side-encryption-aws-kms-key-id']; + dataLocator[i].algorithm = sourceObjMD['x-amz-server-side-encryption']; + } } - } - // If the destination key already exists - if (destObjMD) { - // Re-use creation-time if we can - if (destObjMD['creation-time']) { - storeMetadataParams.creationTime = - destObjMD['creation-time']; - // Otherwise fallback to last-modified + // If the destination key already exists + if (destObjMD) { + // Re-use creation-time if we can + if (destObjMD['creation-time']) { + storeMetadataParams.creationTime = destObjMD['creation-time']; + // Otherwise fallback to last-modified + } else { + storeMetadataParams.creationTime = destObjMD['last-modified']; + } + // If this is a new key, create a new timestamp } else { - storeMetadataParams.creationTime = - destObjMD['last-modified']; + storeMetadataParams.creationTime = new Date().toJSON(); } - // If this is a new key, create a new timestamp - } else { - storeMetadataParams.creationTime = new Date().toJSON(); - } - return next(null, storeMetadataParams, dataLocator, - sourceBucketMD, destBucketMD, destObjMD, - sourceLocationConstraintName, backendInfoDest); - }); - }, - function getSSEConfiguration(storeMetadataParams, dataLocator, sourceBucketMD, - destBucketMD, destObjMD, sourceLocationConstraintName, - backendInfoDest, next) { - getObjectSSEConfiguration( - request.headers, - destBucketMD, - log, - (err, sseConfig) => - next(err, storeMetadataParams, dataLocator, sourceBucketMD, - destBucketMD, destObjMD, sourceLocationConstraintName, - backendInfoDest, sseConfig)); - }, - function goGetData(storeMetadataParams, dataLocator, sourceBucketMD, - destBucketMD, destObjMD, sourceLocationConstraintName, - backendInfoDest, serverSideEncryption, next) { - const vcfg = destBucketMD.getVersioningConfiguration(); - const isVersionedObj = vcfg && vcfg.Status === 'Enabled'; - const destLocationConstraintName = - storeMetadataParams.dataStoreName; - const needsEncryption = serverSideEncryption && !!serverSideEncryption.algo; - // skip if source and dest and location constraint the same and - // versioning is not enabled - // still send along serverSideEncryption info so algo - // and masterKeyId stored properly in metadata - if (sourceIsDestination && storeMetadataParams.locationMatch - && !isVersionedObj && !needsEncryption) { - return next(null, storeMetadataParams, dataLocator, destObjMD, - serverSideEncryption, destBucketMD); - } + return next( + null, + storeMetadataParams, + dataLocator, + sourceBucketMD, + destBucketMD, + destObjMD, + sourceLocationConstraintName, + backendInfoDest + ); + } + ); + }, + function getSSEConfiguration( + storeMetadataParams, + dataLocator, + sourceBucketMD, + destBucketMD, + destObjMD, + sourceLocationConstraintName, + backendInfoDest, + next + ) { + getObjectSSEConfiguration(request.headers, destBucketMD, log, (err, sseConfig) => + next( + err, + storeMetadataParams, + dataLocator, + sourceBucketMD, + destBucketMD, + destObjMD, + sourceLocationConstraintName, + backendInfoDest, + sseConfig + ) + ); + }, + function 
goGetData( + storeMetadataParams, + dataLocator, + sourceBucketMD, + destBucketMD, + destObjMD, + sourceLocationConstraintName, + backendInfoDest, + serverSideEncryption, + next + ) { + const vcfg = destBucketMD.getVersioningConfiguration(); + const isVersionedObj = vcfg && vcfg.Status === 'Enabled'; + const destLocationConstraintName = storeMetadataParams.dataStoreName; + const needsEncryption = serverSideEncryption && !!serverSideEncryption.algo; + // skip if source and dest and location constraint the same and + // versioning is not enabled + // still send along serverSideEncryption info so algo + // and masterKeyId stored properly in metadata + if (sourceIsDestination && storeMetadataParams.locationMatch && !isVersionedObj && !needsEncryption) { + return next(null, storeMetadataParams, dataLocator, destObjMD, serverSideEncryption, destBucketMD); + } - // also skip if 0 byte object, unless location constraint is an - // external backend and differs from source, in which case put - // metadata to backend - let destLocationConstraintType; - if (config.backends.data === 'multiple') { - destLocationConstraintType = - config.getLocationConstraintType(destLocationConstraintName); - } - if (destLocationConstraintType && - versioningNotImplBackends[destLocationConstraintType] - && isVersionedObj) { - log.debug(externalVersioningErrorMessage, - { method: 'multipleBackendGateway', - error: errors.NotImplemented }); - return next(errorInstances.NotImplemented.customizeDescription( - externalVersioningErrorMessage), destBucketMD); - } - if (dataLocator.length === 0) { - if (!storeMetadataParams.locationMatch && - destLocationConstraintType && - constants.externalBackends[destLocationConstraintType]) { - return data.put(null, null, storeMetadataParams.size, - dataStoreContext, backendInfoDest, - log, (error, objectRetrievalInfo) => { - if (error) { - return next(error, destBucketMD); + // also skip if 0 byte object, unless location constraint is an + // external backend and differs from source, in which case put + // metadata to backend + let destLocationConstraintType; + if (config.backends.data === 'multiple') { + destLocationConstraintType = config.getLocationConstraintType(destLocationConstraintName); + } + if ( + destLocationConstraintType && + versioningNotImplBackends[destLocationConstraintType] && + isVersionedObj + ) { + log.debug(externalVersioningErrorMessage, { + method: 'multipleBackendGateway', + error: errors.NotImplemented, + }); + return next( + errorInstances.NotImplemented.customizeDescription(externalVersioningErrorMessage), + destBucketMD + ); + } + if (dataLocator.length === 0) { + if ( + !storeMetadataParams.locationMatch && + destLocationConstraintType && + constants.externalBackends[destLocationConstraintType] + ) { + return data.put( + null, + null, + storeMetadataParams.size, + dataStoreContext, + backendInfoDest, + log, + (error, objectRetrievalInfo) => { + if (error) { + return next(error, destBucketMD); + } + const putResult = { + key: objectRetrievalInfo.key, + dataStoreName: objectRetrievalInfo.dataStoreName, + dataStoreType: objectRetrievalInfo.dataStoreType, + size: storeMetadataParams.size, + }; + const putResultArr = [putResult]; + return next( + null, + storeMetadataParams, + putResultArr, + destObjMD, + serverSideEncryption, + destBucketMD + ); } - const putResult = { - key: objectRetrievalInfo.key, - dataStoreName: objectRetrievalInfo. - dataStoreName, - dataStoreType: objectRetrievalInfo. 
- dataStoreType, - size: storeMetadataParams.size, - }; - const putResultArr = [putResult]; - return next(null, storeMetadataParams, putResultArr, - destObjMD, serverSideEncryption, destBucketMD); - }); + ); + } + return next(null, storeMetadataParams, dataLocator, destObjMD, serverSideEncryption, destBucketMD); } - return next(null, storeMetadataParams, dataLocator, destObjMD, - serverSideEncryption, destBucketMD); - } - const originalIdentityImpDenies = request.actionImplicitDenies; - // eslint-disable-next-line no-param-reassign - delete request.actionImplicitDenies; - return data.copyObject(request, sourceLocationConstraintName, - storeMetadataParams, dataLocator, dataStoreContext, - backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log, - (err, results) => { + const originalIdentityImpDenies = request.actionImplicitDenies; // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = originalIdentityImpDenies; - if (err) { - return next(err, destBucketMD); - } - return next(null, storeMetadataParams, results, - destObjMD, serverSideEncryption, destBucketMD); - }); - }, - function getVersioningInfo(storeMetadataParams, destDataGetInfoArr, - destObjMD, serverSideEncryption, destBucketMD, next) { - if (!destBucketMD.isVersioningEnabled() && destObjMD?.archive?.archiveInfo) { - // Ensure we trigger a "delete" event in the oplog for the previously archived object - // eslint-disable-next-line - storeMetadataParams.needOplogUpdate = 's3:ReplaceArchivedObject'; - } - return versioningPreprocessing(destBucketName, - destBucketMD, destObjectKey, destObjMD, log, - (err, options) => { - if (err) { - log.debug('error processing versioning info', - { error: err }); - return next(err, null, destBucketMD); + delete request.actionImplicitDenies; + return data.copyObject( + request, + sourceLocationConstraintName, + storeMetadataParams, + dataLocator, + dataStoreContext, + backendInfoDest, + sourceBucketMD, + destBucketMD, + serverSideEncryption, + log, + (err, results) => { + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = originalIdentityImpDenies; + if (err) { + return next(err, destBucketMD); + } + return next(null, storeMetadataParams, results, destObjMD, serverSideEncryption, destBucketMD); } + ); + }, + function getVersioningInfo( + storeMetadataParams, + destDataGetInfoArr, + destObjMD, + serverSideEncryption, + destBucketMD, + next + ) { + if (!destBucketMD.isVersioningEnabled() && destObjMD?.archive?.archiveInfo) { + // Ensure we trigger a "delete" event in the oplog for the previously archived object + // eslint-disable-next-line + storeMetadataParams.needOplogUpdate = 's3:ReplaceArchivedObject'; + } + return versioningPreprocessing( + destBucketName, + destBucketMD, + destObjectKey, + destObjMD, + log, + (err, options) => { + if (err) { + log.debug('error processing versioning info', { error: err }); + return next(err, null, destBucketMD); + } - const location = destDataGetInfoArr?.[0]?.dataStoreName; - if (location === destBucketMD.getLocationConstraint() && destBucketMD.isIngestionBucket()) { - // If the object is being written to the "ingested" storage location, keep the same - // versionId for consistency and to avoid creating an extra version when it gets - // ingested - const backendVersionId = decodeVID(destDataGetInfoArr[0].dataStoreVersionId); - if (!(backendVersionId instanceof Error)) { - options.versionId = backendVersionId; // eslint-disable-line no-param-reassign + const location = 
destDataGetInfoArr?.[0]?.dataStoreName; + if (location === destBucketMD.getLocationConstraint() && destBucketMD.isIngestionBucket()) { + // If the object is being written to the "ingested" storage location, keep the same + // versionId for consistency and to avoid creating an extra version when it gets + // ingested + const backendVersionId = decodeVID(destDataGetInfoArr[0].dataStoreVersionId); + if (!(backendVersionId instanceof Error)) { + options.versionId = backendVersionId; // eslint-disable-line no-param-reassign + } } - } - // eslint-disable-next-line - storeMetadataParams.versionId = options.versionId; - // eslint-disable-next-line - storeMetadataParams.versioning = options.versioning; - // eslint-disable-next-line - storeMetadataParams.isNull = options.isNull; - if (options.extraMD) { - Object.assign(storeMetadataParams, options.extraMD); + // eslint-disable-next-line + storeMetadataParams.versionId = options.versionId; + // eslint-disable-next-line + storeMetadataParams.versioning = options.versioning; + // eslint-disable-next-line + storeMetadataParams.isNull = options.isNull; + if (options.extraMD) { + Object.assign(storeMetadataParams, options.extraMD); + } + const dataToDelete = options.dataToDelete; + return next( + null, + storeMetadataParams, + destDataGetInfoArr, + destObjMD, + serverSideEncryption, + destBucketMD, + dataToDelete + ); } - const dataToDelete = options.dataToDelete; - return next(null, storeMetadataParams, destDataGetInfoArr, - destObjMD, serverSideEncryption, destBucketMD, - dataToDelete); - }); - }, - function storeNewMetadata(storeMetadataParams, destDataGetInfoArr, - destObjMD, serverSideEncryption, destBucketMD, dataToDelete, next) { - if (destObjMD && destObjMD.uploadId) { - // eslint-disable-next-line - storeMetadataParams.oldReplayId = destObjMD.uploadId; - } + ); + }, + function storeNewMetadata( + storeMetadataParams, + destDataGetInfoArr, + destObjMD, + serverSideEncryption, + destBucketMD, + dataToDelete, + next + ) { + if (destObjMD && destObjMD.uploadId) { + // eslint-disable-next-line + storeMetadataParams.oldReplayId = destObjMD.uploadId; + } - return services.metadataStoreObject(destBucketName, - destDataGetInfoArr, serverSideEncryption, - storeMetadataParams, (err, result) => { - if (err) { - log.debug('error storing new metadata', { error: err }); - return next(err, null, destBucketMD); - } - const sourceObjSize = storeMetadataParams.size; - const destObjPrevSize = (destObjMD && - destObjMD['content-length'] !== undefined) ? - destObjMD['content-length'] : null; + return services.metadataStoreObject( + destBucketName, + destDataGetInfoArr, + serverSideEncryption, + storeMetadataParams, + (err, result) => { + if (err) { + log.debug('error storing new metadata', { error: err }); + return next(err, null, destBucketMD); + } + const sourceObjSize = storeMetadataParams.size; + const destObjPrevSize = + destObjMD && destObjMD['content-length'] !== undefined ? 
destObjMD['content-length'] : null; - setExpirationHeaders(responseHeaders, { - lifecycleConfig: destBucketMD.getLifecycleConfiguration(), - objectParams: { - key: destObjectKey, - date: result.lastModified, - tags: result.tags, - }, - }); + setExpirationHeaders(responseHeaders, { + lifecycleConfig: destBucketMD.getLifecycleConfiguration(), + objectParams: { + key: destObjectKey, + date: result.lastModified, + tags: result.tags, + }, + }); - return next(null, dataToDelete, result, destBucketMD, - storeMetadataParams, serverSideEncryption, - sourceObjSize, destObjPrevSize); - }); - }, - function deleteExistingData(dataToDelete, storingNewMdResult, - destBucketMD, storeMetadataParams, serverSideEncryption, - sourceObjSize, destObjPrevSize, next) { - // Clean up any potential orphans in data if object - // put is an overwrite of already existing - // object with same name, so long as the source is not - // the same as the destination - if (!sourceIsDestination && dataToDelete) { - const newDataStoreName = storeMetadataParams.dataStoreName; - return data.batchDelete(dataToDelete, request.method, - newDataStoreName, log, err => { + return next( + null, + dataToDelete, + result, + destBucketMD, + storeMetadataParams, + serverSideEncryption, + sourceObjSize, + destObjPrevSize + ); + } + ); + }, + function deleteExistingData( + dataToDelete, + storingNewMdResult, + destBucketMD, + storeMetadataParams, + serverSideEncryption, + sourceObjSize, + destObjPrevSize, + next + ) { + // Clean up any potential orphans in data if object + // put is an overwrite of already existing + // object with same name, so long as the source is not + // the same as the destination + if (!sourceIsDestination && dataToDelete) { + const newDataStoreName = storeMetadataParams.dataStoreName; + return data.batchDelete(dataToDelete, request.method, newDataStoreName, log, err => { if (err) { // if error, log the error and move on as it is not // relevant to the client as the client's // object already succeeded putting data, metadata - log.error('error deleting existing data', - { error: err }); + log.error('error deleting existing data', { error: err }); } - next(null, - storingNewMdResult, destBucketMD, storeMetadataParams, - serverSideEncryption, sourceObjSize, destObjPrevSize); + next( + null, + storingNewMdResult, + destBucketMD, + storeMetadataParams, + serverSideEncryption, + sourceObjSize, + destObjPrevSize + ); }); + } + return next( + null, + storingNewMdResult, + destBucketMD, + storeMetadataParams, + serverSideEncryption, + sourceObjSize, + destObjPrevSize + ); + }, + ], + ( + err, + storingNewMdResult, + destBucketMD, + storeMetadataParams, + serverSideEncryption, + sourceObjSize, + destObjPrevSize + ) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destBucketMD); + + if (err) { + monitoring.promMetrics('PUT', destBucketName, err.code, 'copyObject'); + return callback(err, null, corsHeaders); + } + const xml = [ + '', + '', + '', + new Date(storeMetadataParams.lastModifiedDate).toISOString(), + '', + '"', + storeMetadataParams.contentMD5, + '"', + '', + ].join(''); + const additionalHeaders = corsHeaders || {}; + if (serverSideEncryption) { + setSSEHeaders( + additionalHeaders, + serverSideEncryption.algorithm, + serverSideEncryption.configuredMasterKeyId || serverSideEncryption.masterKeyId + ); + } + if (sourceVersionId) { + additionalHeaders['x-amz-copy-source-version-id'] = versionIdUtils.encode(sourceVersionId); } - return next(null, - storingNewMdResult, destBucketMD, 
storeMetadataParams, - serverSideEncryption, sourceObjSize, destObjPrevSize); - }, - ], (err, storingNewMdResult, destBucketMD, storeMetadataParams, - serverSideEncryption, sourceObjSize, destObjPrevSize) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, destBucketMD); + const isVersioned = storingNewMdResult && storingNewMdResult.versionId; + if (isVersioned) { + additionalHeaders['x-amz-version-id'] = versionIdUtils.encode(storingNewMdResult.versionId); + } + + Object.assign(responseHeaders, additionalHeaders); + + // Only pre-existing non-versioned objects get 0 all others use 1 + const numberOfObjects = !isVersioned && destObjPrevSize !== null ? 0 : 1; - if (err) { + pushMetric('copyObject', log, { + authInfo, + canonicalID: destBucketMD.getOwner(), + bucket: destBucketName, + keys: [destObjectKey], + newByteLength: sourceObjSize, + oldByteLength: isVersioned ? null : destObjPrevSize, + location: storeMetadataParams.dataStoreName, + versionId: isVersioned ? storingNewMdResult.versionId : undefined, + numberOfObjects, + }); monitoring.promMetrics( - 'PUT', destBucketName, err.code, 'copyObject'); - return callback(err, null, corsHeaders); - } - const xml = [ - '', - '', - '', new Date(storeMetadataParams.lastModifiedDate) - .toISOString(), '', - '"', storeMetadataParams.contentMD5, '"', - '', - ].join(''); - const additionalHeaders = corsHeaders || {}; - if (serverSideEncryption) { - setSSEHeaders(additionalHeaders, - serverSideEncryption.algorithm, - serverSideEncryption.configuredMasterKeyId || serverSideEncryption.masterKeyId + 'PUT', + destBucketName, + '200', + 'copyObject', + sourceObjSize, + destObjPrevSize, + isVersioned ); + // Add expiration header if lifecycle enabled + return callback(null, xml, responseHeaders); } - if (sourceVersionId) { - additionalHeaders['x-amz-copy-source-version-id'] = - versionIdUtils.encode(sourceVersionId); - } - const isVersioned = storingNewMdResult && storingNewMdResult.versionId; - if (isVersioned) { - additionalHeaders['x-amz-version-id'] = - versionIdUtils.encode(storingNewMdResult.versionId); - } - - Object.assign(responseHeaders, additionalHeaders); - - // Only pre-existing non-versioned objects get 0 all others use 1 - const numberOfObjects = !isVersioned && destObjPrevSize !== null ? 0 : 1; - - pushMetric('copyObject', log, { - authInfo, - canonicalID: destBucketMD.getOwner(), - bucket: destBucketName, - keys: [destObjectKey], - newByteLength: sourceObjSize, - oldByteLength: isVersioned ? null : destObjPrevSize, - location: storeMetadataParams.dataStoreName, - versionId: isVersioned ? 
storingNewMdResult.versionId : undefined, - numberOfObjects, - }); - monitoring.promMetrics('PUT', destBucketName, '200', - 'copyObject', sourceObjSize, destObjPrevSize, isVersioned); - // Add expiration header if lifecycle enabled - return callback(null, xml, responseHeaders); - }); + ); } module.exports = objectCopy; diff --git a/lib/api/objectDelete.js b/lib/api/objectDelete.js index 112a4b721f..b49009ae43 100644 --- a/lib/api/objectDelete.js +++ b/lib/api/objectDelete.js @@ -5,12 +5,10 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const services = require('../services'); const { pushMetric } = require('../utapi/utilities'); const createAndStoreObject = require('./apiUtils/object/createAndStoreObject'); -const { decodeVersionId, preprocessingVersioningDelete } - = require('./apiUtils/object/versioning'); +const { decodeVersionId, preprocessingVersioningDelete } = require('./apiUtils/object/versioning'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const monitoring = require('../utilities/monitoringHandler'); -const { hasGovernanceBypassHeader, ObjectLockInfo } - = require('./apiUtils/object/objectLockHelpers'); +const { hasGovernanceBypassHeader, ObjectLockInfo } = require('./apiUtils/object/objectLockHelpers'); const { config } = require('../Config'); const { _deleteRequiresOplogUpdate } = require('./apiUtils/object/deleteObject'); @@ -32,8 +30,7 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) { log.debug('processing request', { method: 'objectDeleteInternal' }); if (authInfo.isRequesterPublicUser()) { log.debug('operation not available for public user'); - monitoring.promMetrics( - 'DELETE', request.bucketName, 403, 'deleteObject'); + monitoring.promMetrics('DELETE', request.bucketName, 403, 'deleteObject'); return cb(errors.AccessDenied); } const bucketName = request.bucketName; @@ -60,246 +57,296 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) { }; const canonicalID = authInfo.getCanonicalID(); - return async.waterfall([ - function validateBucketAndObj(next) { - return standardMetadataValidateBucketAndObj(valParams, request.actionImplicitDenies, log, - (err, bucketMD, objMD) => { - if (err) { - return next(err, bucketMD); - } + return async.waterfall( + [ + function validateBucketAndObj(next) { + return standardMetadataValidateBucketAndObj( + valParams, + request.actionImplicitDenies, + log, + (err, bucketMD, objMD) => { + if (err) { + return next(err, bucketMD); + } - const versioningCfg = bucketMD.getVersioningConfiguration(); - if (!objMD) { - if (!versioningCfg) { - return next(errors.NoSuchKey, bucketMD); - } - // AWS does not return an error when trying to delete a - // specific version that does not exist. We skip to the end - // of the waterfall here. - if (reqVersionId) { - log.debug('trying to delete specific version ' + - ' that does not exist'); - return next(errors.NoSuchVersion, bucketMD); - } - // To adhere to AWS behavior, create a delete marker even - // if trying to delete an object that does not exist when - // versioning has been configured - return next(null, bucketMD, objMD); - } + const versioningCfg = bucketMD.getVersioningConfiguration(); + if (!objMD) { + if (!versioningCfg) { + return next(errors.NoSuchKey, bucketMD); + } + // AWS does not return an error when trying to delete a + // specific version that does not exist. We skip to the end + // of the waterfall here. 
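As an aside, a compact sketch of the branches taken in this block when the object metadata is missing (illustrative only; plain strings stand in for arsenal error objects, and the delete-marker case is the one that continues down the waterfall):

// Illustrative only: mirrors the missing-object branches of validateBucketAndObj.
function missingObjectOutcome(versioningCfg, reqVersionId) {
    if (!versioningCfg) {
        return 'NoSuchKey'; // non-versioned bucket, nothing to delete
    }
    if (reqVersionId) {
        return 'NoSuchVersion'; // a specific version was requested but is absent
    }
    return 'createDeleteMarker'; // versioning configured: still create a delete marker
}
console.log(missingObjectOutcome(null, undefined)); // NoSuchKey
console.log(missingObjectOutcome({ Status: 'Enabled' }, 'v1')); // NoSuchVersion
console.log(missingObjectOutcome({ Status: 'Enabled' }, undefined)); // createDeleteMarker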
+ if (reqVersionId) { + log.debug('trying to delete specific version ' + ' that does not exist'); + return next(errors.NoSuchVersion, bucketMD); + } + // To adhere to AWS behavior, create a delete marker even + // if trying to delete an object that does not exist when + // versioning has been configured + return next(null, bucketMD, objMD); + } - if (versioningCfg && versioningCfg.Status === 'Enabled' && - objMD.versionId === reqVersionId && isExpiration && - !objMD.isDeleteMarker) { - log.warn('expiration is trying to delete a master version ' + - 'of an object with versioning enabled', { - method: 'objectDeleteInternal', - isExpiration, - reqVersionId, - versionId: objMD.versionId, - replicationState: objMD.replicationInfo, - location: objMD.location, - originOp: objMD.originOp, - }); + if ( + versioningCfg && + versioningCfg.Status === 'Enabled' && + objMD.versionId === reqVersionId && + isExpiration && + !objMD.isDeleteMarker + ) { + log.warn( + 'expiration is trying to delete a master version ' + + 'of an object with versioning enabled', + { + method: 'objectDeleteInternal', + isExpiration, + reqVersionId, + versionId: objMD.versionId, + replicationState: objMD.replicationInfo, + location: objMD.location, + originOp: objMD.originOp, + } + ); + } + if (reqVersionId && objMD.location && Array.isArray(objMD.location) && objMD.location[0]) { + // we need this information for data deletes to AWS + // eslint-disable-next-line no-param-reassign + objMD.location[0].deleteVersion = true; + } + if (objMD['content-length'] !== undefined) { + log.end().addDefaultFields({ + bytesDeleted: objMD['content-length'], + }); + } + return next(null, bucketMD, objMD); } - if (reqVersionId && objMD.location && - Array.isArray(objMD.location) && objMD.location[0]) { - // we need this information for data deletes to AWS - // eslint-disable-next-line no-param-reassign - objMD.location[0].deleteVersion = true; - } - if (objMD['content-length'] !== undefined) { - log.end().addDefaultFields({ - bytesDeleted: objMD['content-length'], - }); + ); + }, + function evaluateObjectLockPolicy(bucketMD, objectMD, next) { + // AWS only returns an object lock error if a version id + // is specified, else continue to create a delete marker + if (!reqVersionId) { + return next(null, bucketMD, objectMD); } - return next(null, bucketMD, objMD); - }); - }, - function evaluateObjectLockPolicy(bucketMD, objectMD, next) { - // AWS only returns an object lock error if a version id - // is specified, else continue to create a delete marker - if (!reqVersionId) { - return next(null, bucketMD, objectMD); - } - const objLockInfo = new ObjectLockInfo({ - mode: objectMD.retentionMode, - date: objectMD.retentionDate, - legalHold: objectMD.legalHold || false, - }); + const objLockInfo = new ObjectLockInfo({ + mode: objectMD.retentionMode, + date: objectMD.retentionDate, + legalHold: objectMD.legalHold || false, + }); - // If the object can not be deleted raise an error - if (!objLockInfo.canModifyObject(hasGovernanceBypass)) { - log.debug('trying to delete locked object'); - return next(objectLockedError, bucketMD); - } + // If the object can not be deleted raise an error + if (!objLockInfo.canModifyObject(hasGovernanceBypass)) { + log.debug('trying to delete locked object'); + return next(objectLockedError, bucketMD); + } - return next(null, bucketMD, objectMD); - }, - function validateHeaders(bucketMD, objectMD, next) { - if (objectMD) { - const lastModified = objectMD['last-modified']; - const { modifiedSinceRes, unmodifiedSinceRes } = - 
checkDateModifiedHeaders(request.headers, lastModified); - const err = modifiedSinceRes.error || unmodifiedSinceRes.error; - if (err) { - return process.nextTick(() => next(err, bucketMD)); + return next(null, bucketMD, objectMD); + }, + function validateHeaders(bucketMD, objectMD, next) { + if (objectMD) { + const lastModified = objectMD['last-modified']; + const { modifiedSinceRes, unmodifiedSinceRes } = checkDateModifiedHeaders( + request.headers, + lastModified + ); + const err = modifiedSinceRes.error || unmodifiedSinceRes.error; + if (err) { + return process.nextTick(() => next(err, bucketMD)); + } } - } - return process.nextTick(() => - next(null, bucketMD, objectMD)); - }, - function deleteOperation(bucketMD, objectMD, next) { - const delOptions = preprocessingVersioningDelete( - bucketName, bucketMD, objectMD, reqVersionId, config.nullVersionCompatMode); - const deleteInfo = { - removeDeleteMarker: false, - newDeleteMarker: false, - }; - if (delOptions && delOptions.deleteData && bucketMD.isNFS() && - bucketMD.getReplicationConfiguration()) { - // If an NFS bucket that has replication configured, we want - // to put a delete marker on the destination even though the - // source does not have versioning. - return createAndStoreObject(bucketName, bucketMD, objectKey, - objectMD, authInfo, canonicalID, null, request, true, null, - log, isExpiration ? - 's3:LifecycleExpiration:DeleteMarkerCreated' : - 's3:ObjectRemoved:DeleteMarkerCreated', - err => { - if (err) { - return next(err); + return process.nextTick(() => next(null, bucketMD, objectMD)); + }, + function deleteOperation(bucketMD, objectMD, next) { + const delOptions = preprocessingVersioningDelete( + bucketName, + bucketMD, + objectMD, + reqVersionId, + config.nullVersionCompatMode + ); + const deleteInfo = { + removeDeleteMarker: false, + newDeleteMarker: false, + }; + if (delOptions && delOptions.deleteData && bucketMD.isNFS() && bucketMD.getReplicationConfiguration()) { + // If an NFS bucket that has replication configured, we want + // to put a delete marker on the destination even though the + // source does not have versioning. + return createAndStoreObject( + bucketName, + bucketMD, + objectKey, + objectMD, + authInfo, + canonicalID, + null, + request, + true, + null, + log, + isExpiration + ? 's3:LifecycleExpiration:DeleteMarkerCreated' + : 's3:ObjectRemoved:DeleteMarkerCreated', + err => { + if (err) { + return next(err); + } + if (objectMD.isDeleteMarker) { + // record that we deleted a delete marker to set + // response headers accordingly + deleteInfo.removeDeleteMarker = true; + } + return services.deleteObject( + bucketName, + objectMD, + objectKey, + delOptions, + false, + log, + isExpiration ? 's3:LifecycleExpiration:Delete' : 's3:ObjectRemoved:Delete', + (err, delResult) => next(err, bucketMD, objectMD, delResult, deleteInfo) + ); } - if (objectMD.isDeleteMarker) { - // record that we deleted a delete marker to set - // response headers accordingly - deleteInfo.removeDeleteMarker = true; - } - return services.deleteObject(bucketName, objectMD, - objectKey, delOptions, false, log, isExpiration ? 
- 's3:LifecycleExpiration:Delete' : - 's3:ObjectRemoved:Delete', - (err, delResult) => - next(err, bucketMD, objectMD, delResult, deleteInfo)); - }); - } - if (delOptions && delOptions.deleteData) { - delOptions.overheadField = overheadField; - if (objectMD.isDeleteMarker) { - // record that we deleted a delete marker to set - // response headers accordingly - deleteInfo.removeDeleteMarker = true; + ); } + if (delOptions && delOptions.deleteData) { + delOptions.overheadField = overheadField; + if (objectMD.isDeleteMarker) { + // record that we deleted a delete marker to set + // response headers accordingly + deleteInfo.removeDeleteMarker = true; + } - if (objectMD.uploadId) { - delOptions.replayId = objectMD.uploadId; - } + if (objectMD.uploadId) { + delOptions.replayId = objectMD.uploadId; + } - if (!_deleteRequiresOplogUpdate(objectMD, bucketMD)) { - delOptions.doesNotNeedOpogUpdate = true; - } + if (!_deleteRequiresOplogUpdate(objectMD, bucketMD)) { + delOptions.doesNotNeedOpogUpdate = true; + } - return services.deleteObject(bucketName, objectMD, objectKey, - delOptions, false, log, isExpiration ? - 's3:LifecycleExpiration:Delete' : - 's3:ObjectRemoved:Delete', - (err, delResult) => next(err, bucketMD, - objectMD, delResult, deleteInfo)); - } - // putting a new delete marker - deleteInfo.newDeleteMarker = true; - return createAndStoreObject(bucketName, bucketMD, - objectKey, objectMD, authInfo, canonicalID, null, request, - deleteInfo.newDeleteMarker, null, overheadField, log, isExpiration ? - 's3:LifecycleExpiration:DeleteMarkerCreated' : - 's3:ObjectRemoved:DeleteMarkerCreated', - (err, newDelMarkerRes) => { - next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo); - }); - }, - ], (err, bucketMD, objectMD, result, deleteInfo) => { - const resHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucketMD); - // if deleting a specific version or delete marker, return version id - // in the response headers, even in case of NoSuchVersion - if (reqVersionId) { - resHeaders['x-amz-version-id'] = reqVersionId === 'null' ? - reqVersionId : versionIdUtils.encode(reqVersionId); - if (deleteInfo && deleteInfo.removeDeleteMarker) { - resHeaders['x-amz-delete-marker'] = true; + return services.deleteObject( + bucketName, + objectMD, + objectKey, + delOptions, + false, + log, + isExpiration ? 's3:LifecycleExpiration:Delete' : 's3:ObjectRemoved:Delete', + (err, delResult) => next(err, bucketMD, objectMD, delResult, deleteInfo) + ); + } + // putting a new delete marker + deleteInfo.newDeleteMarker = true; + return createAndStoreObject( + bucketName, + bucketMD, + objectKey, + objectMD, + authInfo, + canonicalID, + null, + request, + deleteInfo.newDeleteMarker, + null, + overheadField, + log, + isExpiration + ? 's3:LifecycleExpiration:DeleteMarkerCreated' + : 's3:ObjectRemoved:DeleteMarkerCreated', + (err, newDelMarkerRes) => { + next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo); + } + ); + }, + ], + (err, bucketMD, objectMD, result, deleteInfo) => { + const resHeaders = collectCorsHeaders(request.headers.origin, request.method, bucketMD); + // if deleting a specific version or delete marker, return version id + // in the response headers, even in case of NoSuchVersion + if (reqVersionId) { + resHeaders['x-amz-version-id'] = + reqVersionId === 'null' ? 
reqVersionId : versionIdUtils.encode(reqVersionId); + if (deleteInfo && deleteInfo.removeDeleteMarker) { + resHeaders['x-amz-delete-marker'] = true; + } } - } - if (err === objectLockedError) { - log.debug('preventing deletion due to object lock', - { + if (err === objectLockedError) { + log.debug('preventing deletion due to object lock', { error: errors.AccessDenied, objectLocked: true, method: 'objectDelete', }); - return cb(errors.AccessDenied, resHeaders); - } - if (err) { - log.debug('error processing request', { error: err, - method: 'objectDelete' }); - monitoring.promMetrics( - 'DELETE', bucketName, err.code, 'deleteObject'); - return cb(err, resHeaders); - } - if (deleteInfo.newDeleteMarker) { - // if we created a new delete marker, return true for - // x-amz-delete-marker and the version ID of the new delete marker - if (result.versionId) { - resHeaders['x-amz-delete-marker'] = true; - resHeaders['x-amz-version-id'] = result.versionId === 'null' ? - result.versionId : versionIdUtils.encode(result.versionId); + return cb(errors.AccessDenied, resHeaders); } + if (err) { + log.debug('error processing request', { error: err, method: 'objectDelete' }); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteObject'); + return cb(err, resHeaders); + } + if (deleteInfo.newDeleteMarker) { + // if we created a new delete marker, return true for + // x-amz-delete-marker and the version ID of the new delete marker + if (result.versionId) { + resHeaders['x-amz-delete-marker'] = true; + resHeaders['x-amz-version-id'] = + result.versionId === 'null' ? result.versionId : versionIdUtils.encode(result.versionId); + } - /* byteLength is passed under the following conditions: - * - bucket versioning is suspended - * - object version id is null - * and one of: - * - the content length of the object exists - * - or - - * - it is a delete marker - * In this case, the master key is deleted and replaced with a delete marker. - * The decrement accounts for the deletion of the master key when utapi reports - * on the number of objects. - */ - // FIXME: byteLength may be incorrect, see S3C-7440 - const versioningSuspended = bucketMD.getVersioningConfiguration() - && bucketMD.getVersioningConfiguration().Status === 'Suspended'; - const deletedSuspendedMasterVersion = versioningSuspended && !!objectMD; - // Default to 0 content-length to cover deleting a DeleteMarker - const objectByteLength = (objectMD && objectMD['content-length']) || 0; - const byteLength = deletedSuspendedMasterVersion ? Number.parseInt(objectByteLength, 10) : null; + /* byteLength is passed under the following conditions: + * - bucket versioning is suspended + * - object version id is null + * and one of: + * - the content length of the object exists + * - or - + * - it is a delete marker + * In this case, the master key is deleted and replaced with a delete marker. + * The decrement accounts for the deletion of the master key when utapi reports + * on the number of objects. + */ + // FIXME: byteLength may be incorrect, see S3C-7440 + const versioningSuspended = + bucketMD.getVersioningConfiguration() && + bucketMD.getVersioningConfiguration().Status === 'Suspended'; + const deletedSuspendedMasterVersion = versioningSuspended && !!objectMD; + // Default to 0 content-length to cover deleting a DeleteMarker + const objectByteLength = (objectMD && objectMD['content-length']) || 0; + const byteLength = deletedSuspendedMasterVersion ? 
Number.parseInt(objectByteLength, 10) : null; - pushMetric('putDeleteMarkerObject', log, { - authInfo, - byteLength, - bucket: bucketName, - keys: [objectKey], - versionId: result.versionId, - location: objectMD ? objectMD.dataStoreName : undefined, - }); - } else { - log.end().addDefaultFields({ - contentLength: objectMD['content-length'], - }); - pushMetric('deleteObject', log, { - authInfo, - canonicalID: bucketMD.getOwner(), - bucket: bucketName, - keys: [objectKey], - byteLength: Number.parseInt(objectMD['content-length'], 10), - numberOfObjects: 1, - location: objectMD.dataStoreName, - isDelete: true, - }); - monitoring.promMetrics('DELETE', bucketName, '200', 'deleteObject', - Number.parseInt(objectMD['content-length'], 10)); + pushMetric('putDeleteMarkerObject', log, { + authInfo, + byteLength, + bucket: bucketName, + keys: [objectKey], + versionId: result.versionId, + location: objectMD ? objectMD.dataStoreName : undefined, + }); + } else { + log.end().addDefaultFields({ + contentLength: objectMD['content-length'], + }); + pushMetric('deleteObject', log, { + authInfo, + canonicalID: bucketMD.getOwner(), + bucket: bucketName, + keys: [objectKey], + byteLength: Number.parseInt(objectMD['content-length'], 10), + numberOfObjects: 1, + location: objectMD.dataStoreName, + isDelete: true, + }); + monitoring.promMetrics( + 'DELETE', + bucketName, + '200', + 'deleteObject', + Number.parseInt(objectMD['content-length'], 10) + ); + } + return cb(err, resHeaders); } - return cb(err, resHeaders); - }); + ); } /** diff --git a/lib/api/objectDeleteTagging.js b/lib/api/objectDeleteTagging.js index 71115ffe5a..bbd12d17be 100644 --- a/lib/api/objectDeleteTagging.js +++ b/lib/api/objectDeleteTagging.js @@ -1,8 +1,11 @@ const async = require('async'); const { errors } = require('arsenal'); -const { decodeVersionId, getVersionIdResHeader, getVersionSpecificMetadataOptions } - = require('./apiUtils/object/versioning'); +const { + decodeVersionId, + getVersionIdResHeader, + getVersionSpecificMetadataOptions, +} = require('./apiUtils/object/versioning'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); @@ -48,75 +51,81 @@ function objectDeleteTagging(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, - (err, bucket, objectMD) => { - if (err) { - log.trace('request authorization failed', - { method: 'objectDeleteTagging', error: err }); - return next(err); - } - if (!objectMD) { - const err = reqVersionId ? 
errors.NoSuchVersion : - errors.NoSuchKey; - log.trace('error no object metadata found', - { method: 'objectDeleteTagging', error: err }); - return next(err, bucket); - } - if (objectMD.isDeleteMarker) { - log.trace('version is a delete marker', - { method: 'objectDeleteTagging' }); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - return next(errors.MethodNotAllowed, bucket); - } - return next(null, bucket, objectMD); - }), - (bucket, objectMD, next) => { - // eslint-disable-next-line no-param-reassign - objectMD.tags = {}; - const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode); - const replicationInfo = getReplicationInfo(config, - objectKey, bucket, true, 0, REPLICATION_ACTION, objectMD); - if (replicationInfo) { + return async.waterfall( + [ + next => + standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, bucket, objectMD) => { + if (err) { + log.trace('request authorization failed', { method: 'objectDeleteTagging', error: err }); + return next(err); + } + if (!objectMD) { + const err = reqVersionId ? errors.NoSuchVersion : errors.NoSuchKey; + log.trace('error no object metadata found', { method: 'objectDeleteTagging', error: err }); + return next(err, bucket); + } + if (objectMD.isDeleteMarker) { + log.trace('version is a delete marker', { method: 'objectDeleteTagging' }); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + return next(errors.MethodNotAllowed, bucket); + } + return next(null, bucket, objectMD); + } + ), + (bucket, objectMD, next) => { // eslint-disable-next-line no-param-reassign - objectMD.replicationInfo = Object.assign({}, - objectMD.replicationInfo, replicationInfo); + objectMD.tags = {}; + const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode); + const replicationInfo = getReplicationInfo( + config, + objectKey, + bucket, + true, + 0, + REPLICATION_ACTION, + objectMD + ); + if (replicationInfo) { + // eslint-disable-next-line no-param-reassign + objectMD.replicationInfo = Object.assign({}, objectMD.replicationInfo, replicationInfo); + } + // eslint-disable-next-line no-param-reassign + objectMD.originOp = 's3:ObjectTagging:Delete'; + metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params, log, err => + next(err, bucket, objectMD) + ); + }, + (bucket, objectMD, next) => + // if external backends handles tagging + data.objectTagging('Delete', objectKey, bucket.getName(), objectMD, log, err => + next(err, bucket, objectMD) + ), + ], + (err, bucket, objectMD) => { + const additionalResHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'objectDeleteTagging' }); + monitoring.promMetrics('DELETE', bucketName, err.code, 'deleteObjectTagging'); + } else { + pushMetric('deleteObjectTagging', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + versionId: objectMD ? objectMD.versionId : undefined, + location: objectMD ? 
objectMD.dataStoreName : undefined, + }); + monitoring.promMetrics('DELETE', bucketName, '200', 'deleteObjectTagging'); + const verCfg = bucket.getVersioningConfiguration(); + additionalResHeaders['x-amz-version-id'] = getVersionIdResHeader(verCfg, objectMD); } - // eslint-disable-next-line no-param-reassign - objectMD.originOp = 's3:ObjectTagging:Delete'; - metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params, - log, err => - next(err, bucket, objectMD)); - }, - (bucket, objectMD, next) => - // if external backends handles tagging - data.objectTagging('Delete', objectKey, bucket.getName(), objectMD, - log, err => next(err, bucket, objectMD)), - ], (err, bucket, objectMD) => { - const additionalResHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, - method: 'objectDeleteTagging' }); - monitoring.promMetrics( - 'DELETE', bucketName, err.code, 'deleteObjectTagging'); - } else { - pushMetric('deleteObjectTagging', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - versionId: objectMD ? objectMD.versionId : undefined, - location: objectMD ? objectMD.dataStoreName : undefined, - }); - monitoring.promMetrics( - 'DELETE', bucketName, '200', 'deleteObjectTagging'); - const verCfg = bucket.getVersioningConfiguration(); - additionalResHeaders['x-amz-version-id'] = - getVersionIdResHeader(verCfg, objectMD); + return callback(err, additionalResHeaders); } - return callback(err, additionalResHeaders); - }); + ); } module.exports = objectDeleteTagging; diff --git a/lib/api/objectGet.js b/lib/api/objectGet.js index 48525a79ec..d934b1e145 100644 --- a/lib/api/objectGet.js +++ b/lib/api/objectGet.js @@ -10,10 +10,8 @@ const collectResponseHeaders = require('../utilities/collectResponseHeaders'); const { pushMetric } = require('../utapi/utilities'); const { getVersionIdResHeader } = require('./apiUtils/object/versioning'); const setPartRanges = require('./apiUtils/object/setPartRanges'); -const locationHeaderCheck = - require('./apiUtils/object/locationHeaderCheck'); -const getReplicationBackendDataLocator = - require('./apiUtils/object/getReplicationBackendDataLocator'); +const locationHeaderCheck = require('./apiUtils/object/locationHeaderCheck'); +const getReplicationBackendDataLocator = require('./apiUtils/object/getReplicationBackendDataLocator'); const checkReadLocation = require('./apiUtils/object/checkReadLocation'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); @@ -42,8 +40,7 @@ function objectGet(authInfo, request, returnTagCount, log, callback) { const objectKey = request.objectKey; // returns name of location to get from and key if successful - const locCheckResult = - locationHeaderCheck(request.headers, objectKey, bucketName); + const locCheckResult = locationHeaderCheck(request.headers, objectKey, bucketName); if (locCheckResult instanceof Error) { log.trace('invalid location constraint to get from', { location: request.headers['x-amz-location-constraint'], @@ -72,288 +69,259 @@ function objectGet(authInfo, request, returnTagCount, log, callback) { request, }; - return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, - (err, bucket, objMD) => updateEncryption(err, bucket, objMD, objectKey, log, {}, - (err, bucket, objMD) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.debug('error processing request', { - error: err, - method: 
'metadataValidateBucketAndObj', - }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getObject'); - return callback(err, null, corsHeaders); - } - if (!objMD) { - const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey; - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getObject'); - return callback(err, null, corsHeaders); - } - const verCfg = bucket.getVersioningConfiguration(); - // check if object data is in a cold storage - const coldErr = verifyColdObjectAvailable(objMD); - if (coldErr) { - monitoring.promMetrics( - 'GET', bucketName, coldErr.code, 'getObject'); - return callback(coldErr, null, corsHeaders); - } - if (objMD.isDeleteMarker) { - const responseMetaHeaders = Object.assign({}, - { 'x-amz-delete-marker': true }, corsHeaders); - if (!versionId) { - monitoring.promMetrics( - 'GET', bucketName, 404, 'getObject'); - return callback(errors.NoSuchKey, null, responseMetaHeaders); + return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, (err, bucket, objMD) => + updateEncryption(err, bucket, objMD, objectKey, log, {}, (err, bucket, objMD) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.debug('error processing request', { + error: err, + method: 'metadataValidateBucketAndObj', + }); + monitoring.promMetrics('GET', bucketName, err.code, 'getObject'); + return callback(err, null, corsHeaders); } - // return MethodNotAllowed if requesting a specific - // version that has a delete marker - responseMetaHeaders['x-amz-version-id'] = - getVersionIdResHeader(verCfg, objMD); - monitoring.promMetrics( - 'GET', bucketName, 405, 'getObject'); - return callback(errors.MethodNotAllowed, null, - responseMetaHeaders); - } - const headerValResult = validateHeaders(request.headers, - objMD['last-modified'], objMD['content-md5']); - if (headerValResult.error) { - return callback(headerValResult.error, null, corsHeaders); - } - const responseMetaHeaders = collectResponseHeaders(objMD, - corsHeaders, verCfg, returnTagCount); - - setExpirationHeaders(responseMetaHeaders, { - lifecycleConfig: bucket.getLifecycleConfiguration(), - objectParams: { - key: objectKey, - tags: objMD.tags, - date: objMD['last-modified'], - }, - isVersionedReq: !!versionId, - }); - - const objLength = (objMD.location === null ? - 0 : parseInt(objMD['content-length'], 10)); - let byteRange; - const streamingParams = {}; - if (request.headers.range) { - const { range, error } = parseRange(request.headers.range, - objLength); - if (error) { - monitoring.promMetrics( - 'GET', bucketName, 400, 'getObject'); - return callback(error, null, corsHeaders); + if (!objMD) { + const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey; + monitoring.promMetrics('GET', bucketName, err.code, 'getObject'); + return callback(err, null, corsHeaders); } - responseMetaHeaders['Accept-Ranges'] = 'bytes'; - if (range) { - byteRange = range; - // End of range should be included so + 1 - responseMetaHeaders['Content-Length'] = - range[1] - range[0] + 1; - responseMetaHeaders['Content-Range'] = - `bytes ${range[0]}-${range[1]}/${objLength}`; - streamingParams.rangeStart = (range[0] || typeof range[0] === 'number') ? - range[0].toString() : undefined; - streamingParams.rangeEnd = range[1] ? 
- range[1].toString() : undefined; + const verCfg = bucket.getVersioningConfiguration(); + // check if object data is in a cold storage + const coldErr = verifyColdObjectAvailable(objMD); + if (coldErr) { + monitoring.promMetrics('GET', bucketName, coldErr.code, 'getObject'); + return callback(coldErr, null, corsHeaders); } - } - let dataLocator = null; - if (objMD.location !== null) { - // To provide for backwards compatibility before - // md-model-version 2, need to handle cases where - // objMD.location is just a string - dataLocator = Array.isArray(objMD.location) ? - objMD.location : [{ key: objMD.location }]; - - const repConf = bucket.getReplicationConfiguration(); - const prefReadLocation = repConf && repConf.preferredReadLocation; - const prefReadDataLocator = checkReadLocation(config, - prefReadLocation, objectKey, bucketName); - const targetLocation = locCheckResult || prefReadDataLocator || - null; - - if (targetLocation && - targetLocation.location !== objMD.dataStoreName) { - const repBackendResult = getReplicationBackendDataLocator( - targetLocation, objMD.replicationInfo); - if (repBackendResult.error) { - log.error('Error with location constraint header', { - bucketName, objectKey, versionId, - error: repBackendResult.error, - status: repBackendResult.status, - }); - return callback(repBackendResult.error, null, corsHeaders); - } - const targetDataLocator = repBackendResult.dataLocator; - if (targetDataLocator) { - dataLocator = targetDataLocator; - } else { - log.debug('using source location as preferred read ' + - 'is unavailable', { - bucketName, objectKey, versionId, - reason: repBackendResult.reason, - }); + if (objMD.isDeleteMarker) { + const responseMetaHeaders = Object.assign({}, { 'x-amz-delete-marker': true }, corsHeaders); + if (!versionId) { + monitoring.promMetrics('GET', bucketName, 404, 'getObject'); + return callback(errors.NoSuchKey, null, responseMetaHeaders); } + // return MethodNotAllowed if requesting a specific + // version that has a delete marker + responseMetaHeaders['x-amz-version-id'] = getVersionIdResHeader(verCfg, objMD); + monitoring.promMetrics('GET', bucketName, 405, 'getObject'); + return callback(errors.MethodNotAllowed, null, responseMetaHeaders); } - // if the data backend is azure, there will only ever be at - // most one item in the dataLocator array - if (dataLocator[0] && dataLocator[0].dataStoreType === 'azure') { - dataLocator[0].azureStreamingOptions = streamingParams; + const headerValResult = validateHeaders(request.headers, objMD['last-modified'], objMD['content-md5']); + if (headerValResult.error) { + return callback(headerValResult.error, null, corsHeaders); } + const responseMetaHeaders = collectResponseHeaders(objMD, corsHeaders, verCfg, returnTagCount); - let partNumber = null; - if (request.query && request.query.partNumber !== undefined) { - if (byteRange) { - const error = errorInstances.InvalidRequest - .customizeDescription('Cannot specify both Range ' + - 'header and partNumber query parameter.'); - monitoring.promMetrics( - 'GET', bucketName, 400, 'getObject'); - return callback(error, null, corsHeaders); - } - partNumber = Number.parseInt(request.query.partNumber, 10); - if (Number.isNaN(partNumber)) { - const error = errorInstances.InvalidArgument - .customizeDescription('Part number must be a number.'); - monitoring.promMetrics( - 'GET', bucketName, 400, 'getObject'); - return callback(error, null, corsHeaders); - } - if (partNumber < 1 || partNumber > 10000) { - const error = errorInstances.InvalidArgument - 
.customizeDescription('Part number must be an ' + - 'integer between 1 and 10000, inclusive.'); - monitoring.promMetrics( - 'GET', bucketName, 400, 'getObject'); + setExpirationHeaders(responseMetaHeaders, { + lifecycleConfig: bucket.getLifecycleConfiguration(), + objectParams: { + key: objectKey, + tags: objMD.tags, + date: objMD['last-modified'], + }, + isVersionedReq: !!versionId, + }); + + const objLength = objMD.location === null ? 0 : parseInt(objMD['content-length'], 10); + let byteRange; + const streamingParams = {}; + if (request.headers.range) { + const { range, error } = parseRange(request.headers.range, objLength); + if (error) { + monitoring.promMetrics('GET', bucketName, 400, 'getObject'); return callback(error, null, corsHeaders); } - } - // If have a data model before version 2, cannot support - // get range for objects with multiple parts - if (byteRange && dataLocator.length > 1 && - dataLocator[0].start === undefined) { - monitoring.promMetrics( - 'GET', bucketName, 501, 'getObject'); - return callback(errors.NotImplemented, null, corsHeaders); - } - if (objMD['x-amz-server-side-encryption']) { - for (let i = 0; i < dataLocator.length; i++) { - dataLocator[i].masterKeyId = - objMD['x-amz-server-side-encryption-aws-kms-key-id']; - dataLocator[i].algorithm = - objMD['x-amz-server-side-encryption']; + responseMetaHeaders['Accept-Ranges'] = 'bytes'; + if (range) { + byteRange = range; + // End of range should be included so + 1 + responseMetaHeaders['Content-Length'] = range[1] - range[0] + 1; + responseMetaHeaders['Content-Range'] = `bytes ${range[0]}-${range[1]}/${objLength}`; + streamingParams.rangeStart = + range[0] || typeof range[0] === 'number' ? range[0].toString() : undefined; + streamingParams.rangeEnd = range[1] ? range[1].toString() : undefined; } } - if (partNumber) { - const locations = []; - let locationPartNumber; - for (let i = 0; i < objMD.location.length; i++) { - const { dataStoreETag } = objMD.location[i]; + let dataLocator = null; + if (objMD.location !== null) { + // To provide for backwards compatibility before + // md-model-version 2, need to handle cases where + // objMD.location is just a string + dataLocator = Array.isArray(objMD.location) ? 
objMD.location : [{ key: objMD.location }]; - if (dataStoreETag) { - locationPartNumber = - Number.parseInt(dataStoreETag.split(':')[0], 10); + const repConf = bucket.getReplicationConfiguration(); + const prefReadLocation = repConf && repConf.preferredReadLocation; + const prefReadDataLocator = checkReadLocation(config, prefReadLocation, objectKey, bucketName); + const targetLocation = locCheckResult || prefReadDataLocator || null; + + if (targetLocation && targetLocation.location !== objMD.dataStoreName) { + const repBackendResult = getReplicationBackendDataLocator(targetLocation, objMD.replicationInfo); + if (repBackendResult.error) { + log.error('Error with location constraint header', { + bucketName, + objectKey, + versionId, + error: repBackendResult.error, + status: repBackendResult.status, + }); + return callback(repBackendResult.error, null, corsHeaders); + } + const targetDataLocator = repBackendResult.dataLocator; + if (targetDataLocator) { + dataLocator = targetDataLocator; } else { - /** - * Location objects prior to GA7.1 do not include the - * dataStoreETag field so we cannot find the part range, - * the objects are treated as if they only have 1 part - */ - locationPartNumber = 1; + log.debug('using source location as preferred read ' + 'is unavailable', { + bucketName, + objectKey, + versionId, + reason: repBackendResult.reason, + }); } + } + // if the data backend is azure, there will only ever be at + // most one item in the dataLocator array + if (dataLocator[0] && dataLocator[0].dataStoreType === 'azure') { + dataLocator[0].azureStreamingOptions = streamingParams; + } - // Get all parts that belong to the requested part number - if (partNumber === locationPartNumber) { - locations.push(objMD.location[i]); - } else if (locationPartNumber > partNumber) { - break; + let partNumber = null; + if (request.query && request.query.partNumber !== undefined) { + if (byteRange) { + const error = errorInstances.InvalidRequest.customizeDescription( + 'Cannot specify both Range ' + 'header and partNumber query parameter.' + ); + monitoring.promMetrics('GET', bucketName, 400, 'getObject'); + return callback(error, null, corsHeaders); + } + partNumber = Number.parseInt(request.query.partNumber, 10); + if (Number.isNaN(partNumber)) { + const error = errorInstances.InvalidArgument.customizeDescription( + 'Part number must be a number.' + ); + monitoring.promMetrics('GET', bucketName, 400, 'getObject'); + return callback(error, null, corsHeaders); + } + if (partNumber < 1 || partNumber > 10000) { + const error = errorInstances.InvalidArgument.customizeDescription( + 'Part number must be an ' + 'integer between 1 and 10000, inclusive.' 
+ ); + monitoring.promMetrics('GET', bucketName, 400, 'getObject'); + return callback(error, null, corsHeaders); } } - if (locations.length === 0) { - monitoring.promMetrics( - 'GET', bucketName, 400, 'getObject'); - return callback(errors.InvalidPartNumber, null, - corsHeaders); + // If have a data model before version 2, cannot support + // get range for objects with multiple parts + if (byteRange && dataLocator.length > 1 && dataLocator[0].start === undefined) { + monitoring.promMetrics('GET', bucketName, 501, 'getObject'); + return callback(errors.NotImplemented, null, corsHeaders); } - const { start } = locations[0]; - const endLocation = locations[locations.length - 1]; - const end = endLocation.start + endLocation.size - 1; - responseMetaHeaders['Content-Length'] = end - start + 1; - const partByteRange = [start, end]; - dataLocator = setPartRanges(dataLocator, partByteRange); - const partsCount = getPartCountFromMd5(objMD); - if (partsCount) { - responseMetaHeaders['x-amz-mp-parts-count'] = - partsCount; + if (objMD['x-amz-server-side-encryption']) { + for (let i = 0; i < dataLocator.length; i++) { + dataLocator[i].masterKeyId = objMD['x-amz-server-side-encryption-aws-kms-key-id']; + dataLocator[i].algorithm = objMD['x-amz-server-side-encryption']; + } } - } else { - dataLocator = setPartRanges(dataLocator, byteRange); - } - } - // Check KMS Key access and usability before checking data - // diff with AWS: for empty object (no dataLocator) KMS not checked - return async.each(dataLocator || [], - (objectGetInfo, next) => { - if (!objectGetInfo.cipheredDataKey) { - return next(); + if (partNumber) { + const locations = []; + let locationPartNumber; + for (let i = 0; i < objMD.location.length; i++) { + const { dataStoreETag } = objMD.location[i]; + + if (dataStoreETag) { + locationPartNumber = Number.parseInt(dataStoreETag.split(':')[0], 10); + } else { + /** + * Location objects prior to GA7.1 do not include the + * dataStoreETag field so we cannot find the part range, + * the objects are treated as if they only have 1 part + */ + locationPartNumber = 1; + } + + // Get all parts that belong to the requested part number + if (partNumber === locationPartNumber) { + locations.push(objMD.location[i]); + } else if (locationPartNumber > partNumber) { + break; + } + } + if (locations.length === 0) { + monitoring.promMetrics('GET', bucketName, 400, 'getObject'); + return callback(errors.InvalidPartNumber, null, corsHeaders); + } + const { start } = locations[0]; + const endLocation = locations[locations.length - 1]; + const end = endLocation.start + endLocation.size - 1; + responseMetaHeaders['Content-Length'] = end - start + 1; + const partByteRange = [start, end]; + dataLocator = setPartRanges(dataLocator, partByteRange); + const partsCount = getPartCountFromMd5(objMD); + if (partsCount) { + responseMetaHeaders['x-amz-mp-parts-count'] = partsCount; + } + } else { + dataLocator = setPartRanges(dataLocator, byteRange); } - const serverSideEncryption = { - cryptoScheme: objectGetInfo.cryptoScheme, - masterKeyId: objectGetInfo.masterKeyId, - cipheredDataKey: Buffer.from( - objectGetInfo.cipheredDataKey, 'base64'), - }; - const offset = objectGetInfo.range ? 
objectGetInfo.range[0] : 0; - return kms.createDecipherBundle(serverSideEncryption, - offset, log, (err, decipherBundle) => { + } + // Check KMS Key access and usability before checking data + // diff with AWS: for empty object (no dataLocator) KMS not checked + return async.each( + dataLocator || [], + (objectGetInfo, next) => { + if (!objectGetInfo.cipheredDataKey) { + return next(); + } + const serverSideEncryption = { + cryptoScheme: objectGetInfo.cryptoScheme, + masterKeyId: objectGetInfo.masterKeyId, + cipheredDataKey: Buffer.from(objectGetInfo.cipheredDataKey, 'base64'), + }; + const offset = objectGetInfo.range ? objectGetInfo.range[0] : 0; + return kms.createDecipherBundle(serverSideEncryption, offset, log, (err, decipherBundle) => { if (err) { - log.error('cannot get decipher bundle from kms', - { method: 'objectGet' }); + log.error('cannot get decipher bundle from kms', { method: 'objectGet' }); return next(err); } // eslint-disable-next-line no-param-reassign objectGetInfo.decipherStream = decipherBundle.decipher; return next(); }); - }, - err => { - if (err) { - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getObject'); - return callback(err); - } - - return data.head(dataLocator, log, err => { + }, + err => { if (err) { - if (!err.is.LocationNotFound) { - log.error('error from external backend checking for ' + - 'object existence', { error: err }); - } - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getObject'); + monitoring.promMetrics('GET', bucketName, err.code, 'getObject'); return callback(err); } - pushMetric('getObject', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - newByteLength: - Number.parseInt(responseMetaHeaders['Content-Length'], 10), - versionId: objMD.versionId, - location: objMD.dataStoreName, + + return data.head(dataLocator, log, err => { + if (err) { + if (!err.is.LocationNotFound) { + log.error('error from external backend checking for ' + 'object existence', { + error: err, + }); + } + monitoring.promMetrics('GET', bucketName, err.code, 'getObject'); + return callback(err); + } + pushMetric('getObject', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + newByteLength: Number.parseInt(responseMetaHeaders['Content-Length'], 10), + versionId: objMD.versionId, + location: objMD.dataStoreName, + }); + monitoring.promMetrics( + 'GET', + bucketName, + '200', + 'getObject', + Number.parseInt(responseMetaHeaders['Content-Length'], 10) + ); + return callback(null, dataLocator, responseMetaHeaders, byteRange); }); - monitoring.promMetrics('GET', bucketName, '200', 'getObject', - Number.parseInt(responseMetaHeaders['Content-Length'], 10)); - return callback(null, dataLocator, responseMetaHeaders, - byteRange); - }); - } - ); - })); + } + ); + }) + ); } module.exports = objectGet; diff --git a/lib/api/objectGetACL.js b/lib/api/objectGetACL.js index 5f4045bbc0..f578bc4f14 100644 --- a/lib/api/objectGetACL.js +++ b/lib/api/objectGetACL.js @@ -4,8 +4,7 @@ const { errors } = require('arsenal'); const aclUtils = require('../utilities/aclUtils'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const { pushMetric } = require('../utapi/utilities'); -const { decodeVersionId, getVersionIdResHeader } - = require('./apiUtils/object/versioning'); +const { decodeVersionId, getVersionIdResHeader } = require('./apiUtils/object/versioning'); const vault = require('../auth/vault'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const monitoring = 
require('../utilities/monitoringHandler'); @@ -72,128 +71,123 @@ function objectGetACL(authInfo, request, log, callback) { }, }; - return async.waterfall([ - function validateBucketAndObj(next) { - return standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, - (err, bucket, objectMD) => { - if (err) { - log.trace('request authorization failed', - { method: 'objectGetACL', error: err }); - return next(err); - } - if (!objectMD) { - const err = versionId ? errors.NoSuchVersion : - errors.NoSuchKey; - log.trace('error processing request', - { method: 'objectGetACL', error: err }); - return next(err, bucket); - } - if (objectMD.isDeleteMarker) { - if (versionId) { - log.trace('requested version is delete marker', - { method: 'objectGetACL' }); + return async.waterfall( + [ + function validateBucketAndObj(next) { + return standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, bucket, objectMD) => { + if (err) { + log.trace('request authorization failed', { method: 'objectGetACL', error: err }); + return next(err); + } + if (!objectMD) { + const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey; + log.trace('error processing request', { method: 'objectGetACL', error: err }); + return next(err, bucket); + } + if (objectMD.isDeleteMarker) { + if (versionId) { + log.trace('requested version is delete marker', { method: 'objectGetACL' }); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + return next(errors.MethodNotAllowed); + } + log.trace('most recent version is delete marker', { method: 'objectGetACL' }); // FIXME we should return a `x-amz-delete-marker: true` header, // see S3C-7592 - return next(errors.MethodNotAllowed); + return next(errors.NoSuchKey); } - log.trace('most recent version is delete marker', - { method: 'objectGetACL' }); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - return next(errors.NoSuchKey); + return next(null, bucket, objectMD); } - return next(null, bucket, objectMD); - }); - }, - function gatherACLs(bucket, objectMD, next) { - const verCfg = bucket.getVersioningConfiguration(); - const resVersionId = getVersionIdResHeader(verCfg, objectMD); - const objectACL = objectMD.acl; - grantInfo.ownerInfo.ID = objectMD['owner-id']; - grantInfo.ownerInfo.displayName = objectMD['owner-display-name']; - // Object owner always has full control - const ownerGrant = { - ID: objectMD['owner-id'], - displayName: objectMD['owner-display-name'], - permission: 'FULL_CONTROL', - }; - if (objectACL.Canned !== '') { - /** - * If bucket owner and object owner are different - * need to send info about bucket owner from bucket - * metadata to handleCannedGrant function - */ - let cannedGrants; - if (bucket.getOwner() !== objectMD['owner-id']) { - cannedGrants = aclUtils.handleCannedGrant( - objectACL.Canned, ownerGrant, bucket); - } else { - cannedGrants = aclUtils.handleCannedGrant( - objectACL.Canned, ownerGrant); + ); + }, + function gatherACLs(bucket, objectMD, next) { + const verCfg = bucket.getVersioningConfiguration(); + const resVersionId = getVersionIdResHeader(verCfg, objectMD); + const objectACL = objectMD.acl; + grantInfo.ownerInfo.ID = objectMD['owner-id']; + grantInfo.ownerInfo.displayName = objectMD['owner-display-name']; + // Object owner always has full control + const ownerGrant = { + ID: objectMD['owner-id'], + displayName: objectMD['owner-display-name'], + permission: 'FULL_CONTROL', + }; + if 
(objectACL.Canned !== '') { + /** + * If bucket owner and object owner are different + * need to send info about bucket owner from bucket + * metadata to handleCannedGrant function + */ + let cannedGrants; + if (bucket.getOwner() !== objectMD['owner-id']) { + cannedGrants = aclUtils.handleCannedGrant(objectACL.Canned, ownerGrant, bucket); + } else { + cannedGrants = aclUtils.handleCannedGrant(objectACL.Canned, ownerGrant); + } + grantInfo.grants = grantInfo.grants.concat(cannedGrants); + const xml = aclUtils.convertToXml(grantInfo); + return next(null, bucket, xml, resVersionId); } - grantInfo.grants = grantInfo.grants.concat(cannedGrants); - const xml = aclUtils.convertToXml(grantInfo); - return next(null, bucket, xml, resVersionId); - } - /** - * Build array of all canonicalIDs used in ACLs so duplicates - * will be retained (e.g. if an account has both read and write - * privileges, want to display both and not lose the duplicate - * when receive one dictionary entry back from Vault) - */ - const canonicalIDs = aclUtils.getCanonicalIDs(objectACL); - // Build array with grants by URI - const uriGrantInfo = aclUtils.getUriGrantInfo(objectACL); + /** + * Build array of all canonicalIDs used in ACLs so duplicates + * will be retained (e.g. if an account has both read and write + * privileges, want to display both and not lose the duplicate + * when receive one dictionary entry back from Vault) + */ + const canonicalIDs = aclUtils.getCanonicalIDs(objectACL); + // Build array with grants by URI + const uriGrantInfo = aclUtils.getUriGrantInfo(objectACL); - if (canonicalIDs.length === 0) { + if (canonicalIDs.length === 0) { + /** + * If no acl's set by account canonicalID, just add URI + * grants (if any) and return + */ + grantInfo.grants = grantInfo.grants.concat(uriGrantInfo); + const xml = aclUtils.convertToXml(grantInfo); + return next(null, bucket, xml, resVersionId); + } /** - * If no acl's set by account canonicalID, just add URI - * grants (if any) and return - */ - grantInfo.grants = grantInfo.grants.concat(uriGrantInfo); - const xml = aclUtils.convertToXml(grantInfo); - return next(null, bucket, xml, resVersionId); + * If acl's set by account canonicalID, + * get emails from Vault to serve + * as display names + */ + return vault.getEmailAddresses(canonicalIDs, log, (err, emails) => { + if (err) { + log.trace('error processing request', { method: 'objectGetACL', error: err }); + return next(err, bucket); + } + const individualGrants = aclUtils.getIndividualGrants(objectACL, canonicalIDs, emails); + // Add to grantInfo any individual grants and grants by uri + grantInfo.grants = grantInfo.grants.concat(individualGrants).concat(uriGrantInfo); + // parse info about accounts and owner info to convert to xml + const xml = aclUtils.convertToXml(grantInfo); + return next(null, bucket, xml, resVersionId); + }); + }, + ], + (err, bucket, xml, resVersionId) => { + const resHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + monitoring.promMetrics('GET', bucketName, err.code, 'getObjectAcl'); + return callback(err, null, resHeaders); } - /** - * If acl's set by account canonicalID, - * get emails from Vault to serve - * as display names - */ - return vault.getEmailAddresses(canonicalIDs, log, (err, emails) => { - if (err) { - log.trace('error processing request', - { method: 'objectGetACL', error: err }); - return next(err, bucket); - } - const individualGrants = aclUtils.getIndividualGrants(objectACL, - canonicalIDs, emails); - // Add to grantInfo any 
individual grants and grants by uri - grantInfo.grants = grantInfo.grants - .concat(individualGrants).concat(uriGrantInfo); - // parse info about accounts and owner info to convert to xml - const xml = aclUtils.convertToXml(grantInfo); - return next(null, bucket, xml, resVersionId); + pushMetric('getObjectAcl', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + versionId: resVersionId, + location: bucket ? bucket.getLocationConstraint() : undefined, }); - }, - ], (err, bucket, xml, resVersionId) => { - const resHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getObjectAcl'); - return callback(err, null, resHeaders); + monitoring.promMetrics('GET', bucketName, '200', 'getObjectAcl'); + resHeaders['x-amz-version-id'] = resVersionId; + return callback(null, xml, resHeaders); } - pushMetric('getObjectAcl', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - versionId: resVersionId, - location: bucket ? bucket.getLocationConstraint() : undefined, - }); - monitoring.promMetrics('GET', bucketName, '200', 'getObjectAcl'); - resHeaders['x-amz-version-id'] = resVersionId; - return callback(null, xml, resHeaders); - }); + ); } module.exports = objectGetACL; diff --git a/lib/api/objectGetLegalHold.js b/lib/api/objectGetLegalHold.js index 2f165748f3..5d99dd91c7 100644 --- a/lib/api/objectGetLegalHold.js +++ b/lib/api/objectGetLegalHold.js @@ -1,8 +1,7 @@ const async = require('async'); const { errors, errorInstances, s3middleware } = require('arsenal'); -const { decodeVersionId, getVersionIdResHeader } - = require('./apiUtils/object/versioning'); +const { decodeVersionId, getVersionIdResHeader } = require('./apiUtils/object/versioning'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); @@ -44,71 +43,73 @@ function objectGetLegalHold(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, - (err, bucket, objectMD) => { - if (err) { - log.trace('request authorization failed', - { method: 'objectGetLegalHold', error: err }); - return next(err); - } - if (!objectMD) { - const err = versionId ? errors.NoSuchVersion : - errors.NoSuchKey; - log.trace('error no object metadata found', - { method: 'objectGetLegalHold', error: err }); - return next(err, bucket); - } - if (objectMD.isDeleteMarker) { - if (versionId) { - log.trace('requested version is delete marker', - { method: 'objectGetLegalHold' }); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - return next(errors.MethodNotAllowed); + return async.waterfall( + [ + next => + standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, bucket, objectMD) => { + if (err) { + log.trace('request authorization failed', { method: 'objectGetLegalHold', error: err }); + return next(err); + } + if (!objectMD) { + const err = versionId ? 
errors.NoSuchVersion : errors.NoSuchKey; + log.trace('error no object metadata found', { method: 'objectGetLegalHold', error: err }); + return next(err, bucket); + } + if (objectMD.isDeleteMarker) { + if (versionId) { + log.trace('requested version is delete marker', { method: 'objectGetLegalHold' }); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + return next(errors.MethodNotAllowed); + } + log.trace('most recent version is delete marker', { method: 'objectGetLegalHold' }); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + return next(errors.NoSuchKey); + } + if (!bucket.isObjectLockEnabled()) { + log.trace('object lock not enabled on bucket', { method: 'objectGetRetention' }); + return next( + errorInstances.InvalidRequest.customizeDescription( + 'Bucket is missing Object Lock Configuration' + ) + ); + } + return next(null, bucket, objectMD); } - log.trace('most recent version is delete marker', - { method: 'objectGetLegalHold' }); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - return next(errors.NoSuchKey); - } - if (!bucket.isObjectLockEnabled()) { - log.trace('object lock not enabled on bucket', - { method: 'objectGetRetention' }); - return next(errorInstances.InvalidRequest.customizeDescription( - 'Bucket is missing Object Lock Configuration')); + ), + (bucket, objectMD, next) => { + const { legalHold } = objectMD; + const xml = convertToXml(legalHold); + if (xml === '') { + return next(errors.NoSuchObjectLockConfiguration); } - return next(null, bucket, objectMD); - }), - (bucket, objectMD, next) => { - const { legalHold } = objectMD; - const xml = convertToXml(legalHold); - if (xml === '') { - return next(errors.NoSuchObjectLockConfiguration); + return next(null, bucket, xml, objectMD); + }, + ], + (err, bucket, xml, objectMD) => { + const additionalResHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'objectGetLegalHold' }); + } else { + pushMetric('getObjectLegalHold', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + versionId: objectMD ? objectMD.versionId : undefined, + location: objectMD ? objectMD.dataStoreName : undefined, + }); + const verCfg = bucket.getVersioningConfiguration(); + additionalResHeaders['x-amz-version-id'] = getVersionIdResHeader(verCfg, objectMD); } - return next(null, bucket, xml, objectMD); - }, - ], (err, bucket, xml, objectMD) => { - const additionalResHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, - method: 'objectGetLegalHold' }); - } else { - pushMetric('getObjectLegalHold', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - versionId: objectMD ? objectMD.versionId : undefined, - location: objectMD ? 
objectMD.dataStoreName : undefined, - }); - const verCfg = bucket.getVersioningConfiguration(); - additionalResHeaders['x-amz-version-id'] = - getVersionIdResHeader(verCfg, objectMD); + return callback(err, xml, additionalResHeaders); } - return callback(err, xml, additionalResHeaders); - }); + ); } module.exports = objectGetLegalHold; diff --git a/lib/api/objectGetRetention.js b/lib/api/objectGetRetention.js index 3ac4c19c98..9279243660 100644 --- a/lib/api/objectGetRetention.js +++ b/lib/api/objectGetRetention.js @@ -1,8 +1,7 @@ const async = require('async'); const { errors, errorInstances, s3middleware } = require('arsenal'); -const { decodeVersionId, getVersionIdResHeader } - = require('./apiUtils/object/versioning'); +const { decodeVersionId, getVersionIdResHeader } = require('./apiUtils/object/versioning'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); @@ -44,71 +43,73 @@ function objectGetRetention(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, - (err, bucket, objectMD) => { - if (err) { - log.trace('request authorization failed', - { method: 'objectGetRetention', error: err }); - return next(err); - } - if (!objectMD) { - const err = reqVersionId ? errors.NoSuchVersion : - errors.NoSuchKey; - log.trace('error no object metadata found', - { method: 'objectGetRetention', error: err }); - return next(err, bucket); - } - if (objectMD.isDeleteMarker) { - if (reqVersionId) { - log.trace('requested version is delete marker', - { method: 'objectGetRetention' }); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - return next(errors.MethodNotAllowed); + return async.waterfall( + [ + next => + standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, bucket, objectMD) => { + if (err) { + log.trace('request authorization failed', { method: 'objectGetRetention', error: err }); + return next(err); + } + if (!objectMD) { + const err = reqVersionId ? 
errors.NoSuchVersion : errors.NoSuchKey; + log.trace('error no object metadata found', { method: 'objectGetRetention', error: err }); + return next(err, bucket); + } + if (objectMD.isDeleteMarker) { + if (reqVersionId) { + log.trace('requested version is delete marker', { method: 'objectGetRetention' }); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + return next(errors.MethodNotAllowed); + } + log.trace('most recent version is delete marker', { method: 'objectGetRetention' }); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + return next(errors.NoSuchKey); + } + if (!bucket.isObjectLockEnabled()) { + log.trace('object lock not enabled on bucket', { method: 'objectGetRetention' }); + return next( + errorInstances.InvalidRequest.customizeDescription( + 'Bucket is missing Object Lock Configuration' + ) + ); + } + return next(null, bucket, objectMD); } - log.trace('most recent version is delete marker', - { method: 'objectGetRetention' }); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - return next(errors.NoSuchKey); - } - if (!bucket.isObjectLockEnabled()) { - log.trace('object lock not enabled on bucket', - { method: 'objectGetRetention' }); - return next(errorInstances.InvalidRequest.customizeDescription( - 'Bucket is missing Object Lock Configuration')); + ), + (bucket, objectMD, next) => { + const { retentionMode, retentionDate } = objectMD; + if (!retentionMode || !retentionDate) { + return next(errors.NoSuchObjectLockConfiguration); } - return next(null, bucket, objectMD); - }), - (bucket, objectMD, next) => { - const { retentionMode, retentionDate } = objectMD; - if (!retentionMode || !retentionDate) { - return next(errors.NoSuchObjectLockConfiguration); + const xml = convertToXml(retentionMode, retentionDate); + return next(null, bucket, xml, objectMD); + }, + ], + (err, bucket, xml, objectMD) => { + const additionalResHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'objectGetRetention' }); + } else { + pushMetric('getObjectRetention', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + versionId: objectMD ? objectMD.versionId : undefined, + location: objectMD ? objectMD.dataStoreName : undefined, + }); + const verCfg = bucket.getVersioningConfiguration(); + additionalResHeaders['x-amz-version-id'] = getVersionIdResHeader(verCfg, objectMD); } - const xml = convertToXml(retentionMode, retentionDate); - return next(null, bucket, xml, objectMD); - }, - ], (err, bucket, xml, objectMD) => { - const additionalResHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, - method: 'objectGetRetention' }); - } else { - pushMetric('getObjectRetention', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - versionId: objectMD ? objectMD.versionId : undefined, - location: objectMD ? 
objectMD.dataStoreName : undefined, - }); - const verCfg = bucket.getVersioningConfiguration(); - additionalResHeaders['x-amz-version-id'] = - getVersionIdResHeader(verCfg, objectMD); + return callback(err, xml, additionalResHeaders); } - return callback(err, xml, additionalResHeaders); - }); + ); } module.exports = objectGetRetention; diff --git a/lib/api/objectGetTagging.js b/lib/api/objectGetTagging.js index 9d52b528a7..3c2b3b38c1 100644 --- a/lib/api/objectGetTagging.js +++ b/lib/api/objectGetTagging.js @@ -1,8 +1,7 @@ const async = require('async'); const { errors, s3middleware } = require('arsenal'); -const { decodeVersionId, getVersionIdResHeader } - = require('./apiUtils/object/versioning'); +const { decodeVersionId, getVersionIdResHeader } = require('./apiUtils/object/versioning'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); @@ -45,66 +44,64 @@ function objectGetTagging(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, - (err, bucket, objectMD) => { - if (err) { - log.trace('request authorization failed', - { method: 'objectGetTagging', error: err }); - return next(err); - } - if (!objectMD) { - const err = reqVersionId ? errors.NoSuchVersion : - errors.NoSuchKey; - log.trace('error no object metadata found', - { method: 'objectGetTagging', error: err }); - return next(err, bucket); - } - if (objectMD.isDeleteMarker) { - if (reqVersionId) { - log.trace('requested version is delete marker', - { method: 'objectGetTagging' }); - return next(errors.MethodNotAllowed); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - } - log.trace('most recent version is delete marker', - { method: 'objectGetTagging' }); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - return next(errors.NoSuchKey); - } - return next(null, bucket, objectMD); - }), - (bucket, objectMD, next) => { - const tags = objectMD.tags; - const xml = convertToXml(tags); - next(null, bucket, xml, objectMD); - }, - ], (err, bucket, xml, objectMD) => { - const additionalResHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, - method: 'objectGetTagging' }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getObjectTagging'); - } else { - pushMetric('getObjectTagging', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - versionId: objectMD ? objectMD.versionId : undefined, - location: objectMD ? objectMD.dataStoreName : undefined, - }); - monitoring.promMetrics( - 'GET', bucketName, '200', 'getObjectTagging'); - const verCfg = bucket.getVersioningConfiguration(); - additionalResHeaders['x-amz-version-id'] = - getVersionIdResHeader(verCfg, objectMD); + return async.waterfall( + [ + next => + standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, bucket, objectMD) => { + if (err) { + log.trace('request authorization failed', { method: 'objectGetTagging', error: err }); + return next(err); + } + if (!objectMD) { + const err = reqVersionId ? 
errors.NoSuchVersion : errors.NoSuchKey; + log.trace('error no object metadata found', { method: 'objectGetTagging', error: err }); + return next(err, bucket); + } + if (objectMD.isDeleteMarker) { + if (reqVersionId) { + log.trace('requested version is delete marker', { method: 'objectGetTagging' }); + return next(errors.MethodNotAllowed); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + } + log.trace('most recent version is delete marker', { method: 'objectGetTagging' }); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + return next(errors.NoSuchKey); + } + return next(null, bucket, objectMD); + } + ), + (bucket, objectMD, next) => { + const tags = objectMD.tags; + const xml = convertToXml(tags); + next(null, bucket, xml, objectMD); + }, + ], + (err, bucket, xml, objectMD) => { + const additionalResHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'objectGetTagging' }); + monitoring.promMetrics('GET', bucketName, err.code, 'getObjectTagging'); + } else { + pushMetric('getObjectTagging', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + versionId: objectMD ? objectMD.versionId : undefined, + location: objectMD ? objectMD.dataStoreName : undefined, + }); + monitoring.promMetrics('GET', bucketName, '200', 'getObjectTagging'); + const verCfg = bucket.getVersioningConfiguration(); + additionalResHeaders['x-amz-version-id'] = getVersionIdResHeader(verCfg, objectMD); + } + return callback(err, xml, additionalResHeaders); } - return callback(err, xml, additionalResHeaders); - }); + ); } module.exports = objectGetTagging; diff --git a/lib/api/objectHead.js b/lib/api/objectHead.js index 7c1d85c02a..dc8943e31b 100644 --- a/lib/api/objectHead.js +++ b/lib/api/objectHead.js @@ -8,8 +8,7 @@ const collectResponseHeaders = require('../utilities/collectResponseHeaders'); const { pushMetric } = require('../utapi/utilities'); const { getVersionIdResHeader } = require('./apiUtils/object/versioning'); const monitoring = require('../utilities/monitoringHandler'); -const { getPartNumber, getPartSize, getPartCountFromMd5 } = - require('./apiUtils/object/partInfo'); +const { getPartNumber, getPartSize, getPartCountFromMd5 } = require('./apiUtils/object/partInfo'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { maximumAllowedPartCount } = require('../../constants'); @@ -52,50 +51,40 @@ function objectHead(authInfo, request, log, callback) { request, }; - return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, - (err, bucket, objMD) => updateEncryption(err, bucket, objMD, objectKey, log, {}, - (err, bucket, objMD) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, (err, bucket, objMD) => + updateEncryption(err, bucket, objMD, objectKey, log, {}, (err, bucket, objMD) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); if (err) { log.debug('error validating request', { error: err, method: 'objectHead', }); - monitoring.promMetrics( - 'HEAD', bucketName, err.code, 'headObject'); + monitoring.promMetrics('HEAD', bucketName, err.code, 'headObject'); return callback(err, corsHeaders); } if (!objMD) { const err = versionId ? 
errors.NoSuchVersion : errors.NoSuchKey; - monitoring.promMetrics( - 'HEAD', bucketName, err.code, 'headObject'); + monitoring.promMetrics('HEAD', bucketName, err.code, 'headObject'); return callback(err, corsHeaders); } const verCfg = bucket.getVersioningConfiguration(); if (objMD.isDeleteMarker) { - const responseHeaders = Object.assign({}, - { 'x-amz-delete-marker': true }, corsHeaders); + const responseHeaders = Object.assign({}, { 'x-amz-delete-marker': true }, corsHeaders); if (!versionId) { - monitoring.promMetrics( - 'HEAD', bucketName, 404, 'headObject'); + monitoring.promMetrics('HEAD', bucketName, 404, 'headObject'); return callback(errors.NoSuchKey, responseHeaders); } // return MethodNotAllowed if requesting a specific // version that has a delete marker - responseHeaders['x-amz-version-id'] = - getVersionIdResHeader(verCfg, objMD); - monitoring.promMetrics( - 'HEAD', bucketName, 405, 'headObject'); + responseHeaders['x-amz-version-id'] = getVersionIdResHeader(verCfg, objMD); + monitoring.promMetrics('HEAD', bucketName, 405, 'headObject'); return callback(errors.MethodNotAllowed, responseHeaders); } - const headerValResult = validateHeaders(request.headers, - objMD['last-modified'], objMD['content-md5']); + const headerValResult = validateHeaders(request.headers, objMD['last-modified'], objMD['content-md5']); if (headerValResult.error) { return callback(headerValResult.error, corsHeaders); } - const responseHeaders = collectResponseHeaders(objMD, corsHeaders, - verCfg); + const responseHeaders = collectResponseHeaders(objMD, corsHeaders, verCfg); setExpirationHeaders(responseHeaders, { lifecycleConfig: bucket.getLifecycleConfiguration(), @@ -111,36 +100,31 @@ function objectHead(authInfo, request, log, callback) { Object.assign(responseHeaders, setArchiveInfoHeaders(objMD)); } - const objLength = (objMD.location === null ? - 0 : parseInt(objMD['content-length'], 10)); + const objLength = objMD.location === null ? 0 : parseInt(objMD['content-length'], 10); let byteRange; if (request.headers.range) { - const { range, error } - = parseRange(request.headers.range, objLength); + const { range, error } = parseRange(request.headers.range, objLength); if (error) { return callback(error, corsHeaders); } responseHeaders['accept-ranges'] = 'bytes'; if (range) { byteRange = range; - responseHeaders['content-length'] = - range[1] - range[0] + 1; - responseHeaders['content-range'] = - `bytes ${range[0]}-${range[1]}/${objLength}`; + responseHeaders['content-length'] = range[1] - range[0] + 1; + responseHeaders['content-range'] = `bytes ${range[0]}-${range[1]}/${objLength}`; } } const partNumber = getPartNumber(request.query); if (partNumber !== undefined) { if (byteRange) { - const error = errorInstances.InvalidRequest - .customizeDescription('Cannot specify both Range ' + - 'header and partNumber query parameter.'); + const error = errorInstances.InvalidRequest.customizeDescription( + 'Cannot specify both Range ' + 'header and partNumber query parameter.' 
+ ); return callback(error, corsHeaders); } if (Number.isNaN(partNumber)) { - const error = errorInstances.InvalidArgument - .customizeDescription('Part number must be a number.'); + const error = errorInstances.InvalidArgument.customizeDescription('Part number must be a number.'); return callback(error, corsHeaders); } if (partNumber < 1 || partNumber > maximumAllowedPartCount) { @@ -169,7 +153,8 @@ function objectHead(authInfo, request, log, callback) { }); monitoring.promMetrics('HEAD', bucketName, '200', 'headObject'); return callback(null, responseHeaders); - })); + }) + ); } module.exports = objectHead; diff --git a/lib/api/objectPut.js b/lib/api/objectPut.js index 66bf725c23..91c38a75ff 100644 --- a/lib/api/objectPut.js +++ b/lib/api/objectPut.js @@ -63,25 +63,15 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) { versionId = decodedVidResult; } - const { - bucketName, - headers, - method, - objectKey, - parsedContentLength, - query, - } = request; - if (headers['x-amz-storage-class'] && - !constants.validStorageClasses.includes(headers['x-amz-storage-class'])) { + const { bucketName, headers, method, objectKey, parsedContentLength, query } = request; + if (headers['x-amz-storage-class'] && !constants.validStorageClasses.includes(headers['x-amz-storage-class'])) { log.trace('invalid storage-class header'); - monitoring.promMetrics('PUT', request.bucketName, - errorInstances.InvalidStorageClass.code, 'putObject'); + monitoring.promMetrics('PUT', request.bucketName, errorInstances.InvalidStorageClass.code, 'putObject'); return callback(errors.InvalidStorageClass); } if (!aclUtils.checkGrantHeaderValidity(headers)) { log.trace('invalid acl header'); - monitoring.promMetrics('PUT', request.bucketName, 400, - 'putObject'); + monitoring.promMetrics('PUT', request.bucketName, 400, 'putObject'); return callback(errors.InvalidArgument); } const queryContainsVersionId = checkQueryVersionId(query); @@ -90,22 +80,21 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) { } const size = request.parsedContentLength; if (Number.parseInt(size, 10) > constants.maximumAllowedUploadSize) { - log.debug('Upload size exceeds maximum allowed for a single PUT', - { size }); + log.debug('Upload size exceeds maximum allowed for a single PUT', { size }); return callback(errors.EntityTooLarge); } const invalidSSEError = errorInstances.InvalidArgument.customizeDescription( - 'The encryption method specified is not supported'); + 'The encryption method specified is not supported' + ); const requestType = request.apiMethods || 'objectPut'; - const valParams = { authInfo, bucketName, objectKey, versionId, - requestType, request, withVersionId: isPutVersion }; + const valParams = { authInfo, bucketName, objectKey, versionId, requestType, request, withVersionId: isPutVersion }; const canonicalID = authInfo.getCanonicalID(); if (hasNonPrintables(objectKey)) { - return callback(errorInstances.InvalidInput.customizeDescription( - 'object keys cannot contain non-printable characters', - )); + return callback( + errorInstances.InvalidInput.customizeDescription('object keys cannot contain non-printable characters') + ); } const checksumHeaderErr = validateChecksumHeaders(headers); @@ -115,146 +104,164 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) { log.trace('owner canonicalID to send to data', { canonicalID }); - return standardMetadataValidateBucketAndObj(valParams, request.actionImplicitDenies, log, - (err, bucket, objMD) => 
updateEncryption(err, bucket, objMD, objectKey, log, { skipObject: true }, - (err, bucket, objMD) => { - const responseHeaders = collectCorsHeaders(headers.origin, - method, bucket); - if (err) { - log.trace('error processing request', { - error: err, - method: 'metadataValidateBucketAndObj', - }); - monitoring.promMetrics('PUT', bucketName, err.code, 'putObject'); - return callback(err, responseHeaders); - } - if (bucket.hasDeletedFlag() && canonicalID !== bucket.getOwner()) { - log.trace('deleted flag on bucket and request ' + - 'from non-owner account'); - monitoring.promMetrics('PUT', bucketName, 404, 'putObject'); - return callback(errors.NoSuchBucket); - } - - if (isPutVersion) { - const error = validatePutVersionId(objMD, putVersionId, log); - if (error) { - return callback(error); + return standardMetadataValidateBucketAndObj(valParams, request.actionImplicitDenies, log, (err, bucket, objMD) => + updateEncryption(err, bucket, objMD, objectKey, log, { skipObject: true }, (err, bucket, objMD) => { + const responseHeaders = collectCorsHeaders(headers.origin, method, bucket); + if (err) { + log.trace('error processing request', { + error: err, + method: 'metadataValidateBucketAndObj', + }); + monitoring.promMetrics('PUT', bucketName, err.code, 'putObject'); + return callback(err, responseHeaders); + } + if (bucket.hasDeletedFlag() && canonicalID !== bucket.getOwner()) { + log.trace('deleted flag on bucket and request ' + 'from non-owner account'); + monitoring.promMetrics('PUT', bucketName, 404, 'putObject'); + return callback(errors.NoSuchBucket); } - } - return async.waterfall([ - function handleTransientOrDeleteBuckets(next) { - if (bucket.hasTransientFlag() || bucket.hasDeletedFlag()) { - return cleanUpBucket(bucket, canonicalID, log, next); + if (isPutVersion) { + const error = validatePutVersionId(objMD, putVersionId, log); + if (error) { + return callback(error); } - return next(); - }, - function getSSEConfig(next) { - return getObjectSSEConfiguration(headers, bucket, log, - (err, sseConfig) => { - if (err) { - log.error('error getting server side encryption config', { err }); - return next(invalidSSEError); + } + + return async.waterfall( + [ + function handleTransientOrDeleteBuckets(next) { + if (bucket.hasTransientFlag() || bucket.hasDeletedFlag()) { + return cleanUpBucket(bucket, canonicalID, log, next); + } + return next(); + }, + function getSSEConfig(next) { + return getObjectSSEConfiguration(headers, bucket, log, (err, sseConfig) => { + if (err) { + log.error('error getting server side encryption config', { err }); + return next(invalidSSEError); + } + return next(null, sseConfig); + }); + }, + function createCipherBundle(serverSideEncryptionConfig, next) { + if (serverSideEncryptionConfig) { + return kms.createCipherBundle(serverSideEncryptionConfig, log, next); } - return next(null, sseConfig); + return next(null, null); + }, + function objectCreateAndStore(cipherBundle, next) { + const objectLockValidationError = validateHeaders(bucket, headers, log); + if (objectLockValidationError) { + return next(objectLockValidationError); + } + writeContinue(request, request._response); + return createAndStoreObject( + bucketName, + bucket, + objectKey, + objMD, + authInfo, + canonicalID, + cipherBundle, + request, + false, + streamingV4Params, + overheadField, + log, + 's3:ObjectCreated:Put', + next + ); + }, + ], + (err, storingResult) => { + if (err) { + monitoring.promMetrics('PUT', bucketName, err.code, 'putObject'); + return callback(err, responseHeaders); } - ); - }, - 
function createCipherBundle(serverSideEncryptionConfig, next) { - if (serverSideEncryptionConfig) { - return kms.createCipherBundle( - serverSideEncryptionConfig, log, next); - } - return next(null, null); - }, - function objectCreateAndStore(cipherBundle, next) { - const objectLockValidationError - = validateHeaders(bucket, headers, log); - if (objectLockValidationError) { - return next(objectLockValidationError); - } - writeContinue(request, request._response); - return createAndStoreObject(bucketName, - bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle, - request, false, streamingV4Params, overheadField, log, 's3:ObjectCreated:Put', next); - }, - ], (err, storingResult) => { - if (err) { - monitoring.promMetrics('PUT', bucketName, err.code, - 'putObject'); - return callback(err, responseHeaders); - } - // ingestSize assumes that these custom headers indicate - // an ingestion PUT which is a metadata only operation. - // Since these headers can be modified client side, they - // should be used with caution if needed for precise - // metrics. - const ingestSize = (request.headers['x-amz-meta-mdonly'] - && !Number.isNaN(request.headers['x-amz-meta-size'])) - ? Number.parseInt(request.headers['x-amz-meta-size'], 10) : null; - const newByteLength = parsedContentLength; + // ingestSize assumes that these custom headers indicate + // an ingestion PUT which is a metadata only operation. + // Since these headers can be modified client side, they + // should be used with caution if needed for precise + // metrics. + const ingestSize = + request.headers['x-amz-meta-mdonly'] && !Number.isNaN(request.headers['x-amz-meta-size']) + ? Number.parseInt(request.headers['x-amz-meta-size'], 10) + : null; + const newByteLength = parsedContentLength; - setExpirationHeaders(responseHeaders, { - lifecycleConfig: bucket.getLifecycleConfiguration(), - objectParams: { - key: objectKey, - date: storingResult.lastModified, - tags: storingResult.tags, - }, - }); + setExpirationHeaders(responseHeaders, { + lifecycleConfig: bucket.getLifecycleConfiguration(), + objectParams: { + key: objectKey, + date: storingResult.lastModified, + tags: storingResult.tags, + }, + }); - // Utapi expects null or a number for oldByteLength: - // * null - new object - // * 0 or > 0 - existing object with content-length 0 or > 0 - // objMD here is the master version that we would - // have overwritten if there was an existing version or object - // - // TODO: Handle utapi metrics for null version overwrites. - const oldByteLength = objMD && objMD['content-length'] - !== undefined ? objMD['content-length'] : null; - if (storingResult) { - // ETag's hex should always be enclosed in quotes - responseHeaders.ETag = `"${storingResult.contentMD5}"`; - } - const vcfg = bucket.getVersioningConfiguration(); - const isVersionedObj = vcfg && vcfg.Status === 'Enabled'; - if (isVersionedObj) { - if (storingResult && storingResult.versionId) { - responseHeaders['x-amz-version-id'] = - versionIdUtils.encode(storingResult.versionId); - } - } + // Utapi expects null or a number for oldByteLength: + // * null - new object + // * 0 or > 0 - existing object with content-length 0 or > 0 + // objMD here is the master version that we would + // have overwritten if there was an existing version or object + // + // TODO: Handle utapi metrics for null version overwrites. + const oldByteLength = + objMD && objMD['content-length'] !== undefined ? 
objMD['content-length'] : null; + if (storingResult) { + // ETag's hex should always be enclosed in quotes + responseHeaders.ETag = `"${storingResult.contentMD5}"`; + } + const vcfg = bucket.getVersioningConfiguration(); + const isVersionedObj = vcfg && vcfg.Status === 'Enabled'; + if (isVersionedObj) { + if (storingResult && storingResult.versionId) { + responseHeaders['x-amz-version-id'] = versionIdUtils.encode(storingResult.versionId); + } + } - // Only pre-existing non-versioned objects get 0 all others use 1 - const numberOfObjects = !isVersionedObj && oldByteLength !== null ? 0 : 1; + // Only pre-existing non-versioned objects get 0 all others use 1 + const numberOfObjects = !isVersionedObj && oldByteLength !== null ? 0 : 1; - // only the bucket owner's metrics should be updated, regardless of - // who the requester is - pushMetric('putObject', log, { - authInfo, - canonicalID: bucket.getOwner(), - bucket: bucketName, - keys: [objectKey], - newByteLength, - oldByteLength: isVersionedObj ? null : oldByteLength, - versionId: isVersionedObj && storingResult ? storingResult.versionId : undefined, - location: bucket.getLocationConstraint(), - numberOfObjects, - }); - monitoring.promMetrics('PUT', bucketName, '200', - 'putObject', newByteLength, oldByteLength, isVersionedObj, - null, ingestSize); + // only the bucket owner's metrics should be updated, regardless of + // who the requester is + pushMetric('putObject', log, { + authInfo, + canonicalID: bucket.getOwner(), + bucket: bucketName, + keys: [objectKey], + newByteLength, + oldByteLength: isVersionedObj ? null : oldByteLength, + versionId: isVersionedObj && storingResult ? storingResult.versionId : undefined, + location: bucket.getLocationConstraint(), + numberOfObjects, + }); + monitoring.promMetrics( + 'PUT', + bucketName, + '200', + 'putObject', + newByteLength, + oldByteLength, + isVersionedObj, + null, + ingestSize + ); - if (isPutVersion) { - const durationMs = Date.now() - new Date(objMD.archive.restoreRequestedAt); - monitoring.lifecycleDuration.observe( - { type: 'restore', location: objMD.dataStoreName }, - durationMs / 1000); - } + if (isPutVersion) { + const durationMs = Date.now() - new Date(objMD.archive.restoreRequestedAt); + monitoring.lifecycleDuration.observe( + { type: 'restore', location: objMD.dataStoreName }, + durationMs / 1000 + ); + } - return callback(null, responseHeaders); - }); - })); + return callback(null, responseHeaders); + } + ); + }) + ); } module.exports = objectPut; diff --git a/lib/api/objectPutACL.js b/lib/api/objectPutACL.js index 62045f7cc8..d89b4dc32d 100644 --- a/lib/api/objectPutACL.js +++ b/lib/api/objectPutACL.js @@ -7,8 +7,11 @@ const { pushMetric } = require('../utapi/utilities'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const constants = require('../../constants'); const vault = require('../auth/vault'); -const { decodeVersionId, getVersionIdResHeader, getVersionSpecificMetadataOptions } - = require('./apiUtils/object/versioning'); +const { + decodeVersionId, + getVersionIdResHeader, + getVersionSpecificMetadataOptions, +} = require('./apiUtils/object/versioning'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const monitoring = require('../utilities/monitoringHandler'); const { config } = require('../Config'); @@ -65,11 +68,7 @@ function objectPutACL(authInfo, request, log, cb) { monitoring.promMetrics('PUT', bucketName, 400, 'putObjectAcl'); return cb(errors.InvalidArgument); } - const possibleGroups = [ - 
constants.publicId, - constants.allAuthedUsersId, - constants.logId, - ]; + const possibleGroups = [constants.publicId, constants.allAuthedUsersId, constants.logId]; const decodedVidResult = decodeVersionId(request.query); if (decodedVidResult instanceof Error) { @@ -100,224 +99,217 @@ function objectPutACL(authInfo, request, log, cb) { READ_ACP: [], }; - const grantReadHeader = - aclUtils.parseGrant(request.headers['x-amz-grant-read'], 'READ'); - const grantReadACPHeader = - aclUtils.parseGrant(request.headers['x-amz-grant-read-acp'], - 'READ_ACP'); - const grantWriteACPHeader = aclUtils.parseGrant( - request.headers['x-amz-grant-write-acp'], 'WRITE_ACP'); - const grantFullControlHeader = aclUtils.parseGrant( - request.headers['x-amz-grant-full-control'], 'FULL_CONTROL'); + const grantReadHeader = aclUtils.parseGrant(request.headers['x-amz-grant-read'], 'READ'); + const grantReadACPHeader = aclUtils.parseGrant(request.headers['x-amz-grant-read-acp'], 'READ_ACP'); + const grantWriteACPHeader = aclUtils.parseGrant(request.headers['x-amz-grant-write-acp'], 'WRITE_ACP'); + const grantFullControlHeader = aclUtils.parseGrant(request.headers['x-amz-grant-full-control'], 'FULL_CONTROL'); - return async.waterfall([ - function validateBucketAndObj(next) { - return standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, - (err, bucket, objectMD) => { - if (err) { - return next(err); - } - if (!objectMD) { - const err = reqVersionId ? errors.NoSuchVersion : - errors.NoSuchKey; - return next(err, bucket); + return async.waterfall( + [ + function validateBucketAndObj(next) { + return standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, bucket, objectMD) => { + if (err) { + return next(err); + } + if (!objectMD) { + const err = reqVersionId ? 
errors.NoSuchVersion : errors.NoSuchKey; + return next(err, bucket); + } + if (objectMD.isDeleteMarker) { + log.trace('delete marker detected', { method: 'objectPutACL' }); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + return next(errors.MethodNotAllowed, bucket); + } + return next(null, bucket, objectMD); } - if (objectMD.isDeleteMarker) { - log.trace('delete marker detected', - { method: 'objectPutACL' }); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - return next(errors.MethodNotAllowed, bucket); + ); + }, + function parseAclFromXml(bucket, objectMD, next) { + // If not setting acl through headers, parse body + let jsonGrants; + let aclOwnerID; + if ( + newCannedACL === undefined && + grantReadHeader === undefined && + grantReadACPHeader === undefined && + grantWriteACPHeader === undefined && + grantFullControlHeader === undefined + ) { + if (request.post) { + log.trace('using acls from request body'); + return aclUtils.parseAclXml(request.post, log, (err, jsonGrants, aclOwnerID) => + next(err, bucket, objectMD, jsonGrants, aclOwnerID) + ); } - return next(null, bucket, objectMD); - }); - }, - function parseAclFromXml(bucket, objectMD, next) { - // If not setting acl through headers, parse body - let jsonGrants; - let aclOwnerID; - if (newCannedACL === undefined - && grantReadHeader === undefined - && grantReadACPHeader === undefined - && grantWriteACPHeader === undefined - && grantFullControlHeader === undefined) { - if (request.post) { - log.trace('using acls from request body'); - return aclUtils.parseAclXml(request.post, log, - (err, jsonGrants, aclOwnerID) => next(err, bucket, - objectMD, jsonGrants, aclOwnerID)); + // If no ACLs sent with request at all + return next(errors.MalformedXML, bucket); } - // If no ACLs sent with request at all - return next(errors.MalformedXML, bucket); - } - /** - * If acl set in headers (including canned acl) pass bucket and - * undefined to the next function - */ - log.debug('using acls from request headers'); - return next(null, bucket, objectMD, jsonGrants, aclOwnerID); - }, - function processAcls(bucket, objectMD, jsonGrants, aclOwnerID, next) { - if (newCannedACL) { - log.debug('canned acl', { cannedAcl: newCannedACL }); - addACLParams.Canned = newCannedACL; - return next(null, bucket, objectMD, addACLParams); - } - let usersIdentifiedByEmail = []; - let usersIdentifiedByGroup = []; - let usersIdentifiedByID = []; - let hasError = false; + /** + * If acl set in headers (including canned acl) pass bucket and + * undefined to the next function + */ + log.debug('using acls from request headers'); + return next(null, bucket, objectMD, jsonGrants, aclOwnerID); + }, + function processAcls(bucket, objectMD, jsonGrants, aclOwnerID, next) { + if (newCannedACL) { + log.debug('canned acl', { cannedAcl: newCannedACL }); + addACLParams.Canned = newCannedACL; + return next(null, bucket, objectMD, addACLParams); + } + let usersIdentifiedByEmail = []; + let usersIdentifiedByGroup = []; + let usersIdentifiedByID = []; + let hasError = false; - // If grants set by xml and xml owner ID is incorrect - if (aclOwnerID && (aclOwnerID !== objectMD['owner-id'])) { - log.trace('incorrect owner ID provided in ACL', { - ACL: request.post, - method: 'objectPutACL', - }); - return next(errors.AccessDenied, bucket); - } + // If grants set by xml and xml owner ID is incorrect + if (aclOwnerID && aclOwnerID !== objectMD['owner-id']) { + log.trace('incorrect owner ID provided in ACL', { + ACL: 
request.post, + method: 'objectPutACL', + }); + return next(errors.AccessDenied, bucket); + } - /** - * If grants set by xml, loop through the grants - * and separate grant types so parsed in same manner - * as header grants - */ - if (jsonGrants) { - log.trace('parsing acl grants'); - jsonGrants.forEach(grant => { - const grantee = grant.Grantee[0]; - const granteeType = grantee.$['xsi:type']; - const permission = grant.Permission[0]; - let skip = false; - if (possibleGrants.indexOf(permission) < 0) { - skip = true; - } - if (!skip && granteeType === 'AmazonCustomerByEmail') { - usersIdentifiedByEmail.push({ - identifier: grantee.EmailAddress[0], - grantType: permission, - userIDType: 'emailaddress', - }); - } - if (!skip && granteeType === 'CanonicalUser') { - usersIdentifiedByID.push({ - identifier: grantee.ID[0], - grantType: permission, - userIDType: 'id', - }); - } - if (!skip && granteeType === 'Group') { - if (possibleGroups.indexOf(grantee.URI[0]) < 0) { - log.trace('invalid user group', - { userGroup: grantee.URI[0] }); - hasError = true; - return next(errors.InvalidArgument, bucket); + /** + * If grants set by xml, loop through the grants + * and separate grant types so parsed in same manner + * as header grants + */ + if (jsonGrants) { + log.trace('parsing acl grants'); + jsonGrants.forEach(grant => { + const grantee = grant.Grantee[0]; + const granteeType = grantee.$['xsi:type']; + const permission = grant.Permission[0]; + let skip = false; + if (possibleGrants.indexOf(permission) < 0) { + skip = true; + } + if (!skip && granteeType === 'AmazonCustomerByEmail') { + usersIdentifiedByEmail.push({ + identifier: grantee.EmailAddress[0], + grantType: permission, + userIDType: 'emailaddress', + }); + } + if (!skip && granteeType === 'CanonicalUser') { + usersIdentifiedByID.push({ + identifier: grantee.ID[0], + grantType: permission, + userIDType: 'id', + }); } - return usersIdentifiedByGroup.push({ - identifier: grantee.URI[0], - grantType: permission, - userIDType: 'uri', - }); + if (!skip && granteeType === 'Group') { + if (possibleGroups.indexOf(grantee.URI[0]) < 0) { + log.trace('invalid user group', { userGroup: grantee.URI[0] }); + hasError = true; + return next(errors.InvalidArgument, bucket); + } + return usersIdentifiedByGroup.push({ + identifier: grantee.URI[0], + grantType: permission, + userIDType: 'uri', + }); + } + return undefined; + }); + if (hasError) { + return undefined; } - return undefined; - }); - if (hasError) { - return undefined; - } - } else { - // If no canned ACL and no parsed xml, loop - // through the access headers - const allGrantHeaders = - [].concat(grantReadHeader, - grantReadACPHeader, grantWriteACPHeader, - grantFullControlHeader); + } else { + // If no canned ACL and no parsed xml, loop + // through the access headers + const allGrantHeaders = [].concat( + grantReadHeader, + grantReadACPHeader, + grantWriteACPHeader, + grantFullControlHeader + ); - usersIdentifiedByEmail = allGrantHeaders.filter(item => - item && item.userIDType.toLowerCase() === 'emailaddress'); - usersIdentifiedByGroup = allGrantHeaders - .filter(itm => itm && itm.userIDType - .toLowerCase() === 'uri'); - for (let i = 0; i < usersIdentifiedByGroup.length; i++) { - if (possibleGroups.indexOf( - usersIdentifiedByGroup[i].identifier) < 0) { - log.trace('invalid user group', - { userGroup: usersIdentifiedByGroup[i] - .identifier }); - return next(errors.InvalidArgument, bucket); + usersIdentifiedByEmail = allGrantHeaders.filter( + item => item && item.userIDType.toLowerCase() === 
'emailaddress' + ); + usersIdentifiedByGroup = allGrantHeaders.filter( + itm => itm && itm.userIDType.toLowerCase() === 'uri' + ); + for (let i = 0; i < usersIdentifiedByGroup.length; i++) { + if (possibleGroups.indexOf(usersIdentifiedByGroup[i].identifier) < 0) { + log.trace('invalid user group', { userGroup: usersIdentifiedByGroup[i].identifier }); + return next(errors.InvalidArgument, bucket); + } } + /** TODO: Consider whether want to verify with Vault + * whether canonicalID is associated with existing + * account before adding to ACL */ + usersIdentifiedByID = allGrantHeaders.filter( + item => item && item.userIDType.toLowerCase() === 'id' + ); } - /** TODO: Consider whether want to verify with Vault - * whether canonicalID is associated with existing - * account before adding to ACL */ - usersIdentifiedByID = allGrantHeaders - .filter(item => item && item.userIDType - .toLowerCase() === 'id'); - } - const justEmails = usersIdentifiedByEmail - .map(item => item.identifier); - // If have to lookup canonicalID's do that asynchronously - if (justEmails.length > 0) { - return vault.getCanonicalIds( - justEmails, log, (err, results) => { + const justEmails = usersIdentifiedByEmail.map(item => item.identifier); + // If have to lookup canonicalID's do that asynchronously + if (justEmails.length > 0) { + return vault.getCanonicalIds(justEmails, log, (err, results) => { if (err) { - log.trace('error looking up canonical ids', - { error: err, method: 'getCanonicalIDs' }); + log.trace('error looking up canonical ids', { error: err, method: 'getCanonicalIDs' }); return next(err, bucket); } - const reconstructedUsersIdentifiedByEmail = aclUtils - .reconstructUsersIdentifiedByEmail(results, - usersIdentifiedByEmail); + const reconstructedUsersIdentifiedByEmail = aclUtils.reconstructUsersIdentifiedByEmail( + results, + usersIdentifiedByEmail + ); const allUsers = [].concat( reconstructedUsersIdentifiedByEmail, usersIdentifiedByID, - usersIdentifiedByGroup); - const revisedAddACLParams = aclUtils - .sortHeaderGrants(allUsers, addACLParams); - return next(null, bucket, objectMD, - revisedAddACLParams); + usersIdentifiedByGroup + ); + const revisedAddACLParams = aclUtils.sortHeaderGrants(allUsers, addACLParams); + return next(null, bucket, objectMD, revisedAddACLParams); }); + } + const allUsers = [].concat(usersIdentifiedByID, usersIdentifiedByGroup); + const revisedAddACLParams = aclUtils.sortHeaderGrants(allUsers, addACLParams); + return next(null, bucket, objectMD, revisedAddACLParams); + }, + function addAclsToObjMD(bucket, objectMD, ACLParams, next) { + // Add acl's to object metadata + const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode); + acl.addObjectACL(bucket, objectKey, objectMD, ACLParams, params, log, err => + next(err, bucket, objectMD) + ); + }, + ], + (err, bucket, objectMD) => { + const resHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { + error: err, + method: 'objectPutACL', + }); + monitoring.promMetrics('PUT', bucketName, err.code, 'putObjectAcl'); + return cb(err, resHeaders); } - const allUsers = [].concat( - usersIdentifiedByID, - usersIdentifiedByGroup); - const revisedAddACLParams = - aclUtils.sortHeaderGrants(allUsers, addACLParams); - return next(null, bucket, objectMD, revisedAddACLParams); - }, - function addAclsToObjMD(bucket, objectMD, ACLParams, next) { - // Add acl's to object metadata - const params = getVersionSpecificMetadataOptions(objectMD, 
config.nullVersionCompatMode); - acl.addObjectACL(bucket, objectKey, objectMD, - ACLParams, params, log, err => next(err, bucket, objectMD)); - }, - ], (err, bucket, objectMD) => { - const resHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { - error: err, - method: 'objectPutACL', - }); - monitoring.promMetrics( - 'PUT', bucketName, err.code, 'putObjectAcl'); - return cb(err, resHeaders); - } - const verCfg = bucket.getVersioningConfiguration(); - resHeaders['x-amz-version-id'] = - getVersionIdResHeader(verCfg, objectMD); + const verCfg = bucket.getVersioningConfiguration(); + resHeaders['x-amz-version-id'] = getVersionIdResHeader(verCfg, objectMD); - log.trace('processed request successfully in object put acl api'); - pushMetric('putObjectAcl', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - versionId: objectMD ? objectMD.versionId : undefined, - location: objectMD ? objectMD.dataStoreName : undefined, - }); - monitoring.promMetrics('PUT', bucketName, '200', 'putObjectAcl'); - return cb(null, resHeaders); - }); + log.trace('processed request successfully in object put acl api'); + pushMetric('putObjectAcl', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + versionId: objectMD ? objectMD.versionId : undefined, + location: objectMD ? objectMD.dataStoreName : undefined, + }); + monitoring.promMetrics('PUT', bucketName, '200', 'putObjectAcl'); + return cb(null, resHeaders); + } + ); } module.exports = objectPutACL; diff --git a/lib/api/objectPutCopyPart.js b/lib/api/objectPutCopyPart.js index ed43082252..74fb2d3dfa 100644 --- a/lib/api/objectPutCopyPart.js +++ b/lib/api/objectPutCopyPart.js @@ -5,8 +5,7 @@ const validateHeaders = s3middleware.validateConditionalHeaders; const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const constants = require('../../constants'); const { data } = require('../data/wrapper'); -const locationConstraintCheck = - require('./apiUtils/object/locationConstraintCheck'); +const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck'); const metadata = require('../metadata/wrapper'); const { pushMetric } = require('../utapi/utilities'); const services = require('../services'); @@ -34,8 +33,7 @@ const skipError = new Error('skip'); * @param {function} callback - final callback to call with the result * @return {undefined} */ -function objectPutCopyPart(authInfo, request, sourceBucket, - sourceObject, reqVersionId, log, callback) { +function objectPutCopyPart(authInfo, request, sourceBucket, sourceObject, reqVersionId, log, callback) { log.debug('processing request', { method: 'objectPutCopyPart' }); const destBucketName = request.bucketName; const destObjectKey = request.objectKey; @@ -61,8 +59,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket, const partNumber = Number.parseInt(request.query.partNumber, 10); // AWS caps partNumbers at 10,000 if (partNumber > 10000 || !Number.isInteger(partNumber) || partNumber < 1) { - monitoring.promMetrics('PUT', destBucketName, 400, - 'putObjectCopyPart'); + monitoring.promMetrics('PUT', destBucketName, 400, 'putObjectCopyPart'); return callback(errors.InvalidArgument); } // We pad the partNumbers so that the parts will be sorted @@ -71,7 +68,9 @@ function objectPutCopyPart(authInfo, request, sourceBucket, // Note that keys in the query object retain their case, so // request.query.uploadId must be called with that exact // capitalization - const { query: { 
uploadId } } = request; + const { + query: { uploadId }, + } = request; const valPutParams = { authInfo, @@ -86,10 +85,13 @@ function objectPutCopyPart(authInfo, request, sourceBucket, // as validating for the destination bucket except additionally need // the uploadId and splitter. // Also, requestType is 'putPart or complete' - const valMPUParams = Object.assign({ - uploadId, - splitter: constants.splitter, - }, valPutParams); + const valMPUParams = Object.assign( + { + uploadId, + splitter: constants.splitter, + }, + valPutParams + ); valMPUParams.requestType = 'putPart or complete'; const dataStoreContext = { @@ -102,113 +104,143 @@ function objectPutCopyPart(authInfo, request, sourceBucket, enableQuota: true, }; - return async.waterfall([ - function checkDestAuth(next) { - return standardMetadataValidateBucketAndObj(valPutParams, request.actionImplicitDenies, log, - (err, destBucketMD) => { - if (err) { - log.debug('error validating authorization for ' + - 'destination bucket', - { error: err }); - return next(err, destBucketMD); - } - const flag = destBucketMD.hasDeletedFlag() - || destBucketMD.hasTransientFlag(); - if (flag) { - log.trace('deleted flag or transient flag ' + - 'on destination bucket', { flag }); - return next(errors.NoSuchBucket); - } - return next(null, destBucketMD); - }); - }, - function checkSourceAuthorization(destBucketMD, next) { - return standardMetadataValidateBucketAndObj(valGetParams, request.actionImplicitDenies, log, - (err, sourceBucketMD, sourceObjMD) => { - if (err) { - log.debug('error validating get part of request', - { error: err }); - return next(err, destBucketMD); - } - if (!sourceObjMD) { - log.debug('no source object', { sourceObject }); - const err = reqVersionId ? errors.NoSuchVersion : - errors.NoSuchKey; - return next(err, destBucketMD); - } - let sourceLocationConstraintName = - sourceObjMD.dataStoreName; - // for backwards compatibility before storing dataStoreName - // TODO: handle in objectMD class - if (!sourceLocationConstraintName && - sourceObjMD.location[0] && - sourceObjMD.location[0].dataStoreName) { - sourceLocationConstraintName = - sourceObjMD.location[0].dataStoreName; - } - // check if object data is in a cold storage - const coldErr = verifyColdObjectAvailable(sourceObjMD); - if (coldErr) { - return next(coldErr, null); - } - if (sourceObjMD.isDeleteMarker) { - log.debug('delete marker on source object', - { sourceObject }); - if (reqVersionId) { - const err = errorInstances.InvalidRequest - .customizeDescription('The source of a copy ' + - 'request may not specifically refer to a delete' + - 'marker by version id.'); + return async.waterfall( + [ + function checkDestAuth(next) { + return standardMetadataValidateBucketAndObj( + valPutParams, + request.actionImplicitDenies, + log, + (err, destBucketMD) => { + if (err) { + log.debug('error validating authorization for ' + 'destination bucket', { error: err }); return next(err, destBucketMD); } - // if user specifies a key in a versioned source bucket - // without specifying a version, and the object has a - // delete marker, return NoSuchKey - return next(errors.NoSuchKey, destBucketMD); - } - const headerValResult = - validateHeaders(request.headers, - sourceObjMD['last-modified'], - sourceObjMD['content-md5']); - if (headerValResult.error) { - return next(errors.PreconditionFailed, destBucketMD); - } - const copyLocator = setUpCopyLocator(sourceObjMD, - request.headers['x-amz-copy-source-range'], log); - if (copyLocator.error) { - return next(copyLocator.error, 
destBucketMD); + const flag = destBucketMD.hasDeletedFlag() || destBucketMD.hasTransientFlag(); + if (flag) { + log.trace('deleted flag or transient flag ' + 'on destination bucket', { flag }); + return next(errors.NoSuchBucket); + } + return next(null, destBucketMD); } - let sourceVerId; - // If specific version requested, include copy source - // version id in response. Include in request by default - // if versioning is enabled or suspended. - if (sourceBucketMD.getVersioningConfiguration() || - reqVersionId) { - if (sourceObjMD.isNull || !sourceObjMD.versionId) { - sourceVerId = 'null'; - } else { - sourceVerId = - versionIdUtils.encode(sourceObjMD.versionId); + ); + }, + function checkSourceAuthorization(destBucketMD, next) { + return standardMetadataValidateBucketAndObj( + valGetParams, + request.actionImplicitDenies, + log, + (err, sourceBucketMD, sourceObjMD) => { + if (err) { + log.debug('error validating get part of request', { error: err }); + return next(err, destBucketMD); + } + if (!sourceObjMD) { + log.debug('no source object', { sourceObject }); + const err = reqVersionId ? errors.NoSuchVersion : errors.NoSuchKey; + return next(err, destBucketMD); + } + let sourceLocationConstraintName = sourceObjMD.dataStoreName; + // for backwards compatibility before storing dataStoreName + // TODO: handle in objectMD class + if ( + !sourceLocationConstraintName && + sourceObjMD.location[0] && + sourceObjMD.location[0].dataStoreName + ) { + sourceLocationConstraintName = sourceObjMD.location[0].dataStoreName; + } + // check if object data is in a cold storage + const coldErr = verifyColdObjectAvailable(sourceObjMD); + if (coldErr) { + return next(coldErr, null); + } + if (sourceObjMD.isDeleteMarker) { + log.debug('delete marker on source object', { sourceObject }); + if (reqVersionId) { + const err = errorInstances.InvalidRequest.customizeDescription( + 'The source of a copy ' + + 'request may not specifically refer to a delete' + + 'marker by version id.' + ); + return next(err, destBucketMD); + } + // if user specifies a key in a versioned source bucket + // without specifying a version, and the object has a + // delete marker, return NoSuchKey + return next(errors.NoSuchKey, destBucketMD); } + const headerValResult = validateHeaders( + request.headers, + sourceObjMD['last-modified'], + sourceObjMD['content-md5'] + ); + if (headerValResult.error) { + return next(errors.PreconditionFailed, destBucketMD); + } + const copyLocator = setUpCopyLocator( + sourceObjMD, + request.headers['x-amz-copy-source-range'], + log + ); + if (copyLocator.error) { + return next(copyLocator.error, destBucketMD); + } + let sourceVerId; + // If specific version requested, include copy source + // version id in response. Include in request by default + // if versioning is enabled or suspended. 
+ if (sourceBucketMD.getVersioningConfiguration() || reqVersionId) { + if (sourceObjMD.isNull || !sourceObjMD.versionId) { + sourceVerId = 'null'; + } else { + sourceVerId = versionIdUtils.encode(sourceObjMD.versionId); + } + } + return next( + null, + copyLocator.dataLocator, + destBucketMD, + copyLocator.copyObjectSize, + sourceVerId, + sourceLocationConstraintName, + sourceObjMD + ); } - return next(null, copyLocator.dataLocator, destBucketMD, - copyLocator.copyObjectSize, sourceVerId, - sourceLocationConstraintName, sourceObjMD); - }); - }, - function _validateQuotas(dataLocator, destBucketMD, - copyObjectSize, sourceVerId, - sourceLocationConstraintName, sourceObjMD, next) { - return validateQuotas(request, destBucketMD, request.accountQuotas, valPutParams.requestType, - request.apiMethod, sourceObjMD?.['content-length'] || 0, false, log, err => - next(err, dataLocator, destBucketMD, copyObjectSize, sourceVerId, sourceLocationConstraintName)); - }, - // get MPU shadow bucket to get splitter based on MD version - function getMpuShadowBucket(dataLocator, destBucketMD, - copyObjectSize, sourceVerId, - sourceLocationConstraintName, next) { - return metadata.getBucket(mpuBucketName, log, - (err, mpuBucket) => { + ); + }, + function _validateQuotas( + dataLocator, + destBucketMD, + copyObjectSize, + sourceVerId, + sourceLocationConstraintName, + sourceObjMD, + next + ) { + return validateQuotas( + request, + destBucketMD, + request.accountQuotas, + valPutParams.requestType, + request.apiMethod, + sourceObjMD?.['content-length'] || 0, + false, + log, + err => + next(err, dataLocator, destBucketMD, copyObjectSize, sourceVerId, sourceLocationConstraintName) + ); + }, + // get MPU shadow bucket to get splitter based on MD version + function getMpuShadowBucket( + dataLocator, + destBucketMD, + copyObjectSize, + sourceVerId, + sourceLocationConstraintName, + next + ) { + return metadata.getBucket(mpuBucketName, log, (err, mpuBucket) => { // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver if (err && err.NoSuchBucket) { return next(errors.NoSuchUpload); @@ -224,103 +256,138 @@ function objectPutCopyPart(authInfo, request, sourceBucket, if (mpuBucket.getMdBucketModelVersion() < 2) { splitter = constants.oldSplitter; } - return next(null, dataLocator, destBucketMD, - copyObjectSize, sourceVerId, splitter, - sourceLocationConstraintName); + return next( + null, + dataLocator, + destBucketMD, + copyObjectSize, + sourceVerId, + splitter, + sourceLocationConstraintName + ); }); - }, - // Get MPU overview object to check authorization to put a part - // and to get any object location constraint info - function getMpuOverviewObject(dataLocator, destBucketMD, - copyObjectSize, sourceVerId, splitter, - sourceLocationConstraintName, next) { - const mpuOverviewKey = - `overview${splitter}${destObjectKey}${splitter}${uploadId}`; - return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, - null, log, (err, res) => { - if (err) { - // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver - if (err.NoSuchKey) { - return next(errors.NoSuchUpload); - } - log.error('error getting overview object from ' + - 'mpu bucket', { - error: err, - method: 'objectPutCopyPart::' + - 'metadata.getObjectMD', - }); - return next(err); - } - const initiatorID = res.initiator.ID; - const requesterID = authInfo.isRequesterAnIAMUser() ? 
- authInfo.getArn() : authInfo.getCanonicalID(); - if (initiatorID !== requesterID) { - return next(errors.AccessDenied); - } - const destObjLocationConstraint = - res.controllingLocationConstraint; - const sseAlgo = res['x-amz-server-side-encryption']; - const sse = sseAlgo ? { - algorithm: sseAlgo, - masterKeyId: res['x-amz-server-side-encryption-aws-kms-key-id'], - } : null; - return next(null, dataLocator, destBucketMD, - destObjLocationConstraint, copyObjectSize, - sourceVerId, sourceLocationConstraintName, sse, splitter); - }); - }, - function goGetData( - dataLocator, - destBucketMD, - destObjLocationConstraint, - copyObjectSize, - sourceVerId, - sourceLocationConstraintName, - sse, - splitter, - next, - ) { - const originalIdentityAuthzResults = request.actionImplicitDenies; - // eslint-disable-next-line no-param-reassign - delete request.actionImplicitDenies; - data.uploadPartCopy( - request, - log, + }, + // Get MPU overview object to check authorization to put a part + // and to get any object location constraint info + function getMpuOverviewObject( + dataLocator, destBucketMD, + copyObjectSize, + sourceVerId, + splitter, sourceLocationConstraintName, - destObjLocationConstraint, + next + ) { + const mpuOverviewKey = `overview${splitter}${destObjectKey}${splitter}${uploadId}`; + return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, null, log, (err, res) => { + if (err) { + // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver + if (err.NoSuchKey) { + return next(errors.NoSuchUpload); + } + log.error('error getting overview object from ' + 'mpu bucket', { + error: err, + method: 'objectPutCopyPart::' + 'metadata.getObjectMD', + }); + return next(err); + } + const initiatorID = res.initiator.ID; + const requesterID = authInfo.isRequesterAnIAMUser() ? authInfo.getArn() : authInfo.getCanonicalID(); + if (initiatorID !== requesterID) { + return next(errors.AccessDenied); + } + const destObjLocationConstraint = res.controllingLocationConstraint; + const sseAlgo = res['x-amz-server-side-encryption']; + const sse = sseAlgo + ? 
{ + algorithm: sseAlgo, + masterKeyId: res['x-amz-server-side-encryption-aws-kms-key-id'], + } + : null; + return next( + null, + dataLocator, + destBucketMD, + destObjLocationConstraint, + copyObjectSize, + sourceVerId, + sourceLocationConstraintName, + sse, + splitter + ); + }); + }, + function goGetData( dataLocator, - dataStoreContext, - locationConstraintCheck, + destBucketMD, + destObjLocationConstraint, + copyObjectSize, + sourceVerId, + sourceLocationConstraintName, sse, - (error, eTag, lastModified, serverSideEncryption, locations) => { - // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = originalIdentityAuthzResults; - if (error) { - if (error.message === 'skip') { - return next(skipError, destBucketMD, eTag, - lastModified, sourceVerId, - serverSideEncryption); + splitter, + next + ) { + const originalIdentityAuthzResults = request.actionImplicitDenies; + // eslint-disable-next-line no-param-reassign + delete request.actionImplicitDenies; + data.uploadPartCopy( + request, + log, + destBucketMD, + sourceLocationConstraintName, + destObjLocationConstraint, + dataLocator, + dataStoreContext, + locationConstraintCheck, + sse, + (error, eTag, lastModified, serverSideEncryption, locations) => { + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = originalIdentityAuthzResults; + if (error) { + if (error.message === 'skip') { + return next( + skipError, + destBucketMD, + eTag, + lastModified, + sourceVerId, + serverSideEncryption + ); + } + return next(error, destBucketMD); } - return next(error, destBucketMD); + return next( + null, + destBucketMD, + locations, + eTag, + copyObjectSize, + sourceVerId, + serverSideEncryption, + lastModified, + splitter + ); } - return next(null, destBucketMD, locations, eTag, - copyObjectSize, sourceVerId, serverSideEncryption, - lastModified, splitter); - }); - }, - function getExistingPartInfo(destBucketMD, locations, totalHash, - copyObjectSize, sourceVerId, serverSideEncryption, lastModified, - splitter, next) { - const partKey = - `${uploadId}${constants.splitter}${paddedPartNumber}`; - metadata.getObjectMD(mpuBucketName, partKey, {}, log, - (err, result) => { + ); + }, + function getExistingPartInfo( + destBucketMD, + locations, + totalHash, + copyObjectSize, + sourceVerId, + serverSideEncryption, + lastModified, + splitter, + next + ) { + const partKey = `${uploadId}${constants.splitter}${paddedPartNumber}`; + metadata.getObjectMD(mpuBucketName, partKey, {}, log, (err, result) => { // If there is nothing being overwritten just move on // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver if (err && !err.NoSuchKey) { - log.debug('error getting current part (if any)', - { error: err }); + log.debug('error getting current part (if any)', { error: err }); return next(err); } let oldLocations; @@ -331,141 +398,225 @@ function objectPutCopyPart(authInfo, request, sourceBucket, // Pull locations to clean up any potential orphans // in data if object put is an overwrite of // already existing object with same key and part number - oldLocations = Array.isArray(oldLocations) ? - oldLocations : [oldLocations]; + oldLocations = Array.isArray(oldLocations) ? 
oldLocations : [oldLocations]; } - return next(null, destBucketMD, locations, totalHash, - prevObjectSize, copyObjectSize, sourceVerId, - serverSideEncryption, lastModified, oldLocations, splitter); + return next( + null, + destBucketMD, + locations, + totalHash, + prevObjectSize, + copyObjectSize, + sourceVerId, + serverSideEncryption, + lastModified, + oldLocations, + splitter + ); }); - }, - function storeNewPartMetadata(destBucketMD, locations, totalHash, - prevObjectSize, copyObjectSize, sourceVerId, serverSideEncryption, - lastModified, oldLocations, splitter, next) { - const metaStoreParams = { - partNumber: paddedPartNumber, - contentMD5: totalHash, - size: copyObjectSize, - uploadId, - splitter: constants.splitter, + }, + function storeNewPartMetadata( + destBucketMD, + locations, + totalHash, + prevObjectSize, + copyObjectSize, + sourceVerId, + serverSideEncryption, lastModified, - overheadField: constants.overheadField, - ownerId: destBucketMD.getOwner(), - }; - return services.metadataStorePart(mpuBucketName, - locations, metaStoreParams, log, err => { + oldLocations, + splitter, + next + ) { + const metaStoreParams = { + partNumber: paddedPartNumber, + contentMD5: totalHash, + size: copyObjectSize, + uploadId, + splitter: constants.splitter, + lastModified, + overheadField: constants.overheadField, + ownerId: destBucketMD.getOwner(), + }; + return services.metadataStorePart(mpuBucketName, locations, metaStoreParams, log, err => { if (err) { - log.debug('error storing new metadata', - { error: err, method: 'storeNewPartMetadata' }); + log.debug('error storing new metadata', { error: err, method: 'storeNewPartMetadata' }); return next(err); } - return next(null, locations, oldLocations, destBucketMD, totalHash, - lastModified, sourceVerId, serverSideEncryption, - prevObjectSize, copyObjectSize, splitter); + return next( + null, + locations, + oldLocations, + destBucketMD, + totalHash, + lastModified, + sourceVerId, + serverSideEncryption, + prevObjectSize, + copyObjectSize, + splitter + ); }); - }, - function checkCanDeleteOldLocations(partLocations, oldLocations, destBucketMD, - totalHash, lastModified, sourceVerId, serverSideEncryption, - prevObjectSize, copyObjectSize, splitter, next) { - if (!oldLocations) { - return next(null, oldLocations, destBucketMD, totalHash, - lastModified, sourceVerId, serverSideEncryption, - prevObjectSize, copyObjectSize); - } - return services.isCompleteMPUInProgress({ - bucketName: destBucketName, - objectKey: destObjectKey, - uploadId, + }, + function checkCanDeleteOldLocations( + partLocations, + oldLocations, + destBucketMD, + totalHash, + lastModified, + sourceVerId, + serverSideEncryption, + prevObjectSize, + copyObjectSize, splitter, - }, log, (err, completeInProgress) => { - if (err) { - return next(err, destBucketMD); + next + ) { + if (!oldLocations) { + return next( + null, + oldLocations, + destBucketMD, + totalHash, + lastModified, + sourceVerId, + serverSideEncryption, + prevObjectSize, + copyObjectSize + ); } - let oldLocationsToDelete = oldLocations; - // Prevent deletion of old data if a completeMPU - // is already in progress because then there is no - // guarantee that the old location will not be the - // committed one. 
- if (completeInProgress) { - log.warn('not deleting old locations because CompleteMPU is in progress', { - method: 'objectPutCopyPart::checkCanDeleteOldLocations', + return services.isCompleteMPUInProgress( + { bucketName: destBucketName, objectKey: destObjectKey, uploadId, - partLocations, - oldLocations, - }); - oldLocationsToDelete = null; - } - return next(null, oldLocationsToDelete, destBucketMD, totalHash, - lastModified, sourceVerId, serverSideEncryption, - prevObjectSize, copyObjectSize); - }); - }, - function cleanupExistingData(oldLocationsToDelete, destBucketMD, totalHash, - lastModified, sourceVerId, serverSideEncryption, - prevObjectSize, copyObjectSize, next) { - // Clean up the old data now that new metadata (with new - // data locations) has been stored - if (oldLocationsToDelete) { - return data.batchDelete(oldLocationsToDelete, request.method, null, - log, err => { + splitter, + }, + log, + (err, completeInProgress) => { + if (err) { + return next(err, destBucketMD); + } + let oldLocationsToDelete = oldLocations; + // Prevent deletion of old data if a completeMPU + // is already in progress because then there is no + // guarantee that the old location will not be the + // committed one. + if (completeInProgress) { + log.warn('not deleting old locations because CompleteMPU is in progress', { + method: 'objectPutCopyPart::checkCanDeleteOldLocations', + bucketName: destBucketName, + objectKey: destObjectKey, + uploadId, + partLocations, + oldLocations, + }); + oldLocationsToDelete = null; + } + return next( + null, + oldLocationsToDelete, + destBucketMD, + totalHash, + lastModified, + sourceVerId, + serverSideEncryption, + prevObjectSize, + copyObjectSize + ); + } + ); + }, + function cleanupExistingData( + oldLocationsToDelete, + destBucketMD, + totalHash, + lastModified, + sourceVerId, + serverSideEncryption, + prevObjectSize, + copyObjectSize, + next + ) { + // Clean up the old data now that new metadata (with new + // data locations) has been stored + if (oldLocationsToDelete) { + return data.batchDelete(oldLocationsToDelete, request.method, null, log, err => { if (err) { // if error, log the error and move on as it is not // relevant to the client as the client's // object already succeeded putting data, metadata - log.error('error deleting existing data', - { error: err }); + log.error('error deleting existing data', { error: err }); } - return next(null, destBucketMD, totalHash, - lastModified, sourceVerId, serverSideEncryption, - prevObjectSize, copyObjectSize); + return next( + null, + destBucketMD, + totalHash, + lastModified, + sourceVerId, + serverSideEncryption, + prevObjectSize, + copyObjectSize + ); }); + } + return next( + null, + destBucketMD, + totalHash, + lastModified, + sourceVerId, + serverSideEncryption, + prevObjectSize, + copyObjectSize + ); + }, + ], + ( + err, + destBucketMD, + totalHash, + lastModified, + sourceVerId, + serverSideEncryption, + prevObjectSize, + copyObjectSize + ) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destBucketMD); + if (err && err !== skipError) { + log.trace('error from copy part waterfall', { error: err }); + monitoring.promMetrics('PUT', destBucketName, err.code, 'putObjectCopyPart'); + return callback(err, null, corsHeaders); } - return next(null, destBucketMD, totalHash, - lastModified, sourceVerId, serverSideEncryption, - prevObjectSize, copyObjectSize); - }, - ], (err, destBucketMD, totalHash, lastModified, sourceVerId, - serverSideEncryption, prevObjectSize, copyObjectSize) 
=> { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, destBucketMD); - if (err && err !== skipError) { - log.trace('error from copy part waterfall', - { error: err }); - monitoring.promMetrics('PUT', destBucketName, err.code, - 'putObjectCopyPart'); - return callback(err, null, corsHeaders); - } - const xml = [ - '<?xml version="1.0" encoding="UTF-8"?>', - '<CopyPartResult>', - '<LastModified>', new Date(lastModified) - .toISOString(), '</LastModified>', - '<ETag>&quot;', totalHash, '&quot;</ETag>', - '</CopyPartResult>', - ].join(''); + const xml = [ + '<?xml version="1.0" encoding="UTF-8"?>', + '<CopyPartResult>', + '<LastModified>', + new Date(lastModified).toISOString(), + '</LastModified>', + '<ETag>&quot;', + totalHash, + '&quot;</ETag>', + '</CopyPartResult>', + ].join('');

 - const additionalHeaders = corsHeaders || {}; - if (serverSideEncryption) { - setSSEHeaders(additionalHeaders, - serverSideEncryption.algorithm, - serverSideEncryption.masterKeyId); + const additionalHeaders = corsHeaders || {}; + if (serverSideEncryption) { + setSSEHeaders(additionalHeaders, serverSideEncryption.algorithm, serverSideEncryption.masterKeyId); + } + additionalHeaders['x-amz-copy-source-version-id'] = sourceVerId; + pushMetric('uploadPartCopy', log, { + authInfo, + canonicalID: destBucketMD.getOwner(), + bucket: destBucketName, + keys: [destObjectKey], + newByteLength: copyObjectSize, + oldByteLength: prevObjectSize, + location: destBucketMD.getLocationConstraint(), + }); + monitoring.promMetrics('PUT', destBucketName, '200', 'putObjectCopyPart'); + return callback(null, xml, additionalHeaders); } - additionalHeaders['x-amz-copy-source-version-id'] = sourceVerId; - pushMetric('uploadPartCopy', log, { - authInfo, - canonicalID: destBucketMD.getOwner(), - bucket: destBucketName, - keys: [destObjectKey], - newByteLength: copyObjectSize, - oldByteLength: prevObjectSize, - location: destBucketMD.getLocationConstraint(), - }); - monitoring.promMetrics( - 'PUT', destBucketName, '200', 'putObjectCopyPart'); - return callback(null, xml, additionalHeaders); - }); + ); } module.exports = objectPutCopyPart; diff --git a/lib/api/objectPutLegalHold.js b/lib/api/objectPutLegalHold.js index c16f2c84e8..816b5fdbd9 100644 --- a/lib/api/objectPutLegalHold.js +++ b/lib/api/objectPutLegalHold.js @@ -2,8 +2,11 @@ const async = require('async'); const { errors, errorInstances, s3middleware } = require('arsenal'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); -const { decodeVersionId, getVersionIdResHeader, getVersionSpecificMetadataOptions } = - require('./apiUtils/object/versioning'); +const { + decodeVersionId, + getVersionIdResHeader, + getVersionSpecificMetadataOptions, +} = require('./apiUtils/object/versioning'); const getReplicationInfo = require('./apiUtils/object/getReplicationInfo'); const metadata = require('../metadata/wrapper'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); @@ -47,78 +50,87 @@ function objectPutLegalHold(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, + return async.waterfall( + [ + next => + standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, bucket, objectMD) => { + if (err) { + log.trace('request authorization failed', { method: 'objectPutLegalHold', error: err }); + return next(err); + } + if (!objectMD) { + const err = versionId ? 
errors.NoSuchVersion : errors.NoSuchKey; + log.trace('error no object metadata found', { method: 'objectPutLegalHold', error: err }); + return next(err, bucket); + } + if (objectMD.isDeleteMarker) { + log.trace('version is a delete marker', { method: 'objectPutLegalHold' }); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + return next(errors.MethodNotAllowed, bucket); + } + if (!bucket.isObjectLockEnabled()) { + log.trace('object lock not enabled on bucket', { method: 'objectPutLegalHold' }); + return next( + errorInstances.InvalidRequest.customizeDescription( + 'Bucket is missing Object Lock Configuration' + ), + bucket + ); + } + return next(null, bucket, objectMD); + } + ), + (bucket, objectMD, next) => { + log.trace('parsing legal hold'); + parseLegalHoldXml(request.post, log, (err, res) => next(err, bucket, res, objectMD)); + }, + (bucket, legalHold, objectMD, next) => { + // eslint-disable-next-line no-param-reassign + objectMD.legalHold = legalHold; + const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode); + const replicationInfo = getReplicationInfo( + config, + objectKey, + bucket, + true, + 0, + REPLICATION_ACTION, + objectMD + ); + if (replicationInfo) { + // eslint-disable-next-line no-param-reassign + objectMD.replicationInfo = Object.assign({}, objectMD.replicationInfo, replicationInfo); + } + // eslint-disable-next-line no-param-reassign + objectMD.originOp = 's3:ObjectLegalHold:Put'; + metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params, log, err => + next(err, bucket, objectMD) + ); + }, + ], (err, bucket, objectMD) => { + const additionalResHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); if (err) { - log.trace('request authorization failed', - { method: 'objectPutLegalHold', error: err }); - return next(err); - } - if (!objectMD) { - const err = versionId ? errors.NoSuchVersion : - errors.NoSuchKey; - log.trace('error no object metadata found', - { method: 'objectPutLegalHold', error: err }); - return next(err, bucket); - } - if (objectMD.isDeleteMarker) { - log.trace('version is a delete marker', - { method: 'objectPutLegalHold' }); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - return next(errors.MethodNotAllowed, bucket); - } - if (!bucket.isObjectLockEnabled()) { - log.trace('object lock not enabled on bucket', - { method: 'objectPutLegalHold' }); - return next(errorInstances.InvalidRequest.customizeDescription( - 'Bucket is missing Object Lock Configuration' - ), bucket); - } - return next(null, bucket, objectMD); - }), - (bucket, objectMD, next) => { - log.trace('parsing legal hold'); - parseLegalHoldXml(request.post, log, (err, res) => - next(err, bucket, res, objectMD)); - }, - (bucket, legalHold, objectMD, next) => { - // eslint-disable-next-line no-param-reassign - objectMD.legalHold = legalHold; - const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode); - const replicationInfo = getReplicationInfo(config, - objectKey, bucket, true, 0, REPLICATION_ACTION, objectMD); - if (replicationInfo) { - // eslint-disable-next-line no-param-reassign - objectMD.replicationInfo = Object.assign({}, - objectMD.replicationInfo, replicationInfo); + log.trace('error processing request', { error: err, method: 'objectPutLegalHold' }); + } else { + pushMetric('putObjectLegalHold', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + versionId: objectMD ? 
objectMD.versionId : undefined, + location: objectMD ? objectMD.dataStoreName : undefined, + }); + const verCfg = bucket.getVersioningConfiguration(); + additionalResHeaders['x-amz-version-id'] = getVersionIdResHeader(verCfg, objectMD); } - // eslint-disable-next-line no-param-reassign - objectMD.originOp = 's3:ObjectLegalHold:Put'; - metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params, - log, err => next(err, bucket, objectMD)); - }, - ], (err, bucket, objectMD) => { - const additionalResHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', - { error: err, method: 'objectPutLegalHold' }); - } else { - pushMetric('putObjectLegalHold', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - versionId: objectMD ? objectMD.versionId : undefined, - location: objectMD ? objectMD.dataStoreName : undefined, - }); - const verCfg = bucket.getVersioningConfiguration(); - additionalResHeaders['x-amz-version-id'] = - getVersionIdResHeader(verCfg, objectMD); + return callback(err, additionalResHeaders); } - return callback(err, additionalResHeaders); - }); + ); } module.exports = objectPutLegalHold; diff --git a/lib/api/objectPutPart.js b/lib/api/objectPutPart.js index 528d5391ab..fd86569d73 100644 --- a/lib/api/objectPutPart.js +++ b/lib/api/objectPutPart.js @@ -6,14 +6,12 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const constants = require('../../constants'); const { data } = require('../data/wrapper'); const { dataStore } = require('./apiUtils/object/storeObject'); -const { isBucketAuthorized } = - require('./apiUtils/authorization/permissionChecks'); +const { isBucketAuthorized } = require('./apiUtils/authorization/permissionChecks'); const kms = require('../kms/wrapper'); const metadata = require('../metadata/wrapper'); const { pushMetric } = require('../utapi/utilities'); const services = require('../services'); -const locationConstraintCheck - = require('./apiUtils/object/locationConstraintCheck'); +const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck'); const monitoring = require('../utilities/monitoringHandler'); const { config } = require('../Config'); const { BackendInfo } = models; @@ -57,8 +55,7 @@ function _getPartKey(uploadId, splitter, paddedPartNumber) { * @param {function} cb - final callback to call with the result * @return {undefined} */ -function objectPutPart(authInfo, request, streamingV4Params, log, - cb) { +function objectPutPart(authInfo, request, streamingV4Params, log, cb) { log.debug('processing request', { method: 'objectPutPart' }); const size = request.parsedContentLength; @@ -67,8 +64,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log, if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) { log.debug('put part size too large', { size }); - monitoring.promMetrics('PUT', request.bucketName, 400, - 'putObjectPart'); + monitoring.promMetrics('PUT', request.bucketName, 400, 'putObjectPart'); return cb(errors.EntityTooLarge); } @@ -86,13 +82,11 @@ function objectPutPart(authInfo, request, streamingV4Params, log, const partNumber = Number.parseInt(request.query.partNumber, 10); // AWS caps partNumbers at 10,000 if (partNumber > 10000) { - monitoring.promMetrics('PUT', request.bucketName, 400, - 'putObjectPart'); + monitoring.promMetrics('PUT', request.bucketName, 400, 'putObjectPart'); return cb(errors.TooManyParts); } if (!Number.isInteger(partNumber) || partNumber < 1) { - 
monitoring.promMetrics('PUT', request.bucketName, 400, - 'putObjectPart'); + monitoring.promMetrics('PUT', request.bucketName, 400, 'putObjectPart'); return cb(errors.InvalidArgument); } const bucketName = request.bucketName; @@ -104,7 +98,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log, }); // Note that keys in the query object retain their case, so // `request.query.uploadId` must be called with that exact capitalization. - const { query: { uploadId } } = request; + const { + query: { uploadId }, + } = request; const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`; const { objectKey } = request; const originalIdentityAuthzResults = request.actionImplicitDenies; @@ -112,73 +108,93 @@ function objectPutPart(authInfo, request, streamingV4Params, log, // `requestType` is the general 'objectPut'. const requestType = request.apiMethods || 'objectPutPart'; - return async.waterfall([ - // Get the destination bucket. - next => metadata.getBucket(bucketName, log, - (err, destinationBucket) => { - if (err?.is?.NoSuchBucket) { - return next(errors.NoSuchBucket, destinationBucket); - } - if (err) { - log.error('error getting the destination bucket', { - error: err, - method: 'objectPutPart::metadata.getBucket', - }); - return next(err, destinationBucket); + return async.waterfall( + [ + // Get the destination bucket. + next => + metadata.getBucket(bucketName, log, (err, destinationBucket) => { + if (err?.is?.NoSuchBucket) { + return next(errors.NoSuchBucket, destinationBucket); + } + if (err) { + log.error('error getting the destination bucket', { + error: err, + method: 'objectPutPart::metadata.getBucket', + }); + return next(err, destinationBucket); + } + return next(null, destinationBucket); + }), + // Check the bucket authorization. + (destinationBucket, next) => { + if ( + !isBucketAuthorized( + destinationBucket, + requestType, + canonicalID, + authInfo, + log, + request, + request.actionImplicitDenies + ) + ) { + log.debug('access denied for user on bucket', { requestType }); + return next(errors.AccessDenied, destinationBucket); } return next(null, destinationBucket); - }), - // Check the bucket authorization. - (destinationBucket, next) => { - if (!isBucketAuthorized(destinationBucket, requestType, canonicalID, authInfo, - log, request, request.actionImplicitDenies)) { - log.debug('access denied for user on bucket', { requestType }); - return next(errors.AccessDenied, destinationBucket); - } - return next(null, destinationBucket); - }, - (destinationBucket, next) => validateQuotas(request, destinationBucket, request.accountQuotas, - requestType, request.apiMethod, size, isPutVersion, log, err => next(err, destinationBucket)), - // Validate that no object SSE is provided for part. - // Part must use SSE from initiateMPU (overview in metadata) - (destinationBucket, next) => { - const { error, objectSSE } = parseObjectEncryptionHeaders(request.headers); - if (error) { - return next(error, destinationBucket); - } - if (objectSSE.algorithm) { - return next(errors.InvalidArgument.customizeDescription( - 'x-amz-server-side-encryption header is not supported for this operation.')); - } - return next(null, destinationBucket); - }, - // Get the MPU shadow bucket. 
- (destinationBucket, next) => - metadata.getBucket(mpuBucketName, log, - (err, mpuBucket) => { - if (err?.is?.NoSuchBucket) { - return next(errors.NoSuchUpload, destinationBucket); + }, + (destinationBucket, next) => + validateQuotas( + request, + destinationBucket, + request.accountQuotas, + requestType, + request.apiMethod, + size, + isPutVersion, + log, + err => next(err, destinationBucket) + ), + // Validate that no object SSE is provided for part. + // Part must use SSE from initiateMPU (overview in metadata) + (destinationBucket, next) => { + const { error, objectSSE } = parseObjectEncryptionHeaders(request.headers); + if (error) { + return next(error, destinationBucket); } - if (err) { - log.error('error getting the shadow mpu bucket', { - error: err, - method: 'objectPutPart::metadata.getBucket', - }); - return next(err, destinationBucket); + if (objectSSE.algorithm) { + return next( + errors.InvalidArgument.customizeDescription( + 'x-amz-server-side-encryption header is not supported for this operation.' + ) + ); } - let splitter = constants.splitter; - // BACKWARD: Remove to remove the old splitter - if (mpuBucket.getMdBucketModelVersion() < 2) { - splitter = constants.oldSplitter; - } - return next(null, destinationBucket, splitter); - }), - // Check authorization of the MPU shadow bucket. - (destinationBucket, splitter, next) => { - const mpuOverviewKey = _getOverviewKey(splitter, objectKey, - uploadId); - return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log, - (err, res) => { + return next(null, destinationBucket); + }, + // Get the MPU shadow bucket. + (destinationBucket, next) => + metadata.getBucket(mpuBucketName, log, (err, mpuBucket) => { + if (err?.is?.NoSuchBucket) { + return next(errors.NoSuchUpload, destinationBucket); + } + if (err) { + log.error('error getting the shadow mpu bucket', { + error: err, + method: 'objectPutPart::metadata.getBucket', + }); + return next(err, destinationBucket); + } + let splitter = constants.splitter; + // BACKWARD: Remove to remove the old splitter + if (mpuBucket.getMdBucketModelVersion() < 2) { + splitter = constants.oldSplitter; + } + return next(null, destinationBucket, splitter); + }), + // Check authorization of the MPU shadow bucket. + (destinationBucket, splitter, next) => { + const mpuOverviewKey = _getOverviewKey(splitter, objectKey, uploadId); + return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log, (err, res) => { if (err) { log.error('error getting the object from mpu bucket', { error: err, @@ -187,85 +203,88 @@ function objectPutPart(authInfo, request, streamingV4Params, log, return next(err, destinationBucket); } const initiatorID = res.initiator.ID; - const requesterID = authInfo.isRequesterAnIAMUser() ? - authInfo.getArn() : authInfo.getCanonicalID(); + const requesterID = authInfo.isRequesterAnIAMUser() ? authInfo.getArn() : authInfo.getCanonicalID(); if (initiatorID !== requesterID) { return next(errors.AccessDenied, destinationBucket); } - const objectLocationConstraint = - res.controllingLocationConstraint; + const objectLocationConstraint = res.controllingLocationConstraint; const sseAlgo = res['x-amz-server-side-encryption']; - const sse = sseAlgo ? { - algorithm: sseAlgo, - masterKeyId: res['x-amz-server-side-encryption-aws-kms-key-id'], - } : null; - return next(null, destinationBucket, - objectLocationConstraint, - sse, splitter); + const sse = sseAlgo + ? 
{ + algorithm: sseAlgo, + masterKeyId: res['x-amz-server-side-encryption-aws-kms-key-id'], + } + : null; + return next(null, destinationBucket, objectLocationConstraint, sse, splitter); }); - }, - // Use MPU overview SSE config - (destinationBucket, objectLocationConstraint, encryption, splitter, next) => { - // If MPU has server-side encryption, pass the `res` value - if (encryption) { - return kms.createCipherBundle(encryption, log, (err, res) => { - if (err) { - log.error('error processing the cipher bundle for ' + - 'the destination bucket', { - error: err, - }); - return next(err, destinationBucket); - } - return next(null, destinationBucket, objectLocationConstraint, res, splitter); - // Allow KMS to use a key from previous provider (if sseMigration configured) - // Because ongoing MPU started before sseMigration is no migrated - }, { previousOk: true }); - } - // The MPU does not have server-side encryption, so pass `null` - return next(null, destinationBucket, objectLocationConstraint, null, splitter); - }, - // If data backend is backend that handles mpu (like real AWS), - // no need to store part info in metadata - (destinationBucket, objectLocationConstraint, cipherBundle, - splitter, next) => { - const mpuInfo = { - destinationBucket, - size, - objectKey, - uploadId, - partNumber, - bucketName, - }; - // eslint-disable-next-line no-param-reassign - delete request.actionImplicitDenies; - writeContinue(request, request._response); - return data.putPart(request, mpuInfo, streamingV4Params, - objectLocationConstraint, locationConstraintCheck, log, - (err, partInfo, updatedObjectLC) => { - if (err) { - return next(err, destinationBucket); - } - // if data backend handles mpu, skip to end of waterfall - // TODO CLDSRV-640 (artesca) data backend should return SSE to include in response headers - if (partInfo && partInfo.dataStoreType === 'aws_s3') { - return next(skipError, destinationBucket, - partInfo.dataStoreETag); + }, + // Use MPU overview SSE config + (destinationBucket, objectLocationConstraint, encryption, splitter, next) => { + // If MPU has server-side encryption, pass the `res` value + if (encryption) { + return kms.createCipherBundle( + encryption, + log, + (err, res) => { + if (err) { + log.error('error processing the cipher bundle for ' + 'the destination bucket', { + error: err, + }); + return next(err, destinationBucket); + } + return next(null, destinationBucket, objectLocationConstraint, res, splitter); + // Allow KMS to use a key from previous provider (if sseMigration configured) + // Because ongoing MPU started before sseMigration is no migrated + }, + { previousOk: true } + ); } - // partInfo will be null if data backend is not external - // if the object location constraint undefined because - // mpu was initiated in legacy version, update it - return next(null, destinationBucket, updatedObjectLC, - cipherBundle, splitter, partInfo); - }); - }, - // Get any pre-existing part. 
- (destinationBucket, objectLocationConstraint, cipherBundle, - splitter, partInfo, next) => { - const paddedPartNumber = _getPaddedPartNumber(partNumber); - const partKey = _getPartKey(uploadId, splitter, paddedPartNumber); - return metadata.getObjectMD(mpuBucketName, partKey, {}, log, - (err, res) => { + // The MPU does not have server-side encryption, so pass `null` + return next(null, destinationBucket, objectLocationConstraint, null, splitter); + }, + // If data backend is backend that handles mpu (like real AWS), + // no need to store part info in metadata + (destinationBucket, objectLocationConstraint, cipherBundle, splitter, next) => { + const mpuInfo = { + destinationBucket, + size, + objectKey, + uploadId, + partNumber, + bucketName, + }; + // eslint-disable-next-line no-param-reassign + delete request.actionImplicitDenies; + writeContinue(request, request._response); + return data.putPart( + request, + mpuInfo, + streamingV4Params, + objectLocationConstraint, + locationConstraintCheck, + log, + (err, partInfo, updatedObjectLC) => { + if (err) { + return next(err, destinationBucket); + } + // if data backend handles mpu, skip to end of waterfall + // TODO CLDSRV-640 (artesca) data backend should return SSE to include in response headers + if (partInfo && partInfo.dataStoreType === 'aws_s3') { + return next(skipError, destinationBucket, partInfo.dataStoreETag); + } + // partInfo will be null if data backend is not external + // if the object location constraint undefined because + // mpu was initiated in legacy version, update it + return next(null, destinationBucket, updatedObjectLC, cipherBundle, splitter, partInfo); + } + ); + }, + // Get any pre-existing part. + (destinationBucket, objectLocationConstraint, cipherBundle, splitter, partInfo, next) => { + const paddedPartNumber = _getPaddedPartNumber(partNumber); + const partKey = _getPartKey(uploadId, splitter, paddedPartNumber); + return metadata.getObjectMD(mpuBucketName, partKey, {}, log, (err, res) => { // If there is no object with the same key, continue. if (err && !err.is.NoSuchKey) { log.error('error getting current part (if any)', { @@ -283,78 +302,124 @@ function objectPutPart(authInfo, request, streamingV4Params, log, // Pull locations to clean up any potential orphans in // data if object put is an overwrite of a pre-existing // object with the same key and part number. - oldLocations = Array.isArray(res.partLocations) ? - res.partLocations : [res.partLocations]; + oldLocations = Array.isArray(res.partLocations) ? res.partLocations : [res.partLocations]; } - return next(null, destinationBucket, - objectLocationConstraint, cipherBundle, - partKey, prevObjectSize, oldLocations, partInfo, splitter); + return next( + null, + destinationBucket, + objectLocationConstraint, + cipherBundle, + partKey, + prevObjectSize, + oldLocations, + partInfo, + splitter + ); }); - }, - // Store in data backend. 
- (destinationBucket, objectLocationConstraint, cipherBundle, - partKey, prevObjectSize, oldLocations, partInfo, splitter, next) => { - // NOTE: set oldLocations to null so we do not batchDelete for now - if (partInfo && - constants.skipBatchDeleteBackends[partInfo.dataStoreType]) { - // skip to storing metadata - return next(null, destinationBucket, partInfo, - partInfo.dataStoreETag, - cipherBundle, partKey, prevObjectSize, null, - objectLocationConstraint, splitter); - } - const objectContext = { - bucketName, - owner: canonicalID, - namespace: request.namespace, - objectKey, - partNumber: _getPaddedPartNumber(partNumber), - uploadId, - }; - const backendInfo = new BackendInfo(config, - objectLocationConstraint); - return dataStore(objectContext, cipherBundle, request, - size, streamingV4Params, backendInfo, log, - (err, dataGetInfo, hexDigest) => { - if (err) { - return next(err, destinationBucket); + }, + // Store in data backend. + ( + destinationBucket, + objectLocationConstraint, + cipherBundle, + partKey, + prevObjectSize, + oldLocations, + partInfo, + splitter, + next + ) => { + // NOTE: set oldLocations to null so we do not batchDelete for now + if (partInfo && constants.skipBatchDeleteBackends[partInfo.dataStoreType]) { + // skip to storing metadata + return next( + null, + destinationBucket, + partInfo, + partInfo.dataStoreETag, + cipherBundle, + partKey, + prevObjectSize, + null, + objectLocationConstraint, + splitter + ); + } + const objectContext = { + bucketName, + owner: canonicalID, + namespace: request.namespace, + objectKey, + partNumber: _getPaddedPartNumber(partNumber), + uploadId, + }; + const backendInfo = new BackendInfo(config, objectLocationConstraint); + return dataStore( + objectContext, + cipherBundle, + request, + size, + streamingV4Params, + backendInfo, + log, + (err, dataGetInfo, hexDigest) => { + if (err) { + return next(err, destinationBucket); + } + return next( + null, + destinationBucket, + dataGetInfo, + hexDigest, + cipherBundle, + partKey, + prevObjectSize, + oldLocations, + objectLocationConstraint, + splitter + ); } - return next(null, destinationBucket, dataGetInfo, hexDigest, - cipherBundle, partKey, prevObjectSize, oldLocations, - objectLocationConstraint, splitter); - }); - }, - // Store data locations in metadata and delete any overwritten - // data if completeMPU hasn't been initiated yet. - (destinationBucket, dataGetInfo, hexDigest, cipherBundle, partKey, - prevObjectSize, oldLocations, objectLocationConstraint, splitter, next) => { - // Use an array to be consistent with objectPutCopyPart where there - // could be multiple locations. 
- const partLocations = [dataGetInfo]; - const sseHeaders = {}; - if (cipherBundle) { - const { algorithm, masterKeyId, cryptoScheme, - cipheredDataKey } = cipherBundle; - partLocations[0].sseAlgorithm = algorithm; - partLocations[0].sseMasterKeyId = masterKeyId; - partLocations[0].sseCryptoScheme = cryptoScheme; - partLocations[0].sseCipheredDataKey = cipheredDataKey; - sseHeaders.algo = algorithm; - sseHeaders.kmsKey = masterKeyId; - } - const omVal = { - // back to Version 3 since number-subparts is not needed - 'md-model-version': 3, - partLocations, - 'key': partKey, - 'last-modified': new Date().toJSON(), - 'content-md5': hexDigest, - 'content-length': size, - 'owner-id': destinationBucket.getOwner(), - }; - const mdParams = { overheadField: constants.overheadField }; - return metadata.putObjectMD(mpuBucketName, partKey, omVal, mdParams, log, - err => { + ); + }, + // Store data locations in metadata and delete any overwritten + // data if completeMPU hasn't been initiated yet. + ( + destinationBucket, + dataGetInfo, + hexDigest, + cipherBundle, + partKey, + prevObjectSize, + oldLocations, + objectLocationConstraint, + splitter, + next + ) => { + // Use an array to be consistent with objectPutCopyPart where there + // could be multiple locations. + const partLocations = [dataGetInfo]; + const sseHeaders = {}; + if (cipherBundle) { + const { algorithm, masterKeyId, cryptoScheme, cipheredDataKey } = cipherBundle; + partLocations[0].sseAlgorithm = algorithm; + partLocations[0].sseMasterKeyId = masterKeyId; + partLocations[0].sseCryptoScheme = cryptoScheme; + partLocations[0].sseCipheredDataKey = cipheredDataKey; + sseHeaders.algo = algorithm; + sseHeaders.kmsKey = masterKeyId; + } + const omVal = { + // back to Version 3 since number-subparts is not needed + 'md-model-version': 3, + partLocations, + key: partKey, + 'last-modified': new Date().toJSON(), + 'content-md5': hexDigest, + 'content-length': size, + 'owner-id': destinationBucket.getOwner(), + }; + const mdParams = { overheadField: constants.overheadField }; + return metadata.putObjectMD(mpuBucketName, partKey, omVal, mdParams, log, err => { if (err) { log.error('error putting object in mpu bucket', { error: err, @@ -362,100 +427,144 @@ function objectPutPart(authInfo, request, streamingV4Params, log, }); return next(err, destinationBucket); } - return next(null, partLocations, oldLocations, objectLocationConstraint, - destinationBucket, hexDigest, sseHeaders, prevObjectSize, splitter); + return next( + null, + partLocations, + oldLocations, + objectLocationConstraint, + destinationBucket, + hexDigest, + sseHeaders, + prevObjectSize, + splitter + ); }); - }, - (partLocations, oldLocations, objectLocationConstraint, destinationBucket, - hexDigest, sseHeaders, prevObjectSize, splitter, next) => { - if (!oldLocations) { - return next(null, oldLocations, objectLocationConstraint, - destinationBucket, hexDigest, sseHeaders, prevObjectSize); - } - return services.isCompleteMPUInProgress({ - bucketName, - objectKey, - uploadId, + }, + ( + partLocations, + oldLocations, + objectLocationConstraint, + destinationBucket, + hexDigest, + sseHeaders, + prevObjectSize, splitter, - }, log, (err, completeInProgress) => { - if (err) { - return next(err, destinationBucket); + next + ) => { + if (!oldLocations) { + return next( + null, + oldLocations, + objectLocationConstraint, + destinationBucket, + hexDigest, + sseHeaders, + prevObjectSize + ); } - let oldLocationsToDelete = oldLocations; - // Prevent deletion of old data if a completeMPU - // is 
already in progress because then there is no - // guarantee that the old location will not be the - // committed one. - if (completeInProgress) { - log.warn('not deleting old locations because CompleteMPU is in progress', { - method: 'objectPutPart::metadata.getObjectMD', + return services.isCompleteMPUInProgress( + { bucketName, objectKey, uploadId, - partLocations, - oldLocations, - }); - oldLocationsToDelete = null; - } - return next(null, oldLocationsToDelete, objectLocationConstraint, - destinationBucket, hexDigest, sseHeaders, prevObjectSize); - }); - }, - // Clean up any old data now that new metadata (with new - // data locations) has been stored. - (oldLocationsToDelete, objectLocationConstraint, destinationBucket, hexDigest, - sseHeaders, prevObjectSize, next) => { - if (oldLocationsToDelete) { - log.trace('overwriting mpu part, deleting data'); - return data.batchDelete(oldLocationsToDelete, request.method, - objectLocationConstraint, log, err => { + splitter, + }, + log, + (err, completeInProgress) => { if (err) { - // if error, log the error and move on as it is not - // relevant to the client as the client's - // object already succeeded putting data, metadata - log.error('error deleting existing data', - { error: err }); + return next(err, destinationBucket); + } + let oldLocationsToDelete = oldLocations; + // Prevent deletion of old data if a completeMPU + // is already in progress because then there is no + // guarantee that the old location will not be the + // committed one. + if (completeInProgress) { + log.warn('not deleting old locations because CompleteMPU is in progress', { + method: 'objectPutPart::metadata.getObjectMD', + bucketName, + objectKey, + uploadId, + partLocations, + oldLocations, + }); + oldLocationsToDelete = null; + } + return next( + null, + oldLocationsToDelete, + objectLocationConstraint, + destinationBucket, + hexDigest, + sseHeaders, + prevObjectSize + ); + } + ); + }, + // Clean up any old data now that new metadata (with new + // data locations) has been stored. 
+ ( + oldLocationsToDelete, + objectLocationConstraint, + destinationBucket, + hexDigest, + sseHeaders, + prevObjectSize, + next + ) => { + if (oldLocationsToDelete) { + log.trace('overwriting mpu part, deleting data'); + return data.batchDelete( + oldLocationsToDelete, + request.method, + objectLocationConstraint, + log, + err => { + if (err) { + // if error, log the error and move on as it is not + // relevant to the client as the client's + // object already succeeded putting data, metadata + log.error('error deleting existing data', { error: err }); + } + return next(null, destinationBucket, hexDigest, sseHeaders, prevObjectSize); } - return next(null, destinationBucket, hexDigest, - sseHeaders, prevObjectSize); - }); + ); + } + return next(null, destinationBucket, hexDigest, sseHeaders, prevObjectSize); + }, + ], + (err, destinationBucket, hexDigest, sseHeaders, prevObjectSize) => { + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = originalIdentityAuthzResults; + if (sseHeaders) { + setSSEHeaders(corsHeaders, sseHeaders.algo, sseHeaders.kmsKey); } - return next(null, destinationBucket, hexDigest, - sseHeaders, prevObjectSize); - }, - ], (err, destinationBucket, hexDigest, sseHeaders, prevObjectSize) => { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, destinationBucket); - // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = originalIdentityAuthzResults; - if (sseHeaders) { - setSSEHeaders(corsHeaders, sseHeaders.algo, sseHeaders.kmsKey); - } - if (err) { - if (err === skipError) { - return cb(null, hexDigest, corsHeaders); + if (err) { + if (err === skipError) { + return cb(null, hexDigest, corsHeaders); + } + log.error('error in object put part (upload part)', { + error: err, + method: 'objectPutPart', + }); + monitoring.promMetrics('PUT', bucketName, err.code, 'putObjectPart'); + return cb(err, null, corsHeaders); } - log.error('error in object put part (upload part)', { - error: err, - method: 'objectPutPart', + pushMetric('uploadPart', log, { + authInfo, + canonicalID: destinationBucket.getOwner(), + bucket: bucketName, + keys: [objectKey], + newByteLength: size, + oldByteLength: prevObjectSize, + location: destinationBucket.getLocationConstraint(), }); - monitoring.promMetrics('PUT', bucketName, err.code, - 'putObjectPart'); - return cb(err, null, corsHeaders); + monitoring.promMetrics('PUT', bucketName, '200', 'putObjectPart', size, prevObjectSize); + return cb(null, hexDigest, corsHeaders); } - pushMetric('uploadPart', log, { - authInfo, - canonicalID: destinationBucket.getOwner(), - bucket: bucketName, - keys: [objectKey], - newByteLength: size, - oldByteLength: prevObjectSize, - location: destinationBucket.getLocationConstraint(), - }); - monitoring.promMetrics('PUT', bucketName, - '200', 'putObjectPart', size, prevObjectSize); - return cb(null, hexDigest, corsHeaders); - }); + ); } module.exports = objectPutPart; diff --git a/lib/api/objectPutRetention.js b/lib/api/objectPutRetention.js index 6a7a2c8441..8e836de296 100644 --- a/lib/api/objectPutRetention.js +++ b/lib/api/objectPutRetention.js @@ -1,10 +1,12 @@ const async = require('async'); const { errors, errorInstances, s3middleware } = require('arsenal'); -const { decodeVersionId, getVersionIdResHeader, getVersionSpecificMetadataOptions } = - require('./apiUtils/object/versioning'); -const { ObjectLockInfo, 
hasGovernanceBypassHeader } = - require('./apiUtils/object/objectLockHelpers'); +const { + decodeVersionId, + getVersionIdResHeader, + getVersionSpecificMetadataOptions, +} = require('./apiUtils/object/versioning'); +const { ObjectLockInfo, hasGovernanceBypassHeader } = require('./apiUtils/object/objectLockHelpers'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); const getReplicationInfo = require('./apiUtils/object/getReplicationInfo'); @@ -50,99 +52,106 @@ function objectPutRetention(authInfo, request, log, callback) { const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers); - return async.waterfall([ - next => { - log.trace('parsing retention information'); - parseRetentionXml(request.post, log, - (err, retentionInfo) => { + return async.waterfall( + [ + next => { + log.trace('parsing retention information'); + parseRetentionXml(request.post, log, (err, retentionInfo) => { if (err) { - log.trace('error parsing retention information', - { error: err }); + log.trace('error parsing retention information', { error: err }); return next(err); } - const remainingDays = Math.ceil( - (new Date(retentionInfo.date) - Date.now()) / (1000 * 3600 * 24)); + const remainingDays = Math.ceil((new Date(retentionInfo.date) - Date.now()) / (1000 * 3600 * 24)); metadataValParams.request.objectLockRetentionDays = remainingDays; return next(null, retentionInfo); }); - }, - (retentionInfo, next) => standardMetadataValidateBucketAndObj(metadataValParams, - request.actionImplicitDenies, log, (err, bucket, objectMD) => { - if (err) { - log.trace('request authorization failed', - { method: 'objectPutRetention', error: err }); - return next(err); - } - if (!objectMD) { - const err = reqVersionId ? errors.NoSuchVersion : - errors.NoSuchKey; - log.trace('error no object metadata found', - { method: 'objectPutRetention', error: err }); - return next(err, bucket); - } - if (objectMD.isDeleteMarker) { - log.trace('version is a delete marker', - { method: 'objectPutRetention' }); - return next(errors.MethodNotAllowed, bucket); - } - if (!bucket.isObjectLockEnabled()) { - log.trace('object lock not enabled on bucket', - { method: 'objectPutRetention' }); - return next(errorInstances.InvalidRequest.customizeDescription( - 'Bucket is missing Object Lock Configuration' - ), bucket); - } - return next(null, bucket, retentionInfo, objectMD); - }), - (bucket, retentionInfo, objectMD, next) => { - const objLockInfo = new ObjectLockInfo({ - mode: objectMD.retentionMode, - date: objectMD.retentionDate, - legalHold: objectMD.legalHold, - }); + }, + (retentionInfo, next) => + standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, bucket, objectMD) => { + if (err) { + log.trace('request authorization failed', { method: 'objectPutRetention', error: err }); + return next(err); + } + if (!objectMD) { + const err = reqVersionId ? 
errors.NoSuchVersion : errors.NoSuchKey; + log.trace('error no object metadata found', { method: 'objectPutRetention', error: err }); + return next(err, bucket); + } + if (objectMD.isDeleteMarker) { + log.trace('version is a delete marker', { method: 'objectPutRetention' }); + return next(errors.MethodNotAllowed, bucket); + } + if (!bucket.isObjectLockEnabled()) { + log.trace('object lock not enabled on bucket', { method: 'objectPutRetention' }); + return next( + errorInstances.InvalidRequest.customizeDescription( + 'Bucket is missing Object Lock Configuration' + ), + bucket + ); + } + return next(null, bucket, retentionInfo, objectMD); + } + ), + (bucket, retentionInfo, objectMD, next) => { + const objLockInfo = new ObjectLockInfo({ + mode: objectMD.retentionMode, + date: objectMD.retentionDate, + legalHold: objectMD.legalHold, + }); - if (!objLockInfo.canModifyPolicy(retentionInfo, hasGovernanceBypass)) { - return next(errors.AccessDenied, bucket); - } + if (!objLockInfo.canModifyPolicy(retentionInfo, hasGovernanceBypass)) { + return next(errors.AccessDenied, bucket); + } - return next(null, bucket, retentionInfo, objectMD); - }, - (bucket, retentionInfo, objectMD, next) => { - /* eslint-disable no-param-reassign */ - objectMD.retentionMode = retentionInfo.mode; - objectMD.retentionDate = retentionInfo.date; - const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode); - const replicationInfo = getReplicationInfo(config, - objectKey, bucket, true, 0, REPLICATION_ACTION, objectMD); - if (replicationInfo) { - objectMD.replicationInfo = Object.assign({}, - objectMD.replicationInfo, replicationInfo); + return next(null, bucket, retentionInfo, objectMD); + }, + (bucket, retentionInfo, objectMD, next) => { + /* eslint-disable no-param-reassign */ + objectMD.retentionMode = retentionInfo.mode; + objectMD.retentionDate = retentionInfo.date; + const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode); + const replicationInfo = getReplicationInfo( + config, + objectKey, + bucket, + true, + 0, + REPLICATION_ACTION, + objectMD + ); + if (replicationInfo) { + objectMD.replicationInfo = Object.assign({}, objectMD.replicationInfo, replicationInfo); + } + objectMD.originOp = 's3:ObjectRetention:Put'; + /* eslint-enable no-param-reassign */ + metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params, log, err => + next(err, bucket, objectMD) + ); + }, + ], + (err, bucket, objectMD) => { + const additionalResHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'objectPutRetention' }); + } else { + pushMetric('putObjectRetention', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + versionId: objectMD ? objectMD.versionId : undefined, + location: objectMD ? 
objectMD.dataStoreName : undefined, + }); + const verCfg = bucket.getVersioningConfiguration(); + additionalResHeaders['x-amz-version-id'] = getVersionIdResHeader(verCfg, objectMD); } - objectMD.originOp = 's3:ObjectRetention:Put'; - /* eslint-enable no-param-reassign */ - metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params, - log, err => next(err, bucket, objectMD)); - }, - ], (err, bucket, objectMD) => { - const additionalResHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', - { error: err, method: 'objectPutRetention' }); - } else { - pushMetric('putObjectRetention', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - versionId: objectMD ? objectMD.versionId : undefined, - location: objectMD ? objectMD.dataStoreName : undefined, - }); - const verCfg = bucket.getVersioningConfiguration(); - additionalResHeaders['x-amz-version-id'] = - getVersionIdResHeader(verCfg, objectMD); + return callback(err, additionalResHeaders); } - return callback(err, additionalResHeaders); - }); + ); } module.exports = objectPutRetention; diff --git a/lib/api/objectPutTagging.js b/lib/api/objectPutTagging.js index fbb1bba96d..ae1b7e48a8 100644 --- a/lib/api/objectPutTagging.js +++ b/lib/api/objectPutTagging.js @@ -1,8 +1,11 @@ const async = require('async'); const { errors, s3middleware } = require('arsenal'); -const { decodeVersionId, getVersionIdResHeader, getVersionSpecificMetadataOptions } = - require('./apiUtils/object/versioning'); +const { + decodeVersionId, + getVersionIdResHeader, + getVersionSpecificMetadataOptions, +} = require('./apiUtils/object/versioning'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { pushMetric } = require('../utapi/utilities'); @@ -48,80 +51,85 @@ function objectPutTagging(authInfo, request, log, callback) { request, }; - return async.waterfall([ - next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, - (err, bucket, objectMD) => { - if (err) { - log.trace('request authorization failed', - { method: 'objectPutTagging', error: err }); - return next(err); - } - if (!objectMD) { - const err = reqVersionId ? errors.NoSuchVersion : - errors.NoSuchKey; - log.trace('error no object metadata found', - { method: 'objectPutTagging', error: err }); - return next(err, bucket); - } - if (objectMD.isDeleteMarker) { - log.trace('version is a delete marker', - { method: 'objectPutTagging' }); - // FIXME we should return a `x-amz-delete-marker: true` header, - // see S3C-7592 - return next(errors.MethodNotAllowed, bucket); - } - return next(null, bucket, objectMD); - }), - (bucket, objectMD, next) => { - log.trace('parsing tag(s)'); - parseTagXml(request.post, log, (err, tags) => - next(err, bucket, tags, objectMD)); - }, - (bucket, tags, objectMD, next) => { - // eslint-disable-next-line no-param-reassign - objectMD.tags = tags; - const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode); - const replicationInfo = getReplicationInfo(config, - objectKey, bucket, true, 0, REPLICATION_ACTION, objectMD); - if (replicationInfo) { + return async.waterfall( + [ + next => + standardMetadataValidateBucketAndObj( + metadataValParams, + request.actionImplicitDenies, + log, + (err, bucket, objectMD) => { + if (err) { + log.trace('request authorization failed', { method: 'objectPutTagging', error: err }); + return next(err); + } + if (!objectMD) { + const err = reqVersionId ? 
errors.NoSuchVersion : errors.NoSuchKey; + log.trace('error no object metadata found', { method: 'objectPutTagging', error: err }); + return next(err, bucket); + } + if (objectMD.isDeleteMarker) { + log.trace('version is a delete marker', { method: 'objectPutTagging' }); + // FIXME we should return a `x-amz-delete-marker: true` header, + // see S3C-7592 + return next(errors.MethodNotAllowed, bucket); + } + return next(null, bucket, objectMD); + } + ), + (bucket, objectMD, next) => { + log.trace('parsing tag(s)'); + parseTagXml(request.post, log, (err, tags) => next(err, bucket, tags, objectMD)); + }, + (bucket, tags, objectMD, next) => { // eslint-disable-next-line no-param-reassign - objectMD.replicationInfo = Object.assign({}, - objectMD.replicationInfo, replicationInfo); + objectMD.tags = tags; + const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode); + const replicationInfo = getReplicationInfo( + config, + objectKey, + bucket, + true, + 0, + REPLICATION_ACTION, + objectMD + ); + if (replicationInfo) { + // eslint-disable-next-line no-param-reassign + objectMD.replicationInfo = Object.assign({}, objectMD.replicationInfo, replicationInfo); + } + // eslint-disable-next-line no-param-reassign + objectMD.originOp = 's3:ObjectTagging:Put'; + metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params, log, err => + next(err, bucket, objectMD) + ); + }, + (bucket, objectMD, next) => + // if external backend handles tagging + data.objectTagging('Put', objectKey, bucket.getName(), objectMD, log, err => + next(err, bucket, objectMD) + ), + ], + (err, bucket, objectMD) => { + const additionalResHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + if (err) { + log.trace('error processing request', { error: err, method: 'objectPutTagging' }); + monitoring.promMetrics('PUT', bucketName, err.code, 'putObjectTagging'); + } else { + pushMetric('putObjectTagging', log, { + authInfo, + bucket: bucketName, + keys: [objectKey], + versionId: objectMD ? objectMD.versionId : undefined, + location: objectMD ? objectMD.dataStoreName : undefined, + }); + monitoring.promMetrics('PUT', bucketName, '200', 'putObjectTagging'); + const verCfg = bucket.getVersioningConfiguration(); + additionalResHeaders['x-amz-version-id'] = getVersionIdResHeader(verCfg, objectMD); } - // eslint-disable-next-line no-param-reassign - objectMD.originOp = 's3:ObjectTagging:Put'; - metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params, - log, err => - next(err, bucket, objectMD)); - }, - (bucket, objectMD, next) => - // if external backend handles tagging - data.objectTagging('Put', objectKey, bucket.getName(), objectMD, - log, err => next(err, bucket, objectMD)), - ], (err, bucket, objectMD) => { - const additionalResHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - if (err) { - log.trace('error processing request', { error: err, - method: 'objectPutTagging' }); - monitoring.promMetrics('PUT', bucketName, err.code, - 'putObjectTagging'); - } else { - pushMetric('putObjectTagging', log, { - authInfo, - bucket: bucketName, - keys: [objectKey], - versionId: objectMD ? objectMD.versionId : undefined, - location: objectMD ? 
objectMD.dataStoreName : undefined, - }); - monitoring.promMetrics( - 'PUT', bucketName, '200', 'putObjectTagging'); - const verCfg = bucket.getVersioningConfiguration(); - additionalResHeaders['x-amz-version-id'] = - getVersionIdResHeader(verCfg, objectMD); + return callback(err, additionalResHeaders); } - return callback(err, additionalResHeaders); - }); + ); } module.exports = objectPutTagging; diff --git a/lib/api/objectRestore.js b/lib/api/objectRestore.js index be0eb48389..f96234dc4e 100644 --- a/lib/api/objectRestore.js +++ b/lib/api/objectRestore.js @@ -22,8 +22,7 @@ const sdtObjectRestore = require('./apiUtils/object/objectRestore'); * @return {undefined} */ function objectRestore(userInfo, request, log, callback) { - return sdtObjectRestore(metadata, metadataUtils, userInfo, request, - log, callback); + return sdtObjectRestore(metadata, metadataUtils, userInfo, request, log, callback); } module.exports = objectRestore; diff --git a/lib/api/serviceGet.js b/lib/api/serviceGet.js index f81169b3f9..bd7d1e8020 100644 --- a/lib/api/serviceGet.js +++ b/lib/api/serviceGet.js @@ -34,8 +34,7 @@ function generateXml(xml, owner, userBuckets, splitter) { xml.push( '', `${key}`, - `${bucket.value.creationDate}` + - '', + `${bucket.value.creationDate}` + '', '' ); }); @@ -56,38 +55,32 @@ function serviceGet(authInfo, request, log, callback) { if (authInfo.isRequesterPublicUser()) { log.debug('operation not available for public user'); - monitoring.promMetrics( - 'GET', request.bucketName, 403, 'getService'); + monitoring.promMetrics('GET', request.bucketName, 403, 'getService'); return callback(errors.AccessDenied); } const xml = []; const canonicalId = authInfo.getCanonicalID(); xml.push( '', - '', + '', '', `${canonicalId}`, - `${authInfo.getAccountDisplayName()}` + - '', + `${authInfo.getAccountDisplayName()}` + '', '', '' ); - return services.getService(authInfo, request, log, constants.splitter, - (err, userBuckets, splitter) => { - if (err) { - monitoring.promMetrics( - 'GET', userBuckets, err.code, 'getService'); - return callback(err); - } - // TODO push metric for serviceGet - // pushMetric('getService', log, { - // bucket: bucketName, - // }); - monitoring.promMetrics('GET', userBuckets, '200', 'getService'); - return callback(null, generateXml(xml, canonicalId, userBuckets, - splitter)); - }); + return services.getService(authInfo, request, log, constants.splitter, (err, userBuckets, splitter) => { + if (err) { + monitoring.promMetrics('GET', userBuckets, err.code, 'getService'); + return callback(err); + } + // TODO push metric for serviceGet + // pushMetric('getService', log, { + // bucket: bucketName, + // }); + monitoring.promMetrics('GET', userBuckets, '200', 'getService'); + return callback(null, generateXml(xml, canonicalId, userBuckets, splitter)); + }); } module.exports = serviceGet; diff --git a/lib/api/website.js b/lib/api/website.js index 801d92f708..058e3083cf 100644 --- a/lib/api/website.js +++ b/lib/api/website.js @@ -5,10 +5,12 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const constants = require('../../constants'); const metadata = require('../metadata/wrapper'); const bucketShield = require('./apiUtils/bucket/bucketShield'); -const { appendWebsiteIndexDocument, findRoutingRule, extractRedirectInfo } = - require('./apiUtils/object/websiteServing'); -const { isObjAuthorized, isBucketAuthorized } = - require('./apiUtils/authorization/permissionChecks'); +const { + appendWebsiteIndexDocument, + findRoutingRule, + extractRedirectInfo, +} = 
require('./apiUtils/object/websiteServing'); +const { isObjAuthorized, isBucketAuthorized } = require('./apiUtils/authorization/permissionChecks'); const collectResponseHeaders = require('../utilities/collectResponseHeaders'); const { pushMetric } = require('../utapi/utilities'); const monitoring = require('../utilities/monitoringHandler'); @@ -27,81 +29,74 @@ const monitoring = require('../utilities/monitoringHandler'); * @param {function} callback - callback to function in route * @return {undefined} */ -function _errorActions(err, errorDocument, routingRules, - bucket, objectKey, corsHeaders, request, log, callback) { +function _errorActions(err, errorDocument, routingRules, bucket, objectKey, corsHeaders, request, log, callback) { const bucketName = bucket.getName(); - const errRoutingRule = findRoutingRule(routingRules, - objectKey, err.code); + const errRoutingRule = findRoutingRule(routingRules, objectKey, err.code); if (errRoutingRule) { // route will redirect const action = request.method === 'HEAD' ? 'headObject' : 'getObject'; - monitoring.promMetrics( - request.method, bucketName, err.code, action); - return callback(err, false, null, corsHeaders, errRoutingRule, - objectKey); + monitoring.promMetrics(request.method, bucketName, err.code, action); + return callback(err, false, null, corsHeaders, errRoutingRule, objectKey); } if (request.method === 'HEAD') { - monitoring.promMetrics( - 'HEAD', bucketName, err.code, 'headObject'); + monitoring.promMetrics('HEAD', bucketName, err.code, 'headObject'); return callback(err, false, null, corsHeaders); } if (errorDocument) { - return metadata.getObjectMD(bucketName, errorDocument, {}, log, - (errObjErr, errObjMD) => { - if (errObjErr) { - // error retrieving error document so return original error - // and set boolean of error retrieving user's error document - // to true - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getObject'); - return callback(err, true, null, corsHeaders); - } - // return the default error message if the object is private - // rather than sending a stored error file - // eslint-disable-next-line no-param-reassign - request.objectKey = errorDocument; - if (!isObjAuthorized(bucket, errObjMD, request.apiMethods || 'objectGet', - constants.publicId, null, log, request, request.actionImplicitDenies, true)) { - log.trace('errorObj not authorized', { error: err }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getObject'); - return callback(err, true, null, corsHeaders); - } - const dataLocator = errObjMD.location; - if (errObjMD['x-amz-server-side-encryption']) { - for (let i = 0; i < dataLocator.length; i++) { - dataLocator[i].masterKeyId = - errObjMD['x-amz-server-side-encryption-aws-' + - 'kms-key-id']; - dataLocator[i].algorithm = - errObjMD['x-amz-server-side-encryption']; - } + return metadata.getObjectMD(bucketName, errorDocument, {}, log, (errObjErr, errObjMD) => { + if (errObjErr) { + // error retrieving error document so return original error + // and set boolean of error retrieving user's error document + // to true + monitoring.promMetrics('GET', bucketName, err.code, 'getObject'); + return callback(err, true, null, corsHeaders); + } + // return the default error message if the object is private + // rather than sending a stored error file + // eslint-disable-next-line no-param-reassign + request.objectKey = errorDocument; + if ( + !isObjAuthorized( + bucket, + errObjMD, + request.apiMethods || 'objectGet', + constants.publicId, + null, + log, + request, + 
request.actionImplicitDenies, + true + ) + ) { + log.trace('errorObj not authorized', { error: err }); + monitoring.promMetrics('GET', bucketName, err.code, 'getObject'); + return callback(err, true, null, corsHeaders); + } + const dataLocator = errObjMD.location; + if (errObjMD['x-amz-server-side-encryption']) { + for (let i = 0; i < dataLocator.length; i++) { + dataLocator[i].masterKeyId = errObjMD['x-amz-server-side-encryption-aws-' + 'kms-key-id']; + dataLocator[i].algorithm = errObjMD['x-amz-server-side-encryption']; } + } - if (errObjMD['x-amz-website-redirect-location']) { - const redirectLocation = - errObjMD['x-amz-website-redirect-location']; - const redirectInfo = { withError: true, - location: redirectLocation }; - log.trace('redirecting to x-amz-website-redirect-location', - { location: redirectLocation }); - return callback(err, false, dataLocator, corsHeaders, - redirectInfo, ''); - } + if (errObjMD['x-amz-website-redirect-location']) { + const redirectLocation = errObjMD['x-amz-website-redirect-location']; + const redirectInfo = { withError: true, location: redirectLocation }; + log.trace('redirecting to x-amz-website-redirect-location', { location: redirectLocation }); + return callback(err, false, dataLocator, corsHeaders, redirectInfo, ''); + } - const responseMetaHeaders = collectResponseHeaders(errObjMD, - corsHeaders); - pushMetric('getObject', log, { - bucket: bucketName, - newByteLength: responseMetaHeaders['Content-Length'], - }); - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getObject'); - return callback(err, false, dataLocator, responseMetaHeaders); + const responseMetaHeaders = collectResponseHeaders(errObjMD, corsHeaders); + pushMetric('getObject', log, { + bucket: bucketName, + newByteLength: responseMetaHeaders['Content-Length'], }); + monitoring.promMetrics('GET', bucketName, err.code, 'getObject'); + return callback(err, false, dataLocator, responseMetaHeaders); + }); } - monitoring.promMetrics( - 'GET', bucketName, err.code, 'getObject'); + monitoring.promMetrics('GET', bucketName, err.code, 'getObject'); return callback(err, false, null, corsHeaders); } @@ -120,8 +115,7 @@ function capitalize(str) { * @returns {function} HEAD callback with GET signature */ function callbackGetToHead(callback) { - return (err, userErrorPageFailure, dataGetInfo, - resMetaHeaders, redirectInfo, key) => + return (err, userErrorPageFailure, dataGetInfo, resMetaHeaders, redirectInfo, key) => callback(err, resMetaHeaders, redirectInfo, key); } @@ -147,26 +141,21 @@ function website(request, log, callback) { return metadata.getBucket(bucketName, log, (err, bucket) => { if (err) { log.trace('error retrieving bucket metadata', { error: err }); - monitoring.promMetrics( - request.method, bucketName, err.code, action); + monitoring.promMetrics(request.method, bucketName, err.code, action); return callback(err, false); } if (bucketShield(bucket, `object${methodCapitalized}`)) { log.trace('bucket in transient/deleted state so shielding'); - monitoring.promMetrics( - request.method, bucketName, 404, action); + monitoring.promMetrics(request.method, bucketName, 404, action); return callback(errors.NoSuchBucket, false); } - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); // bucket ACL's do not matter for website head since it is always the // head of an object. 
object ACL's are what matter const websiteConfig = bucket.getWebsiteConfiguration(); if (!websiteConfig) { - monitoring.promMetrics( - request.method, bucketName, 404, action); - return callback(errors.NoSuchWebsiteConfiguration, false, null, - corsHeaders); + monitoring.promMetrics(request.method, bucketName, 404, action); + return callback(errors.NoSuchWebsiteConfiguration, false, null, corsHeaders); } // any errors above would be our own created generic error html // if have a website config, error going forward would be user's @@ -174,8 +163,7 @@ function website(request, log, callback) { // handle redirect all if (websiteConfig.getRedirectAllRequestsTo()) { - return callback(null, false, null, corsHeaders, - websiteConfig.getRedirectAllRequestsTo(), reqObjectKey); + return callback(null, false, null, corsHeaders, websiteConfig.getRedirectAllRequestsTo(), reqObjectKey); } // check whether need to redirect based on key @@ -185,8 +173,7 @@ function website(request, log, callback) { if (keyRoutingRule) { // TODO: optimize by not rerouting if only routing // rule is to change out key - return callback(null, false, null, corsHeaders, - keyRoutingRule, reqObjectKey); + return callback(null, false, null, corsHeaders, keyRoutingRule, reqObjectKey); } appendWebsiteIndexDocument(request, websiteConfig.getIndexDocument()); @@ -203,107 +190,134 @@ function website(request, log, callback) { function runWebsite(originalError) { // get object metadata and check authorization and header // validation - return metadata.getObjectMD(bucketName, request.objectKey, {}, log, - (err, objMD) => { - // Note: In case of error, we intentionally send the original - // object key to _errorActions as in case of a redirect, we do - // not want to append index key to redirect location - if (err) { - log.trace('error retrieving object metadata', - { error: err }); - let returnErr = err; - const bucketAuthorized = isBucketAuthorized(bucket, request.apiMethods || 'bucketGet', - constants.publicId, null, log, request, request.actionImplicitDenies, true); - // if index object does not exist and bucket is private AWS - // returns 403 - AccessDenied error. 
- if (err.is.NoSuchKey && !bucketAuthorized) { - returnErr = errors.AccessDenied; - } - - // Check if key is a folder containing index for redirect 302 - // https://docs.aws.amazon.com/AmazonS3/latest/userguide/IndexDocumentSupport.html - if (!originalError && reqObjectKey && !reqObjectKey.endsWith('/')) { - appendWebsiteIndexDocument(request, websiteConfig.getIndexDocument(), true); - // propagate returnErr as originalError to be used if index is not found - return runWebsite(returnErr); - } - - return _errorActions(originalError || returnErr, - websiteConfig.getErrorDocument(), routingRules, - bucket, reqObjectKey, corsHeaders, request, log, - callback); - } - if (!isObjAuthorized(bucket, objMD, request.apiMethods || 'objectGet', - constants.publicId, null, log, request, request.actionImplicitDenies, true)) { - const err = errors.AccessDenied; - log.trace('request not authorized', { error: err }); - return _errorActions(err, websiteConfig.getErrorDocument(), - routingRules, bucket, - reqObjectKey, corsHeaders, request, log, callback); + return metadata.getObjectMD(bucketName, request.objectKey, {}, log, (err, objMD) => { + // Note: In case of error, we intentionally send the original + // object key to _errorActions as in case of a redirect, we do + // not want to append index key to redirect location + if (err) { + log.trace('error retrieving object metadata', { error: err }); + let returnErr = err; + const bucketAuthorized = isBucketAuthorized( + bucket, + request.apiMethods || 'bucketGet', + constants.publicId, + null, + log, + request, + request.actionImplicitDenies, + true + ); + // if index object does not exist and bucket is private AWS + // returns 403 - AccessDenied error. + if (err.is.NoSuchKey && !bucketAuthorized) { + returnErr = errors.AccessDenied; } - // access granted to index document, needs a redirect 302 - // to the original key with trailing / - if (originalError) { - const redirectInfo = { withError: true, - location: `/${reqObjectKey}/` }; - return callback(errors.Found, false, null, corsHeaders, - redirectInfo, ''); + // Check if key is a folder containing index for redirect 302 + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/IndexDocumentSupport.html + if (!originalError && reqObjectKey && !reqObjectKey.endsWith('/')) { + appendWebsiteIndexDocument(request, websiteConfig.getIndexDocument(), true); + // propagate returnErr as originalError to be used if index is not found + return runWebsite(returnErr); } - const headerValResult = validateHeaders(request.headers, - objMD['last-modified'], objMD['content-md5']); - if (headerValResult.error) { - const err = headerValResult.error; - log.trace('header validation error', { error: err }); - return _errorActions(err, websiteConfig.getErrorDocument(), - routingRules, bucket, reqObjectKey, - corsHeaders, request, log, callback); - } - // check if object to serve has website redirect header - // Note: AWS prioritizes website configuration rules over - // object key's website redirect header, so we make the - // check at the end. 
- if (objMD['x-amz-website-redirect-location']) { - const redirectLocation = - objMD['x-amz-website-redirect-location']; - const redirectInfo = - extractRedirectInfo(redirectLocation); - log.trace('redirecting to x-amz-website-redirect-location', - { location: redirectLocation }); - return callback(null, false, null, corsHeaders, - redirectInfo, ''); - } - // got obj metadata, authorized and headers validated, - // good to go - const responseMetaHeaders = collectResponseHeaders(objMD, - corsHeaders); + return _errorActions( + originalError || returnErr, + websiteConfig.getErrorDocument(), + routingRules, + bucket, + reqObjectKey, + corsHeaders, + request, + log, + callback + ); + } + if ( + !isObjAuthorized( + bucket, + objMD, + request.apiMethods || 'objectGet', + constants.publicId, + null, + log, + request, + request.actionImplicitDenies, + true + ) + ) { + const err = errors.AccessDenied; + log.trace('request not authorized', { error: err }); + return _errorActions( + err, + websiteConfig.getErrorDocument(), + routingRules, + bucket, + reqObjectKey, + corsHeaders, + request, + log, + callback + ); + } - if (request.method === 'HEAD') { - pushMetric('headObject', log, { bucket: bucketName }); - monitoring.promMetrics('HEAD', - bucketName, '200', 'headObject'); - return callback(null, false, null, responseMetaHeaders); - } + // access granted to index document, needs a redirect 302 + // to the original key with trailing / + if (originalError) { + const redirectInfo = { withError: true, location: `/${reqObjectKey}/` }; + return callback(errors.Found, false, null, corsHeaders, redirectInfo, ''); + } + + const headerValResult = validateHeaders(request.headers, objMD['last-modified'], objMD['content-md5']); + if (headerValResult.error) { + const err = headerValResult.error; + log.trace('header validation error', { error: err }); + return _errorActions( + err, + websiteConfig.getErrorDocument(), + routingRules, + bucket, + reqObjectKey, + corsHeaders, + request, + log, + callback + ); + } + // check if object to serve has website redirect header + // Note: AWS prioritizes website configuration rules over + // object key's website redirect header, so we make the + // check at the end. 
+ if (objMD['x-amz-website-redirect-location']) { + const redirectLocation = objMD['x-amz-website-redirect-location']; + const redirectInfo = extractRedirectInfo(redirectLocation); + log.trace('redirecting to x-amz-website-redirect-location', { location: redirectLocation }); + return callback(null, false, null, corsHeaders, redirectInfo, ''); + } + // got obj metadata, authorized and headers validated, + // good to go + const responseMetaHeaders = collectResponseHeaders(objMD, corsHeaders); + + if (request.method === 'HEAD') { + pushMetric('headObject', log, { bucket: bucketName }); + monitoring.promMetrics('HEAD', bucketName, '200', 'headObject'); + return callback(null, false, null, responseMetaHeaders); + } - const dataLocator = objMD.location; - if (objMD['x-amz-server-side-encryption']) { - for (let i = 0; i < dataLocator.length; i++) { - dataLocator[i].masterKeyId = - objMD['x-amz-server-side-encryption-aws-' + - 'kms-key-id']; - dataLocator[i].algorithm = - objMD['x-amz-server-side-encryption']; - } + const dataLocator = objMD.location; + if (objMD['x-amz-server-side-encryption']) { + for (let i = 0; i < dataLocator.length; i++) { + dataLocator[i].masterKeyId = objMD['x-amz-server-side-encryption-aws-' + 'kms-key-id']; + dataLocator[i].algorithm = objMD['x-amz-server-side-encryption']; } - pushMetric('getObject', log, { - bucket: bucketName, - newByteLength: responseMetaHeaders['Content-Length'], - }); - monitoring.promMetrics('GET', bucketName, '200', - 'getObject', responseMetaHeaders['Content-Length']); - return callback(null, false, dataLocator, responseMetaHeaders); + } + pushMetric('getObject', log, { + bucket: bucketName, + newByteLength: responseMetaHeaders['Content-Length'], }); + monitoring.promMetrics('GET', bucketName, '200', 'getObject', responseMetaHeaders['Content-Length']); + return callback(null, false, dataLocator, responseMetaHeaders); + }); } return runWebsite(); diff --git a/lib/auth/in_memory/builder.js b/lib/auth/in_memory/builder.js index 37b358e6d5..068b58ceb4 100644 --- a/lib/auth/in_memory/builder.js +++ b/lib/auth/in_memory/builder.js @@ -1,5 +1,4 @@ -const serviceAccountPrefix = - require('arsenal').constants.zenkoServiceAccount; +const serviceAccountPrefix = require('arsenal').constants.zenkoServiceAccount; /** build simple authdata with only one account * @param {string} accessKey - account's accessKey @@ -9,26 +8,31 @@ const serviceAccountPrefix = * @param {string} userName - account's user name * @return {object} authdata - authdata with account's accessKey and secretKey */ -function buildAuthDataAccount(accessKey, secretKey, canonicalId, serviceName, -userName) { +function buildAuthDataAccount(accessKey, secretKey, canonicalId, serviceName, userName) { // TODO: remove specific check for clueso and generate unique // canonical id's for accounts - const finalCanonicalId = canonicalId || - (serviceName ? `${serviceAccountPrefix}/${serviceName}` : - '12349df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47qwer'); + const finalCanonicalId = + canonicalId || + (serviceName + ? 
`${serviceAccountPrefix}/${serviceName}` + : '12349df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47qwer'); const shortid = '123456789012'; return { - accounts: [{ - name: userName || 'CustomAccount', - email: 'customaccount1@setbyenv.com', - arn: `arn:aws:iam::${shortid}:root`, - canonicalID: finalCanonicalId, - shortid, - keys: [{ - access: accessKey, - secret: secretKey, - }], - }], + accounts: [ + { + name: userName || 'CustomAccount', + email: 'customaccount1@setbyenv.com', + arn: `arn:aws:iam::${shortid}:root`, + canonicalID: finalCanonicalId, + shortid, + keys: [ + { + access: accessKey, + secret: secretKey, + }, + ], + }, + ], }; } diff --git a/lib/auth/streamingV4/V4Transform.js b/lib/auth/streamingV4/V4Transform.js index e18d20e14f..383a18908d 100644 --- a/lib/auth/streamingV4/V4Transform.js +++ b/lib/auth/streamingV4/V4Transform.js @@ -28,8 +28,7 @@ class V4Transform extends Transform { * @param {function} errCb - callback called if an error occurs */ constructor(streamingV4Params, log, errCb) { - const { accessKey, signatureFromRequest, region, scopeDate, timestamp, - credentialScope } = streamingV4Params; + const { accessKey, signatureFromRequest, region, scopeDate, timestamp, credentialScope } = streamingV4Params; super({}); this.log = log; this.errCb = errCb; @@ -79,28 +78,24 @@ class V4Transform extends Transform { this.currentMetadata.push(remainingPlusStoredMetadata); return { completeMetadata: false }; } - let fullMetadata = remainingPlusStoredMetadata.slice(0, - lineBreakIndex); + let fullMetadata = remainingPlusStoredMetadata.slice(0, lineBreakIndex); // handle extra line break on end of data chunk if (fullMetadata.length === 0) { - const chunkWithoutLeadingLineBreak = remainingPlusStoredMetadata - .slice(2); + const chunkWithoutLeadingLineBreak = remainingPlusStoredMetadata.slice(2); // find second line break lineBreakIndex = chunkWithoutLeadingLineBreak.indexOf('\r\n'); if (lineBreakIndex < 0) { this.currentMetadata.push(chunkWithoutLeadingLineBreak); return { completeMetadata: false }; } - fullMetadata = chunkWithoutLeadingLineBreak.slice(0, - lineBreakIndex); + fullMetadata = chunkWithoutLeadingLineBreak.slice(0, lineBreakIndex); } const splitMeta = fullMetadata.toString().split(';'); this.log.trace('parsed full metadata for chunk', { splitMeta }); if (splitMeta.length !== 2) { - this.log.trace('chunk body did not contain correct ' + - 'metadata format'); + this.log.trace('chunk body did not contain correct ' + 'metadata format'); return { err: errors.InvalidArgument }; } let dataSize = splitMeta[0]; @@ -132,8 +127,7 @@ class V4Transform extends Transform { completeMetadata: true, // start slice at lineBreak plus 2 to remove line break at end of // metadata piece since length of '\r\n' is 2 - unparsedChunk: remainingPlusStoredMetadata - .slice(lineBreakIndex + 2), + unparsedChunk: remainingPlusStoredMetadata.slice(lineBreakIndex + 2), }; } @@ -146,10 +140,13 @@ class V4Transform extends Transform { */ _authenticate(dataToSend, done) { // use prior sig to construct new string to sign - const stringToSign = constructChunkStringToSign(this.timestamp, - this.credentialScope, this.lastSignature, dataToSend); - this.log.trace('constructed chunk string to sign', - { stringToSign }); + const stringToSign = constructChunkStringToSign( + this.timestamp, + this.credentialScope, + this.lastSignature, + dataToSend + ); + this.log.trace('constructed chunk string to sign', { stringToSign }); // once used prior sig to construct string to sign, reassign // lastSignature to 
current signature this.lastSignature = this.currentSignature; @@ -167,15 +164,16 @@ class V4Transform extends Transform { }; return vault.authenticateV4Request(vaultParams, null, {}, err => { if (err) { - this.log.trace('err from vault on streaming v4 auth', - { error: err, paramsSentToVault: vaultParams.data }); + this.log.trace('err from vault on streaming v4 auth', { + error: err, + paramsSentToVault: vaultParams.data, + }); return done(err); } return done(); }); } - /** * This function will parse the chunk into metadata and data, * use the metadata to authenticate with vault and send the @@ -199,9 +197,7 @@ class V4Transform extends Transform { } if (this.lastPieceDone) { const slice = chunk.slice(0, 10); - this.log.trace('received chunk after end.' + - 'See first 10 bytes of chunk', - { chunk: slice.toString() }); + this.log.trace('received chunk after end.' + 'See first 10 bytes of chunk', { chunk: slice.toString() }); return callback(); } let unparsedChunk = chunk; @@ -212,11 +208,9 @@ class V4Transform extends Transform { // async function done => { if (!this.haveMetadata) { - this.log.trace('do not have metadata so calling ' + - '_parseMetadata'); + this.log.trace('do not have metadata so calling ' + '_parseMetadata'); // need to parse our metadata - const parsedMetadataResults = - this._parseMetadata(unparsedChunk); + const parsedMetadataResults = this._parseMetadata(unparsedChunk); if (parsedMetadataResults.err) { return done(parsedMetadataResults.err); } @@ -250,8 +244,7 @@ class V4Transform extends Transform { } // parse just the next data piece without \r\n at the end // (therefore, minus 2) - const nextDataPiece = - unparsedChunk.slice(0, this.seekingDataSize - 2); + const nextDataPiece = unparsedChunk.slice(0, this.seekingDataSize - 2); // add parsed data piece to other currentData pieces // so that this.currentData is the full data piece nextDataPiece.copy(this.currentData, this.dataCursor); @@ -259,8 +252,7 @@ class V4Transform extends Transform { if (err) { return done(err); } - unparsedChunk = - unparsedChunk.slice(this.seekingDataSize); + unparsedChunk = unparsedChunk.slice(this.seekingDataSize); this.push(this.currentData); this.haveMetadata = false; this.seekingDataSize = -1; diff --git a/lib/auth/streamingV4/constructChunkStringToSign.js b/lib/auth/streamingV4/constructChunkStringToSign.js index 370501fe57..6a39806f93 100644 --- a/lib/auth/streamingV4/constructChunkStringToSign.js +++ b/lib/auth/streamingV4/constructChunkStringToSign.js @@ -13,20 +13,20 @@ const constants = require('../../../constants'); * @param {string} justDataChunk - data portion of chunk * @returns {string} stringToSign */ -function constructChunkStringToSign(timestamp, - credentialScope, lastSignature, justDataChunk) { +function constructChunkStringToSign(timestamp, credentialScope, lastSignature, justDataChunk) { let currentChunkHash; // for last chunk, there will be no data, so use emptyStringHash if (!justDataChunk) { currentChunkHash = constants.emptyStringHash; } else { currentChunkHash = crypto.createHash('sha256'); - currentChunkHash = currentChunkHash - .update(justDataChunk, 'binary').digest('hex'); + currentChunkHash = currentChunkHash.update(justDataChunk, 'binary').digest('hex'); } - return `AWS4-HMAC-SHA256-PAYLOAD\n${timestamp}\n` + + return ( + `AWS4-HMAC-SHA256-PAYLOAD\n${timestamp}\n` + `${credentialScope}\n${lastSignature}\n` + - `${constants.emptyStringHash}\n${currentChunkHash}`; + `${constants.emptyStringHash}\n${currentChunkHash}` + ); } module.exports = 
constructChunkStringToSign; diff --git a/lib/auth/vault.js b/lib/auth/vault.js index 3aeb06eaf9..5642e17563 100644 --- a/lib/auth/vault.js +++ b/lib/auth/vault.js @@ -49,20 +49,17 @@ function getMemBackend(config) { } switch (config.backends.auth) { -case 'mem': - implName = 'vaultMem'; - client = getMemBackend(config); - break; -case 'multiple': - implName = 'vaultChain'; - client = new ChainBackend('s3', [ - getMemBackend(config), - getVaultClient(config), - ]); - break; -default: // vault - implName = 'vault'; - client = getVaultClient(config); + case 'mem': + implName = 'vaultMem'; + client = getMemBackend(config); + break; + case 'multiple': + implName = 'vaultChain'; + client = new ChainBackend('s3', [getMemBackend(config), getVaultClient(config)]); + break; + default: // vault + implName = 'vault'; + client = getVaultClient(config); } module.exports = new Vault(client, implName); diff --git a/lib/data/wrapper.js b/lib/data/wrapper.js index cf3a216d4d..68caaf7f91 100644 --- a/lib/data/wrapper.js +++ b/lib/data/wrapper.js @@ -4,8 +4,7 @@ const { config } = require('../Config'); const kms = require('../kms/wrapper'); const metadata = require('../metadata/wrapper'); const vault = require('../auth/vault'); -const locationStorageCheck = - require('../api/apiUtils/object/locationStorageCheck'); +const locationStorageCheck = require('../api/apiUtils/object/locationStorageCheck'); const { DataWrapper, MultipleBackendGateway, parseLC } = storage.data; const { DataFileInterface } = storage.data.file; const inMemory = storage.data.inMemory.datastore.backend; @@ -28,8 +27,7 @@ if (config.backends.data === 'mem') { implName = 'file'; } else if (config.backends.data === 'multiple') { const clients = parseLC(config, vault); - client = new MultipleBackendGateway( - clients, metadata, locationStorageCheck); + client = new MultipleBackendGateway(clients, metadata, locationStorageCheck); implName = 'multipleBackends'; } else if (config.backends.data === 'cdmi') { if (!CdmiData) { @@ -45,14 +43,12 @@ if (config.backends.data === 'mem') { implName = 'cdmi'; } -const data = new DataWrapper( - client, implName, config, kms, metadata, locationStorageCheck, vault); +const data = new DataWrapper(client, implName, config, kms, metadata, locationStorageCheck, vault); config.on('location-constraints-update', () => { if (implName === 'multipleBackends') { const clients = parseLC(config, vault); - client = new MultipleBackendGateway( - clients, metadata, locationStorageCheck); + client = new MultipleBackendGateway(clients, metadata, locationStorageCheck); data.switch(client); } }); diff --git a/lib/kms/Cache.js b/lib/kms/Cache.js index c0ef71942a..43c8b8dbf2 100644 --- a/lib/kms/Cache.js +++ b/lib/kms/Cache.js @@ -41,7 +41,8 @@ class Cache { * @param {number} duration - Duration in milliseconds for cache validity. * @returns {boolean} true if the cache should be refreshed, else false. 
*/ - shouldRefresh(duration = 1 * 60 * 60 * 1000) { // Default: 1 hour + shouldRefresh(duration = 1 * 60 * 60 * 1000) { + // Default: 1 hour if (!this.lastChecked) { return true; } @@ -49,7 +50,7 @@ class Cache { const now = Date.now(); const elapsed = now - this.lastChecked; const jitter = Math.floor(Math.random() * 15 * 60 * 1000); // Up to 15 minutes - return elapsed > (duration - jitter); + return elapsed > duration - jitter; } /** diff --git a/lib/kms/common.js b/lib/kms/common.js index 2c0cd3658d..50e87308a7 100644 --- a/lib/kms/common.js +++ b/lib/kms/common.js @@ -57,16 +57,16 @@ class Common { return newIV; } - /** - * Derive key to use in cipher - * @param {number} cryptoScheme - cryptoScheme being used - * @param {buffer} dataKey - the unencrypted key (either from the - * appliance on a get or originally generated by kms in the case of a put) - * @param {object} log - logger object - * @param {function} cb - cb from createDecipher - * @returns {undefined} - * @callback called with (err, derivedKey, derivedIV) - */ + /** + * Derive key to use in cipher + * @param {number} cryptoScheme - cryptoScheme being used + * @param {buffer} dataKey - the unencrypted key (either from the + * appliance on a get or originally generated by kms in the case of a put) + * @param {object} log - logger object + * @param {function} cb - cb from createDecipher + * @returns {undefined} + * @callback called with (err, derivedKey, derivedIV) + */ static _deriveKey(cryptoScheme, dataKey, log, cb) { if (cryptoScheme <= 1) { /* we are not storing hashed human password. @@ -79,69 +79,59 @@ class Common { */ const salt = Buffer.from('ItsTasty', 'utf8'); const iterations = 1; - return crypto.pbkdf2( - dataKey, salt, iterations, - this._keySize(), 'sha1', (err, derivedKey) => { + return crypto.pbkdf2(dataKey, salt, iterations, this._keySize(), 'sha1', (err, derivedKey) => { + if (err) { + log.error('pbkdf2 function failed on key derivation', { error: err }); + cb(errors.InternalError); + return; + } + crypto.pbkdf2(derivedKey, salt, iterations, this._IVSize(), 'sha1', (err, derivedIV) => { if (err) { - log.error('pbkdf2 function failed on key derivation', - { error: err }); - cb(errors.InternalError); - return; + log.error('pbkdf2 function failed on IV derivation', { error: err }); + return cb(errors.InternalError); } - crypto.pbkdf2( - derivedKey, salt, iterations, - this._IVSize(), 'sha1', (err, derivedIV) => { - if (err) { - log.error( - 'pbkdf2 function failed on IV derivation', - { error: err }); - return cb(errors.InternalError); - } - // derivedKey is the actual data encryption or - // decryption key used in the AES ctr cipher - return cb(null, derivedKey, derivedIV); - }); + // derivedKey is the actual data encryption or + // decryption key used in the AES ctr cipher + return cb(null, derivedKey, derivedIV); }); + }); } log.error('Unknown cryptographic scheme', { cryptoScheme }); return cb(errors.InternalError); } - /** - * createDecipher - * @param {number} cryptoScheme - cryptoScheme being used - * @param {buffer} dataKey - the unencrypted key (either from the - * appliance on a get or originally generated by kms in the case of a put) - * @param {number} offset - offset - * @param {object} log - logger object - * @param {function} cb - cb from external call - * @returns {undefined} - * @callback called with (err, decipher: ReadWritable.stream) - */ + /** + * createDecipher + * @param {number} cryptoScheme - cryptoScheme being used + * @param {buffer} dataKey - the unencrypted key (either from the + * 
appliance on a get or originally generated by kms in the case of a put) + * @param {number} offset - offset + * @param {object} log - logger object + * @param {function} cb - cb from external call + * @returns {undefined} + * @callback called with (err, decipher: ReadWritable.stream) + */ static createDecipher(cryptoScheme, dataKey, offset, log, cb) { - this._deriveKey( - cryptoScheme, dataKey, log, - (err, derivedKey, derivedIV) => { - if (err) { - log.debug('key derivation failed', { error: err }); - return cb(err); - } - const aesBlockSize = this._aesBlockSize(); - const blocks = Math.floor(offset / aesBlockSize); - const toSkip = offset % aesBlockSize; - const iv = this._incrementIV(derivedIV, blocks); - const cipher = crypto.createDecipheriv(this._algorithm(), - derivedKey, iv); - if (toSkip) { - /* Above, we advanced to the latest boundary not + this._deriveKey(cryptoScheme, dataKey, log, (err, derivedKey, derivedIV) => { + if (err) { + log.debug('key derivation failed', { error: err }); + return cb(err); + } + const aesBlockSize = this._aesBlockSize(); + const blocks = Math.floor(offset / aesBlockSize); + const toSkip = offset % aesBlockSize; + const iv = this._incrementIV(derivedIV, blocks); + const cipher = crypto.createDecipheriv(this._algorithm(), derivedKey, iv); + if (toSkip) { + /* Above, we advanced to the latest boundary not greater than the offset amount. Here we advance by the toSkip amount if necessary. */ - const dummyBuffer = Buffer.alloc(toSkip); - cipher.write(dummyBuffer); - cipher.read(); - } - return cb(null, cipher); - }); + const dummyBuffer = Buffer.alloc(toSkip); + cipher.write(dummyBuffer); + cipher.read(); + } + return cb(null, cipher); + }); } /** diff --git a/lib/kms/file/backend.js b/lib/kms/file/backend.js index 5f430b21ad..f7384f68b5 100644 --- a/lib/kms/file/backend.js +++ b/lib/kms/file/backend.js @@ -17,7 +17,7 @@ const backend = { * @param {function} cb - callback * @returns {undefined} * @callback called with (err, masterKeyId: string) - */ + */ createBucketKey: function createBucketKeyMem(bucket, log, cb) { process.nextTick(() => { // Using createDataKey here for purposes of createBucketKeyMem @@ -44,88 +44,69 @@ const backend = { }); }, - /** - * - * @param {number} cryptoScheme - crypto scheme version number - * @param {string} masterKeyIdOrArn - master key; for the file backend - * the master key is the actual bucket master key rather than the key to - * retrieve the actual key from a dictionary - * @param {buffer} plainTextDataKey - data key - * @param {object} log - logger object - * @param {function} cb - callback - * @returns {undefined} - * @callback called with (err, cipheredDataKey: Buffer) - */ - cipherDataKey: function cipherDataKeyMem(cryptoScheme, - masterKeyIdOrArn, - plainTextDataKey, - log, - cb) { + /** + * + * @param {number} cryptoScheme - crypto scheme version number + * @param {string} masterKeyIdOrArn - master key; for the file backend + * the master key is the actual bucket master key rather than the key to + * retrieve the actual key from a dictionary + * @param {buffer} plainTextDataKey - data key + * @param {object} log - logger object + * @param {function} cb - callback + * @returns {undefined} + * @callback called with (err, cipheredDataKey: Buffer) + */ + cipherDataKey: function cipherDataKeyMem(cryptoScheme, masterKeyIdOrArn, plainTextDataKey, log, cb) { process.nextTick(() => { const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn); const masterKey = Buffer.from(masterKeyId, 'hex'); - Common.createCipher( - 
cryptoScheme, masterKey, 0, log, - (err, cipher) => { - if (err) { - cb(err); - return; - } - let cipheredDataKey = - cipher.update(plainTextDataKey); - // call final() to ensure that any bytes remaining in - // the output of the stream are captured - const final = cipher.final(); - if (final.length !== 0) { - cipheredDataKey = - Buffer.concat([cipheredDataKey, - final]); - } - cb(null, cipheredDataKey); - }); + Common.createCipher(cryptoScheme, masterKey, 0, log, (err, cipher) => { + if (err) { + cb(err); + return; + } + let cipheredDataKey = cipher.update(plainTextDataKey); + // call final() to ensure that any bytes remaining in + // the output of the stream are captured + const final = cipher.final(); + if (final.length !== 0) { + cipheredDataKey = Buffer.concat([cipheredDataKey, final]); + } + cb(null, cipheredDataKey); + }); }); }, - /** - * - * @param {number} cryptoScheme - crypto scheme version number - * @param {string} masterKeyIdOrArn - master key; for the file backend - * the master key is the actual bucket master key rather than the key to - * retrieve the actual key from a dictionary - * @param {buffer} cipheredDataKey - data key - * @param {object} log - logger object - * @param {function} cb - callback - * @returns {undefined} - * @callback called with (err, plainTextDataKey: Buffer) - */ - decipherDataKey: function decipherDataKeyMem(cryptoScheme, - masterKeyIdOrArn, - cipheredDataKey, - log, - cb) { + /** + * + * @param {number} cryptoScheme - crypto scheme version number + * @param {string} masterKeyIdOrArn - master key; for the file backend + * the master key is the actual bucket master key rather than the key to + * retrieve the actual key from a dictionary + * @param {buffer} cipheredDataKey - data key + * @param {object} log - logger object + * @param {function} cb - callback + * @returns {undefined} + * @callback called with (err, plainTextDataKey: Buffer) + */ + decipherDataKey: function decipherDataKeyMem(cryptoScheme, masterKeyIdOrArn, cipheredDataKey, log, cb) { process.nextTick(() => { const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn); const masterKey = Buffer.from(masterKeyId, 'hex'); - Common.createDecipher( - cryptoScheme, masterKey, 0, log, - (err, decipher) => { - if (err) { - cb(err); - return; - } - let plainTextDataKey = - decipher.update(cipheredDataKey); - const final = decipher.final(); - if (final.length !== 0) { - plainTextDataKey = - Buffer.concat([plainTextDataKey, - final]); - } - cb(null, plainTextDataKey); - }); + Common.createDecipher(cryptoScheme, masterKey, 0, log, (err, decipher) => { + if (err) { + cb(err); + return; + } + let plainTextDataKey = decipher.update(cipheredDataKey); + const final = decipher.final(); + if (final.length !== 0) { + plainTextDataKey = Buffer.concat([plainTextDataKey, final]); + } + cb(null, plainTextDataKey); + }); }); }, - }; module.exports = backend; diff --git a/lib/kms/in_memory/backend.js b/lib/kms/in_memory/backend.js index 609ab17333..15e76e519c 100644 --- a/lib/kms/in_memory/backend.js +++ b/lib/kms/in_memory/backend.js @@ -15,14 +15,14 @@ const backend = { supportsDefaultKeyPerAccount: false, - /** - * - * @param {BucketInfo} bucket - bucket info - * @param {object} log - logger object - * @param {function} cb - callback - * @returns {undefined} - * @callback called with (err, masterKeyId: string) - */ + /** + * + * @param {BucketInfo} bucket - bucket info + * @param {object} log - logger object + * @param {function} cb - callback + * @returns {undefined} + * @callback called with (err, masterKeyId: 
string) + */ createBucketKey: function createBucketKeyMem(bucket, log, cb) { process.nextTick(() => { // Using createDataKey here for purposes of createBucketKeyMem @@ -49,82 +49,63 @@ const backend = { }); }, - /** - * - * @param {number} cryptoScheme - crypto scheme version number - * @param {string} masterKeyIdOrArn - key to retrieve master key - * @param {buffer} plainTextDataKey - data key - * @param {object} log - logger object - * @param {function} cb - callback - * @returns {undefined} - * @callback called with (err, cipheredDataKey: Buffer) - */ - cipherDataKey: function cipherDataKeyMem(cryptoScheme, - masterKeyIdOrArn, - plainTextDataKey, - log, - cb) { + /** + * + * @param {number} cryptoScheme - crypto scheme version number + * @param {string} masterKeyIdOrArn - key to retrieve master key + * @param {buffer} plainTextDataKey - data key + * @param {object} log - logger object + * @param {function} cb - callback + * @returns {undefined} + * @callback called with (err, cipheredDataKey: Buffer) + */ + cipherDataKey: function cipherDataKeyMem(cryptoScheme, masterKeyIdOrArn, plainTextDataKey, log, cb) { process.nextTick(() => { const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn); - Common.createCipher( - cryptoScheme, kms[masterKeyId], 0, log, - (err, cipher) => { - if (err) { - cb(err); - return; - } - let cipheredDataKey = - cipher.update(plainTextDataKey); - // call final() to ensure that any bytes remaining in - // the output of the stream are captured - const final = cipher.final(); - if (final.length !== 0) { - cipheredDataKey = - Buffer.concat([cipheredDataKey, - final]); - } - cb(null, cipheredDataKey); - }); + Common.createCipher(cryptoScheme, kms[masterKeyId], 0, log, (err, cipher) => { + if (err) { + cb(err); + return; + } + let cipheredDataKey = cipher.update(plainTextDataKey); + // call final() to ensure that any bytes remaining in + // the output of the stream are captured + const final = cipher.final(); + if (final.length !== 0) { + cipheredDataKey = Buffer.concat([cipheredDataKey, final]); + } + cb(null, cipheredDataKey); + }); }); }, - /** - * - * @param {number} cryptoScheme - crypto scheme version number - * @param {string} masterKeyIdOrArn - key to retrieve master key - * @param {buffer} cipheredDataKey - data key - * @param {object} log - logger object - * @param {function} cb - callback - * @returns {undefined} - * @callback called with (err, plainTextDataKey: Buffer) - */ - decipherDataKey: function decipherDataKeyMem(cryptoScheme, - masterKeyIdOrArn, - cipheredDataKey, - log, - cb) { + /** + * + * @param {number} cryptoScheme - crypto scheme version number + * @param {string} masterKeyIdOrArn - key to retrieve master key + * @param {buffer} cipheredDataKey - data key + * @param {object} log - logger object + * @param {function} cb - callback + * @returns {undefined} + * @callback called with (err, plainTextDataKey: Buffer) + */ + decipherDataKey: function decipherDataKeyMem(cryptoScheme, masterKeyIdOrArn, cipheredDataKey, log, cb) { process.nextTick(() => { const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn); - Common.createDecipher( - cryptoScheme, kms[masterKeyId], 0, log, - (err, decipher) => { - if (err) { - cb(err); - return; - } - let plainTextDataKey = - decipher.update(cipheredDataKey); - const final = decipher.final(); - if (final.length !== 0) { - plainTextDataKey = - Buffer.concat([plainTextDataKey, - final]); - } - cb(null, plainTextDataKey); - }); + Common.createDecipher(cryptoScheme, kms[masterKeyId], 0, log, (err, decipher) => { + if (err) 
{ + cb(err); + return; + } + let plainTextDataKey = decipher.update(cipheredDataKey); + const final = decipher.final(); + if (final.length !== 0) { + plainTextDataKey = Buffer.concat([plainTextDataKey, final]); + } + cb(null, plainTextDataKey); + }); }); }, - }; module.exports = { diff --git a/lib/kms/utilities.js b/lib/kms/utilities.js index 35c9e56800..47d5ae7303 100644 --- a/lib/kms/utilities.js +++ b/lib/kms/utilities.js @@ -5,13 +5,7 @@ const http = require('http'); const https = require('https'); const logger = require('../utilities/logger'); -function _createEncryptedBucket(host, - port, - bucketName, - accessKey, - secretKey, - verbose, ssl, - locationConstraint) { +function _createEncryptedBucket(host, port, bucketName, accessKey, secretKey, verbose, ssl, locationConstraint) { const options = { host, port, @@ -55,10 +49,11 @@ function _createEncryptedBucket(host, logger.info('request headers', { headers: request._headers }); } if (locationConstraint) { - const createBucketConfiguration = '' + - `${locationConstraint}` + - ''; + const createBucketConfiguration = + '' + + `${locationConstraint}` + + ''; request.write(createBucketConfiguration); } request.end(); @@ -81,20 +76,17 @@ function createEncryptedBucket() { .option('-p, --port ', 'Port of the server') .option('-s, --ssl', 'Enable ssl') .option('-v, --verbose') - .option('-l, --location-constraint ', - 'location Constraint') + .option('-l, --location-constraint ', 'location Constraint') .parse(process.argv); - const { host, port, accessKey, secretKey, bucket, verbose, ssl, - locationConstraint } = commander; + const { host, port, accessKey, secretKey, bucket, verbose, ssl, locationConstraint } = commander; if (!host || !port || !accessKey || !secretKey || !bucket) { logger.error('missing parameter'); commander.outputHelp(); process.exit(1); } - _createEncryptedBucket(host, port, bucket, accessKey, secretKey, verbose, - ssl, locationConstraint); + _createEncryptedBucket(host, port, bucket, accessKey, secretKey, verbose, ssl, locationConstraint); } module.exports = { diff --git a/lib/kms/wrapper.js b/lib/kms/wrapper.js index 772f6d2204..85aabd5440 100644 --- a/lib/kms/wrapper.js +++ b/lib/kms/wrapper.js @@ -30,9 +30,7 @@ function getScalityKms() { scalityKMS = new ScalityKMS(config.kms); scalityKMSImpl = 'scalityKms'; } catch (error) { - logger.warn('scality kms unavailable. ' + - 'Using file kms backend unless mem specified.', - { error }); + logger.warn('scality kms unavailable. 
' + 'Using file kms backend unless mem specified.', { error }); scalityKMS = file; scalityKMSImpl = 'fileKms'; } @@ -100,9 +98,7 @@ if (config.sseMigration) { config.sseMigration.previousKeyProvider ); availableBackends.push(previousBackend); - previousIdentifier = `${previousBackend.type - }:${previousBackend.protocol - }:${previousBackend.provider}`; + previousIdentifier = `${previousBackend.type}:${previousBackend.protocol}:${previousBackend.provider}`; // Pre instantiate previous backend as for now only internal backend (file) is supported // for future multiple external backend we should consider keeping open connection to @@ -152,10 +148,10 @@ function getClientForKey(key, log) { // Only pre instantiated previous KMS from sseMigration is supported now // Here we could instantiate other provider on the fly to manage multi providers - log.error('KMS key doesn\'t match any KMS instance', { key, detail, availableBackends }); - return { error: new errors.InvalidArgument - // eslint-disable-next-line new-cap - .customizeDescription(`KMS unknown provider for key ${key}`), + log.error("KMS key doesn't match any KMS instance", { key, detail, availableBackends }); + return { + error: new // eslint-disable-next-line new-cap + errors.InvalidArgument.customizeDescription(`KMS unknown provider for key ${key}`), }; } @@ -170,20 +166,20 @@ class KMS { return client.backend.arnPrefix; } - /** - * Create a new bucket encryption key. - * - * This function is responsible for creating an encryption key for a bucket. - * If the client supports using a default master encryption key per account - * and one is configured, the key is managed at the account level by Vault. - * Otherwise, a bucket-level encryption key is created for legacy support. - * - * @param {BucketInfo} bucket - bucket info - * @param {object} log - logger object - * @param {function} cb - callback - * @returns {undefined} - * @callback called with (err, { masterKeyId: string, masterKeyArn: string, isAccountEncryptionEnabled: boolean }) - */ + /** + * Create a new bucket encryption key. + * + * This function is responsible for creating an encryption key for a bucket. + * If the client supports using a default master encryption key per account + * and one is configured, the key is managed at the account level by Vault. + * Otherwise, a bucket-level encryption key is created for legacy support. 
+ * + * @param {BucketInfo} bucket - bucket info + * @param {object} log - logger object + * @param {function} cb - callback + * @returns {undefined} + * @callback called with (err, { masterKeyId: string, masterKeyArn: string, isAccountEncryptionEnabled: boolean }) + */ static createBucketKey(bucket, log, cb) { // always use current client for create log.debug('creating a new bucket key'); @@ -194,14 +190,19 @@ class KMS { if (client.supportsDefaultKeyPerAccount && config.defaultEncryptionKeyPerAccount) { return vault.getOrCreateEncryptionKeyId(bucket.getOwner(), log, (err, data) => { if (err) { - log.debug('error retrieving or creating the default encryption key at the account level from vault', - { implName, error: err }); + log.debug( + 'error retrieving or creating the default encryption key at the account level from vault', + { implName, error: err } + ); return cb(err); } const { encryptionKeyId, action } = data; - log.trace('default encryption key retrieved or created at the account level from vault', - { implName, encryptionKeyId, action }); + log.trace('default encryption key retrieved or created at the account level from vault', { + implName, + encryptionKeyId, + action, + }); return cb(null, { // vault only return arn masterKeyId: encryptionKeyId, @@ -221,15 +222,15 @@ class KMS { }); } - /** - * - * @param {BucketInfo} bucket - bucket info - * @param {object} sseConfig - SSE configuration - * @param {object} log - logger object - * @param {function} cb - callback - * @returns {undefined} - * @callback called with (err, serverSideEncryptionInfo: object) - */ + /** + * + * @param {BucketInfo} bucket - bucket info + * @param {object} sseConfig - SSE configuration + * @param {object} log - logger object + * @param {function} cb - callback + * @returns {undefined} + * @callback called with (err, serverSideEncryptionInfo: object) + */ static bucketLevelEncryption(bucket, sseConfig, log, cb) { /* The purpose of bucket level encryption is so that the client does not @@ -258,8 +259,7 @@ class KMS { } serverSideEncryptionInfo.configuredMasterKeyId = configuredMasterKeyId; } else { - serverSideEncryptionInfo.configuredMasterKeyId = - `${client.backend.arnPrefix}${configuredMasterKeyId}`; + serverSideEncryptionInfo.configuredMasterKeyId = `${client.backend.arnPrefix}${configuredMasterKeyId}`; } return process.nextTick(() => cb(null, serverSideEncryptionInfo)); @@ -280,9 +280,9 @@ class KMS { return cb(null, serverSideEncryptionInfo); }); } - /* - * no encryption - */ + /* + * no encryption + */ return cb(null, null); } @@ -311,26 +311,25 @@ class KMS { }); } - /** - * createCipherBundle - * @param {object} serverSideEncryptionInfo - info for encryption - * @param {number} serverSideEncryptionInfo.cryptoScheme - - * cryptoScheme used - * @param {string} serverSideEncryptionInfo.algorithm - - * algorithm to use - * @param {string} serverSideEncryptionInfo.masterKeyId - - * key to get master key - * @param {boolean} serverSideEncryptionInfo.mandatory - - * true for mandatory encryption - * @param {object} log - logger object - * @param {function} cb - cb from external call - * @param {object} [opts] - additional options - * @param {boolean} [opts.previousOk] - allow usage of previous KMS (for ongoing MPU not migrated) - * @returns {undefined} - * @callback called with (err, cipherBundle) - */ - static createCipherBundle(serverSideEncryptionInfo, - log, cb, opts) { + /** + * createCipherBundle + * @param {object} serverSideEncryptionInfo - info for encryption + * @param {number} 
serverSideEncryptionInfo.cryptoScheme - + * cryptoScheme used + * @param {string} serverSideEncryptionInfo.algorithm - + * algorithm to use + * @param {string} serverSideEncryptionInfo.masterKeyId - + * key to get master key + * @param {boolean} serverSideEncryptionInfo.mandatory - + * true for mandatory encryption + * @param {object} log - logger object + * @param {function} cb - cb from external call + * @param {object} [opts] - additional options + * @param {boolean} [opts.previousOk] - allow usage of previous KMS (for ongoing MPU not migrated) + * @returns {undefined} + * @callback called with (err, cipherBundle) + */ + static createCipherBundle(serverSideEncryptionInfo, log, cb, opts) { const { algorithm, configuredMasterKeyId, masterKeyId: bucketMasterKeyId } = serverSideEncryptionInfo; let masterKeyId = bucketMasterKeyId; @@ -345,14 +344,18 @@ class KMS { if (error) { return cb(error); } - if (previousIdentifier - && clientIdentifier === previousIdentifier - && clientIdentifier !== currentIdentifier - && (opts && !opts.previousOk) + if ( + previousIdentifier && + clientIdentifier === previousIdentifier && + clientIdentifier !== currentIdentifier && + opts && + !opts.previousOk ) { - return cb(errors.InvalidArgument - .customizeDescription( - 'KMS cannot use previous provider to encrypt new objects if a new provider is configured')); + return cb( + errors.InvalidArgument.customizeDescription( + 'KMS cannot use previous provider to encrypt new objects if a new provider is configured' + ) + ); } const cipherBundle = { @@ -363,9 +366,10 @@ class KMS { cipher: null, }; - return async.waterfall([ - function generateDataKey(next) { - /* There are 2 ways of generating a datakey : + return async.waterfall( + [ + function generateDataKey(next) { + /* There are 2 ways of generating a datakey : - using the generateDataKey of the KMS backend if it exists (currently only implemented for the AWS KMS backend). This is the preferred solution since a dedicated KMS should offer a better @@ -374,92 +378,96 @@ class KMS { encrypt the datakey. This method is used when the KMS backend doesn't provide the generateDataKey method. 
*/ - let res; - if (client.generateDataKey) { - log.debug('creating a data key using the KMS'); - res = client.generateDataKey(cipherBundle.cryptoScheme, - key, - log, (err, plainTextDataKey, cipheredDataKey) => { - if (err) { - log.debug('error generating a new data key from KMS', - { implName, error: err }); - return next(err); + let res; + if (client.generateDataKey) { + log.debug('creating a data key using the KMS'); + res = client.generateDataKey( + cipherBundle.cryptoScheme, + key, + log, + (err, plainTextDataKey, cipheredDataKey) => { + if (err) { + log.debug('error generating a new data key from KMS', { implName, error: err }); + return next(err); + } + log.trace('data key generated by the kms'); + return next(null, plainTextDataKey, cipheredDataKey); } - log.trace('data key generated by the kms'); - return next(null, plainTextDataKey, cipheredDataKey); - }); - } else { - log.debug('creating a new data key'); - const plainTextDataKey = Common.createDataKey(); - - log.debug('ciphering the data key'); - res = client.cipherDataKey(cipherBundle.cryptoScheme, - key, - plainTextDataKey, log, (err, cipheredDataKey) => { - if (err) { - log.debug('error encrypting the data key using KMS', - { implName, error: err }); - return next(err); + ); + } else { + log.debug('creating a new data key'); + const plainTextDataKey = Common.createDataKey(); + + log.debug('ciphering the data key'); + res = client.cipherDataKey( + cipherBundle.cryptoScheme, + key, + plainTextDataKey, + log, + (err, cipheredDataKey) => { + if (err) { + log.debug('error encrypting the data key using KMS', { implName, error: err }); + return next(err); + } + log.trace('data key ciphered by the kms'); + return next(null, plainTextDataKey, cipheredDataKey); } - log.trace('data key ciphered by the kms'); - return next(null, plainTextDataKey, cipheredDataKey); - }); - } - return res; - }, - function createCipher(plainTextDataKey, cipheredDataKey, next) { - log.debug('creating a cipher'); - cipherBundle.cipheredDataKey = - cipheredDataKey.toString('base64'); - return Common.createCipher(cipherBundle.cryptoScheme, - plainTextDataKey, 0, log, (err, cipher) => { + ); + } + return res; + }, + function createCipher(plainTextDataKey, cipheredDataKey, next) { + log.debug('creating a cipher'); + cipherBundle.cipheredDataKey = cipheredDataKey.toString('base64'); + return Common.createCipher(cipherBundle.cryptoScheme, plainTextDataKey, 0, log, (err, cipher) => { plainTextDataKey.fill(0); if (err) { - log.debug('error from kms', - { implName, error: err }); + log.debug('error from kms', { implName, error: err }); return next(err); } log.trace('cipher created by the kms'); return next(null, cipher); }); - }, - function finishCipherBundle(cipher, next) { - cipherBundle.cipher = cipher; - return next(null, cipherBundle); - }, - ], (err, cipherBundle) => { - if (err) { - log.error('error processing cipher bundle', - { implName, error: err }); + }, + function finishCipherBundle(cipher, next) { + cipherBundle.cipher = cipher; + return next(null, cipherBundle); + }, + ], + (err, cipherBundle) => { + if (err) { + log.error('error processing cipher bundle', { implName, error: err }); + } + return cb(err, cipherBundle); } - return cb(err, cipherBundle); - }); + ); } - /** - * createDecipherBundle - * @param {object} serverSideEncryptionInfo - info for decryption - * @param {number} serverSideEncryptionInfo.cryptoScheme - - * cryptoScheme used - * @param {string} serverSideEncryptionInfo.algorithm - - * algorithm to use - * @param {string} 
serverSideEncryptionInfo.masterKeyId - - * key to get master key - * @param {boolean} serverSideEncryptionInfo.mandatory - - * true for mandatory encryption - * @param {buffer} serverSideEncryptionInfo.cipheredDataKey - - * ciphered data key - * @param {number} offset - offset for decryption - * @param {object} log - logger object - * @param {function} cb - cb from external call - * @returns {undefined} - * @callback called with (err, decipherBundle) - */ - static createDecipherBundle(serverSideEncryptionInfo, offset, - log, cb) { - if (!serverSideEncryptionInfo.masterKeyId || + /** + * createDecipherBundle + * @param {object} serverSideEncryptionInfo - info for decryption + * @param {number} serverSideEncryptionInfo.cryptoScheme - + * cryptoScheme used + * @param {string} serverSideEncryptionInfo.algorithm - + * algorithm to use + * @param {string} serverSideEncryptionInfo.masterKeyId - + * key to get master key + * @param {boolean} serverSideEncryptionInfo.mandatory - + * true for mandatory encryption + * @param {buffer} serverSideEncryptionInfo.cipheredDataKey - + * ciphered data key + * @param {number} offset - offset for decryption + * @param {object} log - logger object + * @param {function} cb - cb from external call + * @returns {undefined} + * @callback called with (err, decipherBundle) + */ + static createDecipherBundle(serverSideEncryptionInfo, offset, log, cb) { + if ( + !serverSideEncryptionInfo.masterKeyId || !serverSideEncryptionInfo.cipheredDataKey || - !serverSideEncryptionInfo.cryptoScheme) { + !serverSideEncryptionInfo.cryptoScheme + ) { log.error('Invalid cryptographic information', { implName }); return cb(errors.InternalError); } @@ -469,55 +477,61 @@ class KMS { }; // shadowing global client for key - implName already used can't be shadowed here - const { error, client, implName: _impl, key } = getClientForKey( - serverSideEncryptionInfo.masterKeyId, log); + const { error, client, implName: _impl, key } = getClientForKey(serverSideEncryptionInfo.masterKeyId, log); if (error) { return cb(error); } - return async.waterfall([ - function decipherDataKey(next) { - return client.decipherDataKey( - decipherBundle.cryptoScheme, - key, - serverSideEncryptionInfo.cipheredDataKey, - log, (err, plainTextDataKey) => { - log.debug('deciphering a data key'); - if (err) { - log.debug('error from kms', - { implName: _impl, error: err }); - return next(err); + return async.waterfall( + [ + function decipherDataKey(next) { + return client.decipherDataKey( + decipherBundle.cryptoScheme, + key, + serverSideEncryptionInfo.cipheredDataKey, + log, + (err, plainTextDataKey) => { + log.debug('deciphering a data key'); + if (err) { + log.debug('error from kms', { implName: _impl, error: err }); + return next(err); + } + log.trace('data key deciphered by the kms'); + return next(null, plainTextDataKey); } - log.trace('data key deciphered by the kms'); - return next(null, plainTextDataKey); - }); - }, - function createDecipher(plainTextDataKey, next) { - log.debug('creating a decipher'); - return Common.createDecipher(decipherBundle.cryptoScheme, - plainTextDataKey, offset, log, (err, decipher) => { - plainTextDataKey.fill(0); - if (err) { - log.debug('error from kms', - { implName: _impl, error: err }); - return next(err); + ); + }, + function createDecipher(plainTextDataKey, next) { + log.debug('creating a decipher'); + return Common.createDecipher( + decipherBundle.cryptoScheme, + plainTextDataKey, + offset, + log, + (err, decipher) => { + plainTextDataKey.fill(0); + if (err) { + 
log.debug('error from kms', { implName: _impl, error: err }); + return next(err); + } + log.trace('decipher created by the kms'); + return next(null, decipher); } - log.trace('decipher created by the kms'); - return next(null, decipher); - }); - }, - function finishDecipherBundle(decipher, next) { - decipherBundle.decipher = decipher; - return next(null, decipherBundle); - }, - ], (err, decipherBundle) => { - if (err) { - log.error('error processing decipher bundle', - { implName: _impl, error: err }); - return cb(err); + ); + }, + function finishDecipherBundle(decipher, next) { + decipherBundle.decipher = decipher; + return next(null, decipherBundle); + }, + ], + (err, decipherBundle) => { + if (err) { + log.error('error processing decipher bundle', { implName: _impl, error: err }); + return cb(err); + } + return cb(err, decipherBundle); } - return cb(err, decipherBundle); - }); + ); } static checkHealth(log, cb) { diff --git a/lib/management/agentClient.js b/lib/management/agentClient.js index f88b199565..e10490766f 100644 --- a/lib/management/agentClient.js +++ b/lib/management/agentClient.js @@ -6,7 +6,6 @@ const _config = require('../Config').config; const { patchConfiguration } = require('./configuration'); const { reshapeExceptionError } = arsenal.errorUtils; - const managementAgentMessageType = { /** Message that contains the loaded overlay */ NEW_OVERLAY: 1, @@ -14,7 +13,6 @@ const managementAgentMessageType = { const CONNECTION_RETRY_TIMEOUT_MS = 5000; - function initManagementClient() { const { host, port } = _config.managementAgent; @@ -62,22 +60,22 @@ function initManagementClient() { } switch (msg.messageType) { - case managementAgentMessageType.NEW_OVERLAY: - patchConfiguration(msg.payload, log, err => { - if (err) { - log.error('failed to patch overlay', { - error: reshapeExceptionError(err), - method, - }); - } - }); - return; - default: - log.error('new overlay message with unmanaged message type', { - method, - type: msg.messageType, - }); - return; + case managementAgentMessageType.NEW_OVERLAY: + patchConfiguration(msg.payload, log, err => { + if (err) { + log.error('failed to patch overlay', { + error: reshapeExceptionError(err), + method, + }); + } + }); + return; + default: + log.error('new overlay message with unmanaged message type', { + method, + type: msg.messageType, + }); + return; } }); } @@ -86,7 +84,6 @@ function isManagementAgentUsed() { return process.env.MANAGEMENT_USE_AGENT === '1'; } - module.exports = { managementAgentMessageType, initManagementClient, diff --git a/lib/management/configuration.js b/lib/management/configuration.js index bd3cc08bea..18c2bf485d 100644 --- a/lib/management/configuration.js +++ b/lib/management/configuration.js @@ -19,9 +19,10 @@ function overlayHasVersion(overlay) { } function remoteOverlayIsNewer(cachedOverlay, remoteOverlay) { - return (overlayHasVersion(remoteOverlay) && - (!overlayHasVersion(cachedOverlay) || - remoteOverlay.version > cachedOverlay.version)); + return ( + overlayHasVersion(remoteOverlay) && + (!overlayHasVersion(cachedOverlay) || remoteOverlay.version > cachedOverlay.version) + ); } /** @@ -41,10 +42,8 @@ function patchConfiguration(newConf, log, cb) { return process.nextTick(cb, null, newConf); } - if (_config.overlayVersion !== undefined && - newConf.version <= _config.overlayVersion) { - log.debug('configuration version already applied', - { configurationVersion: newConf.version }); + if (_config.overlayVersion !== undefined && newConf.version <= _config.overlayVersion) { + log.debug('configuration 
version already applied', { configurationVersion: newConf.version }); return process.nextTick(cb, null, newConf); } return getStoredCredentials(log, (err, creds) => { @@ -62,8 +61,12 @@ function patchConfiguration(newConf, log, cb) { serviceName = u.accountType.split('-')[1]; } const newAccount = buildAuthDataAccount( - u.accessKey, secretKey, u.canonicalId, serviceName, - u.userName); + u.accessKey, + secretKey, + u.canonicalId, + serviceName, + u.userName + ); accounts.push(newAccount.accounts[0]); } }); @@ -86,29 +89,30 @@ function patchConfiguration(newConf, log, cb) { _config.setLocationConstraints(locations); } catch (error) { const exceptionError = reshapeExceptionError(error); - log.error('could not apply configuration version location ' + - 'constraints', { error: exceptionError, - method: 'getStoredCredentials' }); + log.error('could not apply configuration version location ' + 'constraints', { + error: exceptionError, + method: 'getStoredCredentials', + }); return cb(exceptionError); } try { const locationsWithReplicationBackend = Object.keys(locations) - // NOTE: In Orbit, we don't need to have Scality location in our - // replication endpoind config, since we do not replicate to - // any Scality Instance yet. - .filter(key => replicationBackends - [locations[key].type]) - .reduce((obj, key) => { - /* eslint no-param-reassign:0 */ - obj[key] = locations[key]; - return obj; - }, {}); - _config.setReplicationEndpoints( - locationsWithReplicationBackend); + // NOTE: In Orbit, we don't need to have Scality location in our + // replication endpoind config, since we do not replicate to + // any Scality Instance yet. + .filter(key => replicationBackends[locations[key].type]) + .reduce((obj, key) => { + /* eslint no-param-reassign:0 */ + obj[key] = locations[key]; + return obj; + }, {}); + _config.setReplicationEndpoints(locationsWithReplicationBackend); } catch (error) { const exceptionError = reshapeExceptionError(error); - log.error('could not apply replication endpoints', - { error: exceptionError, method: 'getStoredCredentials' }); + log.error('could not apply replication endpoints', { + error: exceptionError, + method: 'getStoredCredentials', + }); return cb(exceptionError); } } @@ -118,18 +122,15 @@ function patchConfiguration(newConf, log, cb) { _config.setPublicInstanceId(newConf.instanceId); if (newConf.browserAccess) { - if (Boolean(_config.browserAccessEnabled) !== - Boolean(newConf.browserAccess.enabled)) { - _config.browserAccessEnabled = - Boolean(newConf.browserAccess.enabled); + if (Boolean(_config.browserAccessEnabled) !== Boolean(newConf.browserAccess.enabled)) { + _config.browserAccessEnabled = Boolean(newConf.browserAccess.enabled); _config.emit('browser-access-enabled-change'); } } _config.overlayVersion = newConf.version; - log.info('applied configuration version', - { configurationVersion: _config.overlayVersion }); + log.info('applied configuration version', { configurationVersion: _config.overlayVersion }); return cb(null, newConf); }); @@ -149,28 +150,33 @@ function patchConfiguration(newConf, log, cb) { function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) { if (remoteOverlayIsNewer(cachedOverlay, remoteOverlay)) { const objName = `configuration/overlay/${remoteOverlay.version}`; - metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay, - {}, log, error => { - if (error) { - const exceptionError = reshapeExceptionError(error); - log.error('could not save configuration', - { error: exceptionError, - method: 
'saveConfigurationVersion', - configurationVersion: remoteOverlay.version }); - cb(exceptionError); - return; + metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay, {}, log, error => { + if (error) { + const exceptionError = reshapeExceptionError(error); + log.error('could not save configuration', { + error: exceptionError, + method: 'saveConfigurationVersion', + configurationVersion: remoteOverlay.version, + }); + cb(exceptionError); + return; + } + metadata.putObjectMD( + managementDatabaseName, + latestOverlayVersionKey, + remoteOverlay.version, + {}, + log, + error => { + if (error) { + log.error('could not save configuration version', { + configurationVersion: remoteOverlay.version, + }); + } + cb(error, remoteOverlay); } - metadata.putObjectMD(managementDatabaseName, - latestOverlayVersionKey, remoteOverlay.version, {}, log, - error => { - if (error) { - log.error('could not save configuration version', { - configurationVersion: remoteOverlay.version, - }); - } - cb(error, remoteOverlay); - }); - }); + ); + }); } else { log.debug('no remote configuration to cache yet'); process.nextTick(cb, null, remoteOverlay); @@ -187,25 +193,29 @@ function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) { * @returns {undefined} */ function loadCachedOverlay(log, callback) { - return metadata.getObjectMD(managementDatabaseName, - latestOverlayVersionKey, {}, log, (err, version) => { - if (err) { - if (err.is.NoSuchKey) { - return process.nextTick(callback, null, {}); - } - return callback(err); + return metadata.getObjectMD(managementDatabaseName, latestOverlayVersionKey, {}, log, (err, version) => { + if (err) { + if (err.is.NoSuchKey) { + return process.nextTick(callback, null, {}); } - return metadata.getObjectMD(managementDatabaseName, - `configuration/overlay/${version}`, {}, log, (err, conf) => { - if (err) { - if (err.is.NoSuchKey) { - return process.nextTick(callback, null, {}); - } - return callback(err); + return callback(err); + } + return metadata.getObjectMD( + managementDatabaseName, + `configuration/overlay/${version}`, + {}, + log, + (err, conf) => { + if (err) { + if (err.is.NoSuchKey) { + return process.nextTick(callback, null, {}); } - return callback(null, conf); - }); - }); + return callback(err); + } + return callback(null, conf); + } + ); + }); } function applyAndSaveOverlay(overlay, log) { diff --git a/lib/management/credentials.js b/lib/management/credentials.js index 814a731dc9..48eb485bbe 100644 --- a/lib/management/credentials.js +++ b/lib/management/credentials.js @@ -21,8 +21,7 @@ const { reshapeExceptionError } = arsenal.errorUtils; * @returns {undefined} */ function getStoredCredentials(log, callback) { - metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {}, - log, callback); + metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {}, log, callback); } function issueCredentials(managementEndpoint, instanceId, log, callback) { @@ -36,8 +35,10 @@ function issueCredentials(managementEndpoint, instanceId, log, callback) { publicKey, }; - request.post(`${managementEndpoint}/${instanceId}/register`, - { body: postData, json: true }, (error, response, body) => { + request.post( + `${managementEndpoint}/${instanceId}/register`, + { body: postData, json: true }, + (error, response, body) => { if (error) { return callback(error); } @@ -51,11 +52,11 @@ function issueCredentials(managementEndpoint, instanceId, log, callback) { body.privateKey = privateKey; /* eslint-enable no-param-reassign */ return callback(null, 
body); - }); + } + ); } -function confirmInstanceCredentials( - managementEndpoint, instanceId, creds, log, callback) { +function confirmInstanceCredentials(managementEndpoint, instanceId, creds, log, callback) { const postData = { serial: creds.serial || 0, publicKey: creds.publicKey, @@ -68,16 +69,15 @@ function confirmInstanceCredentials( body: postData, }; - request.post(`${managementEndpoint}/${instanceId}/confirm`, - opts, (error, response) => { - if (error) { - return callback(error); - } - if (response.statusCode === 200) { - return callback(null, instanceId, creds.token); - } - return callback(arsenal.errors.InternalError); - }); + request.post(`${managementEndpoint}/${instanceId}/confirm`, opts, (error, response) => { + if (error) { + return callback(error); + } + if (response.statusCode === 200) { + return callback(null, instanceId, creds.token); + } + return callback(arsenal.errors.InternalError); + }); } /** @@ -95,35 +95,37 @@ function confirmInstanceCredentials( * * @returns {undefined} */ -function initManagementCredentials( - managementEndpoint, instanceId, log, callback) { +function initManagementCredentials(managementEndpoint, instanceId, log, callback) { getStoredCredentials(log, (error, value) => { if (error) { if (error.is.NoSuchKey) { - return issueCredentials(managementEndpoint, instanceId, log, - (error, value) => { + return issueCredentials(managementEndpoint, instanceId, log, (error, value) => { if (error) { - log.error('could not issue token', - { error: reshapeExceptionError(error), - method: 'initManagementCredentials' }); + log.error('could not issue token', { + error: reshapeExceptionError(error), + method: 'initManagementCredentials', + }); return callback(error); } log.debug('saving token'); - return metadata.putObjectMD(managementDatabaseName, - tokenConfigurationKey, value, {}, log, error => { + return metadata.putObjectMD( + managementDatabaseName, + tokenConfigurationKey, + value, + {}, + log, + error => { if (error) { - log.error('could not save token', - { error: reshapeExceptionError(error), - method: 'initManagementCredentials', - }); + log.error('could not save token', { + error: reshapeExceptionError(error), + method: 'initManagementCredentials', + }); return callback(error); } - log.info('saved token locally, ' + - 'confirming instance'); - return confirmInstanceCredentials( - managementEndpoint, instanceId, value, log, - callback); - }); + log.info('saved token locally, ' + 'confirming instance'); + return confirmInstanceCredentials(managementEndpoint, instanceId, value, log, callback); + } + ); }); } log.debug('could not get token', { error }); diff --git a/lib/management/index.js b/lib/management/index.js index d98f3c46bc..9f5b48250f 100644 --- a/lib/management/index.js +++ b/lib/management/index.js @@ -4,11 +4,7 @@ const async = require('async'); const metadata = require('../metadata/wrapper'); const logger = require('../utilities/logger'); -const { - loadCachedOverlay, - managementDatabaseName, - patchConfiguration, -} = require('./configuration'); +const { loadCachedOverlay, managementDatabaseName, patchConfiguration } = require('./configuration'); const { initManagementCredentials } = require('./credentials'); const { startWSManagementClient } = require('./push'); const { startPollingManagementClient } = require('./poll'); @@ -17,20 +13,20 @@ const { isManagementAgentUsed } = require('./agentClient'); const initRemoteManagementRetryDelay = 10000; -const managementEndpointRoot = - process.env.MANAGEMENT_ENDPOINT || - 
'https://api.zenko.io'; +const managementEndpointRoot = process.env.MANAGEMENT_ENDPOINT || 'https://api.zenko.io'; const managementEndpoint = `${managementEndpointRoot}/api/v1/instance`; -const pushEndpointRoot = - process.env.PUSH_ENDPOINT || - 'https://push.api.zenko.io'; +const pushEndpointRoot = process.env.PUSH_ENDPOINT || 'https://push.api.zenko.io'; const pushEndpoint = `${pushEndpointRoot}/api/v1/instance`; function initManagementDatabase(log, callback) { // XXX choose proper owner names - const md = new arsenal.models.BucketInfo(managementDatabaseName, 'owner', - 'owner display name', new Date().toJSON()); + const md = new arsenal.models.BucketInfo( + managementDatabaseName, + 'owner', + 'owner display name', + new Date().toJSON() + ); metadata.createBucket(managementDatabaseName, md, log, error => { if (error) { @@ -38,9 +34,10 @@ function initManagementDatabase(log, callback) { log.info('created management database'); return callback(); } - log.error('could not initialize management database', - { error: reshapeExceptionError(error), - method: 'initManagementDatabase' }); + log.error('could not initialize management database', { + error: reshapeExceptionError(error), + method: 'initManagementDatabase', + }); return callback(error); } log.info('initialized management database'); @@ -75,61 +72,63 @@ function startManagementListeners(instanceId, token) { * @returns {undefined} */ function initManagement(log, callback) { - if ((process.env.REMOTE_MANAGEMENT_DISABLE && - process.env.REMOTE_MANAGEMENT_DISABLE !== '0') - || process.env.S3BACKEND === 'mem') { + if ( + (process.env.REMOTE_MANAGEMENT_DISABLE && process.env.REMOTE_MANAGEMENT_DISABLE !== '0') || + process.env.S3BACKEND === 'mem' + ) { log.info('remote management disabled'); return; } /* Temporary check before to fully move to the process management agent. */ - if (isManagementAgentUsed() ^ typeof callback === 'function') { + if (isManagementAgentUsed() ^ (typeof callback === 'function')) { let msg = 'misuse of initManagement function: '; msg += `MANAGEMENT_USE_AGENT: ${process.env.MANAGEMENT_USE_AGENT}`; msg += `, callback type: ${typeof callback}`; throw new Error(msg); } - async.waterfall([ - // eslint-disable-next-line arrow-body-style - cb => { return isManagementAgentUsed() ? metadata.setup(cb) : cb(); }, - cb => initManagementDatabase(log, cb), - cb => metadata.getUUID(log, cb), - (instanceId, cb) => initManagementCredentials( - managementEndpoint, instanceId, log, cb), - (instanceId, token, cb) => { - if (!isManagementAgentUsed()) { - cb(null, instanceId, token, {}); - return; - } - loadCachedOverlay(log, (err, overlay) => cb(err, instanceId, - token, overlay)); - }, - (instanceId, token, overlay, cb) => { - if (!isManagementAgentUsed()) { - cb(null, instanceId, token, overlay); - return; - } - patchConfiguration(overlay, log, - err => cb(err, instanceId, token, overlay)); - }, - ], (error, instanceId, token, overlay) => { - if (error) { - log.error('could not initialize remote management, retrying later', - { error: reshapeExceptionError(error), - method: 'initManagement' }); - setTimeout(initManagement, - initRemoteManagementRetryDelay, - logger.newRequestLogger()); - } else { - log.info(`this deployment's Instance ID is ${instanceId}`); - log.end('management init done'); - startManagementListeners(instanceId, token); - if (callback) { - callback(overlay); + async.waterfall( + [ + // eslint-disable-next-line arrow-body-style + cb => { + return isManagementAgentUsed() ? 
metadata.setup(cb) : cb(); + }, + cb => initManagementDatabase(log, cb), + cb => metadata.getUUID(log, cb), + (instanceId, cb) => initManagementCredentials(managementEndpoint, instanceId, log, cb), + (instanceId, token, cb) => { + if (!isManagementAgentUsed()) { + cb(null, instanceId, token, {}); + return; + } + loadCachedOverlay(log, (err, overlay) => cb(err, instanceId, token, overlay)); + }, + (instanceId, token, overlay, cb) => { + if (!isManagementAgentUsed()) { + cb(null, instanceId, token, overlay); + return; + } + patchConfiguration(overlay, log, err => cb(err, instanceId, token, overlay)); + }, + ], + (error, instanceId, token, overlay) => { + if (error) { + log.error('could not initialize remote management, retrying later', { + error: reshapeExceptionError(error), + method: 'initManagement', + }); + setTimeout(initManagement, initRemoteManagementRetryDelay, logger.newRequestLogger()); + } else { + log.info(`this deployment's Instance ID is ${instanceId}`); + log.end('management init done'); + startManagementListeners(instanceId, token); + if (callback) { + callback(overlay); + } } } - }); + ); } module.exports = { diff --git a/lib/management/poll.js b/lib/management/poll.js index 83a72c46c7..b2bc6f4ea8 100644 --- a/lib/management/poll.js +++ b/lib/management/poll.js @@ -5,18 +5,13 @@ const request = require('../utilities/request'); const _config = require('../Config').config; const logger = require('../utilities/logger'); const metadata = require('../metadata/wrapper'); -const { - loadCachedOverlay, - patchConfiguration, - saveConfigurationVersion, -} = require('./configuration'); +const { loadCachedOverlay, patchConfiguration, saveConfigurationVersion } = require('./configuration'); const { reshapeExceptionError } = arsenal.errorUtils; const pushReportDelay = 30000; const pullConfigurationOverlayDelay = 60000; -function loadRemoteOverlay( - managementEndpoint, instanceId, remoteToken, cachedOverlay, log, cb) { +function loadRemoteOverlay(managementEndpoint, instanceId, remoteToken, cachedOverlay, log, cb) { log.debug('loading remote overlay'); const opts = { headers: { @@ -25,45 +20,48 @@ function loadRemoteOverlay( }, json: true, }; - request.get(`${managementEndpoint}/${instanceId}/config/overlay`, opts, - (error, response, body) => { - if (error) { - return cb(error); - } - if (response.statusCode === 200) { - return cb(null, cachedOverlay, body); - } - if (response.statusCode === 404) { - return cb(null, cachedOverlay, {}); - } - return cb(arsenal.errors.AccessForbidden, cachedOverlay, {}); - }); + request.get(`${managementEndpoint}/${instanceId}/config/overlay`, opts, (error, response, body) => { + if (error) { + return cb(error); + } + if (response.statusCode === 200) { + return cb(null, cachedOverlay, body); + } + if (response.statusCode === 404) { + return cb(null, cachedOverlay, {}); + } + return cb(arsenal.errors.AccessForbidden, cachedOverlay, {}); + }); } // TODO save only after successful patch -function applyConfigurationOverlay( - managementEndpoint, instanceId, remoteToken, log) { - async.waterfall([ - wcb => loadCachedOverlay(log, wcb), - (cachedOverlay, wcb) => patchConfiguration(cachedOverlay, - log, wcb), - (cachedOverlay, wcb) => - loadRemoteOverlay(managementEndpoint, instanceId, remoteToken, - cachedOverlay, log, wcb), - (cachedOverlay, remoteOverlay, wcb) => - saveConfigurationVersion(cachedOverlay, remoteOverlay, log, wcb), - (remoteOverlay, wcb) => patchConfiguration(remoteOverlay, - log, wcb), - ], error => { - if (error) { - log.error('could not apply 
managed configuration', - { error: reshapeExceptionError(error), - method: 'applyConfigurationOverlay' }); +function applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken, log) { + async.waterfall( + [ + wcb => loadCachedOverlay(log, wcb), + (cachedOverlay, wcb) => patchConfiguration(cachedOverlay, log, wcb), + (cachedOverlay, wcb) => + loadRemoteOverlay(managementEndpoint, instanceId, remoteToken, cachedOverlay, log, wcb), + (cachedOverlay, remoteOverlay, wcb) => saveConfigurationVersion(cachedOverlay, remoteOverlay, log, wcb), + (remoteOverlay, wcb) => patchConfiguration(remoteOverlay, log, wcb), + ], + error => { + if (error) { + log.error('could not apply managed configuration', { + error: reshapeExceptionError(error), + method: 'applyConfigurationOverlay', + }); + } + setTimeout( + applyConfigurationOverlay, + pullConfigurationOverlayDelay, + managementEndpoint, + instanceId, + remoteToken, + logger.newRequestLogger() + ); } - setTimeout(applyConfigurationOverlay, pullConfigurationOverlayDelay, - managementEndpoint, instanceId, remoteToken, - logger.newRequestLogger()); - }); + ); } function postStats(managementEndpoint, instanceId, remoteToken, report, next) { @@ -115,18 +113,11 @@ function pushStats(managementEndpoint, instanceId, remoteToken, next) { } logger.debug('report', { report }); - postStats( - managementEndpoint, - instanceId, - remoteToken, - report, - next - ); + postStats(managementEndpoint, instanceId, remoteToken, report, next); return; }); - setTimeout(pushStats, pushReportDelay, - managementEndpoint, instanceId, remoteToken); + setTimeout(pushStats, pushReportDelay, managementEndpoint, instanceId, remoteToken); } /** @@ -141,15 +132,13 @@ function pushStats(managementEndpoint, instanceId, remoteToken, next) { * * @returns {undefined} */ -function startPollingManagementClient( - managementEndpoint, instanceId, remoteToken) { +function startPollingManagementClient(managementEndpoint, instanceId, remoteToken) { metadata.notifyBucketChange(() => { pushStats(managementEndpoint, instanceId, remoteToken); }); pushStats(managementEndpoint, instanceId, remoteToken); - applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken, - logger.newRequestLogger()); + applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken, logger.newRequestLogger()); } module.exports = { diff --git a/lib/management/push.js b/lib/management/push.js index 82ad9be7b5..38aa2024fd 100644 --- a/lib/management/push.js +++ b/lib/management/push.js @@ -14,25 +14,15 @@ const metadata = require('../metadata/wrapper'); const { reshapeExceptionError } = arsenal.errorUtils; const { isManagementAgentUsed } = require('./agentClient'); const { applyAndSaveOverlay } = require('./configuration'); -const { - ChannelMessageV0, - MessageType, -} = require('./ChannelMessageV0'); - -const { - CONFIG_OVERLAY_MESSAGE, - METRICS_REQUEST_MESSAGE, - CHANNEL_CLOSE_MESSAGE, - CHANNEL_PAYLOAD_MESSAGE, -} = MessageType; +const { ChannelMessageV0, MessageType } = require('./ChannelMessageV0'); + +const { CONFIG_OVERLAY_MESSAGE, METRICS_REQUEST_MESSAGE, CHANNEL_CLOSE_MESSAGE, CHANNEL_PAYLOAD_MESSAGE } = MessageType; const PING_INTERVAL_MS = 10000; const subprotocols = [ChannelMessageV0.protocolName]; -const cloudServerHost = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_HOST - || 'localhost'; -const cloudServerPort = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_PORT - || _config.port; +const cloudServerHost = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_HOST || 'localhost'; +const 
cloudServerPort = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_PORT || _config.port; let overlayMessageListener = null; let connected = false; @@ -40,8 +30,7 @@ let connected = false; // No wildcard nor cidr/mask match for now function createWSAgent(pushEndpoint, env, log) { const url = new _URL(pushEndpoint); - const noProxy = (env.NO_PROXY || env.no_proxy - || '').split(','); + const noProxy = (env.NO_PROXY || env.no_proxy || '').split(','); if (noProxy.includes(url.hostname)) { log.info('push server ws has proxy exclusion', { noProxy }); @@ -49,20 +38,20 @@ function createWSAgent(pushEndpoint, env, log) { } if (url.protocol === 'https:' || url.protocol === 'wss:') { - const httpsProxy = (env.HTTPS_PROXY || env.https_proxy); + const httpsProxy = env.HTTPS_PROXY || env.https_proxy; if (httpsProxy) { log.info('push server ws using https proxy', { httpsProxy }); return new HttpsProxyAgent(httpsProxy); } } else if (url.protocol === 'http:' || url.protocol === 'ws:') { - const httpProxy = (env.HTTP_PROXY || env.http_proxy); + const httpProxy = env.HTTP_PROXY || env.http_proxy; if (httpProxy) { log.info('push server ws using http proxy', { httpProxy }); return new HttpsProxyAgent(httpProxy); } } - const allProxy = (env.ALL_PROXY || env.all_proxy); + const allProxy = env.ALL_PROXY || env.all_proxy; if (allProxy) { log.info('push server ws using wildcard proxy', { allProxy }); return new HttpsProxyAgent(allProxy); @@ -88,8 +77,7 @@ function startWSManagementClient(url, token, cb) { logger.info('connecting to push server', { url }); function _logError(error, errorMessage, method) { if (error) { - logger.error(`management client error: ${errorMessage}`, - { error: reshapeExceptionError(error), method }); + logger.error(`management client error: ${errorMessage}`, { error: reshapeExceptionError(error), method }); } } @@ -131,9 +119,9 @@ function startWSManagementClient(url, token, cb) { _logError(err, 'failed to get metrics report', 'pushStats'); return; } - ws.send(ChannelMessageV0.encodeMetricsReportMessage(body), - err => _logError(err, 'failed to send metrics report message', - 'pushStats')); + ws.send(ChannelMessageV0.encodeMetricsReportMessage(body), err => + _logError(err, 'failed to send metrics report message', 'pushStats') + ); }); } @@ -151,17 +139,14 @@ function startWSManagementClient(url, token, cb) { socket = net.createConnection(cloudServerPort, cloudServerHost); socket.on('data', data => { - ws.send(ChannelMessageV0. 
- encodeChannelDataMessage(channelId, data), err => - _logError(err, 'failed to send channel data message', - 'receiveChannelData')); + ws.send(ChannelMessageV0.encodeChannelDataMessage(channelId, data), err => + _logError(err, 'failed to send channel data message', 'receiveChannelData') + ); }); - socket.on('connect', () => { - }); + socket.on('connect', () => {}); - socket.on('drain', () => { - }); + socket.on('drain', () => {}); socket.on('error', error => { logger.error('failed to connect to S3', { @@ -174,10 +159,9 @@ function startWSManagementClient(url, token, cb) { socket.on('end', () => { socket.destroy(); socketsByChannelId[channelId] = null; - ws.send(ChannelMessageV0.encodeChannelCloseMessage(channelId), - err => _logError(err, - 'failed to send channel close message', - 'receiveChannelData')); + ws.send(ChannelMessageV0.encodeChannelCloseMessage(channelId), err => + _logError(err, 'failed to send channel close message', 'receiveChannelData') + ); }); socketsByChannelId[channelId] = socket; @@ -208,8 +192,7 @@ function startWSManagementClient(url, token, cb) { ws.on('close', () => { logger.info('disconnected from push server, reconnecting in 10s'); metadata.notifyBucketChange(null); - _config.removeListener('browser-access-enabled-change', - browserAccessChangeHandler); + _config.removeListener('browser-access-enabled-change', browserAccessChangeHandler); setTimeout(startWSManagementClient, 10000, url, token); connected = false; @@ -241,31 +224,29 @@ function startWSManagementClient(url, token, cb) { const log = logger.newRequestLogger(); const message = new ChannelMessageV0(data); switch (message.getType()) { - case CONFIG_OVERLAY_MESSAGE: - if (!isManagementAgentUsed()) { - applyAndSaveOverlay(JSON.parse(message.getPayload()), log); - } else { - if (overlayMessageListener) { - overlayMessageListener(message.getPayload().toString()); + case CONFIG_OVERLAY_MESSAGE: + if (!isManagementAgentUsed()) { + applyAndSaveOverlay(JSON.parse(message.getPayload()), log); + } else { + if (overlayMessageListener) { + overlayMessageListener(message.getPayload().toString()); + } } - } - break; - case METRICS_REQUEST_MESSAGE: - pushStats(); - break; - case CHANNEL_CLOSE_MESSAGE: - closeChannel(message.getChannelNumber()); - break; - case CHANNEL_PAYLOAD_MESSAGE: - // browserAccessEnabled defaults to true unless explicitly false - if (_config.browserAccessEnabled !== false) { - receiveChannelData( - message.getChannelNumber(), message.getPayload()); - } - break; - default: - logger.error('unknown message type from push server', - { messageType: message.getType() }); + break; + case METRICS_REQUEST_MESSAGE: + pushStats(); + break; + case CHANNEL_CLOSE_MESSAGE: + closeChannel(message.getChannelNumber()); + break; + case CHANNEL_PAYLOAD_MESSAGE: + // browserAccessEnabled defaults to true unless explicitly false + if (_config.browserAccessEnabled !== false) { + receiveChannelData(message.getChannelNumber(), message.getPayload()); + } + break; + default: + logger.error('unknown message type from push server', { messageType: message.getType() }); } }); } diff --git a/lib/metadata/acl.js b/lib/metadata/acl.js index f48ab7aa42..4bf035f433 100644 --- a/lib/metadata/acl.js +++ b/lib/metadata/acl.js @@ -31,14 +31,16 @@ const acl = { * contain the same number of elements, and all elements from one * grant are incuded in the other grant */ - return oldAcl[grant].length === newAcl[grant].length - && oldAcl[grant].every(value => newAcl[grant].includes(value)); + return ( + oldAcl[grant].length === 
newAcl[grant].length && oldAcl[grant].every(value => newAcl[grant].includes(value)) + ); }, addObjectACL(bucket, objectKey, objectMD, addACLParams, params, log, cb) { log.trace('updating object acl in metadata'); - const isAclUnchanged = Object.keys(objectMD.acl).length === Object.keys(addACLParams).length - && Object.keys(objectMD.acl).every(grant => this._aclGrantDidNotChange(grant, objectMD.acl, addACLParams)); + const isAclUnchanged = + Object.keys(objectMD.acl).length === Object.keys(addACLParams).length && + Object.keys(objectMD.acl).every(grant => this._aclGrantDidNotChange(grant, objectMD.acl, addACLParams)); if (!isAclUnchanged) { /* eslint-disable no-param-reassign */ objectMD.acl = addACLParams; @@ -77,14 +79,22 @@ const acl = { }; let validCannedACL = []; if (resourceType === 'bucket') { - validCannedACL = - ['private', 'public-read', 'public-read-write', - 'authenticated-read', 'log-delivery-write']; + validCannedACL = [ + 'private', + 'public-read', + 'public-read-write', + 'authenticated-read', + 'log-delivery-write', + ]; } else if (resourceType === 'object') { - validCannedACL = - ['private', 'public-read', 'public-read-write', - 'authenticated-read', 'bucket-owner-read', - 'bucket-owner-full-control']; + validCannedACL = [ + 'private', + 'public-read', + 'public-read-write', + 'authenticated-read', + 'bucket-owner-read', + 'bucket-owner-full-control', + ]; } // parse canned acl @@ -98,45 +108,34 @@ const acl = { } // parse grant headers - const grantReadHeader = - aclUtils.parseGrant(headers['x-amz-grant-read'], 'READ'); + const grantReadHeader = aclUtils.parseGrant(headers['x-amz-grant-read'], 'READ'); let grantWriteHeader = []; if (resourceType === 'bucket') { - grantWriteHeader = aclUtils - .parseGrant(headers['x-amz-grant-write'], 'WRITE'); + grantWriteHeader = aclUtils.parseGrant(headers['x-amz-grant-write'], 'WRITE'); } - const grantReadACPHeader = aclUtils - .parseGrant(headers['x-amz-grant-read-acp'], 'READ_ACP'); - const grantWriteACPHeader = aclUtils - .parseGrant(headers['x-amz-grant-write-acp'], 'WRITE_ACP'); - const grantFullControlHeader = aclUtils - .parseGrant(headers['x-amz-grant-full-control'], 'FULL_CONTROL'); - const allGrantHeaders = - [].concat(grantReadHeader, grantWriteHeader, - grantReadACPHeader, grantWriteACPHeader, - grantFullControlHeader).filter(item => item !== undefined); + const grantReadACPHeader = aclUtils.parseGrant(headers['x-amz-grant-read-acp'], 'READ_ACP'); + const grantWriteACPHeader = aclUtils.parseGrant(headers['x-amz-grant-write-acp'], 'WRITE_ACP'); + const grantFullControlHeader = aclUtils.parseGrant(headers['x-amz-grant-full-control'], 'FULL_CONTROL'); + const allGrantHeaders = [] + .concat(grantReadHeader, grantWriteHeader, grantReadACPHeader, grantWriteACPHeader, grantFullControlHeader) + .filter(item => item !== undefined); if (allGrantHeaders.length === 0) { return cb(null, currentResourceACL); } - const usersIdentifiedByEmail = allGrantHeaders - .filter(it => it && it.userIDType.toLowerCase() === 'emailaddress'); - const usersIdentifiedByGroup = allGrantHeaders - .filter(item => item && item.userIDType.toLowerCase() === 'uri'); + const usersIdentifiedByEmail = allGrantHeaders.filter( + it => it && it.userIDType.toLowerCase() === 'emailaddress' + ); + const usersIdentifiedByGroup = allGrantHeaders.filter(item => item && item.userIDType.toLowerCase() === 'uri'); const justEmails = usersIdentifiedByEmail.map(item => item.identifier); - const validGroups = [ - constants.allAuthedUsersId, - constants.publicId, - 
constants.logId, - ]; + const validGroups = [constants.allAuthedUsersId, constants.publicId, constants.logId]; for (let i = 0; i < usersIdentifiedByGroup.length; i++) { if (validGroups.indexOf(usersIdentifiedByGroup[i].identifier) < 0) { return cb(errors.InvalidArgument); } } - const usersIdentifiedByID = allGrantHeaders - .filter(item => item && item.userIDType.toLowerCase() === 'id'); + const usersIdentifiedByID = allGrantHeaders.filter(item => item && item.userIDType.toLowerCase() === 'id'); // TODO: Consider whether want to verify with Vault // whether canonicalID is associated with existing // account before adding to ACL @@ -148,22 +147,22 @@ const acl = { if (err) { return cb(err); } - const reconstructedUsersIdentifiedByEmail = aclUtils. - reconstructUsersIdentifiedByEmail(results, - usersIdentifiedByEmail); + const reconstructedUsersIdentifiedByEmail = aclUtils.reconstructUsersIdentifiedByEmail( + results, + usersIdentifiedByEmail + ); const allUsers = [].concat( reconstructedUsersIdentifiedByEmail, usersIdentifiedByGroup, - usersIdentifiedByID); - const revisedACL = - aclUtils.sortHeaderGrants(allUsers, resourceACL); + usersIdentifiedByID + ); + const revisedACL = aclUtils.sortHeaderGrants(allUsers, resourceACL); return cb(null, revisedACL); }); } else { // If don't have to look up canonicalID's just sort grants // and add to bucket - const revisedACL = aclUtils - .sortHeaderGrants(allGrantHeaders, resourceACL); + const revisedACL = aclUtils.sortHeaderGrants(allGrantHeaders, resourceACL); return cb(null, revisedACL); } return undefined; @@ -171,4 +170,3 @@ const acl = { }; module.exports = acl; - diff --git a/lib/metadata/metadataUtils.js b/lib/metadata/metadataUtils.js index 62b2bd9225..c6de69bc50 100644 --- a/lib/metadata/metadataUtils.js +++ b/lib/metadata/metadataUtils.js @@ -3,8 +3,7 @@ const { errors } = require('arsenal'); const metadata = require('./wrapper'); const BucketInfo = require('arsenal').models.BucketInfo; -const { isBucketAuthorized, isObjAuthorized } = - require('../api/apiUtils/authorization/permissionChecks'); +const { isBucketAuthorized, isObjAuthorized } = require('../api/apiUtils/authorization/permissionChecks'); const bucketShield = require('../api/apiUtils/bucket/bucketShield'); const { onlyOwnerAllowed } = require('../../constants'); const { actionNeedQuotaCheck, actionWithDataDeletion } = require('arsenal/build/lib/policyEvaluator/RequestContext'); @@ -28,35 +27,38 @@ const { processBytesToWrite, validateQuotas } = require('../api/apiUtils/quotas/ * @return {undefined} */ function getNullVersionFromMaster(bucketName, objectKey, log, cb) { - async.waterfall([ - next => metadata.getObjectMD(bucketName, objectKey, {}, log, next), - (masterMD, next) => { - if (masterMD.isNull || !masterMD.versionId) { - log.debug('null version is master version'); - return process.nextTick(() => next(null, masterMD)); + async.waterfall( + [ + next => metadata.getObjectMD(bucketName, objectKey, {}, log, next), + (masterMD, next) => { + if (masterMD.isNull || !masterMD.versionId) { + log.debug('null version is master version'); + return process.nextTick(() => next(null, masterMD)); + } + if (masterMD.nullVersionId) { + // the latest version is not the null version, but null version exists + // NOTE: for backward-compat with old null version scheme + log.debug('get the null version via nullVersionId'); + const getOptions = { + versionId: masterMD.nullVersionId, + }; + return metadata.getObjectMD(bucketName, objectKey, getOptions, log, next); + } + return 
next(errors.NoSuchKey); + }, + ], + (err, nullMD) => { + if (err && err.is && err.is.NoSuchKey) { + log.debug('could not find a null version'); + return cb(); } - if (masterMD.nullVersionId) { - // the latest version is not the null version, but null version exists - // NOTE: for backward-compat with old null version scheme - log.debug('get the null version via nullVersionId'); - const getOptions = { - versionId: masterMD.nullVersionId, - }; - return metadata.getObjectMD(bucketName, objectKey, getOptions, log, next); + if (err) { + log.debug('err getting object MD from metadata', { error: err }); + return cb(err); } - return next(errors.NoSuchKey); - }, - ], (err, nullMD) => { - if (err && err.is && err.is.NoSuchKey) { - log.debug('could not find a null version'); - return cb(); + return cb(null, nullMD); } - if (err) { - log.debug('err getting object MD from metadata', { error: err }); - return cb(err); - } - return cb(null, nullMD); - }); + ); } /** metadataGetObject - retrieves specified object or version from metadata @@ -75,21 +77,20 @@ function metadataGetObject(bucketName, objectKey, versionId, cachedDocuments, lo if (cachedDocuments && cachedDocuments[objectKey]) { return cb(null, cachedDocuments[objectKey]); } - return metadata.getObjectMD(bucketName, objectKey, options, log, - (err, objMD) => { - if (err) { - if (err.is && err.is.NoSuchKey && versionId === 'null') { - return getNullVersionFromMaster(bucketName, objectKey, log, cb); - } - if (err.is && err.is.NoSuchKey) { - log.debug('object does not exist in metadata'); - return cb(); - } - log.debug('err getting object MD from metadata', { error: err }); - return cb(err); + return metadata.getObjectMD(bucketName, objectKey, options, log, (err, objMD) => { + if (err) { + if (err.is && err.is.NoSuchKey && versionId === 'null') { + return getNullVersionFromMaster(bucketName, objectKey, log, cb); } - return cb(null, objMD); - }); + if (err.is && err.is.NoSuchKey) { + log.debug('object does not exist in metadata'); + return cb(); + } + log.debug('err getting object MD from metadata', { error: err }); + return cb(err); + } + return cb(null, objMD); + }); } /** metadataGetObjects - retrieves specified object or version from metadata. 
This @@ -162,8 +163,17 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) { if (bucket.getOwner() !== canonicalID && requestType.some(type => onlyOwnerAllowed.includes(type))) { return errors.MethodNotAllowed; } - if (!isBucketAuthorized(bucket, (preciseRequestType || requestType), canonicalID, - authInfo, log, request, actionImplicitDenies)) { + if ( + !isBucketAuthorized( + bucket, + preciseRequestType || requestType, + canonicalID, + authInfo, + log, + request, + actionImplicitDenies + ) + ) { log.debug('access denied for user on bucket', { requestType }); return errors.AccessDenied; } @@ -189,77 +199,105 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log, if (!Array.isArray(requestType)) { requestType = [requestType]; } - async.waterfall([ - next => { - // versionId may be 'null', which asks metadata to fetch the null key specifically - const getOptions = { versionId }; - if (getDeleteMarker) { - getOptions.getDeleteMarker = true; - } - return metadata.getBucketAndObjectMD(bucketName, objectKey, getOptions, log, (err, getResult) => { - if (err) { - // if some implicit iamAuthzResults, return AccessDenied - // before leaking any state information - if (actionImplicitDenies && Object.values(actionImplicitDenies).some(v => v === true)) { - return next(errors.AccessDenied); - } - return next(err); + async.waterfall( + [ + next => { + // versionId may be 'null', which asks metadata to fetch the null key specifically + const getOptions = { versionId }; + if (getDeleteMarker) { + getOptions.getDeleteMarker = true; } - return next(null, getResult); - }); - }, - (getResult, next) => { - const bucket = getResult.bucket ? - BucketInfo.deSerialize(getResult.bucket) : undefined; - if (!bucket) { - log.debug('bucketAttrs is undefined', { - bucket: bucketName, - method: 'metadataValidateBucketAndObj', + return metadata.getBucketAndObjectMD(bucketName, objectKey, getOptions, log, (err, getResult) => { + if (err) { + // if some implicit iamAuthzResults, return AccessDenied + // before leaking any state information + if (actionImplicitDenies && Object.values(actionImplicitDenies).some(v => v === true)) { + return next(errors.AccessDenied); + } + return next(err); + } + return next(null, getResult); }); - return next(errors.NoSuchBucket); - } - const validationError = validateBucket(bucket, params, log, actionImplicitDenies); - if (validationError) { - return next(validationError, bucket); - } - const objMD = getResult.obj ? JSON.parse(getResult.obj) : undefined; - if (!objMD && versionId === 'null') { - return getNullVersionFromMaster(bucketName, objectKey, log, - (err, nullVer) => next(err, bucket, nullVer)); - } - return next(null, bucket, objMD); - }, - (bucket, objMD, next) => { - const canonicalID = authInfo.getCanonicalID(); - if (!isObjAuthorized(bucket, objMD, requestType, canonicalID, authInfo, log, request, - actionImplicitDenies)) { - log.debug('access denied for user on object', { requestType }); - return next(errors.AccessDenied, bucket); - } - return next(null, bucket, objMD); - }, - (bucket, objMD, next) => { - const needQuotaCheck = requestType => requestType.some(type => actionNeedQuotaCheck[type] || - actionWithDataDeletion[type]); - const checkQuota = params.checkQuota === undefined ? needQuotaCheck(requestType) : params.checkQuota; - // withVersionId cover cases when an object is being restored with a specific version ID. 
- // In this case, the storage space was already accounted for when the RestoreObject API call - // was made, so we don't need to add any inflight, but quota must be evaluated. - if (!checkQuota) { + }, + (getResult, next) => { + const bucket = getResult.bucket ? BucketInfo.deSerialize(getResult.bucket) : undefined; + if (!bucket) { + log.debug('bucketAttrs is undefined', { + bucket: bucketName, + method: 'metadataValidateBucketAndObj', + }); + return next(errors.NoSuchBucket); + } + const validationError = validateBucket(bucket, params, log, actionImplicitDenies); + if (validationError) { + return next(validationError, bucket); + } + const objMD = getResult.obj ? JSON.parse(getResult.obj) : undefined; + if (!objMD && versionId === 'null') { + return getNullVersionFromMaster(bucketName, objectKey, log, (err, nullVer) => + next(err, bucket, nullVer) + ); + } + return next(null, bucket, objMD); + }, + (bucket, objMD, next) => { + const canonicalID = authInfo.getCanonicalID(); + if ( + !isObjAuthorized( + bucket, + objMD, + requestType, + canonicalID, + authInfo, + log, + request, + actionImplicitDenies + ) + ) { + log.debug('access denied for user on object', { requestType }); + return next(errors.AccessDenied, bucket); + } return next(null, bucket, objMD); + }, + (bucket, objMD, next) => { + const needQuotaCheck = requestType => + requestType.some(type => actionNeedQuotaCheck[type] || actionWithDataDeletion[type]); + const checkQuota = params.checkQuota === undefined ? needQuotaCheck(requestType) : params.checkQuota; + // withVersionId cover cases when an object is being restored with a specific version ID. + // In this case, the storage space was already accounted for when the RestoreObject API call + // was made, so we don't need to add any inflight, but quota must be evaluated. 
+ if (!checkQuota) { + return next(null, bucket, objMD); + } + const contentLength = processBytesToWrite( + request.apiMethod, + bucket, + versionId, + request?.parsedContentLength || 0, + objMD, + params.destObjMD + ); + return validateQuotas( + request, + bucket, + request.accountQuotas, + requestType, + request.apiMethod, + contentLength, + withVersionId, + log, + err => next(err, bucket, objMD) + ); + }, + ], + (err, bucket, objMD) => { + if (err) { + // still return bucket for cors headers + return callback(err, bucket); } - const contentLength = processBytesToWrite(request.apiMethod, bucket, versionId, - request?.parsedContentLength || 0, objMD, params.destObjMD); - return validateQuotas(request, bucket, request.accountQuotas, requestType, request.apiMethod, - contentLength, withVersionId, log, err => next(err, bucket, objMD)); - }, - ], (err, bucket, objMD) => { - if (err) { - // still return bucket for cors headers - return callback(err, bucket); + return callback(null, bucket, objMD); } - return callback(null, bucket, objMD); - }); + ); } /** standardMetadataValidateBucket - retrieve bucket from metadata and check if user * is authorized to access it diff --git a/lib/metadata/wrapper.js b/lib/metadata/wrapper.js index 6bd800e60f..947d103847 100644 --- a/lib/metadata/wrapper.js +++ b/lib/metadata/wrapper.js @@ -39,6 +39,5 @@ if (clientName === 'mem') { }; } -const metadata = new MetadataWrapper(config.backends.metadata, params, - bucketclient, logger); +const metadata = new MetadataWrapper(config.backends.metadata, params, bucketclient, logger); module.exports = metadata; diff --git a/lib/nfs/utilities.js b/lib/nfs/utilities.js index bd2fd8619d..e39cfe127b 100644 --- a/lib/nfs/utilities.js +++ b/lib/nfs/utilities.js @@ -4,8 +4,7 @@ const http = require('http'); const https = require('https'); const logger = require('../utilities/logger'); -function _createBucketWithNFSEnabled(host, port, bucketName, accessKey, - secretKey, verbose, ssl, locationConstraint) { +function _createBucketWithNFSEnabled(host, port, bucketName, accessKey, secretKey, verbose, ssl, locationConstraint) { const options = { host, port, @@ -47,10 +46,11 @@ function _createBucketWithNFSEnabled(host, port, bucketName, accessKey, logger.info('request headers', { headers: request._headers }); } if (locationConstraint) { - const createBucketConfiguration = '' + - `${locationConstraint}` + - ''; + const createBucketConfiguration = + '' + + `${locationConstraint}` + + ''; request.write(createBucketConfiguration); } request.end(); @@ -73,20 +73,17 @@ function createBucketWithNFSEnabled() { .option('-p, --port ', 'Port of the server') .option('-s', '--ssl', 'Enable ssl') .option('-v, --verbose') - .option('-l, --location-constraint ', - 'location Constraint') + .option('-l, --location-constraint ', 'location Constraint') .parse(process.argv); - const { host, port, accessKey, secretKey, bucket, verbose, - ssl, locationConstraint } = commander; + const { host, port, accessKey, secretKey, bucket, verbose, ssl, locationConstraint } = commander; if (!host || !port || !accessKey || !secretKey || !bucket) { logger.error('missing parameter'); commander.outputHelp(); process.exit(1); } - _createBucketWithNFSEnabled(host, port, bucket, accessKey, secretKey, - verbose, ssl, locationConstraint); + _createBucketWithNFSEnabled(host, port, bucket, accessKey, secretKey, verbose, ssl, locationConstraint); } module.exports = { diff --git a/lib/quotas/quotas.js b/lib/quotas/quotas.js index f072064142..d07c4c89d1 100644 --- 
a/lib/quotas/quotas.js +++ b/lib/quotas/quotas.js @@ -6,12 +6,12 @@ let instance = null; switch (config.backends.quota) { case 'scuba': instance = new ScubaClientImpl(config); - break; + break; default: instance = { enabled: false, }; - break; + break; } module.exports = instance; diff --git a/lib/quotas/scuba/wrapper.js b/lib/quotas/scuba/wrapper.js index 0cde306868..2329c06b01 100644 --- a/lib/quotas/scuba/wrapper.js +++ b/lib/quotas/scuba/wrapper.js @@ -27,28 +27,30 @@ class ScubaClientImpl extends ScubaClient { } _healthCheck() { - return this.healthCheck().then(data => { - if (data?.date) { - const date = new Date(data.date); - if (Date.now() - date.getTime() > this.maxStaleness) { - throw new Error('Data is stale, disabling quotas'); + return this.healthCheck() + .then(data => { + if (data?.date) { + const date = new Date(data.date); + if (Date.now() - date.getTime() > this.maxStaleness) { + throw new Error('Data is stale, disabling quotas'); + } } - } - if (!this.enabled) { - this._log.info('Scuba health check passed, enabling quotas'); - } - monitoring.utilizationServiceAvailable.set(1); - this.enabled = true; - }).catch(err => { - if (this.enabled) { - this._log.warn('Scuba health check failed, disabling quotas', { - err: err.name, - description: err.message, - }); - } - monitoring.utilizationServiceAvailable.set(0); - this.enabled = false; - }); + if (!this.enabled) { + this._log.info('Scuba health check passed, enabling quotas'); + } + monitoring.utilizationServiceAvailable.set(1); + this.enabled = true; + }) + .catch(err => { + if (this.enabled) { + this._log.warn('Scuba health check failed, disabling quotas', { + err: err.name, + description: err.message, + }); + } + monitoring.utilizationServiceAvailable.set(0); + this.enabled = false; + }); } periodicHealthCheck() { @@ -56,20 +58,24 @@ class ScubaClientImpl extends ScubaClient { clearInterval(this._healthCheckTimer); } this._healthCheck(); - this._healthCheckTimer = setInterval(async () => { - this._healthCheck(); - }, Number(process.env.SCUBA_HEALTHCHECK_FREQUENCY) - || externalBackendHealthCheckInterval); + this._healthCheckTimer = setInterval( + async () => { + this._healthCheck(); + }, + Number(process.env.SCUBA_HEALTHCHECK_FREQUENCY) || externalBackendHealthCheckInterval + ); } getUtilizationMetrics(metricsClass, resourceName, options, body, callback) { const requestStartTime = process.hrtime.bigint(); return this._getLatestMetricsCallback(metricsClass, resourceName, options, body, (err, data) => { const responseTimeInNs = Number(process.hrtime.bigint() - requestStartTime); - monitoring.utilizationMetricsRetrievalDuration.labels({ - code: err ? (err.statusCode || 500) : 200, - class: metricsClass, - }).observe(responseTimeInNs / 1e9); + monitoring.utilizationMetricsRetrievalDuration + .labels({ + code: err ? 
err.statusCode || 500 : 200, + class: metricsClass, + }) + .observe(responseTimeInNs / 1e9); return callback(err, data); }); } diff --git a/lib/routes/routeBackbeat.js b/lib/routes/routeBackbeat.js index 3d69912bd3..3bd2eb23e3 100644 --- a/lib/routes/routeBackbeat.js +++ b/lib/routes/routeBackbeat.js @@ -7,8 +7,7 @@ const joi = require('@hapi/joi'); const backbeatProxy = httpProxy.createProxyServer({ ignorePath: true, }); -const { auth, errors, errorInstances, s3middleware, s3routes, models, storage } = - require('arsenal'); +const { auth, errors, errorInstances, s3middleware, s3routes, models, storage } = require('arsenal'); const { responseJSONBody } = s3routes.routesUtils; const { getSubPartIds } = s3middleware.azureHelper.mpuUtils; @@ -17,18 +16,13 @@ const { parseLC, MultipleBackendGateway } = storage.data; const vault = require('../auth/vault'); const dataWrapper = require('../data/wrapper'); const metadata = require('../metadata/wrapper'); -const locationConstraintCheck = require( - '../api/apiUtils/object/locationConstraintCheck'); -const locationStorageCheck = - require('../api/apiUtils/object/locationStorageCheck'); +const locationConstraintCheck = require('../api/apiUtils/object/locationConstraintCheck'); +const locationStorageCheck = require('../api/apiUtils/object/locationStorageCheck'); const { dataStore } = require('../api/apiUtils/object/storeObject'); -const prepareRequestContexts = require( -'../api/apiUtils/authorization/prepareRequestContexts'); +const prepareRequestContexts = require('../api/apiUtils/authorization/prepareRequestContexts'); const { decodeVersionId } = require('../api/apiUtils/object/versioning'); -const locationKeysHaveChanged - = require('../api/apiUtils/object/locationKeysHaveChanged'); -const { standardMetadataValidateBucketAndObj, - metadataGetObject } = require('../metadata/metadataUtils'); +const locationKeysHaveChanged = require('../api/apiUtils/object/locationKeysHaveChanged'); +const { standardMetadataValidateBucketAndObj, metadataGetObject } = require('../metadata/metadataUtils'); const { config } = require('../Config'); const constants = require('../../constants'); const { BackendInfo } = models; @@ -61,8 +55,7 @@ config.on('location-constraints-update', () => { locationConstraints = config.locationConstraints; if (implName === 'multipleBackends') { const clients = parseLC(config, vault); - dataClient = new MultipleBackendGateway( - clients, metadata, locationStorageCheck); + dataClient = new MultipleBackendGateway(clients, metadata, locationStorageCheck); } }); @@ -85,12 +78,7 @@ function _normalizeBackbeatRequest(req) { } function _isObjectRequest(req) { - return [ - 'data', - 'metadata', - 'multiplebackenddata', - 'multiplebackendmetadata', - ].includes(req.resourceType); + return ['data', 'metadata', 'multiplebackenddata', 'multiplebackendmetadata'].includes(req.resourceType); } function _respondWithHeaders(response, payload, extraHeaders, log, callback) { @@ -100,12 +88,15 @@ function _respondWithHeaders(response, payload, extraHeaders, log, callback) { } else if (typeof payload === 'object') { body = JSON.stringify(payload); } - const httpHeaders = Object.assign({ - 'x-amz-id-2': log.getSerializedUids(), - 'x-amz-request-id': log.getSerializedUids(), - 'content-type': 'application/json', - 'content-length': Buffer.byteLength(body), - }, extraHeaders); + const httpHeaders = Object.assign( + { + 'x-amz-id-2': log.getSerializedUids(), + 'x-amz-request-id': log.getSerializedUids(), + 'content-type': 'application/json', + 'content-length': 
Buffer.byteLength(body), + }, + extraHeaders + ); response.writeHead(200, httpHeaders); response.end(body, 'utf8', () => { log.end().info('responded with payload', { @@ -126,8 +117,9 @@ function _getRequestPayload(req, cb) { req.on('data', chunk => { payload.push(chunk); payloadLen += chunk.length; - }).on('error', cb) - .on('end', () => cb(null, Buffer.concat(payload, payloadLen).toString())); + }) + .on('error', cb) + .on('end', () => cb(null, Buffer.concat(payload, payloadLen).toString())); } function _checkMultipleBackendRequest(request, log) { @@ -140,31 +132,29 @@ function _checkMultipleBackendRequest(request, log) { log.error(errMessage); return errorInstances.BadRequest.customizeDescription(errMessage); } - if (operation === 'putpart' && - headers['x-scal-part-number'] === undefined) { + if (operation === 'putpart' && headers['x-scal-part-number'] === undefined) { errMessage = 'bad request: missing part-number header'; log.error(errMessage); return errorInstances.BadRequest.customizeDescription(errMessage); } - const isMPUOperation = - ['putpart', 'completempu', 'abortmpu'].includes(operation); + const isMPUOperation = ['putpart', 'completempu', 'abortmpu'].includes(operation); if (isMPUOperation && headers['x-scal-upload-id'] === undefined) { errMessage = 'bad request: missing upload-id header'; log.error(errMessage); return errorInstances.BadRequest.customizeDescription(errMessage); } - if (operation === 'putobject' && - headers['x-scal-canonical-id'] === undefined) { + if (operation === 'putobject' && headers['x-scal-canonical-id'] === undefined) { errMessage = 'bad request: missing x-scal-canonical-id header'; log.error(errMessage); return errorInstances.BadRequest.customizeDescription(errMessage); } // Ensure the external backend has versioning before asserting version ID. 
- if (!constants.versioningNotImplBackends[storageType] && - (operation === 'puttagging' || operation === 'deletetagging')) { + if ( + !constants.versioningNotImplBackends[storageType] && + (operation === 'puttagging' || operation === 'deletetagging') + ) { if (headers['x-scal-data-store-version-id'] === undefined) { - errMessage = - 'bad request: missing x-scal-data-store-version-id header'; + errMessage = 'bad request: missing x-scal-data-store-version-id header'; log.error(errMessage); return errorInstances.BadRequest.customizeDescription(errMessage); } @@ -174,14 +164,12 @@ function _checkMultipleBackendRequest(request, log) { return errorInstances.BadRequest.customizeDescription(errMessage); } if (headers['x-scal-replication-endpoint-site'] === undefined) { - errMessage = 'bad request: missing ' + - 'x-scal-replication-endpoint-site'; + errMessage = 'bad request: missing ' + 'x-scal-replication-endpoint-site'; log.error(errMessage); return errorInstances.BadRequest.customizeDescription(errMessage); } } - if (operation === 'putobject' && - headers['content-md5'] === undefined) { + if (operation === 'putobject' && headers['content-md5'] === undefined) { errMessage = 'bad request: missing content-md5 header'; log.error(errMessage); return errorInstances.BadRequest.customizeDescription(errMessage); @@ -193,8 +181,7 @@ function _checkMultipleBackendRequest(request, log) { } const location = locationConstraints[headers['x-scal-storage-class']]; const storageTypeList = storageType.split(','); - const isValidLocation = location && - storageTypeList.includes(location.type); + const isValidLocation = location && storageTypeList.includes(location.type); if (!isValidLocation) { errMessage = 'invalid request: invalid location constraint in request'; log.debug(errMessage, { @@ -236,13 +223,11 @@ function generateMpuAggregateInfo(parts) { // MultipleBackendTask does not, so check if size is defined in // the first part. if (parts[0] && parts[0].Size) { - aggregateSize = parts.reduce( - (agg, part) => agg + Number.parseInt(part.Size[0], 10), 0); + aggregateSize = parts.reduce((agg, part) => agg + Number.parseInt(part.Size[0], 10), 0); } return { aggregateSize, - aggregateETag: s3middleware.processMpuParts.createAggregateETag( - parts.map(part => part.ETag[0])), + aggregateETag: s3middleware.processMpuParts.createAggregateETag(parts.map(part => part.ETag[0])), }; } @@ -267,15 +252,17 @@ function constructPutResponse(params) { // create the location as they are usually stored in the // "locations" attribute, with size/start info. - const location = [{ - dataStoreName: params.dataStoreName, - dataStoreType: params.dataStoreType, - key: params.key, - start: 0, - size: params.size, - dataStoreETag: params.dataStoreETag, - dataStoreVersionId: params.dataStoreVersionId, - }]; + const location = [ + { + dataStoreName: params.dataStoreName, + dataStoreType: params.dataStoreType, + key: params.key, + start: 0, + size: params.size, + dataStoreETag: params.dataStoreETag, + dataStoreVersionId: params.dataStoreVersionId, + }, + ]; return { // TODO: Remove '' when versioning implemented for Azure. 
versionId: params.dataStoreVersionId || '', @@ -283,8 +270,7 @@ function constructPutResponse(params) { }; } -function handleTaggingOperation(request, response, type, dataStoreVersionId, - log, callback) { +function handleTaggingOperation(request, response, type, dataStoreVersionId, log, callback) { const storageLocation = request.headers['x-scal-storage-class']; const objectMD = { dataStoreName: storageLocation, @@ -299,8 +285,7 @@ function handleTaggingOperation(request, response, type, dataStoreVersionId, return callback(errors.MalformedPOSTRequest); } } - return dataClient.objectTagging(type, request.objectKey, - request.bucketName, objectMD, log, err => { + return dataClient.objectTagging(type, request.objectKey, request.bucketName, objectMD, log, err => { if (err) { log.error(`error during object tagging: ${type}`, { error: err, @@ -414,8 +399,7 @@ function putData(request, response, bucketInfo, objMd, log, callback) { objectKey: request.objectKey, }; const payloadLen = parseInt(request.headers['content-length'], 10); - const backendInfoObj = locationConstraintCheck( - request, null, bucketInfo, log); + const backendInfoObj = locationConstraintCheck(request, null, bucketInfo, log); if (backendInfoObj.err) { log.error('error getting backendInfo', { error: backendInfoObj.err, @@ -434,8 +418,14 @@ function putData(request, response, bucketInfo, objMd, log, callback) { return callback(errors.InternalError); } return dataStore( - context, cipherBundle, request, payloadLen, {}, - backendInfo, log, (err, retrievalInfo, md5) => { + context, + cipherBundle, + request, + payloadLen, + {}, + backendInfo, + log, + (err, retrievalInfo, md5) => { if (err) { log.error('error putting data', { error: err, @@ -447,20 +437,29 @@ function putData(request, response, bucketInfo, objMd, log, callback) { return callback(errors.BadDigest); } const { key, dataStoreName } = retrievalInfo; - const dataRetrievalInfo = [{ - key, - dataStoreName, - }]; + const dataRetrievalInfo = [ + { + key, + dataStoreName, + }, + ]; if (cipherBundle) { dataRetrievalInfo[0].cryptoScheme = cipherBundle.cryptoScheme; dataRetrievalInfo[0].cipheredDataKey = cipherBundle.cipheredDataKey; - return _respondWithHeaders(response, dataRetrievalInfo, { - 'x-amz-server-side-encryption': cipherBundle.algorithm, - 'x-amz-server-side-encryption-aws-kms-key-id': cipherBundle.masterKeyId, - }, log, callback); + return _respondWithHeaders( + response, + dataRetrievalInfo, + { + 'x-amz-server-side-encryption': cipherBundle.algorithm, + 'x-amz-server-side-encryption-aws-kms-key-id': cipherBundle.masterKeyId, + }, + log, + callback + ); } return _respond(response, dataRetrievalInfo, log, callback); - }); + } + ); }); } @@ -475,7 +474,7 @@ function putData(request, response, bucketInfo, objMd, log, callback) { */ /** - * + * * @param {string} accountId - account ID * @param {Log} log - logger instance * @param {CanonicalIdCallback} cb - callback function @@ -628,32 +627,35 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) { options.isNull = isNull; } - return async.series([ - // Zenko's CRR delegates replacing the account - // information to the destination's Cloudserver, as - // Vault admin APIs are not exposed externally. - next => { - // Internal users of this API (other features in Zenko) will - // not provide the accountId in the request, as they only update - // the metadata of existing objects, so there is no need to - // replace the account information. 
- if (!request.query?.accountId) { - return next(); - } - return getCanonicalIdsByAccountId(request.query.accountId, log, (err, res) => { - if (err) { - return next(err); + return async.series( + [ + // Zenko's CRR delegates replacing the account + // information to the destination's Cloudserver, as + // Vault admin APIs are not exposed externally. + next => { + // Internal users of this API (other features in Zenko) will + // not provide the accountId in the request, as they only update + // the metadata of existing objects, so there is no need to + // replace the account information. + if (!request.query?.accountId) { + return next(); } - omVal['owner-display-name'] = res.name; - omVal['owner-id'] = res.canonicalId; - return next(); - }); - }, - next => { - log.trace('putting object version', { - objectKey: request.objectKey, omVal, options }); - return metadata.putObjectMD(bucketName, objectKey, omVal, options, log, - (err, md) => { + return getCanonicalIdsByAccountId(request.query.accountId, log, (err, res) => { + if (err) { + return next(err); + } + omVal['owner-display-name'] = res.name; + omVal['owner-id'] = res.canonicalId; + return next(); + }); + }, + next => { + log.trace('putting object version', { + objectKey: request.objectKey, + omVal, + options, + }); + return metadata.putObjectMD(bucketName, objectKey, omVal, options, log, (err, md) => { if (err) { log.error('error putting object metadata', { error: err, @@ -662,47 +664,54 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) { return next(err); } pushReplicationMetric(objMd, omVal, bucketName, objectKey, log); - if (objMd && + if ( + objMd && headers['x-scal-replication-content'] !== 'METADATA' && versionId && // The new data location is set to null when archiving to a Cold site. // In that case "removing old data location key" is handled by the lifecycle // transition processor. Check the content-length as a null location can // also be from an empty object. 
- (omVal['content-length'] === 0 || - (omVal.location && Array.isArray(omVal.location))) && - locationKeysHaveChanged(objMd.location, omVal.location)) { + (omVal['content-length'] === 0 || (omVal.location && Array.isArray(omVal.location))) && + locationKeysHaveChanged(objMd.location, omVal.location) + ) { log.info('removing old data locations', { method: 'putMetadata', bucketName, objectKey, }); - async.eachLimit(objMd.location, 5, - (loc, nextEach) => dataWrapper.data.delete(loc, log, err => { - if (err) { - log.warn('error removing old data location key', { + async.eachLimit( + objMd.location, + 5, + (loc, nextEach) => + dataWrapper.data.delete(loc, log, err => { + if (err) { + log.warn('error removing old data location key', { + bucketName, + objectKey, + locationKey: loc, + error: err.message, + }); + } + // do not forward the error to let other + // locations be deleted + nextEach(); + }), + () => { + log.debug('done removing old data locations', { + method: 'putMetadata', bucketName, objectKey, - locationKey: loc, - error: err.message, }); } - // do not forward the error to let other - // locations be deleted - nextEach(); - }), - () => { - log.debug('done removing old data locations', { - method: 'putMetadata', - bucketName, - objectKey, - }); - }); + ); } return _respond(response, md, log, next); }); - } - ], callback); + }, + ], + callback + ); }); } @@ -756,29 +765,27 @@ function putObject(request, response, log, callback) { } const payloadLen = parseInt(request.headers['content-length'], 10); const backendInfo = new BackendInfo(config, storageLocation); - return dataStore(context, CIPHER, request, payloadLen, {}, backendInfo, log, - (err, retrievalInfo, md5) => { - if (err) { - log.error('error putting data', { - error: err, - method: 'putObject', - }); - return callback(err); - } - if (contentMD5 !== md5) { - return callback(errors.BadDigest); - } - const responsePayload = constructPutResponse({ - dataStoreName: retrievalInfo.dataStoreName, - dataStoreType: retrievalInfo.dataStoreType, - key: retrievalInfo.key, - size: payloadLen, - dataStoreETag: retrievalInfo.dataStoreETag ? - `1:${retrievalInfo.dataStoreETag}` : `1:${md5}`, - dataStoreVersionId: retrievalInfo.dataStoreVersionId, + return dataStore(context, CIPHER, request, payloadLen, {}, backendInfo, log, (err, retrievalInfo, md5) => { + if (err) { + log.error('error putting data', { + error: err, + method: 'putObject', }); - return _respond(response, responsePayload, log, callback); + return callback(err); + } + if (contentMD5 !== md5) { + return callback(errors.BadDigest); + } + const responsePayload = constructPutResponse({ + dataStoreName: retrievalInfo.dataStoreName, + dataStoreType: retrievalInfo.dataStoreType, + key: retrievalInfo.key, + size: payloadLen, + dataStoreETag: retrievalInfo.dataStoreETag ? 
`1:${retrievalInfo.dataStoreETag}` : `1:${md5}`, + dataStoreVersionId: retrievalInfo.dataStoreVersionId, }); + return _respond(response, responsePayload, log, callback); + }); } function deleteObjectFromExpiration(request, response, userInfo, log, callback) { @@ -800,8 +807,7 @@ function deleteObject(request, response, log, callback) { return callback(err); } const storageLocation = request.headers['x-scal-storage-class']; - const objectGetInfo = dataClient.toObjectGetInfo( - request.objectKey, request.bucketName, storageLocation); + const objectGetInfo = dataClient.toObjectGetInfo(request.objectKey, request.bucketName, storageLocation); if (!objectGetInfo) { log.error('error deleting object in multiple backend', { error: 'cannot create objectGetInfo', @@ -865,9 +871,18 @@ function initiateMultipartUpload(request, response, log, callback) { return callback(errors.MalformedPOSTRequest); } } - return dataClient.createMPU(request.objectKey, metaHeaders, - request.bucketName, undefined, storageLocation, contentType, - cacheControl, contentDisposition, contentEncoding, tagging, log, + return dataClient.createMPU( + request.objectKey, + metaHeaders, + request.bucketName, + undefined, + storageLocation, + contentType, + cacheControl, + contentDisposition, + contentEncoding, + tagging, + log, (err, data) => { if (err) { log.error('error initiating multipart upload', { @@ -880,7 +895,8 @@ function initiateMultipartUpload(request, response, log, callback) { uploadId: data.UploadId, }; return _respond(response, dataRetrievalInfo, log, callback); - }); + } + ); } function abortMultipartUpload(request, response, log, callback) { @@ -890,17 +906,16 @@ function abortMultipartUpload(request, response, log, callback) { } const storageLocation = request.headers['x-scal-storage-class']; const uploadId = request.headers['x-scal-upload-id']; - return dataClient.abortMPU(request.objectKey, uploadId, - storageLocation, request.bucketName, log, err => { - if (err) { - log.error('error aborting MPU', { - error: err, - method: 'abortMultipartUpload', - }); - return callback(err); - } - return _respond(response, {}, log, callback); - }); + return dataClient.abortMPU(request.objectKey, uploadId, storageLocation, request.bucketName, log, err => { + if (err) { + log.error('error aborting MPU', { + error: err, + method: 'abortMultipartUpload', + }); + return callback(err); + } + return _respond(response, {}, log, callback); + }); } function putPart(request, response, log, callback) { @@ -912,9 +927,18 @@ function putPart(request, response, log, callback) { const partNumber = request.headers['x-scal-part-number']; const uploadId = request.headers['x-scal-upload-id']; const payloadLen = parseInt(request.headers['content-length'], 10); - return dataClient.uploadPart(undefined, {}, request, payloadLen, - storageLocation, request.objectKey, uploadId, partNumber, - request.bucketName, log, (err, data) => { + return dataClient.uploadPart( + undefined, + {}, + request, + payloadLen, + storageLocation, + request.objectKey, + uploadId, + partNumber, + request.bucketName, + log, + (err, data) => { if (err) { log.error('error putting MPU part', { error: err, @@ -928,7 +952,8 @@ function putPart(request, response, log, callback) { numberSubParts: data.numberSubParts, }; return _respond(response, dataRetrievalInfo, log, callback); - }); + } + ); } function completeMultipartUpload(request, response, log, callback) { @@ -959,8 +984,7 @@ function completeMultipartUpload(request, response, log, callback) { // FIXME: add error type 
MalformedJSON return callback(errors.MalformedPOSTRequest); } - const partList = getPartList( - parts, request.objectKey, uploadId, storageLocation); + const partList = getPartList(parts, request.objectKey, uploadId, storageLocation); // Azure client will set user metadata at this point. const metaHeaders = { 'x-amz-meta-scal-replication-status': 'REPLICA' }; if (sourceVersionId) { @@ -993,9 +1017,17 @@ function completeMultipartUpload(request, response, log, callback) { contentDisposition: contentDisposition || undefined, contentEncoding: contentEncoding || undefined, }; - return dataClient.completeMPU(request.objectKey, uploadId, - storageLocation, partList, undefined, request.bucketName, - metaHeaders, contentSettings, tagging, log, + return dataClient.completeMPU( + request.objectKey, + uploadId, + storageLocation, + partList, + undefined, + request.bucketName, + metaHeaders, + contentSettings, + tagging, + log, (err, retrievalInfo) => { if (err) { log.error('error completing MPU', { @@ -1007,16 +1039,14 @@ function completeMultipartUpload(request, response, log, callback) { // The logic here is an aggregate of code coming from // lib/api/completeMultipartUpload.js. - const { key, dataStoreType, dataStoreVersionId } = - retrievalInfo; + const { key, dataStoreType, dataStoreVersionId } = retrievalInfo; let size; let dataStoreETag; if (skipMpuPartProcessing(retrievalInfo)) { size = retrievalInfo.contentLength; dataStoreETag = retrievalInfo.eTag; } else { - const { aggregateSize, aggregateETag } = - generateMpuAggregateInfo(parts); + const { aggregateSize, aggregateETag } = generateMpuAggregateInfo(parts); size = aggregateSize; dataStoreETag = aggregateETag; } @@ -1029,7 +1059,8 @@ function completeMultipartUpload(request, response, log, callback) { dataStoreVersionId, }); return _respond(response, responsePayload, log, callback); - }); + } + ); }); return undefined; } @@ -1047,23 +1078,19 @@ function putObjectTagging(request, response, log, callback) { // Kafka entry will not have the dataStoreVersionId available so we // retrieve it from metadata here. if (dataStoreVersionId === '') { - return metadataGetObject(sourceBucket, request.objectKey, - sourceVersionId, null, log, (err, objMD) => { - if (err) { - return callback(err); - } - if (!objMD) { - return callback(errors.NoSuchKey); - } - const backend = objMD.replicationInfo.backends.find(o => - o.site === site); - dataStoreVersionId = backend.dataStoreVersionId; - return handleTaggingOperation(request, response, 'Put', - dataStoreVersionId, log, callback); - }); + return metadataGetObject(sourceBucket, request.objectKey, sourceVersionId, null, log, (err, objMD) => { + if (err) { + return callback(err); + } + if (!objMD) { + return callback(errors.NoSuchKey); + } + const backend = objMD.replicationInfo.backends.find(o => o.site === site); + dataStoreVersionId = backend.dataStoreVersionId; + return handleTaggingOperation(request, response, 'Put', dataStoreVersionId, log, callback); + }); } - return handleTaggingOperation(request, response, 'Put', dataStoreVersionId, - log, callback); + return handleTaggingOperation(request, response, 'Put', dataStoreVersionId, log, callback); } function deleteObjectTagging(request, response, log, callback) { @@ -1079,29 +1106,24 @@ function deleteObjectTagging(request, response, log, callback) { // Kafka entry will not have the dataStoreVersionId available so we // retrieve it from metadata here. 
if (dataStoreVersionId === '') { - return metadataGetObject(sourceBucket, request.objectKey, - sourceVersionId, null, log, (err, objMD) => { - if (err) { - return callback(err); - } - if (!objMD) { - return callback(errors.NoSuchKey); - } - const backend = objMD.replicationInfo.backends.find(o => - o.site === site); - dataStoreVersionId = backend.dataStoreVersionId; - return handleTaggingOperation(request, response, 'Delete', - dataStoreVersionId, log, callback); - }); + return metadataGetObject(sourceBucket, request.objectKey, sourceVersionId, null, log, (err, objMD) => { + if (err) { + return callback(err); + } + if (!objMD) { + return callback(errors.NoSuchKey); + } + const backend = objMD.replicationInfo.backends.find(o => o.site === site); + dataStoreVersionId = backend.dataStoreVersionId; + return handleTaggingOperation(request, response, 'Delete', dataStoreVersionId, log, callback); + }); } - return handleTaggingOperation(request, response, 'Delete', - dataStoreVersionId, log, callback); + return handleTaggingOperation(request, response, 'Delete', dataStoreVersionId, log, callback); } function _createAzureConditionalDeleteObjectGetInfo(request) { const { objectKey, bucketName, headers } = request; - const objectGetInfo = dataClient.toObjectGetInfo( - objectKey, bucketName, headers['x-scal-storage-class']); + const objectGetInfo = dataClient.toObjectGetInfo(objectKey, bucketName, headers['x-scal-storage-class']); return Object.assign({}, objectGetInfo, { options: { accessConditions: { @@ -1190,10 +1212,7 @@ function _shouldConditionallyDelete(request, locations) { return false; } const storageClass = request.headers['x-scal-storage-class']; - const type = - storageClass && - locationConstraints[storageClass] && - locationConstraints[storageClass].type; + const type = storageClass && locationConstraints[storageClass] && locationConstraints[storageClass].type; const isExternalBackend = type && constants.externalBackends[type]; const isNotVersioned = !locations[0].dataStoreVersionId; return isExternalBackend && isNotVersioned; @@ -1216,66 +1235,83 @@ function batchDelete(request, response, userInfo, log, callback) { } const locations = parsedPayload.Locations; if (_shouldConditionallyDelete(request, locations)) { - return _performConditionalDelete( - request, response, locations, log, callback); + return _performConditionalDelete(request, response, locations, log, callback); } log.trace('batch delete locations', { locations }); - return async.eachLimit(locations, 5, (loc, next) => { - const _loc = Object.assign({}, loc); - if (_loc.dataStoreVersionId !== undefined) { - // required by cloud backends - _loc.deleteVersion = true; - } - dataWrapper.data.delete(_loc, log, err => { - if (err?.is?.ObjNotFound) { - log.info('batch delete: data location do not exist', { - method: 'batchDelete', - location: loc, - }); - return next(); + return async.eachLimit( + locations, + 5, + (loc, next) => { + const _loc = Object.assign({}, loc); + if (_loc.dataStoreVersionId !== undefined) { + // required by cloud backends + _loc.deleteVersion = true; } - return next(err); - }); - }, err => { - if (err) { - log.error('batch delete failed', { - method: 'batchDelete', - locations, - error: err, + dataWrapper.data.delete(_loc, log, err => { + if (err?.is?.ObjNotFound) { + log.info('batch delete: data location do not exist', { + method: 'batchDelete', + location: loc, + }); + return next(); + } + return next(err); }); - return callback(err); - } - log.debug('batch delete successful', { locations }); - - // 
Update inflight metrics for the data which has just been freed - const bucket = request.bucketName; - const contentLength = locations.reduce((length, loc) => length + loc.size, 0); - - // TODO: `bucket` should probably always be passed, to be confirmed in CLDSRV-643 - // For now be leniant and skip inflight updates if it is not specified, to avoid any - // impact esp. on CRR - if (!bucket || !config.isQuotaEnabled() || contentLength == 0) { - return _respond(response, null, log, callback); - } - - return async.waterfall([ - next => metadata.getBucket(bucket, log, next), - (bucketMD, next) => quotaUtils.validateQuotas(request, bucketMD, request.accountQuotas, - ['objectDelete'], 'objectDelete', -contentLength, false, log, next), - ], err => { + }, + err => { if (err) { - // Ignore error, as the data has been deleted already: only inflight count - // has not been updated, and will be eventually consistent anyway - log.warn('batch delete failed to update inflights', { + log.error('batch delete failed', { method: 'batchDelete', locations, error: err, }); + return callback(err); } + log.debug('batch delete successful', { locations }); - return _respond(response, null, log, callback); - }); - }); + // Update inflight metrics for the data which has just been freed + const bucket = request.bucketName; + const contentLength = locations.reduce((length, loc) => length + loc.size, 0); + + // TODO: `bucket` should probably always be passed, to be confirmed in CLDSRV-643 + // For now be leniant and skip inflight updates if it is not specified, to avoid any + // impact esp. on CRR + if (!bucket || !config.isQuotaEnabled() || contentLength == 0) { + return _respond(response, null, log, callback); + } + + return async.waterfall( + [ + next => metadata.getBucket(bucket, log, next), + (bucketMD, next) => + quotaUtils.validateQuotas( + request, + bucketMD, + request.accountQuotas, + ['objectDelete'], + 'objectDelete', + -contentLength, + false, + log, + next + ), + ], + err => { + if (err) { + // Ignore error, as the data has been deleted already: only inflight count + // has not been updated, and will be eventually consistent anyway + log.warn('batch delete failed to update inflights', { + method: 'batchDelete', + locations, + error: err, + }); + } + + return _respond(response, null, log, callback); + } + ); + } + ); }); } @@ -1309,59 +1345,45 @@ function listLifecycle(request, response, userInfo, log, cb) { } function putBucketIndexes(indexes, request, response, userInfo, log, callback) { - metadata.putBucketIndexes( - request.bucketName, - indexes, - log, - err => { - if (err) { - log.error('error putting indexes', { - error: err, - method: 'putBucketindexes', - }); - return callback(err); - } + metadata.putBucketIndexes(request.bucketName, indexes, log, err => { + if (err) { + log.error('error putting indexes', { + error: err, + method: 'putBucketindexes', + }); + return callback(err); + } - return _respond(response, {}, log, callback); - }, - ); + return _respond(response, {}, log, callback); + }); } function getBucketIndexes(request, response, userInfo, log, callback) { - metadata.getBucketIndexes( - request.bucketName, - log, - (err, indexObj) => { - if (err) { - log.error('error getting indexes', { - error: err, - method: 'getBucketindexes', - }); - return callback(err); - } + metadata.getBucketIndexes(request.bucketName, log, (err, indexObj) => { + if (err) { + log.error('error getting indexes', { + error: err, + method: 'getBucketindexes', + }); + return callback(err); + } - return 
_respond(response, { Indexes: indexObj }, log, callback); - }, - ); + return _respond(response, { Indexes: indexObj }, log, callback); + }); } function deleteBucketIndexes(indexes, request, response, userInfo, log, callback) { - metadata.deleteBucketIndexes( - request.bucketName, - indexes, - log, - err => { - if (err) { - log.error('error deleting indexes', { - error: err, - method: 'deleteBucketindexes', - }); - return callback(err); - } + metadata.deleteBucketIndexes(request.bucketName, indexes, log, err => { + if (err) { + log.error('error deleting indexes', { + error: err, + method: 'deleteBucketindexes', + }); + return callback(err); + } - return _respond(response, {}, log, callback); - }, - ); + return _respond(response, {}, log, callback); + }); } const backbeatRoutes = { @@ -1403,13 +1425,17 @@ const backbeatRoutes = { const indexEntrySchema = joi.object({ name: joi.string().required(), - keys: joi.array().items(joi.object({ - key: joi.string(), - order: joi.number().valid(1, -1), - })).required(), + keys: joi + .array() + .items( + joi.object({ + key: joi.string(), + order: joi.number().valid(1, -1), + }) + ) + .required(), }); - const indexingSchema = joi.array().items(indexEntrySchema).min(1); function routeIndexingAPIs(request, response, userInfo, log) { @@ -1458,7 +1484,6 @@ function routeIndexingAPIs(request, response, userInfo, log) { }); } - function routeBackbeat(clientIP, request, response, log) { // Attach the apiMethod method to the request, so it can used by monitoring in the server // eslint-disable-next-line no-param-reassign @@ -1501,158 +1526,174 @@ function routeBackbeat(clientIP, request, response, log) { log.debug('unable to proxy backbeat api request', { backbeatConfig: config.backbeat, }); - return responseJSONBody(errors.MethodNotAllowed, null, response, - log); + return responseJSONBody(errors.MethodNotAllowed, null, response, log); } const path = request.url.replace('/_/backbeat/api', '/_/'); const { host, port } = config.backbeat; const target = `http://${host}:${port}${path}`; // TODO CLDSRV-591: shall we use the authorization results here? - return auth.server.doAuth(request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { - if (err) { - log.debug('authentication error', { - error: err, - }); - return responseJSONBody(err, null, response, log); - } - // eslint-disable-next-line no-param-reassign - request.accountQuotas = infos?.accountQuota; - // FIXME for now, any authenticated user can access API - // routes. We should introduce admin accounts or accounts - // with admin privileges, and restrict access to those - // only. - if (userInfo.getCanonicalID() === constants.publicId) { - log.debug('unauthenticated access to API routes', { - method: request.method, + return auth.server.doAuth( + request, + log, + (err, userInfo, authorizationResults, streamingV4Params, infos) => { + if (err) { + log.debug('authentication error', { + error: err, + }); + return responseJSONBody(err, null, response, log); + } + // eslint-disable-next-line no-param-reassign + request.accountQuotas = infos?.accountQuota; + // FIXME for now, any authenticated user can access API + // routes. We should introduce admin accounts or accounts + // with admin privileges, and restrict access to those + // only. 
+ if (userInfo.getCanonicalID() === constants.publicId) { + log.debug('unauthenticated access to API routes', { + method: request.method, + }); + return responseJSONBody(errors.AccessDenied, null, response, log); + } + return backbeatProxy.web(request, response, { target }, err => { + log.error('error proxying request to api server', { error: err.message }); + return responseJSONBody(errors.ServiceUnavailable, null, response, log); }); - return responseJSONBody( - errors.AccessDenied, null, response, log); - } - return backbeatProxy.web(request, response, { target }, err => { - log.error('error proxying request to api server', - { error: err.message }); - return responseJSONBody(errors.ServiceUnavailable, null, - response, log); - }); - }, 's3', requestContexts); + }, + 's3', + requestContexts + ); } - const useMultipleBackend = - request.resourceType && request.resourceType.startsWith('multiplebackend'); + const useMultipleBackend = request.resourceType && request.resourceType.startsWith('multiplebackend'); const invalidRequest = - (!request.resourceType || - (_isObjectRequest(request) && - (!request.bucketName || !request.objectKey)) || - (!request.query.operation && - request.resourceType === 'multiplebackenddata')); + !request.resourceType || + (_isObjectRequest(request) && (!request.bucketName || !request.objectKey)) || + (!request.query.operation && request.resourceType === 'multiplebackenddata'); const invalidRoute = - (backbeatRoutes[request.method] === undefined || - backbeatRoutes[request.method][request.resourceType] === undefined || - (backbeatRoutes[request.method][request.resourceType] - [request.query.operation] === undefined && - request.resourceType === 'multiplebackenddata')); + backbeatRoutes[request.method] === undefined || + backbeatRoutes[request.method][request.resourceType] === undefined || + (backbeatRoutes[request.method][request.resourceType][request.query.operation] === undefined && + request.resourceType === 'multiplebackenddata'); if (invalidRequest || invalidRoute) { log.debug(invalidRequest ? 'invalid request' : 'no such route'); return responseJSONBody(errors.MethodNotAllowed, null, response, log); } - return async.waterfall([next => auth.server.doAuth( - // TODO CLDSRV-591: shall we use the authorization results here? - request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { - if (err) { - log.debug('authentication error', { - error: err, - }); - } - // eslint-disable-next-line no-param-reassign - request.accountQuotas = infos?.accountQuota; - return next(err, userInfo); - }, 's3', requestContexts), - (userInfo, next) => { - // TODO: understand why non-object requests (batchdelete) were not authenticated - if (!_isObjectRequest(request)) { - if (userInfo.getCanonicalID() === constants.publicId) { - log.debug(`unauthenticated access to backbeat ${request.resourceType} routes`); - return responseJSONBody( - errors.AccessDenied, null, response, log); - } + return async.waterfall( + [ + next => + auth.server.doAuth( + // TODO CLDSRV-591: shall we use the authorization results here? 
+ request, + log, + (err, userInfo, authorizationResults, streamingV4Params, infos) => { + if (err) { + log.debug('authentication error', { + error: err, + }); + } + // eslint-disable-next-line no-param-reassign + request.accountQuotas = infos?.accountQuota; + return next(err, userInfo); + }, + 's3', + requestContexts + ), + (userInfo, next) => { + // TODO: understand why non-object requests (batchdelete) were not authenticated + if (!_isObjectRequest(request)) { + if (userInfo.getCanonicalID() === constants.publicId) { + log.debug(`unauthenticated access to backbeat ${request.resourceType} routes`); + return responseJSONBody(errors.AccessDenied, null, response, log); + } + + if (request.resourceType === 'index') { + return routeIndexingAPIs(request, response, userInfo, log); + } - if (request.resourceType === 'index') { - return routeIndexingAPIs(request, response, userInfo, log); + const route = backbeatRoutes[request.method][request.resourceType]; + return route(request, response, userInfo, log, err => { + if (err) { + return responseJSONBody(err, null, response, log); + } + return undefined; + }); } - const route = backbeatRoutes[request.method][request.resourceType]; - return route(request, response, userInfo, log, err => { + const decodedVidResult = decodeVersionId(request.query); + if (decodedVidResult instanceof Error) { + log.trace('invalid versionId query', { + versionId: request.query.versionId, + error: decodedVidResult, + }); + return responseJSONBody(errors.InvalidArgument, null, response, log); + } + const versionId = decodedVidResult; + if (useMultipleBackend) { + // Bucket and object do not exist in metadata. + return next(null, null, null); + } + const mdValParams = { + bucketName: request.bucketName, + objectKey: request.objectKey, + authInfo: userInfo, + versionId, + requestType: request.apiMethods || 'ReplicateObject', + request, + }; + return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, next); + }, + (bucketInfo, objMd, next) => { + if (!useMultipleBackend) { + const versioningConfig = bucketInfo.getVersioningConfiguration(); + // The following makes sure that only replication destination-related operations + // target buckets with versioning enabled. + const isVersioningRequired = request.headers['x-scal-versioning-required'] === 'true'; + if (isVersioningRequired && (!versioningConfig || versioningConfig.Status !== 'Enabled')) { + log.debug('bucket versioning is not enabled'); + return next(errors.InvalidBucketState); + } + return backbeatRoutes[request.method][request.resourceType]( + request, + response, + bucketInfo, + objMd, + log, + next + ); + } + if (request.resourceType === 'multiplebackendmetadata') { + return backbeatRoutes[request.method][request.resourceType](request, response, log, next); + } + return backbeatRoutes[request.method][request.resourceType][request.query.operation]( + request, + response, + log, + next + ); + }, + ], + err => + async.forEachLimit( + // Finalizer hooks are used in a quota context and ensure consistent + // metrics in case of API errors. No operation required if the API + // completed successfully. 
+ request.finalizerHooks, + 5, + (hook, done) => hook(err, done), + () => { if (err) { + log.error('error processing backbeat request', { + error: err, + }); return responseJSONBody(err, null, response, log); } + log.debug('backbeat route response sent successfully'); return undefined; - }); - } - - const decodedVidResult = decodeVersionId(request.query); - if (decodedVidResult instanceof Error) { - log.trace('invalid versionId query', { - versionId: request.query.versionId, - error: decodedVidResult, - }); - return responseJSONBody(errors.InvalidArgument, null, response, log); - } - const versionId = decodedVidResult; - if (useMultipleBackend) { - // Bucket and object do not exist in metadata. - return next(null, null, null); - } - const mdValParams = { - bucketName: request.bucketName, - objectKey: request.objectKey, - authInfo: userInfo, - versionId, - requestType: request.apiMethods || 'ReplicateObject', - request, - }; - return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, next); - }, - (bucketInfo, objMd, next) => { - if (!useMultipleBackend) { - const versioningConfig = bucketInfo.getVersioningConfiguration(); - // The following makes sure that only replication destination-related operations - // target buckets with versioning enabled. - const isVersioningRequired = request.headers['x-scal-versioning-required'] === 'true'; - if (isVersioningRequired && (!versioningConfig || versioningConfig.Status !== 'Enabled')) { - log.debug('bucket versioning is not enabled'); - return next(errors.InvalidBucketState); } - return backbeatRoutes[request.method][request.resourceType]( - request, response, bucketInfo, objMd, log, next); - } - if (request.resourceType === 'multiplebackendmetadata') { - return backbeatRoutes[request.method][request.resourceType]( - request, response, log, next); - } - return backbeatRoutes[request.method][request.resourceType] - [request.query.operation](request, response, log, next); - }], - err => async.forEachLimit( - // Finalizer hooks are used in a quota context and ensure consistent - // metrics in case of API errors. No operation required if the API - // completed successfully. 
- request.finalizerHooks, - 5, - (hook, done) => hook(err, done), - () => { - if (err) { - log.error('error processing backbeat request', { - error: err, - }); - return responseJSONBody(err, null, response, log); - } - log.debug('backbeat route response sent successfully'); - return undefined; - }, - )); + ) + ); } - module.exports = routeBackbeat; diff --git a/lib/routes/routeMetadata.js b/lib/routes/routeMetadata.js index a5cac213e2..ca405924e5 100644 --- a/lib/routes/routeMetadata.js +++ b/lib/routes/routeMetadata.js @@ -45,68 +45,85 @@ function routeMetadata(clientIP, request, response, log) { // restrict access to only routes ending in bucket, log or id const { resourceType, subResource } = request; - if (resourceType === 'admin' - && !['bucket', 'log', 'id'].includes(subResource)) { + if (resourceType === 'admin' && !['bucket', 'log', 'id'].includes(subResource)) { return responseJSONBody(errors.NotImplemented, null, response, log); } const ip = requestUtils.getClientIp(request, config); const isSecure = requestUtils.getHttpProtocolSecurity(request, config); - const requestContexts = [new RequestContext(request.headers, request.query, - request.generalResource, request.specificResource, ip, - isSecure, request.resourceType, 'metadata')]; - return waterfall([ - next => auth.server.doAuth(request, log, (err, userInfo, authRes) => { - if (err) { - log.debug('authentication error', { - error: err, - method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey, - }); - return next(err); - } - // authRes is not defined for account credentials - if (authRes && !authRes[0].isAllowed) { - return next(errors.AccessDenied); - } - return next(null, userInfo); - }, 's3', requestContexts), - (userInfo, next) => { - if (userInfo.getCanonicalID() === constants.publicId) { - log.debug('unauthenticated access to API routes', { - method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey, - }); - return next(errors.AccessDenied); - } - const { url } = request; - const path = url.startsWith('/_/metadata/admin') ? 
- url.replace('/_/metadata/admin/', '/_/') : - url.replace('/_/metadata/', '/'); - // bucketd is always configured on the loopback interface in s3c - const endpoint = bootstrap[0]; - const target = `http://${endpoint}${path}`; - return metadataProxy.web(request, response, { target }, err => { - if (err) { - log.error('error proxying request to metadata admin server', - { error: err.message }); - return next(errors.ServiceUnavailable); + const requestContexts = [ + new RequestContext( + request.headers, + request.query, + request.generalResource, + request.specificResource, + ip, + isSecure, + request.resourceType, + 'metadata' + ), + ]; + return waterfall( + [ + next => + auth.server.doAuth( + request, + log, + (err, userInfo, authRes) => { + if (err) { + log.debug('authentication error', { + error: err, + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); + return next(err); + } + // authRes is not defined for account credentials + if (authRes && !authRes[0].isAllowed) { + return next(errors.AccessDenied); + } + return next(null, userInfo); + }, + 's3', + requestContexts + ), + (userInfo, next) => { + if (userInfo.getCanonicalID() === constants.publicId) { + log.debug('unauthenticated access to API routes', { + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); + return next(errors.AccessDenied); } - return next(); - }); - }], + const { url } = request; + const path = url.startsWith('/_/metadata/admin') + ? url.replace('/_/metadata/admin/', '/_/') + : url.replace('/_/metadata/', '/'); + // bucketd is always configured on the loopback interface in s3c + const endpoint = bootstrap[0]; + const target = `http://${endpoint}${path}`; + return metadataProxy.web(request, response, { target }, err => { + if (err) { + log.error('error proxying request to metadata admin server', { error: err.message }); + return next(errors.ServiceUnavailable); + } + return next(); + }); + }, + ], err => { if (err) { return responseJSONBody(err, null, response, log); } - log.debug('metadata route response sent successfully', - { method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey }); + log.debug('metadata route response sent successfully', { + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); return undefined; - }); + } + ); } - module.exports = routeMetadata; diff --git a/lib/routes/routeVeeam.js b/lib/routes/routeVeeam.js index b42055ea3c..064e809b63 100644 --- a/lib/routes/routeVeeam.js +++ b/lib/routes/routeVeeam.js @@ -16,10 +16,7 @@ const { responseXMLBody } = s3routes.routesUtils; auth.setHandler(vault); -const validObjectKeys = [ - `${validPath}system.xml`, - `${validPath}capacity.xml`, -]; +const validObjectKeys = [`${validPath}system.xml`, `${validPath}capacity.xml`]; const apiToAction = { PUT: 'PutObject', @@ -67,10 +64,14 @@ function checkBucketAndKey(bucketName, objectKey, requestQueryParams, method, lo // Download relies on GETs calls with auth in query parameters, that can be // checked if 'X-Amz-Credential' is included. // Deletion requires that the tags of the object are returned. 
- if (requestQueryParams && Object.keys(requestQueryParams).length > 0 - && !(method === 'GET' && (requestQueryParams['X-Amz-Credential'] || ('tagging' in requestQueryParams)))) { - return errorInstances.InvalidRequest - .customizeDescription('The Veeam SOSAPI folder does not support this action.'); + if ( + requestQueryParams && + Object.keys(requestQueryParams).length > 0 && + !(method === 'GET' && (requestQueryParams['X-Amz-Credential'] || 'tagging' in requestQueryParams)) + ) { + return errorInstances.InvalidRequest.customizeDescription( + 'The Veeam SOSAPI folder does not support this action.' + ); } if (typeof objectKey !== 'string' || !validObjectKeys.includes(objectKey)) { log.debug('invalid object name', { objectKey }); @@ -96,44 +97,54 @@ function authorizationMiddleware(request, response, api, log, callback) { return responseXMLBody(errors.AccessDenied, null, response, log); } const requestContexts = prepareRequestContexts(api, request); - return async.waterfall([ - next => auth.server.doAuth(request, log, (err, userInfo, authorizationResults, streamingV4Params) => { - if (err) { - log.debug('authentication error', { - error: err, - method: request.method, + return async.waterfall( + [ + next => + auth.server.doAuth( + request, + log, + (err, userInfo, authorizationResults, streamingV4Params) => { + if (err) { + log.debug('authentication error', { + error: err, + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); + } + /* eslint-disable no-param-reassign */ + request.authorizationResults = authorizationResults; + request.streamingV4Params = streamingV4Params; + /* eslint-enable no-param-reassign */ + return next(err, userInfo); + }, + 's3', + requestContexts + ), + (userInfo, next) => { + // Ensure only supported HTTP verbs and actions are called, + // otherwise deny access + const requestType = apiToAction[api]; + if (!requestType) { + return next(errors.AccessDenied); + } + const mdValParams = { bucketName: request.bucketName, - objectKey: request.objectKey, - }); + authInfo: userInfo, + requestType, + request, + }; + return next(null, mdValParams); + }, + (mdValParams, next) => standardMetadataValidateBucket(mdValParams, request.actionImplicitDenies, log, next), + ], + (err, bucketMd) => { + if (err || !bucketMd) { + return responseXMLBody(err, null, response, log); } - /* eslint-disable no-param-reassign */ - request.authorizationResults = authorizationResults; - request.streamingV4Params = streamingV4Params; - /* eslint-enable no-param-reassign */ - return next(err, userInfo); - }, 's3', requestContexts), - (userInfo, next) => { - // Ensure only supported HTTP verbs and actions are called, - // otherwise deny access - const requestType = apiToAction[api]; - if (!requestType) { - return next(errors.AccessDenied); - } - const mdValParams = { - bucketName: request.bucketName, - authInfo: userInfo, - requestType, - request, - }; - return next(null, mdValParams); - }, - (mdValParams, next) => standardMetadataValidateBucket(mdValParams, request.actionImplicitDenies, log, next), - ], (err, bucketMd) => { - if (err || !bucketMd) { - return responseXMLBody(err, null, response, log); + return callback(request, response, bucketMd, log); } - return callback(request, response, bucketMd, log); - }); + ); } function _normalizeVeeamRequest(req) { @@ -151,11 +162,10 @@ function _normalizeVeeamRequest(req) { req.query = parsedUrl.query; req.bucketName = pathArr[1]; req.objectKey = pathArr.slice(2).join('/'); - const contentLength = 
req.headers['x-amz-decoded-content-length'] ? - req.headers['x-amz-decoded-content-length'] : - req.headers['content-length']; - req.parsedContentLength = - Number.parseInt(contentLength?.toString() ?? '', 10); + const contentLength = req.headers['x-amz-decoded-content-length'] + ? req.headers['x-amz-decoded-content-length'] + : req.headers['content-length']; + req.parsedContentLength = Number.parseInt(contentLength?.toString() ?? '', 10); /* eslint-enable no-param-reassign */ } @@ -205,11 +215,15 @@ function routeVeeam(clientIP, request, response, log) { return responseXMLBody(error, '', response, log); } const bucketOrKeyError = checkBucketAndKey( - request.bucketName, request.objectKey, request.query, requestMethod, log); + request.bucketName, + request.objectKey, + request.query, + requestMethod, + log + ); if (bucketOrKeyError) { - log.error('error with bucket or key value', - { error: bucketOrKeyError }); + log.error('error with bucket or key value', { error: bucketOrKeyError }); return routesUtils.responseXMLBody(bucketOrKeyError, null, response, log); } return authorizationMiddleware(request, response, requestMethod, log, method); diff --git a/lib/routes/routeWorkflowEngineOperator.js b/lib/routes/routeWorkflowEngineOperator.js index bf73a1ed68..183ddbed51 100644 --- a/lib/routes/routeWorkflowEngineOperator.js +++ b/lib/routes/routeWorkflowEngineOperator.js @@ -4,12 +4,10 @@ const httpProxy = require('http-proxy'); const workflowEngineOperatorProxy = httpProxy.createProxyServer({ ignorePath: true, }); -const { auth, errors, s3routes } = - require('arsenal'); +const { auth, errors, s3routes } = require('arsenal'); const { responseJSONBody } = s3routes.routesUtils; const vault = require('../auth/vault'); -const prepareRequestContexts = require( -'../api/apiUtils/authorization/prepareRequestContexts'); +const prepareRequestContexts = require('../api/apiUtils/authorization/prepareRequestContexts'); const { config } = require('../Config'); const constants = require('../../constants'); @@ -52,45 +50,45 @@ function routeWorkflowEngineOperator(clientIP, request, response, log) { log.debug('unable to proxy workflow engine operator request', { workflowEngineConfig: config.workflowEngineOperator, }); - return responseJSONBody(errors.MethodNotAllowed, null, response, - log); + return responseJSONBody(errors.MethodNotAllowed, null, response, log); } const path = request.url.replace('/_/workflow-engine-operator/api', '/_/'); const { host, port } = config.workflowEngineOperator; const target = `http://${host}:${port}${path}`; - return auth.server.doAuth(request, log, (err, userInfo) => { - if (err) { - log.debug('authentication error', { - error: err, - method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey, + return auth.server.doAuth( + request, + log, + (err, userInfo) => { + if (err) { + log.debug('authentication error', { + error: err, + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); + return responseJSONBody(err, null, response, log); + } + // FIXME for now, any authenticated user can access API + // routes. We should introduce admin accounts or accounts + // with admin privileges, and restrict access to those + // only. 
+ if (userInfo.getCanonicalID() === constants.publicId) { + log.debug('unauthenticated access to API routes', { + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); + return responseJSONBody(errors.AccessDenied, null, response, log); + } + return workflowEngineOperatorProxy.web(request, response, { target }, err => { + log.error('error proxying request to api server', { error: err.message }); + return responseJSONBody(errors.ServiceUnavailable, null, response, log); }); - return responseJSONBody(err, null, response, log); - } - // FIXME for now, any authenticated user can access API - // routes. We should introduce admin accounts or accounts - // with admin privileges, and restrict access to those - // only. - if (userInfo.getCanonicalID() === constants.publicId) { - log.debug('unauthenticated access to API routes', { - method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey, - }); - return responseJSONBody( - errors.AccessDenied, null, response, log); - } - return workflowEngineOperatorProxy.web( - request, response, { target }, err => { - log.error('error proxying request to api server', - { error: err.message }); - return responseJSONBody(errors.ServiceUnavailable, null, - response, log); - }); - }, 's3', requestContexts); + }, + 's3', + requestContexts + ); } } - module.exports = routeWorkflowEngineOperator; diff --git a/lib/routes/utilities/pushReplicationMetric.js b/lib/routes/utilities/pushReplicationMetric.js index c81027a494..1fb95f24c3 100644 --- a/lib/routes/utilities/pushReplicationMetric.js +++ b/lib/routes/utilities/pushReplicationMetric.js @@ -14,10 +14,7 @@ function getMetricToPush(prevObjectMD, newObjectMD) { // metrics if their value has changed. try { assert.deepStrictEqual(prevObjectMD.getAcl(), newObjectMD.getAcl()); - assert.deepStrictEqual( - prevObjectMD.getTags(), - newObjectMD.getTags() - ); + assert.deepStrictEqual(prevObjectMD.getTags(), newObjectMD.getTags()); } catch { return 'replicateTags'; } diff --git a/lib/routes/veeam/delete.js b/lib/routes/veeam/delete.js index 20d73a1662..cfdba191c9 100644 --- a/lib/routes/veeam/delete.js +++ b/lib/routes/veeam/delete.js @@ -1,4 +1,3 @@ - const { s3routes, errors } = require('arsenal'); const metadata = require('../../metadata/wrapper'); const { isSystemXML } = require('./utils'); @@ -18,8 +17,7 @@ function deleteVeeamCapabilities(bucketName, objectKey, bucketMd, log, callback) const capabilityFieldName = isSystemXML(objectKey) ? 
'SystemInfo' : 'CapacityInfo'; // Ensure file exists in metadata before deletion - if (!bucketMd._capabilities?.VeeamSOSApi - || !bucketMd._capabilities?.VeeamSOSApi[capabilityFieldName]) { + if (!bucketMd._capabilities?.VeeamSOSApi || !bucketMd._capabilities?.VeeamSOSApi[capabilityFieldName]) { return callback(errors.NoSuchKey); } // eslint-disable-next-line no-param-reassign diff --git a/lib/routes/veeam/get.js b/lib/routes/veeam/get.js index d9eb242cb0..5f298eda15 100644 --- a/lib/routes/veeam/get.js +++ b/lib/routes/veeam/get.js @@ -18,8 +18,7 @@ function getVeeamFile(request, response, bucketMd, log) { return responseXMLBody(errors.NoSuchBucket, null, response, log); } if ('tagging' in request.query) { - return respondWithData(request, response, log, bucketMd, - buildHeadXML('')); + return respondWithData(request, response, log, bucketMd, buildHeadXML('')); } return metadata.getBucket(request.bucketName, log, (err, data) => { if (err) { @@ -38,8 +37,14 @@ function getVeeamFile(request, response, bucketMd, log) { const builder = new xml2js.Builder({ headless: true, }); - return respondWithData(request, response, log, data, - buildHeadXML(builder.buildObject(fileToBuild.value)), modified); + return respondWithData( + request, + response, + log, + data, + buildHeadXML(builder.buildObject(fileToBuild.value)), + modified + ); }); } diff --git a/lib/routes/veeam/head.js b/lib/routes/veeam/head.js index b94624b364..4da353926c 100644 --- a/lib/routes/veeam/head.js +++ b/lib/routes/veeam/head.js @@ -35,8 +35,13 @@ function headVeeamFile(request, response, bucketMd, log) { headless: true, }); const dataBuffer = Buffer.from(buildHeadXML(builder.buildObject(fileToBuild))); - return responseContentHeaders(null, {}, getResponseHeader(request, data, - dataBuffer, modified, log), response, log); + return responseContentHeaders( + null, + {}, + getResponseHeader(request, data, dataBuffer, modified, log), + response, + log + ); }); } diff --git a/lib/routes/veeam/list.js b/lib/routes/veeam/list.js index 1fcb2e0158..4cc23bc97f 100644 --- a/lib/routes/veeam/list.js +++ b/lib/routes/veeam/list.js @@ -7,7 +7,6 @@ const { responseXMLBody } = require('arsenal/build/lib/s3routes/routesUtils'); const { respondWithData, getResponseHeader, buildHeadXML, validPath } = require('./utils'); const { processVersions, processMasterVersions } = require('../../api/bucketGet'); - /** * Utility function to build a standard response for the LIST route. * It adds the supported path by default as a static and default file. 
@@ -46,7 +45,7 @@ function buildXMLResponse(request, arrayOfFiles, versioned = false) { DisplayName: 'Veeam SOSAPI', }, StorageClass: 'VIRTUAL', - } + }, })); entries.push({ key: validPath, @@ -61,7 +60,7 @@ function buildXMLResponse(request, arrayOfFiles, versioned = false) { DisplayName: 'Veeam SOSAPI', }, StorageClass: 'VIRTUAL', - } + }, }); // Add the folder as the base file if (versioned) { @@ -88,8 +87,12 @@ function listVeeamFiles(request, response, bucketMd, log) { } // Only accept list-type query parameter if (!('list-type' in request.query) && !('versions' in request.query)) { - return responseXMLBody(errorInstances.InvalidRequest - .customizeDescription('The Veeam folder does not support this action.'), null, response, log); + return responseXMLBody( + errorInstances.InvalidRequest.customizeDescription('The Veeam folder does not support this action.'), + null, + response, + log + ); } return metadata.getBucket(request.bucketName, log, (err, data) => { if (err) { @@ -118,14 +121,18 @@ function listVeeamFiles(request, response, bucketMd, log) { }); const dataBuffer = Buffer.from(buildHeadXML(builder.buildObject(file))); filesToBuild.push({ - ...getResponseHeader(request, data, - dataBuffer, lastModified, log), + ...getResponseHeader(request, data, dataBuffer, lastModified, log), name: file.name, }); }); // When `versions` is present, listing should return a versioned list - return respondWithData(request, response, log, data, - buildXMLResponse(request, filesToBuild, 'versions' in request.query)); + return respondWithData( + request, + response, + log, + data, + buildXMLResponse(request, filesToBuild, 'versions' in request.query) + ); }); } diff --git a/lib/routes/veeam/put.js b/lib/routes/veeam/put.js index 521d0d7d59..06d3074053 100644 --- a/lib/routes/veeam/put.js +++ b/lib/routes/veeam/put.js @@ -24,57 +24,68 @@ function putVeeamFile(request, response, bucketMd, log) { return errors.NoSuchBucket; } - return async.waterfall([ - next => { - // Extract the data from the request, keep it in memory - writeContinue(request, response); - return receiveData(request, log, next); - }, - (value, next) => parseString(value, { explicitArray: false }, (err, parsed) => { - // Convert the received XML to a JS object + return async.waterfall( + [ + next => { + // Extract the data from the request, keep it in memory + writeContinue(request, response); + return receiveData(request, log, next); + }, + (value, next) => + parseString(value, { explicitArray: false }, (err, parsed) => { + // Convert the received XML to a JS object + if (err) { + return next(errors.MalformedXML); + } + return next(null, parsed); + }), + (parsedXML, next) => { + const capabilities = bucketMd._capabilities || { + VeeamSOSApi: {}, + }; + // Validate the JS object schema with joi and prepare the object for + // further logic + const validateFn = isSystemXML(request.objectKey) ? 
parseSystemSchema : parseCapacitySchema; + let validatedData = null; + try { + validatedData = validateFn(parsedXML); + } catch (err) { + log.error('xml file did not pass validation', { err }); + return next(errors.MalformedXML); + } + const file = getFileToBuild(request, validatedData, true); + if (file.error) { + return next(file.error); + } + capabilities.VeeamSOSApi = { + ...(capabilities.VeeamSOSApi || {}), + ...file.value, + }; + // Write data to bucketMD with the same (validated) format + // eslint-disable-next-line no-param-reassign + bucketMd = { + ...bucketMd, + _capabilities: capabilities, + }; + // Update bucket metadata + return metadata.updateBucketCapabilities( + request.bucketName, + bucketMd, + 'VeeamSOSApi', + file.fieldName, + file.value[file.fieldName], + log, + next + ); + }, + ], + err => { if (err) { - return next(errors.MalformedXML); + return responseXMLBody(err, null, response, log); } - return next(null, parsed); - }), - (parsedXML, next) => { - const capabilities = bucketMd._capabilities || { - VeeamSOSApi: {}, - }; - // Validate the JS object schema with joi and prepare the object for - // further logic - const validateFn = isSystemXML(request.objectKey) ? parseSystemSchema : parseCapacitySchema; - let validatedData = null; - try { - validatedData = validateFn(parsedXML); - } catch (err) { - log.error('xml file did not pass validation', { err }); - return next(errors.MalformedXML); - } - const file = getFileToBuild(request, validatedData, true); - if (file.error) { - return next(file.error); - } - capabilities.VeeamSOSApi = { - ...(capabilities.VeeamSOSApi || {}), - ...file.value, - }; - // Write data to bucketMD with the same (validated) format - // eslint-disable-next-line no-param-reassign - bucketMd = { - ...bucketMd, - _capabilities: capabilities, - }; - // Update bucket metadata - return metadata.updateBucketCapabilities( - request.bucketName, bucketMd, 'VeeamSOSApi', file.fieldName, file.value[file.fieldName], log, next); - } - ], err => { - if (err) { - return responseXMLBody(err, null, response, log); + return responseNoBody(null, null, response, 200, log); } - return responseNoBody(null, null, response, 200, log); - }); + ); } module.exports = putVeeamFile; diff --git a/lib/routes/veeam/schemas/capacity.js b/lib/routes/veeam/schemas/capacity.js index 3cd81f7b40..4897b1dae4 100644 --- a/lib/routes/veeam/schemas/capacity.js +++ b/lib/routes/veeam/schemas/capacity.js @@ -18,11 +18,13 @@ const { errors } = require('arsenal'); */ function validateCapacitySchema(parsedXML) { const schema = joi.object({ - CapacityInfo: joi.object({ - Capacity: joi.number().min(-1).integer().required(), - Available: joi.number().min(-1).integer().required(), - Used: joi.number().min(-1).integer().required(), - }).required(), + CapacityInfo: joi + .object({ + Capacity: joi.number().min(-1).integer().required(), + Available: joi.number().min(-1).integer().required(), + Used: joi.number().min(-1).integer().required(), + }) + .required(), }); const validatedData = schema.validate(parsedXML, { // Allow any unknown keys for future compatibility diff --git a/lib/routes/veeam/schemas/system.js b/lib/routes/veeam/schemas/system.js index 400bf22c32..b483ac9e78 100644 --- a/lib/routes/veeam/schemas/system.js +++ b/lib/routes/veeam/schemas/system.js @@ -3,29 +3,31 @@ const { errors, errorInstances } = require('arsenal'); // Allow supporting any version of the protocol const systemSchemasPerVersion = { - 'unsupported': joi.object({}), + unsupported: joi.object({}), '"1.0"': joi.object({ - 
SystemInfo: joi.object({ - ProtocolVersion: joi.string().required(), - ModelName: joi.string().required(), - ProtocolCapabilities: joi.object({ - CapacityInfo: joi.boolean().required(), - UploadSessions: joi.boolean().required(), - IAMSTS: joi.boolean().default(false), - }).required(), - APIEndpoints: joi.object({ - IAMEndpoint: joi.string().required(), - STSEndpoint: joi.string().required() - }), - SystemRecommendations: joi.object({ - S3ConcurrentTaskLimit: joi.number().min(0).default(64), - S3MultiObjectDeleteLimit: joi.number().min(1).default(1000), - StorageCurrentTasksLimit: joi.number().min(0).default(0), - KbBlockSize: joi.number() - .valid(256, 512, 1024, 2048, 4096, 8192) - .default(1024), - }), - }).required() + SystemInfo: joi + .object({ + ProtocolVersion: joi.string().required(), + ModelName: joi.string().required(), + ProtocolCapabilities: joi + .object({ + CapacityInfo: joi.boolean().required(), + UploadSessions: joi.boolean().required(), + IAMSTS: joi.boolean().default(false), + }) + .required(), + APIEndpoints: joi.object({ + IAMEndpoint: joi.string().required(), + STSEndpoint: joi.string().required(), + }), + SystemRecommendations: joi.object({ + S3ConcurrentTaskLimit: joi.number().min(0).default(64), + S3MultiObjectDeleteLimit: joi.number().min(1).default(1000), + StorageCurrentTasksLimit: joi.number().min(0).default(0), + KbBlockSize: joi.number().valid(256, 512, 1024, 2048, 4096, 8192).default(1024), + }), + }) + .required(), }), }; @@ -62,8 +64,9 @@ function validateSystemSchema(parsedXML) { const protocolVersion = parsedXML?.SystemInfo?.ProtocolVersion; let schema = systemSchemasPerVersion.unsupported; if (!protocolVersion) { - throw new Error(errorInstances.MalformedXML - .customizeDescription('ProtocolVersion must be set for the system.xml file')); + throw new Error( + errorInstances.MalformedXML.customizeDescription('ProtocolVersion must be set for the system.xml file') + ); } if (protocolVersion && protocolVersion in systemSchemasPerVersion) { schema = systemSchemasPerVersion[parsedXML?.SystemInfo?.ProtocolVersion]; @@ -80,8 +83,10 @@ function validateSystemSchema(parsedXML) { case '"1.0"': // Ensure conditional fields are set // IAMSTS === true implies that SystemInfo.APIEndpoints is defined - if (validatedData.value.SystemInfo.ProtocolCapabilities.IAMSTS - && !validatedData.value.SystemInfo.APIEndpoints) { + if ( + validatedData.value.SystemInfo.ProtocolCapabilities.IAMSTS && + !validatedData.value.SystemInfo.APIEndpoints + ) { throw new Error(errors.MalformedXML); } break; diff --git a/lib/routes/veeam/utils.js b/lib/routes/veeam/utils.js index 5ab082c6d5..5e7b641448 100644 --- a/lib/routes/veeam/utils.js +++ b/lib/routes/veeam/utils.js @@ -30,8 +30,11 @@ function receiveData(request, log, callback) { // Prevent memory overloads by limiting the size of the // received data. 
if (parsedContentLength > ContentLengthThreshold) { - return callback(errorInstances.InvalidInput - .customizeDescription(`maximum allowed content-length is ${ContentLengthThreshold} bytes`)); + return callback( + errorInstances.InvalidInput.customizeDescription( + `maximum allowed content-length is ${ContentLengthThreshold} bytes` + ) + ); } const value = Buffer.alloc(parsedContentLength); const cbOnce = jsutil.once(callback); @@ -49,8 +52,7 @@ function receiveData(request, log, callback) { }); dataStream.on('end', () => { if (exceeded) { - log.error('data stream exceed announced size', - { parsedContentLength, overflow: cursor }); + log.error('data stream exceed announced size', { parsedContentLength, overflow: cursor }); return callback(errors.InternalError); } else { return callback(null, value.toString()); @@ -79,17 +81,18 @@ function buildHeadXML(xmlContent) { * @returns {object} - response headers */ function getResponseHeader(request, bucket, dataBuffer, lastModified, log) { - const corsHeaders = collectCorsHeaders(request.headers.origin, - request.method, bucket); - const responseMetaHeaders = collectResponseHeaders({ - 'last-modified': lastModified || new Date().toISOString(), - 'content-md5': crypto - .createHash('md5') - .update(dataBuffer) - .digest('hex'), - 'content-length': dataBuffer.byteLength, - 'content-type': 'text/xml', - }, corsHeaders, null, false); + const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); + const responseMetaHeaders = collectResponseHeaders( + { + 'last-modified': lastModified || new Date().toISOString(), + 'content-md5': crypto.createHash('md5').update(dataBuffer).digest('hex'), + 'content-length': dataBuffer.byteLength, + 'content-type': 'text/xml', + }, + corsHeaders, + null, + false + ); responseMetaHeaders.versionId = 'null'; responseMetaHeaders['x-amz-id-2'] = log.getSerializedUids(); responseMetaHeaders['x-amz-request-id'] = log.getSerializedUids(); @@ -127,10 +130,10 @@ function respondWithData(request, response, log, bucket, data, lastModified) { try { response.setHeader(key, responseMetaHeaders[key]); } catch (e) { - log.debug('header can not be added ' + - 'to the response', { + log.debug('header can not be added ' + 'to the response', { header: responseMetaHeaders[key], - error: e.stack, method: 'routeVeeam/respondWithData' + error: e.stack, + method: 'routeVeeam/respondWithData', }); } } @@ -172,12 +175,11 @@ function isSystemXML(objectKey) { */ function getFileToBuild(request, data, inlineLastModified = false) { const _isSystemXML = isSystemXML(request.objectKey); - const fileToBuild = _isSystemXML ? data?.SystemInfo - : data?.CapacityInfo; + const fileToBuild = _isSystemXML ? data?.SystemInfo : data?.CapacityInfo; if (!fileToBuild) { return { error: errors.NoSuchKey }; } - const modified = fileToBuild.LastModified || (new Date()).toISOString(); + const modified = fileToBuild.LastModified || new Date().toISOString(); const fieldName = _isSystemXML ? 
'SystemInfo' : 'CapacityInfo'; if (inlineLastModified) { fileToBuild.LastModified = modified; diff --git a/lib/server.js b/lib/server.js index 4e302f9904..59cbbad7e4 100644 --- a/lib/server.js +++ b/lib/server.js @@ -14,15 +14,11 @@ const { blacklistedPrefixes } = require('../constants'); const api = require('./api/api'); const dataWrapper = require('./data/wrapper'); const kms = require('./kms/wrapper'); -const locationStorageCheck = - require('./api/apiUtils/object/locationStorageCheck'); +const locationStorageCheck = require('./api/apiUtils/object/locationStorageCheck'); const vault = require('./auth/vault'); const metadata = require('./metadata/wrapper'); const { initManagement } = require('./management'); -const { - initManagementClient, - isManagementAgentUsed, -} = require('./management/agentClient'); +const { initManagementClient, isManagementAgentUsed } = require('./management/agentClient'); const HttpAgent = require('agentkeepalive'); const QuotaService = require('./quotas/quotas'); @@ -40,8 +36,7 @@ updateAllEndpoints(); _config.on('location-constraints-update', () => { if (implName === 'multipleBackends') { const clients = parseLC(_config, vault); - client = new MultipleBackendGateway( - clients, metadata, locationStorageCheck); + client = new MultipleBackendGateway(clients, metadata, locationStorageCheck); } }); @@ -53,8 +48,7 @@ if (_config.localCache) { // stats client const STATS_INTERVAL = 5; // 5 seconds const STATS_EXPIRY = 30; // 30 seconds -const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL, - STATS_EXPIRY); +const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL, STATS_EXPIRY); const enableRemoteManagement = true; class S3Server { @@ -78,7 +72,7 @@ class S3Server { process.on('SIGHUP', this.cleanUp.bind(this)); process.on('SIGQUIT', this.cleanUp.bind(this)); process.on('SIGTERM', this.cleanUp.bind(this)); - process.on('SIGPIPE', () => { }); + process.on('SIGPIPE', () => {}); // This will pick up exceptions up the stack process.on('uncaughtException', err => { // If just send the error object results in empty @@ -140,9 +134,7 @@ class S3Server { labels.action = req.apiMethod; } monitoringClient.httpRequestsTotal.labels(labels).inc(); - monitoringClient.httpRequestDurationSeconds - .labels(labels) - .observe(responseTimeInNs / 1e9); + monitoringClient.httpRequestDurationSeconds.labels(labels).observe(responseTimeInNs / 1e9); monitoringClient.httpActiveRequests.dec(); }; res.on('close', monitorEndOfRequest); @@ -195,14 +187,13 @@ class S3Server { }; let reqUids = req.headers['x-scal-request-uids']; - if (reqUids !== undefined && !/*isValidReqUids*/(reqUids.length < 128)) { + if (reqUids !== undefined && !(/*isValidReqUids*/ (reqUids.length < 128))) { // simply ignore invalid id (any user can provide an // invalid request ID through a crafted header) reqUids = undefined; } - const log = (reqUids !== undefined ? - logger.newRequestLoggerFromSerializedUids(reqUids) : - logger.newRequestLogger()); + const log = + reqUids !== undefined ? 
logger.newRequestLoggerFromSerializedUids(reqUids) : logger.newRequestLogger(); log.end().addDefaultFields(clientInfo); log.debug('received admin request', clientInfo); @@ -254,8 +245,7 @@ class S3Server { server.requestTimeout = 0; // disabling request timeout server.on('connection', socket => { - socket.on('error', err => logger.info('request rejected', - { error: err })); + socket.on('error', err => logger.info('request rejected', { error: err })); }); // https://nodejs.org/dist/latest-v6.x/ @@ -271,8 +261,11 @@ class S3Server { }; const { address } = addr; logger.info('server started', { - address, port, - pid: process.pid, serverIP: address, serverPort: port + address, + port, + pid: process.pid, + serverIP: address, + serverPort: port, }); }); @@ -290,9 +283,9 @@ class S3Server { */ cleanUp() { logger.info('server shutting down'); - Promise.all(this.servers.map(server => - new Promise(resolve => server.close(resolve)) - )).then(() => process.exit(0)); + Promise.all(this.servers.map(server => new Promise(resolve => server.close(resolve)))).then(() => + process.exit(0) + ); } caughtExceptionShutdown() { @@ -311,10 +304,7 @@ class S3Server { } initiateStartup(log) { - series([ - next => metadata.setup(next), - next => clientCheck(true, log, next), - ], (err, results) => { + series([next => metadata.setup(next), next => clientCheck(true, log, next)], (err, results) => { if (err) { log.warn('initial health check failed, delaying startup', { error: err, diff --git a/lib/services.js b/lib/services.js index d26d1eb1fb..c35aa25479 100644 --- a/lib/services.js +++ b/lib/services.js @@ -11,8 +11,7 @@ const constants = require('../constants'); const { config } = require('./Config'); const { data } = require('./data/wrapper'); const metadata = require('./metadata/wrapper'); -const { setObjectLockInformation } - = require('./api/apiUtils/object/objectLockHelpers'); +const { setObjectLockInformation } = require('./api/apiUtils/object/objectLockHelpers'); const removeAWSChunked = require('./api/apiUtils/object/removeAWSChunked'); const { parseTagFromQuery } = s3middleware.tagging; @@ -39,52 +38,53 @@ const services = { // (without special increase) // TODO: Consider implementing pagination like object listing // with respect to bucket listing so can go beyond 10000 - metadata.listObject(bucketUsers, { prefix, maxKeys: 10000 }, log, - (err, listResponse) => { - // If MD responds with NoSuchBucket, this means the - // hidden usersBucket has not yet been created for - // the domain. If this is the case, it means - // that no buckets in this domain have been created so - // it follows that this particular user has no buckets. - // So, the get service listing should not have any - // buckets to list. By returning an empty array, the - // getService API will just respond with the user info - // without listing any buckets. - if (err?.is?.NoSuchBucket) { - log.trace('no buckets found'); - // If we checked the old user bucket, that means we - // already checked the new user bucket. 
If neither the - // old user bucket or the new user bucket exist, no buckets - // have yet been created in the namespace so an empty - // listing should be returned - if (overrideUserbucket) { - return cb(null, [], splitter); - } - // Since there were no results from checking the - // new users bucket, we check the old users bucket - return this.getService(authInfo, request, log, - constants.oldSplitter, cb, oldUsersBucket); - } - if (err) { - log.error('error from metadata', { error: err }); - return cb(err); + metadata.listObject(bucketUsers, { prefix, maxKeys: 10000 }, log, (err, listResponse) => { + // If MD responds with NoSuchBucket, this means the + // hidden usersBucket has not yet been created for + // the domain. If this is the case, it means + // that no buckets in this domain have been created so + // it follows that this particular user has no buckets. + // So, the get service listing should not have any + // buckets to list. By returning an empty array, the + // getService API will just respond with the user info + // without listing any buckets. + if (err?.is?.NoSuchBucket) { + log.trace('no buckets found'); + // If we checked the old user bucket, that means we + // already checked the new user bucket. If neither the + // old user bucket or the new user bucket exist, no buckets + // have yet been created in the namespace so an empty + // listing should be returned + if (overrideUserbucket) { + return cb(null, [], splitter); } - return cb(null, listResponse.Contents, splitter); - }); + // Since there were no results from checking the + // new users bucket, we check the old users bucket + return this.getService(authInfo, request, log, constants.oldSplitter, cb, oldUsersBucket); + } + if (err) { + log.error('error from metadata', { error: err }); + return cb(err); + } + return cb(null, listResponse.Contents, splitter); + }); }, - /** - * Check that hashedStream.completedHash matches header contentMd5. - * @param {object} contentMD5 - content-md5 header - * @param {string} completedHash - hashed stream once completed - * @param {RequestLogger} log - the current request logger - * @return {boolean} - true if contentMD5 matches or is undefined, - * false otherwise - */ + /** + * Check that hashedStream.completedHash matches header contentMd5. 
+ * @param {object} contentMD5 - content-md5 header + * @param {string} completedHash - hashed stream once completed + * @param {RequestLogger} log - the current request logger + * @return {boolean} - true if contentMD5 matches or is undefined, + * false otherwise + */ checkHashMatchMD5(contentMD5, completedHash, log) { if (contentMD5 && completedHash && contentMD5 !== completedHash) { - log.debug('contentMD5 and completedHash does not match', - { method: 'checkHashMatchMD5', completedHash, contentMD5 }); + log.debug('contentMD5 and completedHash does not match', { + method: 'checkHashMatchMD5', + completedHash, + contentMD5, + }); return false; } return true; @@ -101,15 +101,45 @@ const services = { * @return {function} executes callback with err or ETag as arguments */ metadataStoreObject(bucketName, dataGetInfo, cipherBundle, params, cb) { - const { objectKey, authInfo, size, contentMD5, metaHeaders, - contentType, cacheControl, contentDisposition, contentEncoding, - expires, multipart, headers, overrideMetadata, log, - lastModifiedDate, versioning, versionId, uploadId, - tagging, taggingCopy, replicationInfo, defaultRetention, - dataStoreName, creationTime, retentionMode, retentionDate, - legalHold, originOp, updateMicroVersionId, archive, oldReplayId, - deleteNullKey, amzStorageClass, overheadField, needOplogUpdate, - restoredEtag, bucketOwnerId } = params; + const { + objectKey, + authInfo, + size, + contentMD5, + metaHeaders, + contentType, + cacheControl, + contentDisposition, + contentEncoding, + expires, + multipart, + headers, + overrideMetadata, + log, + lastModifiedDate, + versioning, + versionId, + uploadId, + tagging, + taggingCopy, + replicationInfo, + defaultRetention, + dataStoreName, + creationTime, + retentionMode, + retentionDate, + legalHold, + originOp, + updateMicroVersionId, + archive, + oldReplayId, + deleteNullKey, + amzStorageClass, + overheadField, + needOplogUpdate, + restoredEtag, + bucketOwnerId, + } = params; log.trace('storing object in metadata'); assert.strictEqual(typeof bucketName, 'string'); const md = new ObjectMD(); @@ -191,12 +221,15 @@ const services = { // update restore if (archive) { md.setAmzStorageClass(amzStorageClass); - md.setArchive(new ObjectMDArchive( - archive.archiveInfo, - archive.restoreRequestedAt, - archive.restoreRequestedDays, - archive.restoreCompletedAt, - archive.restoreWillExpireAt)); + md.setArchive( + new ObjectMDArchive( + archive.archiveInfo, + archive.restoreRequestedAt, + archive.restoreRequestedDays, + archive.restoreCompletedAt, + archive.restoreWillExpireAt + ) + ); md.setAmzRestore({ 'ongoing-request': false, 'expiry-date': archive.restoreWillExpireAt, @@ -282,54 +315,56 @@ const services = { // If this is not the completion of a multipart upload or // the creation of a delete marker, parse the headers to // get the ACL's if any - return async.waterfall([ - callback => { - if (multipart || md.getIsDeleteMarker()) { - return callback(); + return async.waterfall( + [ + callback => { + if (multipart || md.getIsDeleteMarker()) { + return callback(); + } + const parseAclParams = { + headers, + resourceType: 'object', + acl: md.getAcl(), + log, + }; + log.trace('parsing acl from headers'); + acl.parseAclFromHeaders(parseAclParams, (err, parsedACL) => { + if (err) { + log.debug('error parsing acl', { error: err }); + return callback(err); + } + md.setAcl(parsedACL); + return callback(); + }); + return null; + }, + callback => metadata.putObjectMD(bucketName, objectKey, md, options, log, callback), + ], + (err, data) => { 
+ if (err) { + log.error('error from metadata', { error: err }); + return cb(err); } - const parseAclParams = { - headers, - resourceType: 'object', - acl: md.getAcl(), - log, - }; - log.trace('parsing acl from headers'); - acl.parseAclFromHeaders(parseAclParams, (err, parsedACL) => { - if (err) { - log.debug('error parsing acl', { error: err }); - return callback(err); + log.trace('object successfully stored in metadata'); + // if versioning is enabled, data will be returned from metadata + // as JSON containing a versionId which some APIs will need sent + // back to them + let versionId; + if (data) { + if (params.isNull && params.isDeleteMarker) { + versionId = 'null'; + } else if (!params.isNull) { + versionId = JSON.parse(data).versionId; } - md.setAcl(parsedACL); - return callback(); - }); - return null; - }, - callback => metadata.putObjectMD(bucketName, objectKey, md, - options, log, callback), - ], (err, data) => { - if (err) { - log.error('error from metadata', { error: err }); - return cb(err); - } - log.trace('object successfully stored in metadata'); - // if versioning is enabled, data will be returned from metadata - // as JSON containing a versionId which some APIs will need sent - // back to them - let versionId; - if (data) { - if (params.isNull && params.isDeleteMarker) { - versionId = 'null'; - } else if (!params.isNull) { - versionId = JSON.parse(data).versionId; } + return cb(err, { + lastModified: md.getLastModified(), + tags: md.getTags(), + contentMD5, + versionId, + }); } - return cb(err, { - lastModified: md.getLastModified(), - tags: md.getTags(), - contentMD5, - versionId, - }); - }); + ); }, /** @@ -352,7 +387,11 @@ const services = { assert.strictEqual(typeof objectMD, 'object'); function deleteMDandData() { - return metadata.deleteObjectMD(bucketName, objectKey, options, log, + return metadata.deleteObjectMD( + bucketName, + objectKey, + options, + log, (err, res) => { if (err) { return cb(err, res); @@ -363,8 +402,7 @@ const services = { } if (deferLocationDeletion) { - return cb(null, Array.isArray(objectMD.location) - ? objectMD.location : [objectMD.location]); + return cb(null, Array.isArray(objectMD.location) ? 
objectMD.location : [objectMD.location]); } if (!Array.isArray(objectMD.location)) { @@ -378,14 +416,15 @@ const services = { } return cb(null, res); }); - }, originOp); + }, + originOp + ); } const objGetInfo = objectMD.location; // special case that prevents azure blocks from unecessary deletion // will return null if no need - return data.protectAzureBlocks(bucketName, objectKey, objGetInfo, - log, err => { + return data.protectAzureBlocks(bucketName, objectKey, objGetInfo, log, err => { if (err) { return cb(err); } @@ -405,16 +444,14 @@ const services = { */ getObjectListing(bucketName, listingParams, log, cb) { assert.strictEqual(typeof bucketName, 'string'); - log.trace('performing metadata get object listing', - { listingParams }); - metadata.listObject(bucketName, listingParams, log, - (err, listResponse) => { - if (err) { - log.debug('error from metadata', { error: err }); - return cb(err); - } - return cb(null, listResponse); - }); + log.trace('performing metadata get object listing', { listingParams }); + metadata.listObject(bucketName, listingParams, log, (err, listResponse) => { + if (err) { + log.debug('error from metadata', { error: err }); + return cb(err); + } + return cb(null, listResponse); + }); }, /** @@ -429,16 +466,14 @@ const services = { */ getLifecycleListing(bucketName, listingParams, log, cb) { assert.strictEqual(typeof bucketName, 'string'); - log.trace('performing metadata get object listing for lifecycle', - { listingParams }); - metadata.listLifecycleObject(bucketName, listingParams, log, - (err, listResponse) => { - if (err) { - log.debug('error from metadata', { error: err }); - return cb(err); - } - return cb(null, listResponse); - }); + log.trace('performing metadata get object listing for lifecycle', { listingParams }); + metadata.listLifecycleObject(bucketName, listingParams, log, (err, listResponse) => { + if (err) { + log.debug('error from metadata', { error: err }); + return cb(err); + } + return cb(null, listResponse); + }); }, metadataStoreMPObject(bucketName, cipherBundle, params, log, cb) { @@ -451,9 +486,7 @@ const services = { // the splitter. 
// 2) UploadId's are UUID version 4 const splitter = params.splitter; - const longMPUIdentifier = - `overview${splitter}${params.objectKey}` + - `${splitter}${params.uploadId}`; + const longMPUIdentifier = `overview${splitter}${params.objectKey}` + `${splitter}${params.uploadId}`; const multipartObjectMD = {}; multipartObjectMD.id = params.uploadId; multipartObjectMD.eventualStorageBucket = params.eventualStorageBucket; @@ -475,28 +508,19 @@ const services = { multipartObjectMD.key = params.objectKey; multipartObjectMD.uploadId = params.uploadId; multipartObjectMD['cache-control'] = params.headers['cache-control']; - multipartObjectMD['content-disposition'] = - params.headers['content-disposition']; - multipartObjectMD['content-encoding'] = - removeAWSChunked(params.headers['content-encoding']); - multipartObjectMD['content-type'] = - params.headers['content-type']; - multipartObjectMD.expires = - params.headers.expires; - multipartObjectMD['x-amz-storage-class'] = params.storageClass; // TODO: removed CLDSRV-639 - multipartObjectMD['x-amz-website-redirect-location'] = - params.headers['x-amz-website-redirect-location']; + multipartObjectMD['content-disposition'] = params.headers['content-disposition']; + multipartObjectMD['content-encoding'] = removeAWSChunked(params.headers['content-encoding']); + multipartObjectMD['content-type'] = params.headers['content-type']; + multipartObjectMD.expires = params.headers.expires; + multipartObjectMD['x-amz-storage-class'] = params.storageClass; // TODO: removed CLDSRV-639 + multipartObjectMD['x-amz-website-redirect-location'] = params.headers['x-amz-website-redirect-location']; if (cipherBundle) { - multipartObjectMD['x-amz-server-side-encryption'] = - cipherBundle.algorithm; + multipartObjectMD['x-amz-server-side-encryption'] = cipherBundle.algorithm; if (cipherBundle.masterKeyId) { - multipartObjectMD[ - 'x-amz-server-side-encryption-aws-kms-key-id'] = - cipherBundle.masterKeyId; + multipartObjectMD['x-amz-server-side-encryption-aws-kms-key-id'] = cipherBundle.masterKeyId; } } - multipartObjectMD.controllingLocationConstraint = - params.controllingLocationConstraint; + multipartObjectMD.controllingLocationConstraint = params.controllingLocationConstraint; multipartObjectMD.dataStoreName = params.dataStoreName; if (params.tagging) { const validationTagRes = parseTagFromQuery(params.tagging); @@ -539,15 +563,14 @@ const services = { return cb(err); } multipartObjectMD.acl = parsedACL; - metadata.putObjectMD(bucketName, longMPUIdentifier, - multipartObjectMD, {}, log, err => { - if (err) { - log.error('error from metadata', { error: err }); - return cb(err); - } + metadata.putObjectMD(bucketName, longMPUIdentifier, multipartObjectMD, {}, log, err => { + if (err) { + log.error('error from metadata', { error: err }); + return cb(err); + } - return cb(null, multipartObjectMD); - }); + return cb(null, multipartObjectMD); + }); return undefined; }); }, @@ -574,12 +597,10 @@ const services = { assert.strictEqual(typeof params.splitter, 'string'); assert.strictEqual(typeof params.storedMetadata, 'object'); const splitter = params.splitter; - const longMPUIdentifier = - `overview${splitter}${params.objectKey}${splitter}${params.uploadId}`; + const longMPUIdentifier = `overview${splitter}${params.objectKey}${splitter}${params.uploadId}`; const multipartObjectMD = Object.assign({}, params.storedMetadata); multipartObjectMD.completeInProgress = true; - metadata.putObjectMD(params.bucketName, longMPUIdentifier, multipartObjectMD, - {}, log, err => { + 
metadata.putObjectMD(params.bucketName, longMPUIdentifier, multipartObjectMD, {}, log, err => { if (err) { log.error('error from metadata', { error: err }); return cb(err); @@ -611,20 +632,18 @@ const services = { const mpuBucketName = `${constants.mpuBucketPrefix}${params.bucketName}`; const splitter = params.splitter; - const mpuOverviewKey = - `overview${splitter}${params.objectKey}${splitter}${params.uploadId}`; - return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log, - (err, res) => { - if (err) { - log.error('error getting the overview object from mpu bucket', { - error: err, - method: 'services.isCompleteMPUInProgress', - params, - }); - return cb(err); - } - return cb(null, Boolean(res.completeInProgress)); - }); + const mpuOverviewKey = `overview${splitter}${params.objectKey}${splitter}${params.uploadId}`; + return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log, (err, res) => { + if (err) { + log.error('error getting the overview object from mpu bucket', { + error: err, + method: 'services.isCompleteMPUInProgress', + params, + }); + return cb(err); + } + return cb(null, Boolean(res.completeInProgress)); + }); }, /** @@ -641,8 +660,7 @@ const services = { * - the overview key stored metadata */ metadataValidateMultipart(params, cb) { - const { bucketName, uploadId, authInfo, - objectKey, requestType, log } = params; + const { bucketName, uploadId, authInfo, objectKey, requestType, log } = params; assert.strictEqual(typeof bucketName, 'string'); // This checks whether the mpu bucket exists. @@ -650,13 +668,11 @@ const services = { const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`; metadata.getBucket(mpuBucketName, log, (err, mpuBucket) => { if (err?.is?.NoSuchBucket) { - log.debug('bucket not found in metadata', { error: err, - method: 'services.metadataValidateMultipart' }); + log.debug('bucket not found in metadata', { error: err, method: 'services.metadataValidateMultipart' }); return cb(errors.NoSuchUpload); } if (err) { - log.error('error from metadata', { error: err, - method: 'services.metadataValidateMultipart' }); + log.error('error from metadata', { error: err, method: 'services.metadataValidateMultipart' }); return cb(err); } @@ -665,84 +681,75 @@ const services = { if (mpuBucket.getMdBucketModelVersion() < 2) { splitter = constants.oldSplitter; } - const mpuOverviewKey = - `overview${splitter}${objectKey}${splitter}${uploadId}`; + const mpuOverviewKey = `overview${splitter}${objectKey}${splitter}${uploadId}`; - metadata.getObjectMD(mpuBucket.getName(), mpuOverviewKey, - {}, log, (err, storedMetadata) => { - if (err) { - if (err.is.NoSuchKey) { - return cb(errors.NoSuchUpload); - } - log.error('error from metadata', { error: err }); - return cb(err); + metadata.getObjectMD(mpuBucket.getName(), mpuOverviewKey, {}, log, (err, storedMetadata) => { + if (err) { + if (err.is.NoSuchKey) { + return cb(errors.NoSuchUpload); } + log.error('error from metadata', { error: err }); + return cb(err); + } - const initiatorID = storedMetadata.initiator.ID; - const ownerID = storedMetadata['owner-id']; - const mpuOverview = { - key: storedMetadata.key, - id: storedMetadata.id, - eventualStorageBucket: - storedMetadata.eventualStorageBucket, - initiatorID, - initiatorDisplayName: - storedMetadata.initiator.DisplayName, - ownerID, - ownerDisplayName: - storedMetadata['owner-display-name'], - storageClass: - storedMetadata['x-amz-storage-class'], - initiated: storedMetadata.initiated, - controllingLocationConstraint: - 
storedMetadata.controllingLocationConstraint, - }; + const initiatorID = storedMetadata.initiator.ID; + const ownerID = storedMetadata['owner-id']; + const mpuOverview = { + key: storedMetadata.key, + id: storedMetadata.id, + eventualStorageBucket: storedMetadata.eventualStorageBucket, + initiatorID, + initiatorDisplayName: storedMetadata.initiator.DisplayName, + ownerID, + ownerDisplayName: storedMetadata['owner-display-name'], + storageClass: storedMetadata['x-amz-storage-class'], + initiated: storedMetadata.initiated, + controllingLocationConstraint: storedMetadata.controllingLocationConstraint, + }; - const tagging = storedMetadata['x-amz-tagging']; - if (tagging) { - mpuOverview.tagging = tagging; - } - // If access was provided by the destination bucket's - // bucket policies, go ahead. - if (requestType === 'bucketPolicyGoAhead') { - return cb(null, mpuBucket, mpuOverview, storedMetadata); - } + const tagging = storedMetadata['x-amz-tagging']; + if (tagging) { + mpuOverview.tagging = tagging; + } + // If access was provided by the destination bucket's + // bucket policies, go ahead. + if (requestType === 'bucketPolicyGoAhead') { + return cb(null, mpuBucket, mpuOverview, storedMetadata); + } - const requesterID = authInfo.isRequesterAnIAMUser() ? - authInfo.getArn() : authInfo.getCanonicalID(); - const isRequesterInitiator = - initiatorID === requesterID; - const isRequesterParentAccountOfInitiator = - ownerID === authInfo.getCanonicalID(); - if (requestType === 'putPart or complete') { - // Only the initiator of the multipart - // upload can upload a part or complete the mpu - if (!isRequesterInitiator) { - return cb(errors.AccessDenied); - } + const requesterID = authInfo.isRequesterAnIAMUser() ? authInfo.getArn() : authInfo.getCanonicalID(); + const isRequesterInitiator = initiatorID === requesterID; + const isRequesterParentAccountOfInitiator = ownerID === authInfo.getCanonicalID(); + if (requestType === 'putPart or complete') { + // Only the initiator of the multipart + // upload can upload a part or complete the mpu + if (!isRequesterInitiator) { + return cb(errors.AccessDenied); } - if (requestType === 'deleteMPU' - || requestType === 'listParts') { - // In order for account/user to be - // authorized must either be the - // bucket owner or intitator of - // the multipart upload request - // (or parent account of initiator). - // In addition if the bucket policy - // designates someone else with - // s3:AbortMultipartUpload or - // s3:ListMultipartUploadPartsrights, - // as applicable, that account/user will have the right. - // If got to this step, it means there is - // no bucket policy on this. - if (mpuBucket.getOwner() !== authInfo.getCanonicalID() - && !isRequesterInitiator - && !isRequesterParentAccountOfInitiator) { - return cb(errors.AccessDenied); - } + } + if (requestType === 'deleteMPU' || requestType === 'listParts') { + // In order for account/user to be + // authorized must either be the + // bucket owner or intitator of + // the multipart upload request + // (or parent account of initiator). + // In addition if the bucket policy + // designates someone else with + // s3:AbortMultipartUpload or + // s3:ListMultipartUploadPartsrights, + // as applicable, that account/user will have the right. + // If got to this step, it means there is + // no bucket policy on this. 
+ if ( + mpuBucket.getOwner() !== authInfo.getCanonicalID() && + !isRequesterInitiator && + !isRequesterParentAccountOfInitiator + ) { + return cb(errors.AccessDenied); } - return cb(null, mpuBucket, mpuOverview, storedMetadata); - }); + } + return cb(null, mpuBucket, mpuOverview, storedMetadata); + }); return undefined; }); }, @@ -764,13 +771,11 @@ const services = { * @param {function} cb - callback to send error or move to next task * @return {undefined} */ - metadataStorePart(mpuBucketName, partLocations, - metaStoreParams, log, cb) { + metadataStorePart(mpuBucketName, partLocations, metaStoreParams, log, cb) { assert.strictEqual(typeof mpuBucketName, 'string'); - const { partNumber, contentMD5, size, uploadId, lastModified, splitter, overheadField, ownerId } - = metaStoreParams; - const dateModified = typeof lastModified === 'string' ? - lastModified : new Date().toJSON(); + const { partNumber, contentMD5, size, uploadId, lastModified, splitter, overheadField, ownerId } = + metaStoreParams; + const dateModified = typeof lastModified === 'string' ? lastModified : new Date().toJSON(); assert.strictEqual(typeof splitter, 'string'); const partKey = `${uploadId}${splitter}${partNumber}`; const omVal = { @@ -778,7 +783,7 @@ const services = { // from an object to an array 'md-model-version': 3, partLocations, - 'key': partKey, + key: partKey, 'last-modified': dateModified, 'content-md5': contentMD5, 'content-length': size, @@ -800,14 +805,14 @@ const services = { }, /** - * Gets list of open multipart uploads in bucket - * @param {object} MPUbucketName - bucket in which objectMetadata is stored - * @param {object} listingParams - params object passing on - * needed items from request object - * @param {object} log - Werelogs logger - * @param {function} cb - callback to listMultipartUploads.js - * @return {undefined} - */ + * Gets list of open multipart uploads in bucket + * @param {object} MPUbucketName - bucket in which objectMetadata is stored + * @param {object} listingParams - params object passing on + * needed items from request object + * @param {object} log - Werelogs logger + * @param {function} cb - callback to listMultipartUploads.js + * @return {undefined} + */ getMultipartUploadListing(MPUbucketName, listingParams, log, cb) { assert.strictEqual(typeof MPUbucketName, 'string'); assert.strictEqual(typeof listingParams.splitter, 'string'); @@ -834,8 +839,7 @@ const services = { if (bucket.getMdBucketModelVersion() < 2) { listParams.splitter = constants.oldSplitter; } - metadata.listMultipartUploads(MPUbucketName, listParams, log, - cb); + metadata.listMultipartUploads(MPUbucketName, listParams, log, cb); return undefined; }); }, @@ -857,24 +861,26 @@ const services = { if (err?.is?.NoSuchBucket) { log.trace('no buckets found'); const creationDate = new Date().toJSON(); - const mpuBucket = new BucketInfo(MPUBucketName, + const mpuBucket = new BucketInfo( + MPUBucketName, destinationBucket.getOwner(), - destinationBucket.getOwnerDisplayName(), creationDate, - BucketInfo.currentModelVersion()); + destinationBucket.getOwnerDisplayName(), + creationDate, + BucketInfo.currentModelVersion() + ); // Note that unlike during the creation of a normal bucket, // we do NOT add this bucket to the lists of a user's buckets. // By not adding this bucket to the lists of a user's buckets, // a getService request should not return a reference to this // bucket. This is the desired behavior since this should be // a hidden bucket. 
- return metadata.createBucket(MPUBucketName, mpuBucket, log, - err => { - if (err) { - log.error('error from metadata', { error: err }); - return cb(err); - } - return cb(null, mpuBucket); - }); + return metadata.createBucket(MPUBucketName, mpuBucket, log, err => { + if (err) { + log.error('error from metadata', { error: err }); + return cb(err); + } + return cb(null, mpuBucket); + }); } if (err) { log.error('error from metadata', { @@ -899,8 +905,7 @@ const services = { }, getSomeMPUparts(params, cb) { - const { uploadId, mpuBucketName, maxParts, partNumberMarker, log } = - params; + const { uploadId, mpuBucketName, maxParts, partNumberMarker, log } = params; assert.strictEqual(typeof mpuBucketName, 'string'); assert.strictEqual(typeof params.splitter, 'string'); const paddedPartNumber = `000000${partNumberMarker}`.substr(-5); @@ -917,9 +922,14 @@ const services = { // If have efficient way to batch delete metadata, should so this // all at once in production implementation assert.strictEqual(typeof mpuBucketName, 'string'); - async.eachLimit(keysToDelete, 5, (key, callback) => { - metadata.deleteObjectMD(mpuBucketName, key, { overheadField: constants.overheadField }, log, callback); - }, err => cb(err)); + async.eachLimit( + keysToDelete, + 5, + (key, callback) => { + metadata.deleteObjectMD(mpuBucketName, key, { overheadField: constants.overheadField }, log, callback); + }, + err => cb(err) + ); }, }; diff --git a/lib/utapi/utapi.js b/lib/utapi/utapi.js index 83c6925540..ba6dc9279a 100644 --- a/lib/utapi/utapi.js +++ b/lib/utapi/utapi.js @@ -4,8 +4,7 @@ const { utapiVersion, UtapiServer: utapiServer } = require('utapi'); // start utapi server if (utapiVersion === 1 && _config.utapi) { - const fullConfig = Object.assign({}, _config.utapi, - { redis: _config.redis }); + const fullConfig = Object.assign({}, _config.utapi, { redis: _config.redis }); if (_config.vaultd) { Object.assign(fullConfig, { vaultd: _config.vaultd }); } diff --git a/lib/utapi/utapiReindex.js b/lib/utapi/utapiReindex.js index 9bc1522355..ad58f02e81 100644 --- a/lib/utapi/utapiReindex.js +++ b/lib/utapi/utapiReindex.js @@ -4,8 +4,7 @@ const { config } = require('../Config'); const reindexConfig = config.utapi && config.utapi.reindex; if (reindexConfig && reindexConfig.password === undefined) { - reindexConfig.password = config.utapi && config.utapi.redis - && config.utapi.redis.password; + reindexConfig.password = config.utapi && config.utapi.redis && config.utapi.redis.password; } const reindex = new UtapiReindex(reindexConfig); reindex.start(); diff --git a/lib/utapi/utapiReplay.js b/lib/utapi/utapiReplay.js index e958d83da0..dba3f5a006 100644 --- a/lib/utapi/utapiReplay.js +++ b/lib/utapi/utapiReplay.js @@ -2,7 +2,6 @@ require('werelogs').stderrUtils.catchAndTimestampStderr(); const UtapiReplay = require('utapi').UtapiReplay; const _config = require('../Config').config; -const utapiConfig = _config.utapi && - Object.assign({}, _config.utapi, { redis: _config.redis }); +const utapiConfig = _config.utapi && Object.assign({}, _config.utapi, { redis: _config.redis }); const replay = new UtapiReplay(utapiConfig); // start utapi server replay.start(); diff --git a/lib/utapi/utilities.js b/lib/utapi/utilities.js index b29ed172bd..251d51435c 100644 --- a/lib/utapi/utilities.js +++ b/lib/utapi/utilities.js @@ -16,10 +16,13 @@ if (utapiVersion === 1 && _config.utapi) { utapiConfig = Object.assign({}, _config.utapi); } } else if (utapiVersion === 2) { - utapiConfig = Object.assign({ - tls: _config.https, - 
suppressedEventFields, - }, _config.utapi || {}); + utapiConfig = Object.assign( + { + tls: _config.https, + suppressedEventFields, + }, + _config.utapi || {} + ); } const utapi = new UtapiClient(utapiConfig); @@ -37,8 +40,7 @@ const bucketOwnerMetrics = [ function evalAuthInfo(authInfo, canonicalID, action) { let accountId = authInfo.getCanonicalID(); - let userId = authInfo.isRequesterAnIAMUser() ? - authInfo.getShortid() : undefined; + let userId = authInfo.isRequesterAnIAMUser() ? authInfo.getShortid() : undefined; // If action impacts 'numberOfObjectsStored' or 'storageUtilized' metric // only the bucket owner account's metrics should be updated const canonicalIdMatch = authInfo.getCanonicalID() === canonicalID; @@ -52,21 +54,10 @@ function evalAuthInfo(authInfo, canonicalID, action) { }; } -function _listMetrics(host, - port, - metric, - metricType, - timeRange, - accessKey, - secretKey, - verbose, - recent, - ssl) { +function _listMetrics(host, port, metric, metricType, timeRange, accessKey, secretKey, verbose, recent, ssl) { const listAction = recent ? 'ListRecentMetrics' : 'ListMetrics'; // If recent listing, we do not provide `timeRange` in the request - const requestObj = recent - ? { [metric]: metricType } - : { timeRange, [metric]: metricType }; + const requestObj = recent ? { [metric]: metricType } : { timeRange, [metric]: metricType }; const requestBody = JSON.stringify(requestObj); const options = { host, @@ -108,8 +99,7 @@ function _listMetrics(host, }); // TODO: cleanup with refactor of generateV4Headers request.path = `/${metric}`; - auth.client.generateV4Headers(request, { Action: listAction }, - accessKey, secretKey, 's3'); + auth.client.generateV4Headers(request, { Action: listAction }, accessKey, secretKey, 's3'); request.path = `/${metric}?Action=${listAction}`; if (verbose) { logger.info('request headers', { headers: request._headers }); @@ -136,24 +126,18 @@ function listMetrics(metricType) { // bin/list_bucket_metrics.js when prior method of listing bucket metrics is // no longer supported. 
if (metricType === 'buckets') { - commander - .option('-b, --buckets ', 'Name of bucket(s) with ' + - 'a comma separator if more than one'); + commander.option('-b, --buckets ', 'Name of bucket(s) with ' + 'a comma separator if more than one'); } else { commander .option('-m, --metric ', 'Metric type') - .option('--buckets ', 'Name of bucket(s) with a comma ' + - 'separator if more than one') - .option('--accounts ', 'Account ID(s) with a comma ' + - 'separator if more than one') - .option('--users ', 'User ID(s) with a comma separator if ' + - 'more than one') + .option('--buckets ', 'Name of bucket(s) with a comma ' + 'separator if more than one') + .option('--accounts ', 'Account ID(s) with a comma ' + 'separator if more than one') + .option('--users ', 'User ID(s) with a comma separator if ' + 'more than one') .option('--service ', 'Name of service'); } commander .option('-s, --start ', 'Start of time range') - .option('-r, --recent', 'List metrics including the previous and ' + - 'current 15 minute interval') + .option('-r, --recent', 'List metrics including the previous and ' + 'current 15 minute interval') .option('-e --end ', 'End of time range') .option('-h, --host ', 'Host of the server') .option('-p, --port ', 'Port of the server') @@ -161,17 +145,14 @@ function listMetrics(metricType) { .option('-v, --verbose') .parse(process.argv); - const { host, port, accessKey, secretKey, start, end, verbose, recent, - ssl } = - commander; + const { host, port, accessKey, secretKey, start, end, verbose, recent, ssl } = commander; const requiredOptions = { host, port, accessKey, secretKey }; // If not old style bucket metrics, we require usage of the metric option if (metricType !== 'buckets') { requiredOptions.metric = commander.metric; const validMetrics = ['buckets', 'accounts', 'users', 'service']; if (validMetrics.indexOf(commander.metric) < 0) { - logger.error('metric must be \'buckets\', \'accounts\', ' + - '\'users\', or \'service\''); + logger.error("metric must be 'buckets', 'accounts', " + "'users', or 'service'"); commander.outputHelp(); process.exit(1); return; @@ -229,8 +210,7 @@ function listMetrics(metricType) { } } - _listMetrics(host, port, metric, resources, timeRange, accessKey, secretKey, - verbose, recent, ssl); + _listMetrics(host, port, metric, resources, timeRange, accessKey, secretKey, verbose, recent, ssl); } /** @@ -290,9 +270,11 @@ function pushMetric(action, log, metricObj) { let objectDelta = isDelete ? 
-numberOfObjects : numberOfObjects; // putDeleteMarkerObject does not pass numberOfObjects - if ((action === 'putDeleteMarkerObject' && byteLength === null) - || action === 'replicateDelete' - || action === 'replicateObject') { + if ( + (action === 'putDeleteMarkerObject' && byteLength === null) || + action === 'replicateDelete' || + action === 'replicateObject' + ) { objectDelta = 1; } else if (action === 'multiObjectDelete') { objectDelta = -(numberOfObjects + removedDeleteMarkers); @@ -309,8 +291,10 @@ function pushMetric(action, log, metricObj) { }; // Any operation from lifecycle that does not change object count or size is dropped - const isLifecycle = _config.lifecycleRoleName - && authInfo && authInfo.arn.endsWith(`:assumed-role/${_config.lifecycleRoleName}/backbeat-lifecycle`); + const isLifecycle = + _config.lifecycleRoleName && + authInfo && + authInfo.arn.endsWith(`:assumed-role/${_config.lifecycleRoleName}/backbeat-lifecycle`); if (isLifecycle && !objectDelta && !sizeDelta) { log.trace('ignoring pushMetric from lifecycle service user', { action, bucket, keys }); return undefined; @@ -382,8 +366,7 @@ function getLocationMetric(location, log, cb) { */ function pushLocationMetric(location, byteLength, log, cb) { const locationId = _getLocationId(location); - return utapi.pushLocationMetric(locationId, byteLength, - log.getSerializedUids(), cb); + return utapi.pushLocationMetric(locationId, byteLength, log.getSerializedUids(), cb); } module.exports = { diff --git a/lib/utilities/aclUtils.js b/lib/utilities/aclUtils.js index 7a3b820369..18189df393 100644 --- a/lib/utilities/aclUtils.js +++ b/lib/utilities/aclUtils.js @@ -4,18 +4,18 @@ const { errors, s3middleware } = require('arsenal'); const constants = require('../../constants'); const escapeForXml = s3middleware.escapeForXml; -const possibleGrantHeaders = ['x-amz-grant-read', 'x-amz-grant-write', - 'x-amz-grant-read-acp', 'x-amz-grant-write-acp', - 'x-amz-grant-full-control']; +const possibleGrantHeaders = [ + 'x-amz-grant-read', + 'x-amz-grant-write', + 'x-amz-grant-read-acp', + 'x-amz-grant-write-acp', + 'x-amz-grant-full-control', +]; const regexpEmailAddress = /^\S+@\S+.\S+$/; const aclUtils = {}; -const grantsByURI = [ - constants.publicId, - constants.allAuthedUsersId, - constants.logId, -]; +const grantsByURI = [constants.publicId, constants.allAuthedUsersId, constants.logId]; /** * handleCannedGrant - Populate grantInfo for a bucketGetACL or objectGetACL @@ -26,86 +26,81 @@ const grantsByURI = [ * are different) * @returns {array} cannedGrants - containing canned ACL settings */ -aclUtils.handleCannedGrant = - function handleCannedGrant(grantType, - ownerGrant, separateBucketOwner) { - const cannedGrants = []; - const actions = { - 'private': () => { - cannedGrants.push(ownerGrant); - }, - 'public-read': () => { - const publicGrant = { - URI: constants.publicId, - permission: 'READ', - }; - cannedGrants.push(ownerGrant, publicGrant); - }, - 'public-read-write': () => { - const publicReadGrant = { - URI: constants.publicId, +aclUtils.handleCannedGrant = function handleCannedGrant(grantType, ownerGrant, separateBucketOwner) { + const cannedGrants = []; + const actions = { + private: () => { + cannedGrants.push(ownerGrant); + }, + 'public-read': () => { + const publicGrant = { + URI: constants.publicId, + permission: 'READ', + }; + cannedGrants.push(ownerGrant, publicGrant); + }, + 'public-read-write': () => { + const publicReadGrant = { + URI: constants.publicId, + permission: 'READ', + }; + const publicWriteGrant = { + 
URI: constants.publicId, + permission: 'WRITE', + }; + cannedGrants.push(ownerGrant, publicReadGrant, publicWriteGrant); + }, + 'authenticated-read': () => { + const authGrant = { + URI: constants.allAuthedUsersId, + permission: 'READ', + }; + cannedGrants.push(ownerGrant, authGrant); + }, + // Note: log-delivery-write is just for bucketGetACL + 'log-delivery-write': () => { + const logWriteGrant = { + URI: constants.logId, + permission: 'WRITE', + }; + const logReadACPGrant = { + URI: constants.logId, + permission: 'READ_ACP', + }; + cannedGrants.push(ownerGrant, logWriteGrant, logReadACPGrant); + }, + // Note: bucket-owner-read is just for objectGetACL + 'bucket-owner-read': () => { + // If the bucket owner and object owner are different, + // add separate entries for each + if (separateBucketOwner) { + const bucketOwnerReadGrant = { + ID: separateBucketOwner.getOwner(), + displayName: separateBucketOwner.getOwnerDisplayName(), permission: 'READ', }; - const publicWriteGrant = { - URI: constants.publicId, - permission: 'WRITE', - }; - cannedGrants. - push(ownerGrant, publicReadGrant, publicWriteGrant); - }, - 'authenticated-read': () => { - const authGrant = { - URI: constants.allAuthedUsersId, - permission: 'READ', - }; - cannedGrants.push(ownerGrant, authGrant); - }, - // Note: log-delivery-write is just for bucketGetACL - 'log-delivery-write': () => { - const logWriteGrant = { - URI: constants.logId, - permission: 'WRITE', - }; - const logReadACPGrant = { - URI: constants.logId, - permission: 'READ_ACP', + cannedGrants.push(ownerGrant, bucketOwnerReadGrant); + } else { + cannedGrants.push(ownerGrant); + } + }, + // Note: bucket-owner-full-control is just for objectGetACL + 'bucket-owner-full-control': () => { + if (separateBucketOwner) { + const bucketOwnerFCGrant = { + ID: separateBucketOwner.getOwner(), + displayName: separateBucketOwner.getOwnerDisplayName(), + permission: 'FULL_CONTROL', }; - cannedGrants. 
- push(ownerGrant, logWriteGrant, logReadACPGrant); - }, - // Note: bucket-owner-read is just for objectGetACL - 'bucket-owner-read': () => { - // If the bucket owner and object owner are different, - // add separate entries for each - if (separateBucketOwner) { - const bucketOwnerReadGrant = { - ID: separateBucketOwner.getOwner(), - displayName: separateBucketOwner.getOwnerDisplayName(), - permission: 'READ', - }; - cannedGrants.push(ownerGrant, bucketOwnerReadGrant); - } else { - cannedGrants.push(ownerGrant); - } - }, - // Note: bucket-owner-full-control is just for objectGetACL - 'bucket-owner-full-control': () => { - if (separateBucketOwner) { - const bucketOwnerFCGrant = { - ID: separateBucketOwner.getOwner(), - displayName: separateBucketOwner.getOwnerDisplayName(), - permission: 'FULL_CONTROL', - }; - cannedGrants.push(ownerGrant, bucketOwnerFCGrant); - } else { - cannedGrants.push(ownerGrant); - } - }, - }; - actions[grantType](); - return cannedGrants; + cannedGrants.push(ownerGrant, bucketOwnerFCGrant); + } else { + cannedGrants.push(ownerGrant); + } + }, }; - + actions[grantType](); + return cannedGrants; +}; aclUtils.parseAclXml = function parseAclXml(toBeParsed, log, next) { return parseString(toBeParsed, (err, result) => { @@ -113,24 +108,26 @@ aclUtils.parseAclXml = function parseAclXml(toBeParsed, log, next) { log.debug('invalid xml', { xmlObj: toBeParsed }); return next(errors.MalformedXML); } - if (!result.AccessControlPolicy - || !result.AccessControlPolicy.AccessControlList - || result.AccessControlPolicy.AccessControlList.length !== 1 - || (result.AccessControlPolicy.AccessControlList[0] !== '' && - Object.keys(result.AccessControlPolicy.AccessControlList[0]) - .some(listKey => listKey !== 'Grant'))) { + if ( + !result.AccessControlPolicy || + !result.AccessControlPolicy.AccessControlList || + result.AccessControlPolicy.AccessControlList.length !== 1 || + (result.AccessControlPolicy.AccessControlList[0] !== '' && + Object.keys(result.AccessControlPolicy.AccessControlList[0]).some(listKey => listKey !== 'Grant')) + ) { log.debug('invalid acl', { acl: result }); return next(errors.MalformedACLError); } - const jsonGrants = result - .AccessControlPolicy.AccessControlList[0].Grant; + const jsonGrants = result.AccessControlPolicy.AccessControlList[0].Grant; log.trace('acl grants', { aclGrants: jsonGrants }); - if (!Array.isArray(result.AccessControlPolicy.Owner) - || result.AccessControlPolicy.Owner.length !== 1 - || !Array.isArray(result.AccessControlPolicy.Owner[0].ID) - || result.AccessControlPolicy.Owner[0].ID.length !== 1 - || result.AccessControlPolicy.Owner[0].ID[0] === '') { + if ( + !Array.isArray(result.AccessControlPolicy.Owner) || + result.AccessControlPolicy.Owner.length !== 1 || + !Array.isArray(result.AccessControlPolicy.Owner[0].ID) || + result.AccessControlPolicy.Owner[0].ID.length !== 1 || + result.AccessControlPolicy.Owner[0].ID[0] === '' + ) { return next(errors.MalformedACLError); } const ownerID = result.AccessControlPolicy.Owner[0].ID[0]; @@ -139,8 +136,7 @@ aclUtils.parseAclXml = function parseAclXml(toBeParsed, log, next) { }); }; -aclUtils.getPermissionType = function getPermissionType(identifier, resourceACL, - resourceType) { +aclUtils.getPermissionType = function getPermissionType(identifier, resourceACL, resourceType) { const fullControlIndex = resourceACL.FULL_CONTROL.indexOf(identifier); let writeIndex; if (resourceType === 'bucket') { @@ -196,32 +192,29 @@ aclUtils.isValidCanonicalId = function isValidCanonicalId(canonicalID) { return 
/^(?=.*?[a-f])(?=.*?[0-9])[a-f0-9]{64}$/.test(canonicalID); }; -aclUtils.reconstructUsersIdentifiedByEmail = - function reconstruct(userInfofromVault, userGrantInfo) { - return userGrantInfo.map(item => { - const userEmail = item.identifier.toLowerCase(); - const user = {}; - // Find the full user grant info based on email - const userId = userInfofromVault - .find(elem => elem.email.toLowerCase() === userEmail); - // Set the identifier to be the canonicalID instead of email - user.identifier = userId.canonicalID; - user.userIDType = 'id'; - // copy over ACL grant type: i.e. READ/WRITE... - user.grantType = item.grantType; - return user; - }); - }; +aclUtils.reconstructUsersIdentifiedByEmail = function reconstruct(userInfofromVault, userGrantInfo) { + return userGrantInfo.map(item => { + const userEmail = item.identifier.toLowerCase(); + const user = {}; + // Find the full user grant info based on email + const userId = userInfofromVault.find(elem => elem.email.toLowerCase() === userEmail); + // Set the identifier to be the canonicalID instead of email + user.identifier = userId.canonicalID; + user.userIDType = 'id'; + // copy over ACL grant type: i.e. READ/WRITE... + user.grantType = item.grantType; + return user; + }); +}; -aclUtils.sortHeaderGrants = - function sortHeaderGrants(allGrantHeaders, addACLParams) { - allGrantHeaders.forEach(item => { - if (item) { - addACLParams[item.grantType].push(item.identifier); - } - }); - return addACLParams; - }; +aclUtils.sortHeaderGrants = function sortHeaderGrants(allGrantHeaders, addACLParams) { + allGrantHeaders.forEach(item => { + if (item) { + addACLParams[item.grantType].push(item.identifier); + } + }); + return addACLParams; +}; /** * convertToXml - Converts the `grantInfo` object (defined in `objectGetACL()`) @@ -234,12 +227,12 @@ aclUtils.convertToXml = grantInfo => { const { grants, ownerInfo } = grantInfo; const xml = []; - xml.push('', + xml.push( + '', '', '', `${ownerInfo.ID}`, - `${escapeForXml(ownerInfo.displayName)}` + - '', + `${escapeForXml(ownerInfo.displayName)}` + '', '', '' ); @@ -250,32 +243,25 @@ aclUtils.convertToXml = grantInfo => { // The `` tag has different attributes depending on whether the // grant has an ID or URI if (grant.ID) { - xml.push('', + xml.push( + '', `${grant.ID}` ); } else if (grant.URI) { - xml.push('', + xml.push( + '', `${escapeForXml(grant.URI)}` ); } if (grant.displayName) { - xml.push(`${escapeForXml(grant.displayName)}` + - '' - ); + xml.push(`${escapeForXml(grant.displayName)}` + ''); } - xml.push('', - `${grant.permission}`, - '' - ); + xml.push('', `${grant.permission}`, ''); }); - xml.push('', - '' - ); + xml.push('', ''); return xml.join(''); }; @@ -303,9 +289,11 @@ aclUtils.checkGrantHeaderValidity = function checkGrantHeaderValidity(headers) { const identifier = singleGrantArr[0].trim().toLowerCase(); const value = singleGrantArr[1].trim(); if (identifier === 'uri') { - if (value !== constants.publicId && + if ( + value !== constants.publicId && value !== constants.allAuthedUsersId && - value !== constants.logId) { + value !== constants.logId + ) { return false; } } else if (identifier === 'emailaddress') { @@ -346,13 +334,7 @@ function getGrants(acl) { * @returns {array} canonicalIDs - array of unique canonicalIDs from acl */ aclUtils.getCanonicalIDs = function getCanonicalIDs(acl) { - const aclGrantees = [].concat( - acl.FULL_CONTROL, - acl.WRITE, - acl.WRITE_ACP, - acl.READ, - acl.READ_ACP - ); + const aclGrantees = [].concat(acl.FULL_CONTROL, acl.WRITE, acl.WRITE_ACP, acl.READ, 
acl.READ_ACP); const uniqueGrantees = Array.from(new Set(aclGrantees)); // grantees can be a mix of canonicalIDs and predefined groups in the form // of uri, so filter out only canonicalIDs @@ -367,11 +349,12 @@ aclUtils.getCanonicalIDs = function getCanonicalIDs(acl) { aclUtils.getUriGrantInfo = function getUriGrantInfo(acl) { const grants = getGrants(acl); const uriGrantInfo = []; - const validGrants = Object.entries(grants) - .filter(([permission, grantees]) => permission - && Array.isArray(grantees)); + const validGrants = Object.entries(grants).filter( + ([permission, grantees]) => permission && Array.isArray(grantees) + ); validGrants.forEach(([permission, grantees]) => { - grantees.filter(grantee => grantsByURI.includes(grantee)) + grantees + .filter(grantee => grantsByURI.includes(grantee)) .forEach(grantee => { uriGrantInfo.push({ URI: grantee, @@ -391,16 +374,15 @@ aclUtils.getUriGrantInfo = function getUriGrantInfo(acl) { * @returns {array} individualGrantInfo - array of grants mapped to * canonicalID/email */ -aclUtils.getIndividualGrants = function getIndividualGrants(acl, canonicalIDs, - emails) { +aclUtils.getIndividualGrants = function getIndividualGrants(acl, canonicalIDs, emails) { const grants = getGrants(acl); const individualGrantInfo = []; - const validGrants = Object.entries(grants) - .filter(([permission, grantees]) => permission - && Array.isArray(grantees)); + const validGrants = Object.entries(grants).filter( + ([permission, grantees]) => permission && Array.isArray(grantees) + ); validGrants.forEach(([permission, grantees]) => { - grantees.filter(grantee => canonicalIDs.includes(grantee) - && emails[grantee]) + grantees + .filter(grantee => canonicalIDs.includes(grantee) && emails[grantee]) .forEach(grantee => { individualGrantInfo.push({ ID: grantee, diff --git a/lib/utilities/collectCorsHeaders.js b/lib/utilities/collectCorsHeaders.js index 508bbe55ab..96185f225e 100644 --- a/lib/utilities/collectCorsHeaders.js +++ b/lib/utilities/collectCorsHeaders.js @@ -1,5 +1,4 @@ -const { findCorsRule, generateCorsResHeaders } = - require('../api/apiUtils/object/corsResponse.js'); +const { findCorsRule, generateCorsResHeaders } = require('../api/apiUtils/object/corsResponse.js'); /** * collectCorsHeaders - gather any relevant CORS headers diff --git a/lib/utilities/collectResponseHeaders.js b/lib/utilities/collectResponseHeaders.js index 2e6facd656..1c5dd562ec 100644 --- a/lib/utilities/collectResponseHeaders.js +++ b/lib/utilities/collectResponseHeaders.js @@ -1,6 +1,5 @@ const { getVersionIdResHeader } = require('../api/apiUtils/object/versioning'); -const checkUserMetadataSize - = require('../api/apiUtils/object/checkUserMetadataSize'); +const checkUserMetadataSize = require('../api/apiUtils/object/checkUserMetadataSize'); const { getAmzRestoreResHeader } = require('../api/apiUtils/object/coldStorage'); const { config } = require('../Config'); const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface'); @@ -16,38 +15,36 @@ const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface'); * @return {object} responseMetaHeaders headers with object metadata to include * in response to client */ -function collectResponseHeaders(objectMD, corsHeaders, versioningCfg, - returnTagCount) { +function collectResponseHeaders(objectMD, corsHeaders, versioningCfg, returnTagCount) { // Add user meta headers from objectMD let responseMetaHeaders = Object.assign({}, corsHeaders); - Object.keys(objectMD).filter(val => (val.startsWith('x-amz-meta-'))) - 
.forEach(id => { responseMetaHeaders[id] = objectMD[id]; }); + Object.keys(objectMD) + .filter(val => val.startsWith('x-amz-meta-')) + .forEach(id => { + responseMetaHeaders[id] = objectMD[id]; + }); // Check user metadata size responseMetaHeaders = checkUserMetadataSize(responseMetaHeaders); // TODO: When implement lifecycle, add additional response headers // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html - responseMetaHeaders['x-amz-version-id'] = - getVersionIdResHeader(versioningCfg, objectMD); + responseMetaHeaders['x-amz-version-id'] = getVersionIdResHeader(versioningCfg, objectMD); if (objectMD['x-amz-website-redirect-location']) { - responseMetaHeaders['x-amz-website-redirect-location'] = - objectMD['x-amz-website-redirect-location']; + responseMetaHeaders['x-amz-website-redirect-location'] = objectMD['x-amz-website-redirect-location']; } if (objectMD['x-amz-storage-class'] !== 'STANDARD') { - responseMetaHeaders['x-amz-storage-class'] = - objectMD['x-amz-storage-class']; + responseMetaHeaders['x-amz-storage-class'] = objectMD['x-amz-storage-class']; } if (objectMD['x-amz-server-side-encryption']) { - responseMetaHeaders['x-amz-server-side-encryption'] - = objectMD['x-amz-server-side-encryption']; + responseMetaHeaders['x-amz-server-side-encryption'] = objectMD['x-amz-server-side-encryption']; } const kmsKey = objectMD['x-amz-server-side-encryption-aws-kms-key-id']; - if (kmsKey && - objectMD['x-amz-server-side-encryption'] === 'aws:kms') { - responseMetaHeaders['x-amz-server-side-encryption-aws-kms-key-id'] - = config.kmsHideScalityArn ? getKeyIdFromArn(kmsKey) : kmsKey; + if (kmsKey && objectMD['x-amz-server-side-encryption'] === 'aws:kms') { + responseMetaHeaders['x-amz-server-side-encryption-aws-kms-key-id'] = config.kmsHideScalityArn + ? getKeyIdFromArn(kmsKey) + : kmsKey; } const restoreHeader = getAmzRestoreResHeader(objectMD); @@ -65,8 +62,7 @@ function collectResponseHeaders(objectMD, corsHeaders, versioningCfg, responseMetaHeaders['Cache-Control'] = objectMD['cache-control']; } if (objectMD['content-disposition']) { - responseMetaHeaders['Content-Disposition'] - = objectMD['content-disposition']; + responseMetaHeaders['Content-Disposition'] = objectMD['content-disposition']; } if (objectMD['content-encoding']) { responseMetaHeaders['Content-Encoding'] = objectMD['content-encoding']; @@ -78,43 +74,35 @@ function collectResponseHeaders(objectMD, corsHeaders, versioningCfg, // Note: ETag must have a capital "E" and capital "T" for cosbench // to work. 
responseMetaHeaders.ETag = `"${objectMD['content-md5']}"`; - responseMetaHeaders['Last-Modified'] = - new Date(objectMD['last-modified']).toUTCString(); + responseMetaHeaders['Last-Modified'] = new Date(objectMD['last-modified']).toUTCString(); if (objectMD['content-type']) { responseMetaHeaders['Content-Type'] = objectMD['content-type']; } - if (returnTagCount && objectMD.tags && - Object.keys(objectMD.tags).length > 0) { - responseMetaHeaders['x-amz-tagging-count'] = - Object.keys(objectMD.tags).length; + if (returnTagCount && objectMD.tags && Object.keys(objectMD.tags).length > 0) { + responseMetaHeaders['x-amz-tagging-count'] = Object.keys(objectMD.tags).length; } - const hasRetentionInfo = objectMD.retentionMode - && objectMD.retentionDate; + const hasRetentionInfo = objectMD.retentionMode && objectMD.retentionDate; if (hasRetentionInfo) { - responseMetaHeaders['x-amz-object-lock-retain-until-date'] - = objectMD.retentionDate; - responseMetaHeaders['x-amz-object-lock-mode'] - = objectMD.retentionMode; + responseMetaHeaders['x-amz-object-lock-retain-until-date'] = objectMD.retentionDate; + responseMetaHeaders['x-amz-object-lock-mode'] = objectMD.retentionMode; } if (objectMD.legalHold !== undefined) { - responseMetaHeaders['x-amz-object-lock-legal-hold'] - = objectMD.legalHold ? 'ON' : 'OFF'; + responseMetaHeaders['x-amz-object-lock-legal-hold'] = objectMD.legalHold ? 'ON' : 'OFF'; } if (objectMD.replicationInfo && objectMD.replicationInfo.status) { - responseMetaHeaders['x-amz-replication-status'] = - objectMD.replicationInfo.status; + responseMetaHeaders['x-amz-replication-status'] = objectMD.replicationInfo.status; } - if (objectMD.replicationInfo && + if ( + objectMD.replicationInfo && // Use storageType to determine if user metadata is needed. 
objectMD.replicationInfo.storageType && - Array.isArray(objectMD.replicationInfo.backends)) { + Array.isArray(objectMD.replicationInfo.backends) + ) { objectMD.replicationInfo.backends.forEach(backend => { const { status, site, dataStoreVersionId } = backend; - responseMetaHeaders[`x-amz-meta-${site}-replication-status`] = - status; + responseMetaHeaders[`x-amz-meta-${site}-replication-status`] = status; if (status === 'COMPLETED' && dataStoreVersionId) { - responseMetaHeaders[`x-amz-meta-${site}-version-id`] = - dataStoreVersionId; + responseMetaHeaders[`x-amz-meta-${site}-version-id`] = dataStoreVersionId; } }); } diff --git a/lib/utilities/healthcheckHandler.js b/lib/utilities/healthcheckHandler.js index 7b0192206a..22c509c0d9 100644 --- a/lib/utilities/healthcheckHandler.js +++ b/lib/utilities/healthcheckHandler.js @@ -41,12 +41,7 @@ function writeResponse(res, error, log, results, cb) { * @return {undefined} */ function clientCheck(flightCheckOnStartUp, log, cb) { - const clients = [ - data, - metadata, - vault, - kms, - ]; + const clients = [data, metadata, vault, kms]; const clientTasks = []; clients.forEach(client => { if (typeof client.checkHealth === 'function') { @@ -64,11 +59,12 @@ function clientCheck(flightCheckOnStartUp, log, cb) { // other than aws_s3 or azure const obj = results.reduce((obj, item) => Object.assign(obj, item), {}); // fail only if *all* backend fail, so that we can still serve the ones which are working - fail = Object.keys(obj).every(k => - // if there is an error from an external backend, - // only return a 500 if it is on startup - // (flightCheckOnStartUp set to true) - obj[k].error && (flightCheckOnStartUp || !obj[k].external) + fail = Object.keys(obj).every( + k => + // if there is an error from an external backend, + // only return a 500 if it is on startup + // (flightCheckOnStartUp set to true) + obj[k].error && (flightCheckOnStartUp || !obj[k].external) ); if (fail) { return cb(errors.InternalError, obj); @@ -96,8 +92,7 @@ function routeHandler(deep, req, res, log, statsClient, cb) { } function checkIP(clientIP) { - return ipCheck.ipMatchCidrList( - _config.healthChecks.allowFrom, clientIP); + return ipCheck.ipMatchCidrList(_config.healthChecks.allowFrom, clientIP); } /** @@ -121,7 +116,7 @@ function healthcheckHandler(clientIP, req, res, log, statsClient) { }); } - const deep = (req.url === '/ready'); + const deep = req.url === '/ready'; // Attach the apiMethod method to the request, so it can used by monitoring in the server // eslint-disable-next-line no-param-reassign @@ -130,8 +125,7 @@ function healthcheckHandler(clientIP, req, res, log, statsClient) { if (!checkIP(clientIP)) { return healthcheckEndHandler(errors.AccessDenied, []); } - return routeHandler(deep, req, res, log, statsClient, - healthcheckEndHandler); + return routeHandler(deep, req, res, log, statsClient, healthcheckEndHandler); } module.exports = { diff --git a/lib/utilities/internalHandlers.js b/lib/utilities/internalHandlers.js index 96af32bc05..7423541756 100644 --- a/lib/utilities/internalHandlers.js +++ b/lib/utilities/internalHandlers.js @@ -1,7 +1,6 @@ const routeBackbeat = require('../routes/routeBackbeat'); const routeMetadata = require('../routes/routeMetadata'); -const routeWorkflowEngineOperator = - require('../routes/routeWorkflowEngineOperator'); +const routeWorkflowEngineOperator = require('../routes/routeWorkflowEngineOperator'); const { reportHandler } = require('./reportHandler'); const routeVeeam = require('../routes/routeVeeam').routeVeeam; diff --git 
a/lib/utilities/legacyAWSBehavior.js b/lib/utilities/legacyAWSBehavior.js index c8a457a687..859d064972 100644 --- a/lib/utilities/legacyAWSBehavior.js +++ b/lib/utilities/legacyAWSBehavior.js @@ -10,8 +10,10 @@ const { config } = require('../Config'); */ function isLegacyAwsBehavior(locationConstraint) { - return (config.locationConstraints[locationConstraint] && - config.locationConstraints[locationConstraint].legacyAwsBehavior); + return ( + config.locationConstraints[locationConstraint] && + config.locationConstraints[locationConstraint].legacyAwsBehavior + ); } module.exports = isLegacyAwsBehavior; diff --git a/lib/utilities/monitoringHandler.js b/lib/utilities/monitoringHandler.js index 87a455193a..2b917544a8 100644 --- a/lib/utilities/monitoringHandler.js +++ b/lib/utilities/monitoringHandler.js @@ -114,69 +114,72 @@ if (config.isQuotaEnabled) { // labels and buckets. const lifecycleDuration = new client.Histogram({ name: 's3_lifecycle_duration_seconds', - help: 'Duration of the lifecycle operation, calculated from the theoretical date to the end ' + - 'of the operation', + help: 'Duration of the lifecycle operation, calculated from the theoretical date to the end ' + 'of the operation', labelNames: ['type', 'location'], buckets: [0.2, 1, 5, 30, 120, 600, 3600, 4 * 3600, 8 * 3600, 16 * 3600, 24 * 3600], }); -function promMetrics(method, bucketName, code, action, - newByteLength, oldByteLength, isVersionedObj, - numOfObjectsRemoved, ingestSize) { +function promMetrics( + method, + bucketName, + code, + action, + newByteLength, + oldByteLength, + isVersionedObj, + numOfObjectsRemoved, + ingestSize +) { let bytes; switch (action) { - case 'putObject': - case 'copyObject': - case 'putObjectPart': - if (code === '200') { - bytes = newByteLength - (isVersionedObj ? 0 : oldByteLength); - httpRequestSizeBytes - .labels(method, action, code) - .observe(newByteLength); - dataDiskAvailable.dec(bytes); - dataDiskFree.dec(bytes); - if (ingestSize) { - numberOfIngestedObjects.inc(); - dataIngested.inc(ingestSize); + case 'putObject': + case 'copyObject': + case 'putObjectPart': + if (code === '200') { + bytes = newByteLength - (isVersionedObj ? 
0 : oldByteLength); + httpRequestSizeBytes.labels(method, action, code).observe(newByteLength); + dataDiskAvailable.dec(bytes); + dataDiskFree.dec(bytes); + if (ingestSize) { + numberOfIngestedObjects.inc(); + dataIngested.inc(ingestSize); + } + numberOfObjects.inc(); } - numberOfObjects.inc(); - } - break; - case 'createBucket': - if (code === '200') { - numberOfBuckets.inc(); - } - break; - case 'getObject': - if (code === '200') { - httpResponseSizeBytes - .labels(method, action, code) - .observe(newByteLength); - } - break; - case 'deleteBucket': - case 'deleteBucketWebsite': - if (code === '200' || code === '204') { - numberOfBuckets.dec(); - } - break; - case 'deleteObject': - case 'abortMultipartUpload': - case 'multiObjectDelete': - if (code === '200') { - dataDiskAvailable.inc(newByteLength); - dataDiskFree.inc(newByteLength); - const objs = numOfObjectsRemoved || 1; - numberOfObjects.dec(objs); - if (ingestSize) { - numberOfIngestedObjects.dec(objs); - dataIngested.dec(ingestSize); + break; + case 'createBucket': + if (code === '200') { + numberOfBuckets.inc(); } - } - break; - default: - break; + break; + case 'getObject': + if (code === '200') { + httpResponseSizeBytes.labels(method, action, code).observe(newByteLength); + } + break; + case 'deleteBucket': + case 'deleteBucketWebsite': + if (code === '200' || code === '204') { + numberOfBuckets.dec(); + } + break; + case 'deleteObject': + case 'abortMultipartUpload': + case 'multiObjectDelete': + if (code === '200') { + dataDiskAvailable.inc(newByteLength); + dataDiskFree.inc(newByteLength); + const objs = numOfObjectsRemoved || 1; + numberOfObjects.dec(objs); + if (ingestSize) { + numberOfIngestedObjects.dec(objs); + dataIngested.dec(ingestSize); + } + } + break; + default: + break; } } @@ -215,7 +218,6 @@ function writeResponse(res, error, results, cb) { }); } - async function routeHandler(req, res, cb) { if (req.method !== 'GET') { return cb(errors.BadRequest, []); diff --git a/lib/utilities/reportHandler.js b/lib/utilities/reportHandler.js index 1316662f13..4b87119619 100644 --- a/lib/utilities/reportHandler.js +++ b/lib/utilities/reportHandler.js @@ -62,8 +62,10 @@ function cleanup(obj) { } function isAuthorized(clientIP, req) { - return ipCheck.ipMatchCidrList(config.healthChecks.allowFrom, clientIP) && - req.headers['x-scal-report-token'] === config.reportToken; + return ( + ipCheck.ipMatchCidrList(config.healthChecks.allowFrom, clientIP) && + req.headers['x-scal-report-token'] === config.reportToken + ); } function getGitVersion(cb) { @@ -82,22 +84,28 @@ function getSystemStats() { const cpuInfo = os.cpus(); const model = cpuInfo[0].model; const speed = cpuInfo[0].speed; - const times = cpuInfo. - map(c => c.times). 
- reduce((prev, cur) => - Object.assign({}, { - user: prev.user + cur.user, - nice: prev.nice + cur.nice, - sys: prev.sys + cur.sys, - idle: prev.idle + cur.idle, - irq: prev.irq + cur.irq, - }), { + const times = cpuInfo + .map(c => c.times) + .reduce( + (prev, cur) => + Object.assign( + {}, + { + user: prev.user + cur.user, + nice: prev.nice + cur.nice, + sys: prev.sys + cur.sys, + idle: prev.idle + cur.idle, + irq: prev.irq + cur.irq, + } + ), + { user: 0, nice: 0, sys: 0, idle: 0, irq: 0, - }); + } + ); return { memory: { @@ -227,18 +235,19 @@ function _getMetricsByLocation(endpoint, sites, requestMethod, log, cb) { async.mapLimit( sites, ASYNCLIMIT, - (site, next) => requestMethod(endpoint, site, log, (err, res) => { - if (err) { - log.debug('Error in retrieving site metrics', { - method: '_getMetricsByLocation', - error: err, - site, - requestType: requestMethod.name, - }); - return next(null, { site, stats: {} }); - } - return next(null, { site, stats: res }); - }), + (site, next) => + requestMethod(endpoint, site, log, (err, res) => { + if (err) { + log.debug('Error in retrieving site metrics', { + method: '_getMetricsByLocation', + error: err, + site, + requestType: requestMethod.name, + }); + return next(null, { site, stats: {} }); + } + return next(null, { site, stats: res }); + }), (err, locStats) => { if (err) { log.error('failed to get stats for site', { @@ -261,19 +270,21 @@ function _getMetrics(sites, requestMethod, log, cb, _testConfig) { const conf = (_testConfig && _testConfig.backbeat) || config.backbeat; const { host, port } = conf; const endpoint = `http://${host}:${port}`; - return async.parallel({ - all: done => requestMethod(endpoint, 'all', log, done), - byLocation: done => _getMetricsByLocation(endpoint, sites, - requestMethod, log, done), - }, (err, res) => { - if (err) { - return cb(err); + return async.parallel( + { + all: done => requestMethod(endpoint, 'all', log, done), + byLocation: done => _getMetricsByLocation(endpoint, sites, requestMethod, log, done), + }, + (err, res) => { + if (err) { + return cb(err); + } + const all = (res && res.all) || {}; + const byLocation = (res && res.byLocation) || {}; + const retObj = Object.assign({}, all, { byLocation }); + return cb(null, retObj); } - const all = (res && res.all) || {}; - const byLocation = (res && res.byLocation) || {}; - const retObj = Object.assign({}, all, { byLocation }); - return cb(null, retObj); - }); + ); } function getCRRMetrics(log, cb, _testConfig) { @@ -282,58 +293,73 @@ function getCRRMetrics(log, cb, _testConfig) { }); const { replicationEndpoints } = _testConfig || config; const sites = replicationEndpoints.map(endpoint => endpoint.site); - return _getMetrics(sites, _crrMetricRequest, log, (err, retObj) => { - if (err) { - log.error('failed to get CRR stats', { - method: 'getCRRMetrics', - error: err, - }); - return cb(null, {}); - } - return cb(null, retObj); - }, _testConfig); + return _getMetrics( + sites, + _crrMetricRequest, + log, + (err, retObj) => { + if (err) { + log.error('failed to get CRR stats', { + method: 'getCRRMetrics', + error: err, + }); + return cb(null, {}); + } + return cb(null, retObj); + }, + _testConfig + ); } function getIngestionMetrics(sites, log, cb, _testConfig) { log.debug('request Ingestion metrics from backbeat api', { method: 'getIngestionMetrics', }); - return _getMetrics(sites, _ingestionMetricRequest, log, (err, retObj) => { - if (err) { - log.error('failed to get Ingestion stats', { - method: 'getIngestionMetrics', - error: err, - }); - return 
cb(null, {}); - } - return cb(null, retObj); - }, _testConfig); + return _getMetrics( + sites, + _ingestionMetricRequest, + log, + (err, retObj) => { + if (err) { + log.error('failed to get Ingestion stats', { + method: 'getIngestionMetrics', + error: err, + }); + return cb(null, {}); + } + return cb(null, retObj); + }, + _testConfig + ); } function _getStates(statusPath, schedulePath, log, cb, _testConfig) { const conf = (_testConfig && _testConfig.backbeat) || config.backbeat; const { host, port } = conf; const endpoint = `http://${host}:${port}`; - async.parallel({ - states: done => _makeRequest(endpoint, statusPath, done), - schedules: done => _makeRequest(endpoint, schedulePath, done), - }, (err, res) => { - if (err) { - return cb(err); - } - const locationSchedules = {}; - Object.keys(res.schedules).forEach(loc => { - const val = res.schedules[loc]; - if (!isNaN(Date.parse(val))) { - locationSchedules[loc] = new Date(val); + async.parallel( + { + states: done => _makeRequest(endpoint, statusPath, done), + schedules: done => _makeRequest(endpoint, schedulePath, done), + }, + (err, res) => { + if (err) { + return cb(err); } - }); - const retObj = { - states: res.states || {}, - schedules: locationSchedules, - }; - return cb(null, retObj); - }); + const locationSchedules = {}; + Object.keys(res.schedules).forEach(loc => { + const val = res.schedules[loc]; + if (!isNaN(Date.parse(val))) { + locationSchedules[loc] = new Date(val); + } + }); + const retObj = { + states: res.states || {}, + schedules: locationSchedules, + }; + return cb(null, retObj); + } + ); } function getReplicationStates(log, cb, _testConfig) { @@ -341,25 +367,31 @@ function getReplicationStates(log, cb, _testConfig) { method: 'getReplicationStates', }); const { crrStatus, crrSchedules } = REQ_PATHS; - return _getStates(crrStatus, crrSchedules, log, (err, res) => { - if (err) { - if (err === 'responseError') { - log.error('error response from backbeat api', { - error: res, - method: 'getReplicationStates', - service: 'replication', - }); - } else { - log.error('unable to perform request to backbeat api', { - error: err, - method: 'getReplicationStates', - service: 'replication', - }); + return _getStates( + crrStatus, + crrSchedules, + log, + (err, res) => { + if (err) { + if (err === 'responseError') { + log.error('error response from backbeat api', { + error: res, + method: 'getReplicationStates', + service: 'replication', + }); + } else { + log.error('unable to perform request to backbeat api', { + error: err, + method: 'getReplicationStates', + service: 'replication', + }); + } + return cb(null, {}); } - return cb(null, {}); - } - return cb(null, res); - }, _testConfig); + return cb(null, res); + }, + _testConfig + ); } function getIngestionStates(log, cb, _testConfig) { @@ -367,67 +399,81 @@ function getIngestionStates(log, cb, _testConfig) { method: 'getIngestionStates', }); const { ingestionStatus, ingestionSchedules } = REQ_PATHS; - return _getStates(ingestionStatus, ingestionSchedules, log, (err, res) => { - if (err) { - if (err === 'responseError') { - log.error('error response from backbeat api', { - error: res, - method: 'getIngestionStates', - service: 'ingestion', - }); - } else { - log.error('unable to perform request to backbeat api', { - error: err, - method: 'getIngestionStates', - service: 'ingestion', - }); + return _getStates( + ingestionStatus, + ingestionSchedules, + log, + (err, res) => { + if (err) { + if (err === 'responseError') { + log.error('error response from backbeat api', { + error: res, 
+ method: 'getIngestionStates', + service: 'ingestion', + }); + } else { + log.error('unable to perform request to backbeat api', { + error: err, + method: 'getIngestionStates', + service: 'ingestion', + }); + } + return cb(null, {}); } - return cb(null, {}); - } - return cb(null, res); - }, _testConfig); + return cb(null, res); + }, + _testConfig + ); } function getIngestionInfo(log, cb, _testConfig) { log.debug('requesting location ingestion info from backbeat api', { method: 'getIngestionInfo', }); - async.waterfall([ - done => getIngestionStates(log, done, _testConfig), - (stateObj, done) => { - // if getIngestionStates returned an error or the returned object - // did not return an expected response - if (Object.keys(stateObj).length === 0 || !stateObj.states) { - log.debug('no ingestion locations found', { - method: 'getIngestionInfo', - }); - return done(null, stateObj, {}); - } - const sites = Object.keys(stateObj.states); - return getIngestionMetrics(sites, log, (err, res) => { - if (err) { - log.error('failed to get Ingestion stats', { + async.waterfall( + [ + done => getIngestionStates(log, done, _testConfig), + (stateObj, done) => { + // if getIngestionStates returned an error or the returned object + // did not return an expected response + if (Object.keys(stateObj).length === 0 || !stateObj.states) { + log.debug('no ingestion locations found', { method: 'getIngestionInfo', - error: err, }); return done(null, stateObj, {}); } - return done(null, stateObj, res); - }, _testConfig); - }, - ], (err, stateObj, metricObj) => { - if (err) { - log.error('failed to get ingestion info', { - method: 'getIngestionInfo', - error: err, + const sites = Object.keys(stateObj.states); + return getIngestionMetrics( + sites, + log, + (err, res) => { + if (err) { + log.error('failed to get Ingestion stats', { + method: 'getIngestionInfo', + error: err, + }); + return done(null, stateObj, {}); + } + return done(null, stateObj, res); + }, + _testConfig + ); + }, + ], + (err, stateObj, metricObj) => { + if (err) { + log.error('failed to get ingestion info', { + method: 'getIngestionInfo', + error: err, + }); + return cb(null, {}); + } + return cb(null, { + metrics: metricObj, + status: stateObj, }); - return cb(null, {}); } - return cb(null, { - metrics: metricObj, - status: stateObj, - }); - }); + ); } /** @@ -453,52 +499,54 @@ function reportHandler(clientIP, req, res, log) { } // TODO propagate value of req.headers['x-scal-report-skip-cache'] - async.parallel({ - getUUID: cb => metadata.getUUID(log, cb), - getMDDiskUsage: cb => metadata.getDiskUsage(log, cb), - getDataDiskUsage: cb => data.getDiskUsage(log, cb), - getVersion: cb => getGitVersion(cb), - getObjectCount: cb => metadata.countItems(log, cb), - getCRRMetrics: cb => getCRRMetrics(log, cb), - getReplicationStates: cb => getReplicationStates(log, cb), - getIngestionInfo: cb => getIngestionInfo(log, cb), - getVaultReport: cb => vault.report(log, cb), - }, - (err, results) => { - if (err) { - res.writeHead(500, { 'Content-Type': 'application/json' }); - res.write(JSON.stringify(err)); - log.errorEnd('could not gather report', { error: err }); - } else { - const getObjectCount = results.getObjectCount; - const crrStatsObj = Object.assign({}, results.getCRRMetrics); - crrStatsObj.stalled = { count: getObjectCount.stalled || 0 }; - delete getObjectCount.stalled; - const response = { - utcTime: new Date(), - uuid: results.getUUID, - reportModelVersion: REPORT_MODEL_VERSION, + async.parallel( + { + getUUID: cb => metadata.getUUID(log, cb), + 
getMDDiskUsage: cb => metadata.getDiskUsage(log, cb), + getDataDiskUsage: cb => data.getDiskUsage(log, cb), + getVersion: cb => getGitVersion(cb), + getObjectCount: cb => metadata.countItems(log, cb), + getCRRMetrics: cb => getCRRMetrics(log, cb), + getReplicationStates: cb => getReplicationStates(log, cb), + getIngestionInfo: cb => getIngestionInfo(log, cb), + getVaultReport: cb => vault.report(log, cb), + }, + (err, results) => { + if (err) { + res.writeHead(500, { 'Content-Type': 'application/json' }); + res.write(JSON.stringify(err)); + log.errorEnd('could not gather report', { error: err }); + } else { + const getObjectCount = results.getObjectCount; + const crrStatsObj = Object.assign({}, results.getCRRMetrics); + crrStatsObj.stalled = { count: getObjectCount.stalled || 0 }; + delete getObjectCount.stalled; + const response = { + utcTime: new Date(), + uuid: results.getUUID, + reportModelVersion: REPORT_MODEL_VERSION, - mdDiskUsage: results.getMDDiskUsage, - dataDiskUsage: results.getDataDiskUsage, - serverVersion: results.getVersion, - systemStats: getSystemStats(), - itemCounts: getObjectCount, - crrStats: crrStatsObj, - repStatus: results.getReplicationStates, - config: cleanup(config), - capabilities: getCapabilities(), - ingestStats: results.getIngestionInfo.metrics, - ingestStatus: results.getIngestionInfo.status, - vaultReport: results.getVaultReport, - }; - monitoring.crrCacheToProm(results); - res.writeHead(200, { 'Content-Type': 'application/json' }); - res.write(JSON.stringify(response)); - log.end().debug('report handler finished'); + mdDiskUsage: results.getMDDiskUsage, + dataDiskUsage: results.getDataDiskUsage, + serverVersion: results.getVersion, + systemStats: getSystemStats(), + itemCounts: getObjectCount, + crrStats: crrStatsObj, + repStatus: results.getReplicationStates, + config: cleanup(config), + capabilities: getCapabilities(), + ingestStats: results.getIngestionInfo.metrics, + ingestStatus: results.getIngestionInfo.status, + vaultReport: results.getVaultReport, + }; + monitoring.crrCacheToProm(results); + res.writeHead(200, { 'Content-Type': 'application/json' }); + res.write(JSON.stringify(response)); + log.end().debug('report handler finished'); + } + res.end(); } - res.end(); - }); + ); } module.exports = { diff --git a/lib/utilities/request.js b/lib/utilities/request.js index 7be85927a6..7b5b09e4af 100644 --- a/lib/utilities/request.js +++ b/lib/utilities/request.js @@ -5,9 +5,7 @@ const { HttpProxyAgent } = require('http-proxy-agent'); const { HttpsProxyAgent } = require('https-proxy-agent'); const { jsutil } = require('arsenal'); -const { - proxyCompareUrl, -} = require('arsenal').storage.data.external.backendUtils; +const { proxyCompareUrl } = require('arsenal').storage.data.external.backendUtils; const validVerbs = new Set(['HEAD', 'GET', 'POST', 'PUT', 'DELETE']); const updateVerbs = new Set(['POST', 'PUT']); @@ -16,7 +14,7 @@ const updateVerbs = new Set(['POST', 'PUT']); * create a new header object from an existing header object. 
Similar keys * will be ignored if a value has been set for theirlower-cased form */ -function createHeaders(headers) { +function createHeaders(headers) { if (typeof headers !== 'object') { return {}; } @@ -72,7 +70,7 @@ function request(endpoint, options, callback) { let reqParams; if (typeof endpoint === 'string') { try { - reqParams = url.parse(endpoint); + reqParams = url.parse(endpoint); } catch (error) { return cb(error); } @@ -115,8 +113,10 @@ function request(endpoint, options, callback) { const req = request.request(reqParams); req.on('error', cb); req.on('response', res => { - const rawData = []; - res.on('data', chunk => { rawData.push(chunk); }); + const rawData = []; + res.on('data', chunk => { + rawData.push(chunk); + }); res.on('end', () => { const data = rawData.join(''); if (res.statusCode >= 400) { diff --git a/lib/utilities/validateQueryAndHeaders.js b/lib/utilities/validateQueryAndHeaders.js index 376e3c7c24..37c07727ba 100644 --- a/lib/utilities/validateQueryAndHeaders.js +++ b/lib/utilities/validateQueryAndHeaders.js @@ -14,7 +14,6 @@ function _validateKeys(unsupportedKeys, obj) { return unsupportedKey; } - /** * validateQueryAndHeaders - Check request for unsupported queries or headers * @param {object} request - request object @@ -28,8 +27,7 @@ function validateQueryAndHeaders(request, log) { const isBucketQuery = !request.objectKey; // if the request is at bucket level, check for unsupported bucket queries if (isBucketQuery) { - const unsupportedQuery = - _validateKeys(constants.unsupportedBucketQueries, reqQuery); + const unsupportedQuery = _validateKeys(constants.unsupportedBucketQueries, reqQuery); if (unsupportedQuery) { log.debug('encountered unsupported query', { query: unsupportedQuery, @@ -38,8 +36,7 @@ function validateQueryAndHeaders(request, log) { return { error: errors.NotImplemented }; } } - const unsupportedQuery = _validateKeys(constants.unsupportedQueries, - reqQuery); + const unsupportedQuery = _validateKeys(constants.unsupportedQueries, reqQuery); if (unsupportedQuery) { log.debug('encountered unsupported query', { query: unsupportedQuery, @@ -47,8 +44,7 @@ function validateQueryAndHeaders(request, log) { }); return { error: errors.NotImplemented }; } - const unsupportedHeader = _validateKeys(constants.unsupportedHeaders, - reqHeaders); + const unsupportedHeader = _validateKeys(constants.unsupportedHeaders, reqHeaders); if (unsupportedHeader) { log.debug('encountered unsupported header', { header: unsupportedHeader, diff --git a/managementAgent.js b/managementAgent.js index d7161ef693..4413d36c3b 100644 --- a/managementAgent.js +++ b/managementAgent.js @@ -8,13 +8,11 @@ const { managementAgentMessageType } = require('./lib/management/agentClient'); const { addOverlayMessageListener } = require('./lib/management/push'); const { saveConfigurationVersion } = require('./lib/management/configuration'); - // TODO: auth? // TODO: werelogs with a specific name. const CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS = 15000; - class ManagementAgentServer { constructor() { this.port = _config.managementAgent.port || 8010; @@ -34,9 +32,7 @@ class ManagementAgentServer { /* Define REPORT_TOKEN env variable needed by the management * module. 
*/ - process.env.REPORT_TOKEN = process.env.REPORT_TOKEN - || _config.reportToken - || Uuid.v4(); + process.env.REPORT_TOKEN = process.env.REPORT_TOKEN || _config.reportToken || Uuid.v4(); initManagement(logger.newRequestLogger(), overlay => { let error = null; @@ -73,8 +69,7 @@ class ManagementAgentServer { this.wss.on('listening', this.onListening.bind(this)); this.wss.on('error', this.onError.bind(this)); - setInterval(this.checkBrokenConnections.bind(this), - CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS); + setInterval(this.checkBrokenConnections.bind(this), CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS); addOverlayMessageListener(this.onNewOverlay.bind(this)); } @@ -114,8 +109,7 @@ class ManagementAgentServer { } onListening() { - logger.info('websocket server listening', - { port: this.port }); + logger.info('websocket server listening', { port: this.port }); } onError(error) { @@ -137,27 +131,24 @@ class ManagementAgentServer { }; client.send(JSON.stringify(msg), error => { if (error) { - logger.error( - 'failed to send remoteOverlay to management agent client', { - error, client: client._socket._peername, - }); + logger.error('failed to send remoteOverlay to management agent client', { + error, + client: client._socket._peername, + }); } }); } onNewOverlay(remoteOverlay) { const remoteOverlayObj = JSON.parse(remoteOverlay); - saveConfigurationVersion( - this.loadedOverlay, remoteOverlayObj, logger, err => { - if (err) { - logger.error('failed to save remote overlay', { err }); - return; - } - this.loadedOverlay = remoteOverlayObj; - this.wss.clients.forEach( - this._sendNewOverlayToClient.bind(this) - ); - }); + saveConfigurationVersion(this.loadedOverlay, remoteOverlayObj, logger, err => { + if (err) { + logger.error('failed to save remote overlay', { err }); + return; + } + this.loadedOverlay = remoteOverlayObj; + this.wss.clients.forEach(this._sendNewOverlayToClient.bind(this)); + }); } checkBrokenConnections() { diff --git a/mdserver.js b/mdserver.js index 4244be1cbc..c5113a46a3 100644 --- a/mdserver.js +++ b/mdserver.js @@ -1,8 +1,7 @@ 'use strict'; const { config } = require('./lib/Config.js'); -const MetadataFileServer = - require('arsenal').storage.metadata.file.MetadataFileServer; +const MetadataFileServer = require('arsenal').storage.metadata.file.MetadataFileServer; const logger = require('./lib/utilities/logger'); process.on('uncaughtException', err => { @@ -16,14 +15,15 @@ process.on('uncaughtException', err => { }); if (config.backends.metadata === 'file') { - const mdServer = new MetadataFileServer( - { bindAddress: config.metadataDaemon.bindAddress, - port: config.metadataDaemon.port, - path: config.metadataDaemon.metadataPath, - restEnabled: config.metadataDaemon.restEnabled, - restPort: config.metadataDaemon.restPort, - recordLog: config.recordLog, - versioning: { replicationGroupId: config.replicationGroupId }, - log: config.log }); + const mdServer = new MetadataFileServer({ + bindAddress: config.metadataDaemon.bindAddress, + port: config.metadataDaemon.port, + path: config.metadataDaemon.metadataPath, + restEnabled: config.metadataDaemon.restEnabled, + restPort: config.metadataDaemon.restPort, + recordLog: config.recordLog, + versioning: { replicationGroupId: config.replicationGroupId }, + log: config.log, + }); mdServer.startServer(); } diff --git a/monitoring/dashboard.json b/monitoring/dashboard.json index 1eca3701c1..7b6664a4b0 100644 --- a/monitoring/dashboard.json +++ b/monitoring/dashboard.json @@ -1,3629 +1,3528 @@ { - "__inputs": [ - { - "description": "", - "label": 
"Prometheus", - "name": "DS_PROMETHEUS", - "pluginId": "prometheus", - "pluginName": "Prometheus", - "type": "datasource" - }, - { - "description": "", - "label": "Loki", - "name": "DS_LOKI", - "pluginId": "loki", - "pluginName": "Loki", - "type": "datasource" - }, - { - "description": "Namespace associated with the Zenko instance", - "label": "namespace", - "name": "namespace", - "type": "constant", - "value": "zenko" - }, - { - "description": "Name of the Zenko instance", - "label": "instance", - "name": "zenkoName", - "type": "constant", - "value": "artesca-data" - }, - { - "description": "Name of the Cloudserver container, used to filter only the Cloudserver services.", - "label": "container", - "name": "container", - "type": "constant", - "value": "connector-cloudserver" - }, - { - "description": "Name of the Cloudserver Report job, used to filter only the Report Handler instances.", - "label": "report job", - "name": "reportJob", - "type": "constant", - "value": "artesca-data-ops-report-handler" - }, - { - "description": "Name of the Count-Items cronjob, used to filter only the Count-Items instances.", - "label": "count-items job", - "name": "countItemsJob", - "type": "constant", - "value": "artesca-data-ops-count-items" - } - ], - "annotations": { - "list": [] - }, - "description": "", - "editable": true, - "gnetId": null, - "hideControls": false, - "id": null, - "links": [], - "panels": [ - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "none", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "red", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "green", - "index": 1, - "line": true, - "op": "gt", - "value": 1.0, - "yaxis": "left" - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 0, - "y": 0 - }, - "hideTimeOverride": false, - "id": 1, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "targets": [ + "__inputs": [ { - "datasource": null, - "expr": "sum(up{namespace=\"${namespace}\", job=~\"$job\"})", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Up", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "0", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - } - ] - }, - "unit": "ops" + "description": "", + "label": "Prometheus", + "name": "DS_PROMETHEUS", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 3, - "y": 0 - }, - "hideTimeOverride": false, - "id": 2, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - 
"fields": "", - "values": false - }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Http requests rate", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "calcs": [ - "mean" - ], - "decimals": null, - "limit": null, - "links": [], - "mappings": [], - "max": 100, - "min": 0, - "noValue": "-", - "override": {}, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "#808080", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "red", - "index": 1, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" - }, - { - "color": "orange", - "index": 2, - "line": true, - "op": "gt", - "value": 80.0, - "yaxis": "left" - }, - { - "color": "green", - "index": 3, - "line": true, - "op": "gt", - "value": 90.0, - "yaxis": "left" - } - ] - }, - "title": null, - "unit": "percent", - "values": false + "description": "", + "label": "Loki", + "name": "DS_LOKI", + "pluginId": "loki", + "pluginName": "Loki", + "type": "datasource" }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 6, - "y": 0 - }, - "hideTimeOverride": false, - "id": 3, - "links": [], - "maxDataPoints": 100, - "options": { - "reduceOptions": { - "calcs": [ - "mean" - ] - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\", code=~\"2..\"}[$__rate_interval])) * 100\n /\nsum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]) > 0)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Success rate", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Success rate", - "transformations": [], - "transparent": false, - "type": "gauge" - }, - { - "datasource": "${DS_PROMETHEUS}", - "description": "Rate of data ingested : cumulative amount of data created (>0) or deleted (<0) per second.", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": 1, - "mappings": [], - "noValue": "none", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "dark-purple", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - } - ] - }, - "unit": "binBps" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 9, - "y": 0 - }, - "hideTimeOverride": false, - "id": 4, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false + "description": "Namespace associated with the Zenko instance", + "label": "namespace", + "name": "namespace", + "type": "constant", + "value": "zenko" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "-sum(deriv(s3_cloudserver_disk_available_bytes{namespace=\"${namespace}\", 
job=~\"$job\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Injection Data Rate", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "description": "Rate of object ingestion : cumulative count of object created (>0) or deleted (<0) per second.", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": 1, - "mappings": [], - "noValue": "none", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "dark-purple", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - } - ] - }, - "unit": "O/s" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 12, - "y": 0 - }, - "hideTimeOverride": false, - "id": 5, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false + "description": "Name of the Zenko instance", + "label": "instance", + "name": "zenkoName", + "type": "constant", + "value": "artesca-data" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "sum(deriv(s3_cloudserver_objects_count{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Injection Rate", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "description": "Number of S3 buckets available in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "-", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "#808080", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "blue", - "index": 1, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 7, - "x": 15, - "y": 0 - }, - "hideTimeOverride": false, - "id": 6, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "description": "Name of the Cloudserver container, used to filter only the Cloudserver services.", + "label": "container", + "name": "container", + "type": "constant", + "value": "connector-cloudserver" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "sum(s3_cloudserver_buckets_count{namespace=\"${namespace}\", job=\"${reportJob}\"})", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Buckets", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "description": "Status of the 
reports-handler pod.", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "none", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "red", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "green", - "index": 1, - "line": true, - "op": "gt", - "value": 1.0, - "yaxis": "left" - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 2, - "x": 22, - "y": 0 - }, - "hideTimeOverride": false, - "id": 7, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false + "description": "Name of the Cloudserver Report job, used to filter only the Report Handler instances.", + "label": "report job", + "name": "reportJob", + "type": "constant", + "value": "artesca-data-ops-report-handler" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "sum(up{namespace=\"${namespace}\", job=\"${reportJob}\"})", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "description": "Name of the Count-Items cronjob, used to filter only the Count-Items instances.", + "label": "count-items job", + "name": "countItemsJob", + "type": "constant", + "value": "artesca-data-ops-count-items" } - ], - "title": "Reporter", - "transformations": [], - "transparent": false, - "type": "stat" + ], + "annotations": { + "list": [] }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "0", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "semi-dark-blue", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 0, - "y": 4 - }, - "hideTimeOverride": false, - "id": 8, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "targets": [ + "description": "", + "editable": true, + "gnetId": null, + "hideControls": false, + "id": null, + "links": [], + "panels": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\",code=\"200\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Status 200", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "0", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "semi-dark-blue", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 2, - "x": 3, - 
"y": 4 - }, - "hideTimeOverride": false, - "id": 9, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "none", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "green", + "index": 1, + "line": true, + "op": "gt", + "value": 1.0, + "yaxis": "left" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 0 + }, + "hideTimeOverride": false, + "id": 1, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["last"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(up{namespace=\"${namespace}\", job=~\"$job\"})", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Up", + "transformations": [], + "transparent": false, + "type": "stat" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\",code=~\"4..\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Status 4xx", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "0", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "semi-dark-blue", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 2, - "x": 5, - "y": 4 - }, - "hideTimeOverride": false, - "id": 10, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + } + ] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 3, + "y": 0 + }, + "hideTimeOverride": false, + "id": 2, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + 
"datasource": null, + "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Http requests rate", + "transformations": [], + "transparent": false, + "type": "stat" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\",code=~\"5..\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Status 5xx", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "none", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "red", - "index": 1, - "line": true, - "op": "gt", - "value": 80.0, - "yaxis": "left" - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 2, - "x": 7, - "y": 4 - }, - "hideTimeOverride": false, - "id": 11, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "calcs": ["mean"], + "decimals": null, + "limit": null, + "links": [], + "mappings": [], + "max": 100, + "min": 0, + "noValue": "-", + "override": {}, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#808080", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "red", + "index": 1, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + }, + { + "color": "orange", + "index": 2, + "line": true, + "op": "gt", + "value": 80.0, + "yaxis": "left" + }, + { + "color": "green", + "index": 3, + "line": true, + "op": "gt", + "value": 90.0, + "yaxis": "left" + } + ] + }, + "title": null, + "unit": "percent", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 6, + "y": 0 + }, + "hideTimeOverride": false, + "id": 3, + "links": [], + "maxDataPoints": 100, + "options": { + "reduceOptions": { + "calcs": ["mean"] + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\", code=~\"2..\"}[$__rate_interval])) * 100\n /\nsum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]) > 0)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Success rate", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Success rate", + "transformations": [], + "transparent": false, + "type": "gauge" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": 
"sum(s3_cloudserver_http_active_requests{namespace=\"${namespace}\", job=~\"$job\"})", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Active requests", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "description": "Rate of data ingested out-of-band (OOB) : cumulative amount of OOB data created (>0) or deleted (<0) per second.", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": 1, - "mappings": [], - "noValue": "none", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "purple", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - } - ] - }, - "unit": "binBps" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 9, - "y": 4 - }, - "hideTimeOverride": false, - "id": 12, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "description": "Rate of data ingested : cumulative amount of data created (>0) or deleted (<0) per second.", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": 1, + "mappings": [], + "noValue": "none", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "dark-purple", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 9, + "y": 0 + }, + "hideTimeOverride": false, + "id": 4, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "-sum(deriv(s3_cloudserver_disk_available_bytes{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Injection Data Rate", + "transformations": [], + "transparent": false, + "type": "stat" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "sum(deriv(s3_cloudserver_ingested_bytes{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "OOB Inject. 
Data Rate", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "description": "Rate of object ingested out-of-band (OOB) : cumulative count of OOB object created (>0) or deleted (<0) per second.", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": 1, - "mappings": [], - "noValue": "none", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "purple", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - } - ] - }, - "unit": "O/s" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 12, - "y": 4 - }, - "hideTimeOverride": false, - "id": 13, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "description": "Rate of object ingestion : cumulative count of object created (>0) or deleted (<0) per second.", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": 1, + "mappings": [], + "noValue": "none", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "dark-purple", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + } + ] + }, + "unit": "O/s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 12, + "y": 0 + }, + "hideTimeOverride": false, + "id": 5, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(deriv(s3_cloudserver_objects_count{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Injection Rate", + "transformations": [], + "transparent": false, + "type": "stat" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "sum(deriv(s3_cloudserver_ingested_objects_count{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "OOB Inject. 
Rate", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "description": "Number of S3 objects available in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "-", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "#808080", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "blue", - "index": 1, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 7, - "x": 15, - "y": 4 - }, - "hideTimeOverride": false, - "id": 14, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "description": "Number of S3 buckets available in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#808080", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "blue", + "index": 1, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 7, + "x": 15, + "y": 0 + }, + "hideTimeOverride": false, + "id": 6, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(s3_cloudserver_buckets_count{namespace=\"${namespace}\", job=\"${reportJob}\"})", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Buckets", + "transformations": [], + "transparent": false, + "type": "stat" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "sum(s3_cloudserver_objects_count{namespace=\"${namespace}\", job=\"${reportJob}\"})", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Objects", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "description": "Time elapsed since the last report, when object/bucket count was updated.", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "-", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "#808080", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "green", - "index": 1, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" - }, - { - "color": "super-light-yellow", - 
"index": 2, - "line": true, - "op": "gt", - "value": 1800.0, - "yaxis": "left" - }, - { - "color": "orange", - "index": 3, - "line": true, - "op": "gt", - "value": 3600.0, - "yaxis": "left" - }, - { - "color": "red", - "index": 4, - "line": true, - "op": "gt", - "value": 3700.0, - "yaxis": "left" - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 2, - "x": 22, - "y": 4 - }, - "hideTimeOverride": false, - "id": 15, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "description": "Status of the reports-handler pod.", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "none", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "green", + "index": 1, + "line": true, + "op": "gt", + "value": 1.0, + "yaxis": "left" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 22, + "y": 0 + }, + "hideTimeOverride": false, + "id": 7, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["last"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(up{namespace=\"${namespace}\", job=\"${reportJob}\"})", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Reporter", + "transformations": [], + "transparent": false, + "type": "stat" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "time()\n- max(s3_cloudserver_last_report_timestamp{namespace=\"${namespace}\", job=\"${reportJob}\"})\n+ (max(s3_cloudserver_last_report_timestamp{namespace=\"${namespace}\", job=\"${reportJob}\"})\n - max(kube_cronjob_status_last_schedule_time{namespace=\"${namespace}\", cronjob=\"${countItemsJob}\"})\n > 0 or vector(0))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Last Report", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "collapsed": false, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "thresholds": { - "mode": "absolute", - "steps": [] - } - } - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 8 - }, - "hideTimeOverride": false, - "id": 16, - "links": [], - "maxDataPoints": 100, - "panels": [], - "targets": [], - "title": "Response codes", - "transformations": [], - "transparent": false, - "type": "row" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 30, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": 
"smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 9 - }, - "hideTimeOverride": false, - "id": 17, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "semi-dark-blue", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + } + ] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 4 + }, + "hideTimeOverride": false, + "id": 8, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\",code=\"200\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Status 200", + "transformations": [], + "transparent": false, + "type": "stat" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum by (code) (rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{code}}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Http status code over time", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 39, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "log" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "normal" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "ops" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Success" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "dark-blue", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "User errors" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "semi-dark-orange", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "System errors" 
- }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "semi-dark-red", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 9 - }, - "hideTimeOverride": false, - "id": 18, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "semi-dark-blue", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + } + ] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 3, + "y": 4 + }, + "hideTimeOverride": false, + "id": 9, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\",code=~\"4..\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Status 4xx", + "transformations": [], + "transparent": false, + "type": "stat" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\", code=~\"2..\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Success", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "semi-dark-blue", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + } + ] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 5, + "y": 4 + }, + "hideTimeOverride": false, + "id": 10, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\",code=~\"5..\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Status 5xx", + "transformations": [], + "transparent": false, + "type": "stat" }, { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\", code=~\"4..\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "User errors", - "metric": 
"", - "refId": "", - "step": 10, - "target": "" + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "none", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "red", + "index": 1, + "line": true, + "op": "gt", + "value": 80.0, + "yaxis": "left" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 7, + "y": 4 + }, + "hideTimeOverride": false, + "id": 11, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(s3_cloudserver_http_active_requests{namespace=\"${namespace}\", job=~\"$job\"})", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Active requests", + "transformations": [], + "transparent": false, + "type": "stat" }, { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\", code=~\"5..\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "System errors", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Aggregated status over time", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "collapsed": false, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "thresholds": { - "mode": "absolute", - "steps": [] - } - } - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 17 - }, - "hideTimeOverride": false, - "id": 19, - "links": [], - "maxDataPoints": 100, - "panels": [], - "targets": [], - "title": "Operations", - "transformations": [], - "transparent": false, - "type": "row" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 18, - "x": 0, - "y": 18 - }, - "hideTimeOverride": false, - "id": 20, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [ - "min", - "mean", - "max" - ], - "displayMode": "table", - "placement": "right" + "datasource": "${DS_PROMETHEUS}", + "description": "Rate of data ingested out-of-band (OOB) : cumulative amount of OOB data created (>0) or deleted (<0) per second.", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + 
"decimals": 1, + "mappings": [], + "noValue": "none", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "purple", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 9, + "y": 4 + }, + "hideTimeOverride": false, + "id": 12, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(deriv(s3_cloudserver_ingested_bytes{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "OOB Inject. Data Rate", + "transformations": [], + "transparent": false, + "type": "stat" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by(action)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{action}}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Request rate per S3 action", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 6, - "x": 18, - "y": 18 - }, - "hideTimeOverride": false, - "id": 21, - "links": [], - "maxDataPoints": 100, - "options": { - "displayLabels": [ - "name", - "percent" - ], - "legend": { - "displayMode": "list", - "placement": "bottom", - "values": [] - }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "sum" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "description": "Rate of object ingested out-of-band (OOB) : cumulative count of OOB object created (>0) or deleted (<0) per second.", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": 1, + "mappings": [], + "noValue": "none", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "purple", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + } + ] + }, + "unit": "O/s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 12, + "y": 4 + }, + "hideTimeOverride": false, + "id": 13, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(deriv(s3_cloudserver_ingested_objects_count{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + 
"target": "" + } + ], + "title": "OOB Inject. Rate", + "transformations": [], + "transparent": false, + "type": "stat" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(round(increase(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))) by(method)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{method}}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "HTTP Method breakdown", - "transformations": [], - "transparent": false, - "type": "piechart" - }, - { - "collapsed": false, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "thresholds": { - "mode": "absolute", - "steps": [] - } - } - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 28 - }, - "hideTimeOverride": false, - "id": 22, - "links": [], - "maxDataPoints": 100, - "panels": [], - "targets": [], - "title": "Latency", - "transformations": [], - "transparent": false, - "type": "row" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": 180000, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 29 - }, - "hideTimeOverride": false, - "id": 23, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" + "datasource": "${DS_PROMETHEUS}", + "description": "Number of S3 objects available in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#808080", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "blue", + "index": 1, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 7, + "x": 15, + "y": 4 + }, + "hideTimeOverride": false, + "id": 14, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "sum(s3_cloudserver_objects_count{namespace=\"${namespace}\", job=\"${reportJob}\"})", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Objects", + "transformations": [], + "transparent": false, + "type": "stat" }, - "tooltip": { - "mode": "single" - } - 
}, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Overall", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "datasource": "${DS_PROMETHEUS}", + "description": "Time elapsed since the last report, when object/bucket count was updated.", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#808080", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "green", + "index": 1, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + }, + { + "color": "super-light-yellow", + "index": 2, + "line": true, + "op": "gt", + "value": 1800.0, + "yaxis": "left" + }, + { + "color": "orange", + "index": 3, + "line": true, + "op": "gt", + "value": 3600.0, + "yaxis": "left" + }, + { + "color": "red", + "index": 4, + "line": true, + "op": "gt", + "value": 3700.0, + "yaxis": "left" + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 22, + "y": 4 + }, + "hideTimeOverride": false, + "id": 15, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["last"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "time()\n- max(s3_cloudserver_last_report_timestamp{namespace=\"${namespace}\", job=\"${reportJob}\"})\n+ (max(s3_cloudserver_last_report_timestamp{namespace=\"${namespace}\", job=\"${reportJob}\"})\n - max(kube_cronjob_status_last_schedule_time{namespace=\"${namespace}\", cronjob=\"${countItemsJob}\"})\n > 0 or vector(0))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Last Report", + "transformations": [], + "transparent": false, + "type": "stat" }, { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\", action=~\"objectPut|objectPutPart|objectCopy|objectPutCopyPart\"}[$__rate_interval]))\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\", action=~\"objectPut|objectPutPart|objectCopy|objectPutCopyPart\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Upload", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "collapsed": false, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [] + } + } + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "hideTimeOverride": false, + "id": 16, + "links": [], + "maxDataPoints": 100, + "panels": [], + "targets": [], + "title": "Response codes", + "transformations": [], + "transparent": false, + "type": "row" }, { - "datasource": null, - "expr": 
"sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\", action=\"objectDelete\"}[$__rate_interval]))\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\", action=\"objectDelete\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Delete", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "hideTimeOverride": false, + "id": 17, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum by (code) (rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{code}}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Http status code over time", + "transformations": [], + "transparent": false, + "type": "timeseries" }, { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\", action=\"objectGet\"}[$__rate_interval]))\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\", action=\"objectGet\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Download", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 39, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "ops" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Success" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "User errors" 
+ }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "System errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-red", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "hideTimeOverride": false, + "id": 18, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\", code=~\"2..\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Success", + "metric": "", + "refId": "", + "step": 10, + "target": "" + }, + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\", code=~\"4..\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "User errors", + "metric": "", + "refId": "", + "step": 10, + "target": "" + }, + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\", code=~\"5..\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "System errors", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Aggregated status over time", + "transformations": [], + "transparent": false, + "type": "timeseries" }, { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\", action=~\"multiObjectDelete|multipartDelete\"}[$__rate_interval]))\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\", action=~\"multiObjectDelete|multipartDelete\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Multi-delete", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Average latencies", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "cards": { - "cardPadding": null, - "cardRound": null - }, - "color": { - "cardColor": "#b4ff00", - "colorScale": "sqrt", - "colorScheme": "interpolateOranges", - "exponent": 0.5, - "max": null, - "min": null, - "mode": "opacity" - }, - "dataFormat": "tsbuckets", - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "thresholds": { - "mode": "absolute", - "steps": [] - } - } - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 29 - }, - "heatmap": {}, - "hideTimeOverride": false, - "hideZeroBuckets": false, - "highlightCards": true, - "id": 24, - "legend": { - "show": false - }, - "links": [], - "maxDataPoints": 25, - "reverseYBuckets": false, - "targets": [ - { - "datasource": null, - "expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", - "format": "heatmap", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - 
"legendFormat": "{{ le }}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Request duration", - "tooltip": { - "show": true, - "showHistogram": true - }, - "transformations": [], - "transparent": false, - "type": "heatmap", - "xAxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yAxis": { - "decimals": null, - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": 180000, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 24, - "x": 0, - "y": 37 - }, - "hideTimeOverride": false, - "id": 25, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [ - "max", - "mean" - ], - "displayMode": "table", - "placement": "right" + "collapsed": false, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [] + } + } + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "hideTimeOverride": false, + "id": 19, + "links": [], + "maxDataPoints": 100, + "panels": [], + "targets": [], + "title": "Operations", + "transformations": [], + "transparent": false, + "type": "row" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by (action)\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by (action)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{action}}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Latencies per S3 action", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "collapsed": false, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "thresholds": { - "mode": "absolute", - "steps": [] - } - } - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 47 - }, - "hideTimeOverride": false, - "id": 26, - "links": [], - "maxDataPoints": 100, - "panels": [], - "targets": [], - "title": "Data rate", - "transformations": [], - "transparent": false, - "type": "row" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - 
"showPoints": "auto", - "spanNulls": false, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "red", - "index": 1, - "line": true, - "op": "gt", - "value": 80.0, - "yaxis": "left" - } - ] - }, - "unit": "binBps" + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 18, + "x": 0, + "y": 18 + }, + "hideTimeOverride": false, + "id": 20, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": ["min", "mean", "max"], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by(action)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{action}}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Request rate per S3 action", + "transformations": [], + "transparent": false, + "type": "timeseries" }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Out" - }, - "properties": [ - { - "id": "custom.axisPlacement", - "value": "right" - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 48 - }, - "hideTimeOverride": false, - "id": 27, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" + { + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 6, + "x": 18, + "y": 18 + }, + "hideTimeOverride": false, + "id": 21, + "links": [], + "maxDataPoints": 100, + "options": { + "displayLabels": ["name", "percent"], + "legend": { + "displayMode": "list", + "placement": "bottom", + "values": [] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": ["sum"], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(round(increase(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))) by(method)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{method}}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "HTTP Method breakdown", + "transformations": [], + 
"transparent": false, + "type": "piechart" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Out", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "collapsed": false, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [] + } + } + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 28 + }, + "hideTimeOverride": false, + "id": 22, + "links": [], + "maxDataPoints": 100, + "panels": [], + "targets": [], + "title": "Latency", + "transformations": [], + "transparent": false, + "type": "row" }, { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "In", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Bandwidth", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "#808080", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "green", - "index": 1, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" - } - ] - } - } - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 48 - }, - "hideTimeOverride": false, - "id": 28, - "links": [], - "maxDataPoints": 100, - "options": { - "displayMode": "gradient", - "fieldOptions": { - "calcs": [ - "lastNotNull" - ], - "defaults": { - "decimals": null, + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 180000, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 29 + }, + "hideTimeOverride": false, + "id": 23, "links": [], - "max": null, - "min": null, - "noValue": "-", - "title": null, - "unit": "bytes" - }, - "limit": null, - "mappings": [], - "override": {}, - "thresholds": [ - { - "color": "#808080", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } }, - { - "color": "green", - "index": 1, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" - } - ], - "values": false + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", 
job=~\"$job\"}[$__rate_interval]))\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Overall", + "metric": "", + "refId": "", + "step": 10, + "target": "" + }, + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\", action=~\"objectPut|objectPutPart|objectCopy|objectPutCopyPart\"}[$__rate_interval]))\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\", action=~\"objectPut|objectPutPart|objectCopy|objectPutCopyPart\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Upload", + "metric": "", + "refId": "", + "step": 10, + "target": "" + }, + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\", action=\"objectDelete\"}[$__rate_interval]))\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\", action=\"objectDelete\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Delete", + "metric": "", + "refId": "", + "step": 10, + "target": "" + }, + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\", action=\"objectGet\"}[$__rate_interval]))\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\", action=\"objectGet\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Download", + "metric": "", + "refId": "", + "step": 10, + "target": "" + }, + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\", action=~\"multiObjectDelete|multipartDelete\"}[$__rate_interval]))\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\", action=~\"multiObjectDelete|multipartDelete\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Multi-delete", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Average latencies", + "transformations": [], + "transparent": false, + "type": "timeseries" }, - "orientation": "vertical", - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "targets": [ { - "datasource": null, - "expr": "avg(s3_cloudserver_http_request_size_bytes{namespace=\"${namespace}\", job=~\"$job\"}) by (quantile)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ quantile }}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Avg upload chunk size by \u03c6-quantile", - "transformations": [], - "transparent": false, - "type": "bargauge" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "#808080", - "index": 0, - "line": 
true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "green", - "index": 1, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" - } - ] - } - } - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 48 - }, - "hideTimeOverride": false, - "id": 29, - "links": [], - "maxDataPoints": 100, - "options": { - "displayMode": "gradient", - "fieldOptions": { - "calcs": [ - "lastNotNull" - ], - "defaults": { - "decimals": null, + "cards": { + "cardPadding": null, + "cardRound": null + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateOranges", + "exponent": 0.5, + "max": null, + "min": null, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [] + } + } + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 29 + }, + "heatmap": {}, + "hideTimeOverride": false, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 24, + "legend": { + "show": false + }, "links": [], - "max": null, - "min": null, - "noValue": "-", - "title": null, - "unit": "bytes" - }, - "limit": null, - "mappings": [], - "override": {}, - "thresholds": [ - { - "color": "#808080", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" + "maxDataPoints": 25, + "reverseYBuckets": false, + "targets": [ + { + "datasource": null, + "expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", + "format": "heatmap", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ le }}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Request duration", + "tooltip": { + "show": true, + "showHistogram": true }, - { - "color": "green", - "index": 1, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" + "transformations": [], + "transparent": false, + "type": "heatmap", + "xAxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yAxis": { + "decimals": null, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true } - ], - "values": false }, - "orientation": "vertical", - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "targets": [ { - "datasource": null, - "expr": "avg(s3_cloudserver_http_response_size_bytes{namespace=\"${namespace}\", job=~\"$job\"}) by (quantile)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ quantile }}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Avg download chunk size by \u03c6-quantile", - "transformations": [], - "transparent": false, - "type": "bargauge" - }, - { - "collapsed": false, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "thresholds": { - "mode": "absolute", - "steps": [] - } - } - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 56 - }, - "hideTimeOverride": false, - "id": 30, - "links": [], - "maxDataPoints": 100, - "panels": [], - "targets": [], - "title": "Errors", - "transformations": [], - "transparent": false, - "type": "row" - }, - { - "datasource": "${DS_LOKI}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": {}, - "mappings": [], - 
"thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 57 - }, - "hideTimeOverride": false, - "id": 31, - "links": [], - "maxDataPoints": 100, - "options": { - "displayLabels": [ - "name" - ], - "legend": { - "displayMode": "table", - "placement": "right", - "values": [ - "value" - ] - }, - "pieType": "donut", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 180000, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 37 + }, + "hideTimeOverride": false, + "id": 25, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": ["max", "mean"], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_request_duration_seconds_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by (action)\n /\nsum(rate(s3_cloudserver_http_request_duration_seconds_count{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by (action)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{action}}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Latencies per S3 action", + "transformations": [], + "transparent": false, + "type": "timeseries" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "topk(10, sum by(bucketName) (\n count_over_time({namespace=\"${namespace}\", pod=~\"$pod\"}\n | json | bucketName!=\"\" and httpCode=\"404\"\n [$__interval])\n))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{bucketName}}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "404 : Top10 by Bucket", - "transformations": [], - "transparent": false, - "type": "piechart" - }, - { - "datasource": "${DS_LOKI}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 57 - }, - "hideTimeOverride": false, - "id": 32, - "links": [], - "maxDataPoints": 100, - "options": { - "displayLabels": [ - "name" - ], - "legend": { - "displayMode": "table", - "placement": "right", - "values": [ - "value" - ] - }, - "pieType": "donut", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "collapsed": false, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + 
"thresholds": { + "mode": "absolute", + "steps": [] + } + } + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 47 + }, + "hideTimeOverride": false, + "id": 26, + "links": [], + "maxDataPoints": 100, + "panels": [], + "targets": [], + "title": "Data rate", + "transformations": [], + "transparent": false, + "type": "row" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "topk(10, sum by(bucketName) (\n count_over_time({namespace=\"${namespace}\", pod=~\"$pod\"}\n | json | bucketName!=\"\" and httpCode=\"500\"\n [$__interval])\n))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{bucketName}}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "500 : Top10 by Bucket", - "transformations": [], - "transparent": false, - "type": "piechart" - }, - { - "datasource": "${DS_LOKI}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 16, - "y": 57 - }, - "hideTimeOverride": false, - "id": 33, - "links": [], - "maxDataPoints": 100, - "options": { - "displayLabels": [ - "name" - ], - "legend": { - "displayMode": "table", - "placement": "right", - "values": [ - "value" - ] - }, - "pieType": "donut", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "red", + "index": 1, + "line": true, + "op": "gt", + "value": 80.0, + "yaxis": "left" + } + ] + }, + "unit": "binBps" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Out" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 48 + }, + "hideTimeOverride": false, + "id": 27, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Out", + "metric": "", + "refId": "", + "step": 10, + "target": "" + }, + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", + "format": "time_series", + "hide": 
false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "In", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Bandwidth", + "transformations": [], + "transparent": false, + "type": "timeseries" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "topk(10, sum by(bucketName) (\n count_over_time({namespace=\"${namespace}\", pod=~\"$pod\"}\n | json | bucketName!=\"\" and httpCode=~\"5..\"\n [$__interval])\n))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{bucketName}}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "5xx : Top10 by Bucket", - "transformations": [], - "transparent": false, - "type": "piechart" - }, - { - "collapsed": false, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "thresholds": { - "mode": "absolute", - "steps": [] - } - } - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 65 - }, - "hideTimeOverride": false, - "id": 34, - "links": [], - "maxDataPoints": 100, - "panels": [], - "targets": [], - "title": "Quotas", - "transformations": [], - "transparent": false, - "type": "row" - }, - { - "datasource": "${DS_PROMETHEUS}", - "description": "Number of S3 buckets with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "-", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "#808080", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "blue", - "index": 1, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 6, - "x": 0, - "y": 66 - }, - "hideTimeOverride": false, - "id": 35, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#808080", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "green", + "index": 1, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + } + ] + } + } + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 48 + }, + "hideTimeOverride": false, + "id": 28, + "links": [], + "maxDataPoints": 100, + "options": { + "displayMode": "gradient", + "fieldOptions": { + "calcs": ["lastNotNull"], + "defaults": { + "decimals": null, + "links": [], + "max": null, + "min": null, + "noValue": "-", + "title": null, + "unit": "bytes" + }, + "limit": null, + "mappings": [], + "override": {}, + "thresholds": [ + { + "color": "#808080", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "green", + "index": 1, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + } + ], + "values": false + }, + "orientation": "vertical", + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "targets": [ + { + "datasource": null, + "expr": 
"avg(s3_cloudserver_http_request_size_bytes{namespace=\"${namespace}\", job=~\"$job\"}) by (quantile)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ quantile }}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Avg upload chunk size by \u03c6-quantile", + "transformations": [], + "transparent": false, + "type": "bargauge" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "max(s3_cloudserver_quota_buckets_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Buckets with quota", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "description": "Number of accounts with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "custom": {}, - "decimals": null, - "mappings": [], - "noValue": "-", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "#808080", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "blue", - "index": 1, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 6, - "x": 0, - "y": 70 - }, - "hideTimeOverride": false, - "id": 36, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#808080", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "green", + "index": 1, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + } + ] + } + } + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 48 + }, + "hideTimeOverride": false, + "id": 29, + "links": [], + "maxDataPoints": 100, + "options": { + "displayMode": "gradient", + "fieldOptions": { + "calcs": ["lastNotNull"], + "defaults": { + "decimals": null, + "links": [], + "max": null, + "min": null, + "noValue": "-", + "title": null, + "unit": "bytes" + }, + "limit": null, + "mappings": [], + "override": {}, + "thresholds": [ + { + "color": "#808080", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "green", + "index": 1, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + } + ], + "values": false + }, + "orientation": "vertical", + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "targets": [ + { + "datasource": null, + "expr": "avg(s3_cloudserver_http_response_size_bytes{namespace=\"${namespace}\", job=~\"$job\"}) by (quantile)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ quantile }}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Avg download chunk size by \u03c6-quantile", + "transformations": [], + 
"transparent": false, + "type": "bargauge" }, - "textMode": "auto" - }, - "targets": [ { - "datasource": null, - "expr": "max(s3_cloudserver_quota_accounts_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Accounts with quota", - "transformations": [], - "transparent": false, - "type": "stat" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 30, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 6, - "y": 66 - }, - "hideTimeOverride": false, - "id": 37, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [], - "displayMode": "hidden", - "placement": "bottom" + "collapsed": false, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [] + } + } + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 56 + }, + "hideTimeOverride": false, + "id": 30, + "links": [], + "maxDataPoints": 100, + "panels": [], + "targets": [], + "title": "Errors", + "transformations": [], + "transparent": false, + "type": "row" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_quota_unavailable_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Operations with unavailable metrics", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 66 - }, - "hideTimeOverride": false, - "id": 38, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [ - "min", - "mean", - "max" - ], - "displayMode": "table", - "placement": "right" + "datasource": "${DS_LOKI}", + "editable": true, + "error": false, + "fieldConfig": { 
+ "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 57 + }, + "hideTimeOverride": false, + "id": 31, + "links": [], + "maxDataPoints": 100, + "options": { + "displayLabels": ["name"], + "legend": { + "displayMode": "table", + "placement": "right", + "values": ["value"] + }, + "pieType": "donut", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "topk(10, sum by(bucketName) (\n count_over_time({namespace=\"${namespace}\", pod=~\"$pod\"}\n | json | bucketName!=\"\" and httpCode=\"404\"\n [$__interval])\n))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{bucketName}}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "404 : Top10 by Bucket", + "transformations": [], + "transparent": false, + "type": "piechart" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval])) by(action)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{action}}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Quota evaluaton rate per S3 action", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 30, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "stepAfter", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "index": 0, - "line": true, - "op": "gt", - "value": "null", - "yaxis": "left" - }, - { - "color": "orange", - "index": 1, - "line": true, - "op": "gt", - "value": 90.0, - "yaxis": "left" - }, - { - "color": "red", - "index": 2, - "line": true, - "op": "gt", - "value": 0.0, - "yaxis": "left" - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 0, - "y": 74 - }, - "hideTimeOverride": false, - "id": 39, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [], - "displayMode": "hidden", - "placement": "bottom" + "datasource": "${DS_LOKI}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 57 + }, + "hideTimeOverride": false, + "id": 32, + "links": [], + "maxDataPoints": 100, + "options": { + "displayLabels": ["name"], + "legend": { + "displayMode": 
"table", + "placement": "right", + "values": ["value"] + }, + "pieType": "donut", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "topk(10, sum by(bucketName) (\n count_over_time({namespace=\"${namespace}\", pod=~\"$pod\"}\n | json | bucketName!=\"\" and httpCode=\"500\"\n [$__interval])\n))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{bucketName}}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "500 : Top10 by Bucket", + "transformations": [], + "transparent": false, + "type": "piechart" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace=\"${namespace}\",job=\"${job}\"}[$__rate_interval])) * 100", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Quota service uptime", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 30, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "ops" + "datasource": "${DS_LOKI}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 57 + }, + "hideTimeOverride": false, + "id": 33, + "links": [], + "maxDataPoints": 100, + "options": { + "displayLabels": ["name"], + "legend": { + "displayMode": "table", + "placement": "right", + "values": ["value"] + }, + "pieType": "donut", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "topk(10, sum by(bucketName) (\n count_over_time({namespace=\"${namespace}\", pod=~\"$pod\"}\n | json | bucketName!=\"\" and httpCode=~\"5..\"\n [$__interval])\n))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{bucketName}}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "5xx : Top10 by Bucket", + "transformations": [], + "transparent": false, + "type": "piechart" }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 6, - "y": 74 - }, - "hideTimeOverride": false, - "id": 40, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" + { + "collapsed": false, + 
"editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [] + } + } + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 65 + }, + "hideTimeOverride": false, + "id": 34, + "links": [], + "maxDataPoints": 100, + "panels": [], + "targets": [], + "title": "Quotas", + "transformations": [], + "transparent": false, + "type": "row" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=~\"2..\", job=\"${job}\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Success", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "datasource": "${DS_PROMETHEUS}", + "description": "Number of S3 buckets with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#808080", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "blue", + "index": 1, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 66 + }, + "hideTimeOverride": false, + "id": 35, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "max(s3_cloudserver_quota_buckets_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Buckets with quota", + "transformations": [], + "transparent": false, + "type": "stat" }, { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=\"429\", job=\"${job}\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Quota Exceeded", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Quota evaluation status code over time", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": 180000, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "s" + "datasource": 
"${DS_PROMETHEUS}", + "description": "Number of accounts with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "decimals": null, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#808080", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "blue", + "index": 1, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 70 + }, + "hideTimeOverride": false, + "id": 36, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": ["lastNotNull"], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "targets": [ + { + "datasource": null, + "expr": "max(s3_cloudserver_quota_accounts_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Accounts with quota", + "transformations": [], + "transparent": false, + "type": "stat" }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 74 - }, - "hideTimeOverride": false, - "id": 41, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [ - "min", - "mean", - "max" - ], - "displayMode": "table", - "placement": "right" + { + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 66 + }, + "hideTimeOverride": false, + "id": 37, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_quota_unavailable_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Operations with unavailable metrics", + "transformations": [], + "transparent": false, + "type": "timeseries" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)\n 
/\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ type }} (success)", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 66 + }, + "hideTimeOverride": false, + "id": 38, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": ["min", "mean", "max"], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval])) by(action)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{action}}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Quota evaluaton rate per S3 action", + "transformations": [], + "transparent": false, + "type": "timeseries" }, { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ type }} (exceeded)", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Average quota evaluation latencies", - "transformations": [], - "transparent": false, - "type": "timeseries" - }, - { - "cards": { - "cardPadding": null, - "cardRound": null - }, - "color": { - "cardColor": "#b4ff00", - "colorScale": "sqrt", - "colorScheme": "interpolateOranges", - "exponent": 0.5, - "max": null, - "min": null, - "mode": "opacity" - }, - "dataFormat": "tsbuckets", - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "thresholds": { - "mode": "absolute", - "steps": [] - } - } - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 0, - "y": 82 - }, - "heatmap": {}, - "hideTimeOverride": false, - "hideZeroBuckets": false, - "highlightCards": true, - "id": 42, - "legend": { - "show": false - }, - "links": [], - "maxDataPoints": 25, - "reverseYBuckets": false, - "targets": [ + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "stepAfter", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "orange", + "index": 1, + "line": true, + "op": "gt", + "value": 90.0, + "yaxis": "left" + }, + { + "color": "red", + "index": 2, + "line": true, + "op": "gt", + "value": 0.0, + "yaxis": "left" + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 74 + }, + "hideTimeOverride": false, + "id": 39, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace=\"${namespace}\",job=\"${job}\"}[$__rate_interval])) * 100", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Quota service uptime", + "transformations": [], + "transparent": false, + "type": "timeseries" + }, { - "datasource": null, - "expr": "sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", - "format": "heatmap", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ le }}", - "metric": "", - "refId": "", - "step": 10, - "target": "" - } - ], - "title": "Quota evaluation duration", - "tooltip": { - "show": true, - "showHistogram": true - }, - "transformations": [], - "transparent": false, - "type": "heatmap", - "xAxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yAxis": { - "decimals": null, - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - }, - { - "datasource": "${DS_PROMETHEUS}", - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "log": 2, - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": 180000, - "stacking": {}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - }, - "unit": "s" + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + 
"lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 74 + }, + "hideTimeOverride": false, + "id": 40, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=~\"2..\", job=\"${job}\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Success", + "metric": "", + "refId": "", + "step": 10, + "target": "" + }, + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=\"429\", job=\"${job}\"}[$__rate_interval]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Quota Exceeded", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Quota evaluation status code over time", + "transformations": [], + "transparent": false, + "type": "timeseries" }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 18, - "x": 6, - "y": 82 - }, - "hideTimeOverride": false, - "id": 43, - "links": [], - "maxDataPoints": 100, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" + { + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 180000, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 74 + }, + "hideTimeOverride": false, + "id": 41, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": ["min", "mean", "max"], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ type }} (success)", + "metric": "", + "refId": "", + "step": 10, + "target": "" + }, + { + "datasource": null, + "expr": 
"sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ type }} (exceeded)", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Average quota evaluation latencies", + "transformations": [], + "transparent": false, + "type": "timeseries" }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ class }} (success)", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "cards": { + "cardPadding": null, + "cardRound": null + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateOranges", + "exponent": 0.5, + "max": null, + "min": null, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [] + } + } + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 82 + }, + "heatmap": {}, + "hideTimeOverride": false, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 42, + "legend": { + "show": false + }, + "links": [], + "maxDataPoints": 25, + "reverseYBuckets": false, + "targets": [ + { + "datasource": null, + "expr": "sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", + "format": "heatmap", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ le }}", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Quota evaluation duration", + "tooltip": { + "show": true, + "showHistogram": true + }, + "transformations": [], + "transparent": false, + "type": "heatmap", + "xAxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yAxis": { + "decimals": null, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } }, { - "datasource": null, - "expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ class }} (error)", - "metric": "", - "refId": "", - "step": 10, - "target": "" + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 180000, + "stacking": {}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 18, + "x": 6, + "y": 82 + }, + "hideTimeOverride": false, + "id": 43, + "links": [], + "maxDataPoints": 100, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ class }} (success)", + "metric": "", + "refId": "", + "step": 10, + "target": "" + }, + { + "datasource": null, + "expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ class }} (error)", + "metric": "", + "refId": "", + "step": 10, + "target": "" + } + ], + "title": "Average utilization metrics retrieval latencies", + "transformations": [], + "transparent": false, + "type": "timeseries" } - ], - "title": "Average utilization metrics retrieval latencies", - "transformations": [], - "transparent": false, - "type": "timeseries" - } - ], - "refresh": "30s", - "rows": [], - "schemaVersion": 12, - "sharedCrosshair": false, - "style": "dark", - "tags": [ - "CloudServer" - ], - "templating": { - "list": [ - { - "allValue": null, - "auto": false, - "auto_count": 30, - "auto_min": "10s", - "current": { - "selected": false, - "tags": [], - "text": null, - "value": null - }, - "datasource": "${DS_PROMETHEUS}", - "hide": 0, - "includeAll": false, - "label": "Group", - "multi": true, - "name": "job", - "options": [], - "query": "label_values(s3_cloudserver_http_active_requests{namespace=\"${namespace}\", container=\"${container}\"}, job)", - "refresh": 1, - "regex": "/(?${zenkoName}-(?\\w*).*)/", - "sort": 1, - "tagValuesQuery": null, - "tagsQuery": null, - "type": "query", - "useTags": false - }, - { - "allValue": null, - "auto": false, - "auto_count": 30, - "auto_min": "10s", - "current": { - "selected": false, - "tags": [], - "text": null, - "value": null - }, - "datasource": "${DS_PROMETHEUS}", - "hide": 2, - "includeAll": false, - "label": "pod", - "multi": false, - "name": "pod", - "options": [], - "query": "label_values(s3_cloudserver_http_active_requests{namespace=\"${namespace}\", container=\"${container}\", job=~\"$job\"}, pod)", - "refresh": 1, - "regex": null, - 
"sort": 1, - "tagValuesQuery": null, - "tagsQuery": null, - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "hidden": false, - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "S3 service", - "uid": null, - "version": 110 + "refresh": "30s", + "rows": [], + "schemaVersion": 12, + "sharedCrosshair": false, + "style": "dark", + "tags": ["CloudServer"], + "templating": { + "list": [ + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "tags": [], + "text": null, + "value": null + }, + "datasource": "${DS_PROMETHEUS}", + "hide": 0, + "includeAll": false, + "label": "Group", + "multi": true, + "name": "job", + "options": [], + "query": "label_values(s3_cloudserver_http_active_requests{namespace=\"${namespace}\", container=\"${container}\"}, job)", + "refresh": 1, + "regex": "/(?${zenkoName}-(?\\w*).*)/", + "sort": 1, + "tagValuesQuery": null, + "tagsQuery": null, + "type": "query", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "tags": [], + "text": null, + "value": null + }, + "datasource": "${DS_PROMETHEUS}", + "hide": 2, + "includeAll": false, + "label": "pod", + "multi": false, + "name": "pod", + "options": [], + "query": "label_values(s3_cloudserver_http_active_requests{namespace=\"${namespace}\", container=\"${container}\", job=~\"$job\"}, pod)", + "refresh": 1, + "regex": null, + "sort": 1, + "tagValuesQuery": null, + "tagsQuery": null, + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "hidden": false, + "refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"], + "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"] + }, + "timezone": "", + "title": "S3 service", + "uid": null, + "version": 110 } diff --git a/package.json b/package.json index a8351f3cb9..3e3aade6cd 100644 --- a/package.json +++ b/package.json @@ -1,132 +1,135 @@ { - "name": "@zenko/cloudserver", - "version": "9.0.21", - "description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol", - "main": "index.js", - "engines": { - "node": ">=22" - }, - "repository": "scality/S3", - "keywords": [ - "s3", - "cloud", - "server" - ], - "author": "Scality Inc.", - "license": "Apache-2.0", - "bugs": { - "url": "https://github.com/scality/S3/issues" - }, - "homepage": "https://github.com/scality/S3#readme", - "dependencies": { - "@azure/storage-blob": "^12.25.0", - "@hapi/joi": "^17.1.1", - "arsenal": "git+https://github.com/scality/Arsenal#8.2.26", - "async": "2.6.4", - "aws-sdk": "^2.1692.0", - "bucketclient": "scality/bucketclient#8.2.4", - "bufferutil": "^4.0.8", - "commander": "^12.1.0", - "cron-parser": "^4.9.0", - "diskusage": "^1.2.0", - "google-auto-auth": "^0.10.1", - "http-proxy": "^1.18.1", - "http-proxy-agent": "^7.0.2", - "https-proxy-agent": "^7.0.5", - "level-mem": "^5.0.1", - "moment": "^2.30.1", - "mongodb": "^6.11.0", - "node-fetch": "^2.6.0", - "node-forge": "^1.3.1", - "npm-run-all": "^4.1.5", - "prom-client": "^15.1.3", - "request": "^2.88.2", - "scubaclient": 
"git+https://github.com/scality/scubaclient.git#fb7375a9298bda7df0e9f9ed81d7fc5b363590a9", - "sql-where-parser": "^2.2.1", - "utapi": "github:scality/utapi#8.2.1", - "utf-8-validate": "^6.0.5", - "utf8": "^3.0.0", - "uuid": "^11.0.3", - "vaultclient": "scality/vaultclient#8.5.2", - "werelogs": "scality/werelogs#8.2.0", - "ws": "^8.18.0", - "xml2js": "^0.6.2" - }, - "devDependencies": { - "@eslint/compat": "^1.2.2", - "@scality/eslint-config-scality": "scality/Guidelines#8.3.0", - "eslint": "^9.14.0", - "eslint-plugin-import": "^2.31.0", - "eslint-plugin-mocha": "^10.5.0", - "express": "^4.21.1", - "ioredis": "^5.4.1", - "istanbul": "^0.4.5", - "istanbul-api": "^3.0.0", - "lolex": "^6.0.0", - "mocha": "^10.8.2", - "mocha-junit-reporter": "^2.2.1", - "mocha-multi-reporters": "^1.5.1", - "node-mocks-http": "^1.16.1", - "nyc": "^15.1.0", - "sinon": "^13.0.1", - "tv4": "^1.3.0" - }, - "resolutions": { - "string-width": "4.2.3", - "jsonwebtoken": "^9.0.0", - "nan": "v2.22.0" - }, - "scripts": { - "cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server", - "ft_awssdk": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/ --exit", - "ft_awssdk_aws": "cd tests/functional/aws-node-sdk && AWS_ON_AIR=true mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/ --exit", - "ft_awssdk_buckets": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/bucket --exit", - "ft_awssdk_objects_misc": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/legacy test/object test/service test/support --exit", - "ft_awssdk_versioning": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/versioning/ --exit", - "ft_awssdk_external_backends": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/multipleBackend --exit", - "ft_mixed_bucket_format_version": "cd tests/functional/metadata && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json MixedVersionFormat.js --exit", - "ft_management": "cd tests/functional/report && yarn test", - "ft_backbeat": "cd tests/functional/backbeat && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js --exit", - "ft_node": "cd tests/functional/raw-node && yarn test", - "ft_node_routes": "cd tests/functional/raw-node && yarn run test-routes", - "ft_node_route_backbeat": "cd tests/functional/raw-node && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 test/routes/routeBackbeat.js --exit", - "ft_gcp": "cd tests/functional/raw-node && yarn run test-gcp", - "ft_healthchecks": "cd tests/functional/healthchecks && yarn test", - "ft_s3cmd": "cd tests/functional/s3cmd && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js --exit", - "ft_s3curl": "cd tests/functional/s3curl && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js 
--exit", - "ft_util": "cd tests/functional/utilities && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js --exit", - "ft_test": "npm-run-all -s ft_awssdk ft_s3cmd ft_s3curl ft_node ft_healthchecks ft_management ft_util ft_backbeat", - "ft_search": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 90000 test/mdSearch --exit", - "ft_kmip": "cd tests/functional/kmip && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js --exit", - "ft_kmip_cluster": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 300000 load.js --exit", - "ft_sse_cleanup": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 cleanup.js --exit", - "ft_sse_before_migration": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 cleanup.js beforeMigration.js --exit", - "ft_sse_migration": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 migration.js --exit", - "ft_sse_arn": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 cleanup.js arnPrefix.js --exit", - "install_ft_deps": "yarn install aws-sdk@2.28.0 bluebird@3.3.1 mocha@2.3.4 mocha-junit-reporter@1.23.1 tv4@1.2.7", - "lint": "eslint $(git ls-files '*.js')", - "lint_md": "mdlint $(git ls-files '*.md')", - "mem_backend": "S3BACKEND=mem node index.js", - "start": "npm-run-all --parallel start_dmd start_s3server", - "start_mongo": "yarn run cloudserver", - "start_mdserver": "node mdserver.js", - "start_dataserver": "node dataserver.js", - "start_pfsserver": "node pfsserver.js", - "start_s3server": "node index.js", - "start_dmd": "npm-run-all --parallel start_mdserver start_dataserver", - "start_utapi": "node lib/utapi/utapi.js", - "start_secure_channel_proxy": "node bin/secure_channel_proxy.js", - "start_metrics_server": "node bin/metrics_server.js", - "utapi_replay": "node lib/utapi/utapiReplay.js", - "utapi_reindex": "node lib/utapi/utapiReindex.js", - "management_agent": "node managementAgent.js", - "test": "NO_PROXY=\"test.scality.com,scality.com\" CI=true S3BACKEND=mem S3QUOTA=scuba mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit --exit", - "test_versionid_base62": "S3_VERSION_ID_ENCODING_TYPE=base62 CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit/api --exit", - "test_legacy_location": "CI=true S3QUOTA=scuba S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit --exit", - "test_utapi_v2": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/utapi --exit", - "test_quota": "mocha --reporter mocha-multi-reporters 
--reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/quota --exit", - "multiple_backend_test": "CI=true S3BACKEND=mem S3METADATA=mem S3DATA=multiple mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 20000 --recursive tests/multipleBackend --exit", - "cover": "nyc --clean --silent yarn run", - "postcover": "nyc report --report-dir ./coverage/test --reporter=lcov" - } + "name": "@zenko/cloudserver", + "version": "9.0.21", + "description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol", + "main": "index.js", + "engines": { + "node": ">=22" + }, + "repository": "scality/S3", + "keywords": [ + "s3", + "cloud", + "server" + ], + "author": "Scality Inc.", + "license": "Apache-2.0", + "bugs": { + "url": "https://github.com/scality/S3/issues" + }, + "homepage": "https://github.com/scality/S3#readme", + "dependencies": { + "@azure/storage-blob": "^12.25.0", + "@hapi/joi": "^17.1.1", + "arsenal": "git+https://github.com/scality/Arsenal#8.2.26", + "async": "2.6.4", + "aws-sdk": "^2.1692.0", + "bucketclient": "scality/bucketclient#8.2.4", + "bufferutil": "^4.0.8", + "commander": "^12.1.0", + "cron-parser": "^4.9.0", + "diskusage": "^1.2.0", + "google-auto-auth": "^0.10.1", + "http-proxy": "^1.18.1", + "http-proxy-agent": "^7.0.2", + "https-proxy-agent": "^7.0.5", + "level-mem": "^5.0.1", + "moment": "^2.30.1", + "mongodb": "^6.11.0", + "node-fetch": "^2.6.0", + "node-forge": "^1.3.1", + "npm-run-all": "^4.1.5", + "prom-client": "^15.1.3", + "request": "^2.88.2", + "scubaclient": "git+https://github.com/scality/scubaclient.git#fb7375a9298bda7df0e9f9ed81d7fc5b363590a9", + "sql-where-parser": "^2.2.1", + "utapi": "github:scality/utapi#8.2.1", + "utf-8-validate": "^6.0.5", + "utf8": "^3.0.0", + "uuid": "^11.0.3", + "vaultclient": "scality/vaultclient#8.5.2", + "werelogs": "scality/werelogs#8.2.0", + "ws": "^8.18.0", + "xml2js": "^0.6.2" + }, + "devDependencies": { + "@eslint/compat": "^1.2.2", + "@scality/eslint-config-scality": "scality/Guidelines#8.3.0", + "eslint": "^9.14.0", + "eslint-config-prettier": "^9.1.0", + "eslint-plugin-import": "^2.31.0", + "eslint-plugin-mocha": "^10.5.0", + "prettier": "^3.3.3", + "express": "^4.21.1", + "ioredis": "^5.4.1", + "istanbul": "^0.4.5", + "istanbul-api": "^3.0.0", + "lolex": "^6.0.0", + "mocha": "^10.8.2", + "mocha-junit-reporter": "^2.2.1", + "mocha-multi-reporters": "^1.5.1", + "node-mocks-http": "^1.16.1", + "nyc": "^15.1.0", + "sinon": "^13.0.1", + "tv4": "^1.3.0" + }, + "resolutions": { + "string-width": "4.2.3", + "jsonwebtoken": "^9.0.0", + "nan": "v2.22.0" + }, + "scripts": { + "cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server", + "ft_awssdk": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/ --exit", + "ft_awssdk_aws": "cd tests/functional/aws-node-sdk && AWS_ON_AIR=true mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/ --exit", + "ft_awssdk_buckets": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/bucket --exit", + "ft_awssdk_objects_misc": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/legacy test/object 
test/service test/support --exit", + "ft_awssdk_versioning": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/versioning/ --exit", + "ft_awssdk_external_backends": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json test/multipleBackend --exit", + "ft_mixed_bucket_format_version": "cd tests/functional/metadata && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json MixedVersionFormat.js --exit", + "ft_management": "cd tests/functional/report && yarn test", + "ft_backbeat": "cd tests/functional/backbeat && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js --exit", + "ft_node": "cd tests/functional/raw-node && yarn test", + "ft_node_routes": "cd tests/functional/raw-node && yarn run test-routes", + "ft_node_route_backbeat": "cd tests/functional/raw-node && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 test/routes/routeBackbeat.js --exit", + "ft_gcp": "cd tests/functional/raw-node && yarn run test-gcp", + "ft_healthchecks": "cd tests/functional/healthchecks && yarn test", + "ft_s3cmd": "cd tests/functional/s3cmd && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js --exit", + "ft_s3curl": "cd tests/functional/s3curl && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js --exit", + "ft_util": "cd tests/functional/utilities && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js --exit", + "ft_test": "npm-run-all -s ft_awssdk ft_s3cmd ft_s3curl ft_node ft_healthchecks ft_management ft_util ft_backbeat", + "ft_search": "cd tests/functional/aws-node-sdk && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 90000 test/mdSearch --exit", + "ft_kmip": "cd tests/functional/kmip && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 40000 *.js --exit", + "ft_kmip_cluster": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 300000 load.js --exit", + "ft_sse_cleanup": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 cleanup.js --exit", + "ft_sse_before_migration": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 cleanup.js beforeMigration.js --exit", + "ft_sse_migration": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 migration.js --exit", + "ft_sse_arn": "cd tests/functional/sse-kms-migration && mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 10000 cleanup.js arnPrefix.js --exit", + "install_ft_deps": "yarn install aws-sdk@2.28.0 bluebird@3.3.1 mocha@2.3.4 mocha-junit-reporter@1.23.1 tv4@1.2.7", + "lint": "eslint $(git ls-files '*.js')", 
+ "format": "prettier --write $(git ls-files '*.js' '*.mjs' '*.json' '*.md')", + "check-format": "prettier --check $(git ls-files '*.js' '*.mjs' '*.json' '*.md')", + "mem_backend": "S3BACKEND=mem node index.js", + "start": "npm-run-all --parallel start_dmd start_s3server", + "start_mongo": "yarn run cloudserver", + "start_mdserver": "node mdserver.js", + "start_dataserver": "node dataserver.js", + "start_pfsserver": "node pfsserver.js", + "start_s3server": "node index.js", + "start_dmd": "npm-run-all --parallel start_mdserver start_dataserver", + "start_utapi": "node lib/utapi/utapi.js", + "start_secure_channel_proxy": "node bin/secure_channel_proxy.js", + "start_metrics_server": "node bin/metrics_server.js", + "utapi_replay": "node lib/utapi/utapiReplay.js", + "utapi_reindex": "node lib/utapi/utapiReindex.js", + "management_agent": "node managementAgent.js", + "test": "NO_PROXY=\"test.scality.com,scality.com\" CI=true S3BACKEND=mem S3QUOTA=scuba mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit --exit", + "test_versionid_base62": "S3_VERSION_ID_ENCODING_TYPE=base62 CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit/api --exit", + "test_legacy_location": "CI=true S3QUOTA=scuba S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit --exit", + "test_utapi_v2": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/utapi --exit", + "test_quota": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/quota --exit", + "multiple_backend_test": "CI=true S3BACKEND=mem S3METADATA=mem S3DATA=multiple mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 20000 --recursive tests/multipleBackend --exit", + "cover": "nyc --clean --silent yarn run", + "postcover": "nyc report --report-dir ./coverage/test --reporter=lcov" + } } diff --git a/tests/functional/aws-node-sdk/lib/fixtures/project.js b/tests/functional/aws-node-sdk/lib/fixtures/project.js index 3b71a4ff79..1c1a4bbdda 100644 --- a/tests/functional/aws-node-sdk/lib/fixtures/project.js +++ b/tests/functional/aws-node-sdk/lib/fixtures/project.js @@ -10,9 +10,7 @@ const fakeDataSource = { generateManyBucketNames(numberOfBuckets) { const random = Math.round(Math.random() * 100).toString(); - return Array - .from(Array(numberOfBuckets).keys()) - .map(i => `${baseName}-${random}-${i}`); + return Array.from(Array(numberOfBuckets).keys()).map(i => `${baseName}-${random}-${i}`); }, }; diff --git a/tests/functional/aws-node-sdk/lib/json/mem_credentials.json b/tests/functional/aws-node-sdk/lib/json/mem_credentials.json index 66c4599b8e..b55449f72e 100644 --- a/tests/functional/aws-node-sdk/lib/json/mem_credentials.json +++ b/tests/functional/aws-node-sdk/lib/json/mem_credentials.json @@ -1,18 +1,18 @@ { - "default": { - "accessKey": "accessKey1", - "secretKey": "verySecretKey1" - }, - "lisa": { - "accessKey": "accessKey2", - "secretKey": "verySecretKey2" - }, - "replication": { - "accessKey": "replicationKey1", - "secretKey": "replicationSecretKey1" - }, - "vault": { - "accessKey": "TESTAK00000000000000", - "secretKey": 
"TESTSK0000000000000000000000000000000000" - } + "default": { + "accessKey": "accessKey1", + "secretKey": "verySecretKey1" + }, + "lisa": { + "accessKey": "accessKey2", + "secretKey": "verySecretKey2" + }, + "replication": { + "accessKey": "replicationKey1", + "secretKey": "replicationSecretKey1" + }, + "vault": { + "accessKey": "TESTAK00000000000000", + "secretKey": "TESTSK0000000000000000000000000000000000" + } } diff --git a/tests/functional/aws-node-sdk/lib/utility/bucket-util.js b/tests/functional/aws-node-sdk/lib/utility/bucket-util.js index a95a434162..5e3d913b56 100644 --- a/tests/functional/aws-node-sdk/lib/utility/bucket-util.js +++ b/tests/functional/aws-node-sdk/lib/utility/bucket-util.js @@ -17,7 +17,8 @@ class BucketUtility { bucketExists(bucketName) { return this.s3 - .headBucket({ Bucket: bucketName }).promise() + .headBucket({ Bucket: bucketName }) + .promise() .then(() => true) .catch(err => { if (err.code === 'NotFound') { @@ -29,22 +30,23 @@ class BucketUtility { createOne(bucketName) { return this.s3 - .createBucket({ Bucket: bucketName }).promise() + .createBucket({ Bucket: bucketName }) + .promise() .then(() => bucketName); } createOneWithLock(bucketName) { - return this.s3.createBucket({ - Bucket: bucketName, - ObjectLockEnabledForBucket: true, - }).promise() - .then(() => bucketName); + return this.s3 + .createBucket({ + Bucket: bucketName, + ObjectLockEnabledForBucket: true, + }) + .promise() + .then(() => bucketName); } createMany(bucketNames) { - const promises = bucketNames.map( - bucketName => this.createOne(bucketName) - ); + const promises = bucketNames.map(bucketName => this.createOne(bucketName)); return Promise.all(promises); } @@ -56,22 +58,17 @@ class BucketUtility { return this.createOne(bucketName); } - const bucketNames = projectFixture - .generateManyBucketNames(nBuckets) - .sort(() => 0.5 - Math.random()); // Simply shuffle array + const bucketNames = projectFixture.generateManyBucketNames(nBuckets).sort(() => 0.5 - Math.random()); // Simply shuffle array return this.createMany(bucketNames); } deleteOne(bucketName) { - return this.s3 - .deleteBucket({ Bucket: bucketName }).promise(); + return this.s3.deleteBucket({ Bucket: bucketName }).promise(); } deleteMany(bucketNames) { - const promises = bucketNames.map( - bucketName => this.deleteOne(bucketName) - ); + const promises = bucketNames.map(bucketName => this.deleteOne(bucketName)); return Promise.all(promises); } @@ -88,73 +85,77 @@ class BucketUtility { }; return this.s3 - .listObjectVersions(param).promise() + .listObjectVersions(param) + .promise() .then(data => Promise.all( - data.Versions - .filter(object => !object.Key.endsWith('/')) + data.Versions.filter(object => !object.Key.endsWith('/')) // remove all objects .map(object => - this.s3.deleteObject({ - Bucket: bucketName, - Key: object.Key, - VersionId: object.VersionId, - }).promise() - .then(() => object) - ) - .concat(data.Versions - .filter(object => object.Key.endsWith('/')) - // remove all directories - .map(object => - this.s3.deleteObject({ + this.s3 + .deleteObject({ Bucket: bucketName, Key: object.Key, VersionId: object.VersionId, - }).promise() + }) + .promise() .then(() => object) + ) + .concat( + data.Versions.filter(object => object.Key.endsWith('/')) + // remove all directories + .map(object => + this.s3 + .deleteObject({ + Bucket: bucketName, + Key: object.Key, + VersionId: object.VersionId, + }) + .promise() + .then(() => object) + ) + ) + .concat( + data.DeleteMarkers.map(object => + this.s3 + .deleteObject({ + Bucket: 
bucketName, + Key: object.Key, + VersionId: object.VersionId, + }) + .promise() + .then(() => object) ) ) - .concat(data.DeleteMarkers - .map(object => - this.s3.deleteObject({ - Bucket: bucketName, - Key: object.Key, - VersionId: object.VersionId, - }).promise() - .then(() => object))) ) ); } emptyMany(bucketNames) { - const promises = bucketNames.map( - bucketName => this.empty(bucketName) - ); + const promises = bucketNames.map(bucketName => this.empty(bucketName)); return Promise.all(promises); } emptyIfExists(bucketName) { - return this.bucketExists(bucketName) - .then(exists => { - if (exists) { - return this.empty(bucketName); - } - return undefined; - }); + return this.bucketExists(bucketName).then(exists => { + if (exists) { + return this.empty(bucketName); + } + return undefined; + }); } emptyManyIfExists(bucketNames) { - const promises = bucketNames.map( - bucketName => this.emptyIfExists(bucketName) - ); + const promises = bucketNames.map(bucketName => this.emptyIfExists(bucketName)); return Promise.all(promises); } getOwner() { return this.s3 - .listBuckets().promise() + .listBuckets() + .promise() .then(data => data.Owner); } } diff --git a/tests/functional/aws-node-sdk/lib/utility/cors-util.js b/tests/functional/aws-node-sdk/lib/utility/cors-util.js index a1d52bf8cc..9e14d3fe6d 100644 --- a/tests/functional/aws-node-sdk/lib/utility/cors-util.js +++ b/tests/functional/aws-node-sdk/lib/utility/cors-util.js @@ -6,11 +6,9 @@ const conf = require('../../../../../lib/Config').config; const transport = conf.https ? https : http; const ipAddress = process.env.IP ? process.env.IP : '127.0.0.1'; -const hostname = process.env.AWS_ON_AIR ? 's3.amazonaws.com' : - ipAddress; +const hostname = process.env.AWS_ON_AIR ? 's3.amazonaws.com' : ipAddress; const port = process.env.AWS_ON_AIR ? 80 : 8000; - const statusCode = { 200: 200, 301: 301, // website redirect @@ -25,11 +23,10 @@ const statusCode = { }; function methodRequest(params, callback) { - const { method, bucket, objectKey, query, headers, code, - headersResponse, headersOmitted, isWebsite } = params; - const websiteHostname = process.env.S3_END_TO_END ? - `${bucket}.s3-website-us-east-1.scality.com` : - `${bucket}.s3-website-us-east-1.amazonaws.com`; + const { method, bucket, objectKey, query, headers, code, headersResponse, headersOmitted, isWebsite } = params; + const websiteHostname = process.env.S3_END_TO_END + ? `${bucket}.s3-website-us-east-1.scality.com` + : `${bucket}.s3-website-us-east-1.amazonaws.com`; const options = { port, @@ -59,32 +56,29 @@ function methodRequest(params, callback) { res.on('end', () => { const total = body.join(''); if (code) { - const message = Number.isNaN(parseInt(code, 10)) ? - `${code}` : ''; + const message = Number.isNaN(parseInt(code, 10)) ? 
`${code}` : ''; assert(total.indexOf(message) > -1, `Expected ${message}`); - assert.deepEqual(res.statusCode, statusCode[code], - `status code expected: ${statusCode[code]}`); + assert.deepEqual(res.statusCode, statusCode[code], `status code expected: ${statusCode[code]}`); } if (headersResponse) { Object.keys(headersResponse).forEach(key => { - assert.deepEqual(res.headers[key], headersResponse[key], - `error header: ${key}`); + assert.deepEqual(res.headers[key], headersResponse[key], `error header: ${key}`); }); } else { - // if no headersResponse provided, should not have these headers - // in the request - ['access-control-allow-origin', + // if no headersResponse provided, should not have these headers + // in the request + [ + 'access-control-allow-origin', 'access-control-allow-methods', 'access-control-allow-credentials', - 'vary'].forEach(key => { - assert.strictEqual(res.headers[key], undefined, - `Error: ${key} should not have value`); - }); + 'vary', + ].forEach(key => { + assert.strictEqual(res.headers[key], undefined, `Error: ${key} should not have value`); + }); } if (headersOmitted) { headersOmitted.forEach(key => { - assert.strictEqual(res.headers[key], undefined, - `Error: ${key} should not have value`); + assert.strictEqual(res.headers[key], undefined, `Error: ${key} should not have value`); }); } return callback(); diff --git a/tests/functional/aws-node-sdk/lib/utility/createEncryptedBucket.js b/tests/functional/aws-node-sdk/lib/utility/createEncryptedBucket.js index efb678f9c8..0b798d9140 100644 --- a/tests/functional/aws-node-sdk/lib/utility/createEncryptedBucket.js +++ b/tests/functional/aws-node-sdk/lib/utility/createEncryptedBucket.js @@ -13,27 +13,29 @@ function safeJSONParse(s) { } function createEncryptedBucket(bucketParams, cb) { - process.stdout.write('Creating encrypted bucket' + - `${bucketParams.Bucket}`); + process.stdout.write('Creating encrypted bucket' + `${bucketParams.Bucket}`); const config = getConfig(); const endpointWithoutHttp = config.endpoint.split('//')[1]; const host = endpointWithoutHttp.split(':')[0]; const port = endpointWithoutHttp.split(':')[1]; let locationConstraint; - if (bucketParams.CreateBucketConfiguration && - bucketParams.CreateBucketConfiguration.LocationConstraint) { - locationConstraint = bucketParams.CreateBucketConfiguration - .LocationConstraint; + if (bucketParams.CreateBucketConfiguration && bucketParams.CreateBucketConfiguration.LocationConstraint) { + locationConstraint = bucketParams.CreateBucketConfiguration.LocationConstraint; } const prog = `${__dirname}/../../../../../bin/create_encrypted_bucket.js`; let args = [ prog, - '-a', config.credentials.accessKeyId, - '-k', config.credentials.secretAccessKey, - '-b', bucketParams.Bucket, - '-h', host, - '-p', port, + '-a', + config.credentials.accessKeyId, + '-k', + config.credentials.secretAccessKey, + '-b', + bucketParams.Bucket, + '-h', + host, + '-p', + port, '-v', ]; if (locationConstraint) { @@ -43,24 +45,27 @@ function createEncryptedBucket(bucketParams, cb) { args = args.concat('-s'); } const body = []; - const child = childProcess.spawn(args[0], args) - .on('exit', () => { - const hasSucceed = body.join('').split('\n').find(item => { - const json = safeJSONParse(item); - const test = !(json instanceof Error) && json.name === 'S3' && - json.statusCode === 200; - if (test) { - return true; + const child = childProcess + .spawn(args[0], args) + .on('exit', () => { + const hasSucceed = body + .join('') + .split('\n') + .find(item => { + const json = 
safeJSONParse(item); + const test = !(json instanceof Error) && json.name === 'S3' && json.statusCode === 200; + if (test) { + return true; + } + return false; + }); + if (!hasSucceed) { + process.stderr.write(`${body.join('')}\n`); + return cb(new Error('Cannot create encrypted bucket')); } - return false; - }); - if (!hasSucceed) { - process.stderr.write(`${body.join('')}\n`); - return cb(new Error('Cannot create encrypted bucket')); - } - return cb(); - }) - .on('error', cb); + return cb(); + }) + .on('error', cb); child.stdout.on('data', chunk => body.push(chunk.toString())); } diff --git a/tests/functional/aws-node-sdk/lib/utility/genMaxSizeMetaHeaders.js b/tests/functional/aws-node-sdk/lib/utility/genMaxSizeMetaHeaders.js index d2ebeff178..65e402ac47 100644 --- a/tests/functional/aws-node-sdk/lib/utility/genMaxSizeMetaHeaders.js +++ b/tests/functional/aws-node-sdk/lib/utility/genMaxSizeMetaHeaders.js @@ -3,12 +3,10 @@ const constants = require('../../../../../constants'); function genMaxSizeMetaHeaders() { const metaHeaders = {}; const counter = 8; - const bytesPerHeader = - (constants.maximumMetaHeadersSize / counter); + const bytesPerHeader = constants.maximumMetaHeadersSize / counter; for (let i = 0; i < counter; i++) { const key = `header${i}`; - const valueLength = bytesPerHeader - - ('x-amz-meta-'.length + key.length); + const valueLength = bytesPerHeader - ('x-amz-meta-'.length + key.length); metaHeaders[key] = '0'.repeat(valueLength); } return metaHeaders; diff --git a/tests/functional/aws-node-sdk/lib/utility/provideRawOutput.js b/tests/functional/aws-node-sdk/lib/utility/provideRawOutput.js index 403d31777d..2e3e98114f 100644 --- a/tests/functional/aws-node-sdk/lib/utility/provideRawOutput.js +++ b/tests/functional/aws-node-sdk/lib/utility/provideRawOutput.js @@ -26,15 +26,13 @@ function provideRawOutput(args, cb) { httpCode = lines.find(line => { const trimmed = line.trim().toUpperCase(); // ignore 100 Continue HTTP code - if (trimmed.startsWith('HTTP/1.1 ') && - !trimmed.includes('100 CONTINUE')) { + if (trimmed.startsWith('HTTP/1.1 ') && !trimmed.includes('100 CONTINUE')) { return true; } return false; }); if (httpCode) { - httpCode = httpCode.trim().replace('HTTP/1.1 ', '') - .toUpperCase(); + httpCode = httpCode.trim().replace('HTTP/1.1 ', '').toUpperCase(); } } return cb(httpCode, procData); diff --git a/tests/functional/aws-node-sdk/lib/utility/replication.js b/tests/functional/aws-node-sdk/lib/utility/replication.js index 91a8a1ad20..9725ed5b55 100644 --- a/tests/functional/aws-node-sdk/lib/utility/replication.js +++ b/tests/functional/aws-node-sdk/lib/utility/replication.js @@ -1,16 +1,6 @@ const replicationUtils = { - requiredConfigProperties: [ - 'Role', - 'Rules', - 'Status', - 'Prefix', - 'Destination', - 'Bucket', - ], - optionalConfigProperties: [ - 'ID', - 'StorageClass', - ], + requiredConfigProperties: ['Role', 'Rules', 'Status', 'Prefix', 'Destination', 'Bucket'], + optionalConfigProperties: ['ID', 'StorageClass'], invalidRoleARNs: [ '', '*:aws:iam::account-id:role/resource', @@ -33,7 +23,6 @@ const replicationUtils = { 'arn:aws:iam::ac:role', 'arn:aws:iam::a c:role', 'arn:aws:iam::*:role', - ], invalidBucketARNs: [ '', @@ -46,18 +35,9 @@ const replicationUtils = { 'arn:aws:s3:::*', 'arn:aws:s3:::invalidBucketName', ], - validStatuses: [ - 'Enabled', - 'Disabled', - ], - validStorageClasses: [ - 'STANDARD', - 'STANDARD_IA', - 'REDUCED_REDUNDANCY', - ], - validMultipleStorageClasses: [ - 'zenko,us-east-2', - ], + validStatuses: ['Enabled', 'Disabled'], + 
validStorageClasses: ['STANDARD', 'STANDARD_IA', 'REDUCED_REDUNDANCY'], + validMultipleStorageClasses: ['zenko,us-east-2'], }; module.exports = replicationUtils; diff --git a/tests/functional/aws-node-sdk/lib/utility/tagging.js b/tests/functional/aws-node-sdk/lib/utility/tagging.js index bad7cb17ca..aac756cd09 100644 --- a/tests/functional/aws-node-sdk/lib/utility/tagging.js +++ b/tests/functional/aws-node-sdk/lib/utility/tagging.js @@ -1,12 +1,13 @@ const taggingTests = [ - { tag: { key: '+- =._:/', value: '+- =._:/' }, - it: 'should return tags if tags are valid' }, - { tag: { key: 'key1', value: '' }, - it: 'should return tags if value is an empty string' }, - { tag: { key: 'w'.repeat(129), value: 'foo' }, + { tag: { key: '+- =._:/', value: '+- =._:/' }, it: 'should return tags if tags are valid' }, + { tag: { key: 'key1', value: '' }, it: 'should return tags if value is an empty string' }, + { + tag: { key: 'w'.repeat(129), value: 'foo' }, error: 'InvalidTag', - it: 'should return InvalidTag if key length is greater than 128' }, - { tag: { key: 'bar', value: 'f'.repeat(257) }, + it: 'should return InvalidTag if key length is greater than 128', + }, + { + tag: { key: 'bar', value: 'f'.repeat(257) }, error: 'InvalidTag', it: 'should return InvalidTag if key length is greater than 256', }, diff --git a/tests/functional/aws-node-sdk/lib/utility/versioning-util.js b/tests/functional/aws-node-sdk/lib/utility/versioning-util.js index d3e29628a3..a2818fe025 100644 --- a/tests/functional/aws-node-sdk/lib/utility/versioning-util.js +++ b/tests/functional/aws-node-sdk/lib/utility/versioning-util.js @@ -16,61 +16,69 @@ function _deleteVersionList(versionList, bucket, callback) { const params = { Bucket: bucket, Delete: { Objects: [] } }; versionList.forEach(version => { params.Delete.Objects.push({ - Key: version.Key, VersionId: version.VersionId }); + Key: version.Key, + VersionId: version.VersionId, + }); }); return s3.deleteObjects(params, callback); } function checkOneVersion(s3, bucket, versionId, callback) { - return s3.listObjectVersions({ Bucket: bucket }, - (err, data) => { - if (err) { - callback(err); - } - assert.strictEqual(data.Versions.length, 1); - if (versionId) { - assert.strictEqual(data.Versions[0].VersionId, versionId); - } - assert.strictEqual(data.DeleteMarkers.length, 0); - callback(); - }); + return s3.listObjectVersions({ Bucket: bucket }, (err, data) => { + if (err) { + callback(err); + } + assert.strictEqual(data.Versions.length, 1); + if (versionId) { + assert.strictEqual(data.Versions[0].VersionId, versionId); + } + assert.strictEqual(data.DeleteMarkers.length, 0); + callback(); + }); } function removeAllVersions(params, callback) { const bucket = params.Bucket; - async.waterfall([ - cb => s3.listObjectVersions(params, cb), - (data, cb) => _deleteVersionList(data.DeleteMarkers, bucket, - err => cb(err, data)), - (data, cb) => _deleteVersionList(data.Versions, bucket, - err => cb(err, data)), - (data, cb) => { - if (data.IsTruncated) { - const params = { - Bucket: bucket, - KeyMarker: data.NextKeyMarker, - VersionIdMarker: data.NextVersionIdMarker, - }; - return removeAllVersions(params, cb); - } - return cb(); - }, - ], callback); + async.waterfall( + [ + cb => s3.listObjectVersions(params, cb), + (data, cb) => _deleteVersionList(data.DeleteMarkers, bucket, err => cb(err, data)), + (data, cb) => _deleteVersionList(data.Versions, bucket, err => cb(err, data)), + (data, cb) => { + if (data.IsTruncated) { + const params = { + Bucket: bucket, + KeyMarker: 
data.NextKeyMarker, + VersionIdMarker: data.NextVersionIdMarker, + }; + return removeAllVersions(params, cb); + } + return cb(); + }, + ], + callback + ); } function suspendVersioning(bucket, callback) { - s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, callback); + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + callback + ); } function enableVersioning(bucket, callback) { - s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, callback); + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + callback + ); } function enableVersioningThenPutObject(bucket, object, callback) { @@ -100,36 +108,40 @@ function enableVersioningThenPutObject(bucket, object, callback) { * @return {undefined} - and call callback */ function createDualNullVersion(s3, bucketName, keyName, cb) { - async.waterfall([ - // put null version - next => s3.putObject({ Bucket: bucketName, Key: keyName }, - err => next(err)), - next => enableVersioning(bucketName, err => next(err)), - // should store null version as separate version before - // putting new version - next => s3.putObject({ Bucket: bucketName, Key: keyName }, - (err, data) => { - assert.strictEqual(err, null, - 'Unexpected err putting new version'); - assert(data.VersionId); - next(null, data.VersionId); - }), - // delete version we just created, master version should be updated - // with value of next most recent version: null version previously put - (versionId, next) => s3.deleteObject({ - Bucket: bucketName, - Key: keyName, - VersionId: versionId, - }, err => next(err)), - // getting object should return null version now - next => s3.getObject({ Bucket: bucketName, Key: keyName }, - (err, data) => { - assert.strictEqual(err, null, - 'Unexpected err getting latest version'); - assert.strictEqual(data.VersionId, 'null'); - next(); - }), - ], err => cb(err)); + async.waterfall( + [ + // put null version + next => s3.putObject({ Bucket: bucketName, Key: keyName }, err => next(err)), + next => enableVersioning(bucketName, err => next(err)), + // should store null version as separate version before + // putting new version + next => + s3.putObject({ Bucket: bucketName, Key: keyName }, (err, data) => { + assert.strictEqual(err, null, 'Unexpected err putting new version'); + assert(data.VersionId); + next(null, data.VersionId); + }), + // delete version we just created, master version should be updated + // with value of next most recent version: null version previously put + (versionId, next) => + s3.deleteObject( + { + Bucket: bucketName, + Key: keyName, + VersionId: versionId, + }, + err => next(err) + ), + // getting object should return null version now + next => + s3.getObject({ Bucket: bucketName, Key: keyName }, (err, data) => { + assert.strictEqual(err, null, 'Unexpected err getting latest version'); + assert.strictEqual(data.VersionId, 'null'); + next(); + }), + ], + err => cb(err) + ); } module.exports = { diff --git a/tests/functional/aws-node-sdk/lib/utility/website-util.js b/tests/functional/aws-node-sdk/lib/utility/website-util.js index 0b48f0e4c6..003c95851f 100644 --- a/tests/functional/aws-node-sdk/lib/utility/website-util.js +++ b/tests/functional/aws-node-sdk/lib/utility/website-util.js @@ -6,8 +6,7 @@ const url = require('url'); const { makeRequest } = require('../../../raw-node/utils/makeRequest'); -const bucketName = process.env.AWS_ON_AIR ? 
'awsbucketwebsitetester' : - 'bucketwebsitetester'; +const bucketName = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : 'bucketwebsitetester'; let awsCredentials; function _parseConfigValue(string, fileSlice) { @@ -31,10 +30,8 @@ function _retrieveAWSCredentials(profile) { const fileContents = file.split('\n'); const profileIndex = file.indexOf(`[${profile}]`); if (profileIndex > -1) { - const accessKey = _parseConfigValue('aws_access_key_id', - fileContents.slice(profileIndex)); - const secretKey = _parseConfigValue('aws_secret_access_key', - fileContents.slice(profileIndex)); + const accessKey = _parseConfigValue('aws_access_key_id', fileContents.slice(profileIndex)); + const secretKey = _parseConfigValue('aws_secret_access_key', fileContents.slice(profileIndex)); return { accessKey, secretKey }; } const msg = `Profile ${profile} does not exist in AWS credential file`; @@ -70,46 +67,41 @@ function _assertResponseHtml(response, elemtag, content) { const startIndex = response.indexOf(startingTag); const endIndex = response.indexOf(''); assert(startIndex > -1 && endIndex > -1, 'Did not find ul element'); - const ulElem = response.slice(startIndex + startingTag.length, - endIndex); + const ulElem = response.slice(startIndex + startingTag.length, endIndex); content.forEach(item => { _assertResponseHtml(ulElem, 'li', item); }); } else { const elem = `<${elemtag}>${content}`; - assert(response.includes(elem), - `Expected but did not find '${elem}' in html`); + assert(response.includes(elem), `Expected but did not find '${elem}' in html`); } } function _assertContainsHtml(responseBody) { - assert(responseBody.startsWith('') && - responseBody.includes(''), 'Did not find html tags'); + assert(responseBody.startsWith('') && responseBody.includes(''), 'Did not find html tags'); } function _assertResponseHtml404(method, response, type) { assert.strictEqual(response.statusCode, 404); if (method === 'HEAD') { if (type === '404-no-such-bucket') { - assert.strictEqual(response.headers['x-amz-error-code'], - 'NoSuchBucket'); + assert.strictEqual(response.headers['x-amz-error-code'], 'NoSuchBucket'); // Need arsenal fixed to remove period at the end // so compatible with aws - assert.strictEqual(response.headers['x-amz-error-message'], - 'The specified bucket does not exist.'); + assert.strictEqual(response.headers['x-amz-error-message'], 'The specified bucket does not exist.'); } else if (type === '404-no-such-website-configuration') { - assert.strictEqual(response.headers['x-amz-error-code'], - 'NoSuchWebsiteConfiguration'); - assert.strictEqual(response.headers['x-amz-error-message'], - 'The specified bucket does not have a website configuration'); + assert.strictEqual(response.headers['x-amz-error-code'], 'NoSuchWebsiteConfiguration'); + assert.strictEqual( + response.headers['x-amz-error-message'], + 'The specified bucket does not have a website configuration' + ); } else if (type === '404-not-found') { - assert.strictEqual(response.headers['x-amz-error-code'], - 'NoSuchKey'); - assert.strictEqual(response.headers['x-amz-error-message'], - 'The specified key does not exist.'); + assert.strictEqual(response.headers['x-amz-error-code'], 'NoSuchKey'); + assert.strictEqual(response.headers['x-amz-error-message'], 'The specified key does not exist.'); } else { - throw new Error(`'${type}' is not a recognized 404 ` + - 'error checked in the WebsiteConfigTester.checkHTML function'); + throw new Error( + `'${type}' is not a recognized 404 ` + 'error checked in the WebsiteConfigTester.checkHTML function' + 
); } // don't need to check HTML for head requests return; @@ -126,18 +118,15 @@ function _assertResponseHtml404(method, response, type) { } else if (type === '404-no-such-website-configuration') { _assertResponseHtml(response.body, 'ul', [ 'Code: NoSuchWebsiteConfiguration', - 'Message: The specified bucket does not have a ' + - 'website configuration', + 'Message: The specified bucket does not have a ' + 'website configuration', `BucketName: ${bucketName}`, ]); } else if (type === '404-not-found') { - _assertResponseHtml(response.body, 'ul', [ - 'Code: NoSuchKey', - 'Message: The specified key does not exist.', - ]); + _assertResponseHtml(response.body, 'ul', ['Code: NoSuchKey', 'Message: The specified key does not exist.']); } else { - throw new Error(`'${type}' is not a recognized 404 ` + - 'error checked in the WebsiteConfigTester.checkHTML function'); + throw new Error( + `'${type}' is not a recognized 404 ` + 'error checked in the WebsiteConfigTester.checkHTML function' + ); } } @@ -145,38 +134,35 @@ function _assertResponseHtml403(method, response, type) { assert.strictEqual(response.statusCode, 403); if (method === 'HEAD') { if (type === '403-access-denied') { - assert.strictEqual(response.headers['x-amz-error-code'], - 'AccessDenied'); - assert.strictEqual(response.headers['x-amz-error-message'], - 'Access Denied'); + assert.strictEqual(response.headers['x-amz-error-code'], 'AccessDenied'); + assert.strictEqual(response.headers['x-amz-error-message'], 'Access Denied'); } else if (type !== '403-retrieve-error-document') { - throw new Error(`'${type}' is not a recognized 403 ` + - 'error checked in the WebsiteConfigTester.checkHTML function'); + throw new Error( + `'${type}' is not a recognized 403 ` + 'error checked in the WebsiteConfigTester.checkHTML function' + ); } } else { _assertContainsHtml(response.body); _assertResponseHtml(response.body, 'title', '403 Forbidden'); _assertResponseHtml(response.body, 'h1', '403 Forbidden'); - _assertResponseHtml(response.body, 'ul', [ - 'Code: AccessDenied', - 'Message: Access Denied', - ]); + _assertResponseHtml(response.body, 'ul', ['Code: AccessDenied', 'Message: Access Denied']); if (type === '403-retrieve-error-document') { - _assertResponseHtml(response.body, 'h3', - 'An Error Occurred While Attempting to ' + - 'Retrieve a Custom Error Document'); + _assertResponseHtml( + response.body, + 'h3', + 'An Error Occurred While Attempting to ' + 'Retrieve a Custom Error Document' + ); // start searching for second `ul` element after `h3` element const startingTag = ''; - const startIndex = response.body.indexOf(startingTag) - + startingTag.length; - _assertResponseHtml(response.body.slice(startIndex), - 'ul', [ + const startIndex = response.body.indexOf(startingTag) + startingTag.length; + _assertResponseHtml(response.body.slice(startIndex), 'ul', [ 'Code: AccessDenied', 'Message: Access Denied', ]); } else if (type !== '403-access-denied') { - throw new Error(`'${type}' is not a recognized 403 ` + - 'error checked in the WebsiteConfigTester.checkHTML function'); + throw new Error( + `'${type}' is not a recognized 403 ` + 'error checked in the WebsiteConfigTester.checkHTML function' + ); } } } @@ -187,22 +173,17 @@ function _assertResponseHtmlErrorUser(response, type) { } else if (type === 'error-user-404') { assert.strictEqual(response.statusCode, 404); } - _assertResponseHtml(response.body, 'title', - 'Error!!'); - _assertResponseHtml(response.body, 'h1', - 'It appears you messed up'); + _assertResponseHtml(response.body, 'title', 
'Error!!'); + _assertResponseHtml(response.body, 'h1', 'It appears you messed up'); } function _assertResponseHtmlIndexUser(response) { assert.strictEqual(response.statusCode, 200); - _assertResponseHtml(response.body, 'title', - 'Best testing website ever'); - _assertResponseHtml(response.body, 'h1', 'Welcome to my ' + - 'extraordinary bucket website testing page'); + _assertResponseHtml(response.body, 'title', 'Best testing website ever'); + _assertResponseHtml(response.body, 'h1', 'Welcome to my ' + 'extraordinary bucket website testing page'); } -function _assertResponseHtmlRedirect(response, type, redirectUrl, method, - expectedHeaders) { +function _assertResponseHtmlRedirect(response, type, redirectUrl, method, expectedHeaders) { if (type === 'redirect' || type === 'redirect-user') { assert.strictEqual(response.statusCode, 301); assert.strictEqual(response.body, ''); @@ -213,13 +194,10 @@ function _assertResponseHtmlRedirect(response, type, redirectUrl, method, return; // no need to check HTML } - _assertResponseHtml(response.body, 'title', - 'Best redirect link ever'); - _assertResponseHtml(response.body, 'h1', - 'Welcome to your redirection file'); + _assertResponseHtml(response.body, 'title', 'Best redirect link ever'); + _assertResponseHtml(response.body, 'h1', 'Welcome to your redirection file'); } else if (type.startsWith('redirect-error')) { - assert.strictEqual(response.statusCode, - type === 'redirect-error-found' ? 302 : 301); + assert.strictEqual(response.statusCode, type === 'redirect-error-found' ? 302 : 301); assert.strictEqual(response.headers.location, redirectUrl); Object.entries(expectedHeaders || {}).forEach(([key, val]) => { assert.strictEqual(response.headers[key], val); @@ -227,21 +205,18 @@ function _assertResponseHtmlRedirect(response, type, redirectUrl, method, if (type === 'redirect-error-found') { assert.strictEqual(response.headers['x-amz-error-code'], 'Found'); - assert.strictEqual(response.headers['x-amz-error-message'], - 'Resource Found'); + assert.strictEqual(response.headers['x-amz-error-message'], 'Resource Found'); _assertContainsHtml(response.body); _assertResponseHtml(response.body, 'title', '302 Found'); _assertResponseHtml(response.body, 'h1', '302 Found'); - _assertResponseHtml(response.body, 'ul', [ - 'Code: Found', - 'Message: Resource Found', - ]); + _assertResponseHtml(response.body, 'ul', ['Code: Found', 'Message: Resource Found']); } else { _assertResponseHtmlErrorUser(response, type); } } else { - throw new Error(`'${type}' is not a recognized redirect type ` + - 'checked in the WebsiteConfigTester.checkHTML function'); + throw new Error( + `'${type}' is not a recognized redirect type ` + 'checked in the WebsiteConfigTester.checkHTML function' + ); } } @@ -280,21 +255,20 @@ class WebsiteConfigTester { } /** checkHTML - check response for website head or get - * @param {object} params - function params - * @param {string} params.method - type of website request, 'HEAD' or 'GET' - * @param {string} params.responseType - type of response expected - * @param {string} [params.auth] - whether to use valid or invalid auth - * crendentials: 'valid credentials' or 'invalid credentials' - * @param {string} [params.url] - request url - * @param {string} [params.redirectUrl] - redirect - * @param {object} [params.expectedHeaders] - expected headers in response - * with expected values (e.g., {x-amz-error-code: AccessDenied}) - * @param {function} callback - callback - * @return {undefined} - */ + * @param {object} params - function params + * 
@param {string} params.method - type of website request, 'HEAD' or 'GET' + * @param {string} params.responseType - type of response expected + * @param {string} [params.auth] - whether to use valid or invalid auth + * crendentials: 'valid credentials' or 'invalid credentials' + * @param {string} [params.url] - request url + * @param {string} [params.redirectUrl] - redirect + * @param {object} [params.expectedHeaders] - expected headers in response + * with expected values (e.g., {x-amz-error-code: AccessDenied}) + * @param {function} callback - callback + * @return {undefined} + */ static checkHTML(params, callback) { - const { method, responseType, auth, url, redirectUrl, expectedHeaders } - = params; + const { method, responseType, auth, url, redirectUrl, expectedHeaders } = params; _makeWebsiteRequest(auth, method, url, (err, res) => { assert.strictEqual(err, null, `Unexpected request err ${err}`); if (responseType) { @@ -305,20 +279,20 @@ class WebsiteConfigTester { } else if (responseType.startsWith('error-user')) { _assertResponseHtmlErrorUser(res, responseType); } else if (responseType.startsWith('redirect')) { - _assertResponseHtmlRedirect(res, responseType, redirectUrl, - method, expectedHeaders); + _assertResponseHtmlRedirect(res, responseType, redirectUrl, method, expectedHeaders); if (responseType === 'redirect-user') { process.stdout.write('Following redirect location\n'); - return this.checkHTML({ method, - url: res.headers.location, - responseType: 'redirected-user' }, - callback); + return this.checkHTML( + { method, url: res.headers.location, responseType: 'redirected-user' }, + callback + ); } } else if (responseType === 'index-user') { _assertResponseHtmlIndexUser(res); } else { - throw new Error(`'${responseType}' is not a response ` + - 'type recognized by WebsiteConfigTester.checkHTML'); + throw new Error( + `'${responseType}' is not a response ` + 'type recognized by WebsiteConfigTester.checkHTML' + ); } } return callback(); @@ -336,58 +310,63 @@ class WebsiteConfigTester { * @param {function} cb - callback to end test * @return {undefined} */ - static makeHeadRequest(auth, url, expectedStatusCode, expectedHeaders, - cb) { + static makeHeadRequest(auth, url, expectedStatusCode, expectedHeaders, cb) { _makeWebsiteRequest(auth, 'HEAD', url, (err, res) => { // body should be empty assert.deepStrictEqual(res.body, ''); assert.strictEqual(res.statusCode, expectedStatusCode); const headers = Object.keys(expectedHeaders); headers.forEach(header => { - assert.strictEqual(res.headers[header], - expectedHeaders[header]); + assert.strictEqual(res.headers[header], expectedHeaders[header]); }); return cb(); }); } static createPutBucketWebsite(s3, bucket, bucketACL, objects, done) { - s3.createBucket({ Bucket: bucket, ACL: bucketACL }, - err => { + s3.createBucket({ Bucket: bucket, ACL: bucketACL }, err => { if (err) { return done(err); } - const webConfig = new WebsiteConfigTester('index.html', - 'error.html'); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { + const webConfig = new WebsiteConfigTester('index.html', 'error.html'); + return s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { if (err) { return done(err); } - return async.forEachOf(objects, - (acl, object, next) => { - s3.putObject({ Bucket: bucket, - Key: `${object}.html`, - ACL: acl, - Body: fs.readFileSync(path.join(__dirname, - `/../../test/object/websiteFiles/${object}.html`)), + return async.forEachOf( + objects, + (acl, object, next) => 
{ + s3.putObject( + { + Bucket: bucket, + Key: `${object}.html`, + ACL: acl, + Body: fs.readFileSync( + path.join(__dirname, `/../../test/object/websiteFiles/${object}.html`) + ), + }, + next + ); }, - next); - }, done); + done + ); }); }); } static deleteObjectsThenBucket(s3, bucket, objects, done) { - async.forEachOf(objects, (acl, object, next) => { - s3.deleteObject({ Bucket: bucket, - Key: `${object}.html` }, next); - }, err => { - if (err) { - return done(err); + async.forEachOf( + objects, + (acl, object, next) => { + s3.deleteObject({ Bucket: bucket, Key: `${object}.html` }, next); + }, + err => { + if (err) { + return done(err); + } + return s3.deleteBucket({ Bucket: bucket }, done); } - return s3.deleteBucket({ Bucket: bucket }, done); - }); + ); } } diff --git a/tests/functional/aws-node-sdk/schema/bucket.json b/tests/functional/aws-node-sdk/schema/bucket.json index ee69950078..1d6b3933cd 100644 --- a/tests/functional/aws-node-sdk/schema/bucket.json +++ b/tests/functional/aws-node-sdk/schema/bucket.json @@ -1,94 +1,79 @@ { - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "http://jsonschema.net", - "type": "object", - "properties": { - "Contents": { - "id": "http://jsonschema.net/Contents", - "type": "array", - "minItems": 0, - "items": { - "id": "http://jsonschema.net/Buckets/0", - "type": "object", - "properties": { - "Key": { - "id": "http://jsonschema.net/Contents/0/Key", + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "http://jsonschema.net", + "type": "object", + "properties": { + "Contents": { + "id": "http://jsonschema.net/Contents", + "type": "array", + "minItems": 0, + "items": { + "id": "http://jsonschema.net/Buckets/0", + "type": "object", + "properties": { + "Key": { + "id": "http://jsonschema.net/Contents/0/Key", + "type": "string" + }, + "LastModified": { + "id": "http://jsonschema.net/Contents/0/LastModified", + "type": "object" + }, + "ETag": { + "id": "http://jsonschema.net/Contents/0/ETag", + "type": "string" + }, + "Size": { + "id": "http://jsonschema.net/Contents/0/Size", + "type": "integer" + }, + "StorageClass": { + "id": "http://jsonschema.net/Contents/0/StorageClass", + "enum": ["STANDARD", "REDUCED_REDUNDANCY", "GLACIER"] + }, + "Owner": { + "id": "http://jsonschema.net/Contents/0/Owner", + "type": "object", + "properties": { + "DisplayName": { + "id": "http://jsonschema.net/Contents/0/Owner/DisplayName", + "type": "string" + }, + "ID": { + "id": "http://jsonschema.net/Contents/0/Owner/ID", + "type": "string" + } + }, + "required": ["DisplayName", "ID"] + } + }, + "required": ["Key", "LastModified", "ETag", "Size", "StorageClass", "Owner"] + } + }, + "Marker": { + "id": "http://jsonschema.net/Marker", + "type": "string" + }, + "Name": { + "id": "http://jsonschema.net/Name", "type": "string" - }, - "LastModified": { - "id": "http://jsonschema.net/Contents/0/LastModified", - "type": "object" - }, - "ETag": { - "id": "http://jsonschema.net/Contents/0/ETag", + }, + "Prefix": { + "id": "http://jsonschema.net/Prefix", "type": "string" - }, - "Size": { - "id": "http://jsonschema.net/Contents/0/Size", + }, + "MaxKeys": { + "id": "http://jsonschema.net/MaxKeys", "type": "integer" - }, - "StorageClass": { - "id": "http://jsonschema.net/Contents/0/StorageClass", - "enum": [ "STANDARD", "REDUCED_REDUNDANCY", "GLACIER" ] - }, - "Owner": { - "id": "http://jsonschema.net/Contents/0/Owner", - "type": "object", - "properties": { - "DisplayName": { - "id": "http://jsonschema.net/Contents/0/Owner/DisplayName", - "type": "string" - }, - "ID": 
{ - "id": "http://jsonschema.net/Contents/0/Owner/ID", - "type": "string" - } - }, - "required": [ "DisplayName", "ID" ] - } }, - "required": [ - "Key", - "LastModified", - "ETag", - "Size", - "StorageClass", - "Owner" - ] - } - }, - "Marker": { - "id": "http://jsonschema.net/Marker", - "type": "string" - }, - "Name": { - "id": "http://jsonschema.net/Name", - "type": "string" - }, - "Prefix": { - "id": "http://jsonschema.net/Prefix", - "type": "string" - }, - "MaxKeys": { - "id": "http://jsonschema.net/MaxKeys", - "type": "integer" - }, - "CommonPrefixes": { - "id": "http://jsonschema.net/CommonPrefixes", - "type": "array" + "CommonPrefixes": { + "id": "http://jsonschema.net/CommonPrefixes", + "type": "array" + }, + "IsTruncated": { + "id": "http://jsonschema.net/IsTruncated", + "type": "boolean" + } }, - "IsTruncated": { - "id": "http://jsonschema.net/IsTruncated", - "type": "boolean" - } - }, - "required": [ - "IsTruncated", - "Marker", - "Contents", - "Name", - "Prefix", - "MaxKeys", - "CommonPrefixes" - ] + "required": ["IsTruncated", "Marker", "Contents", "Name", "Prefix", "MaxKeys", "CommonPrefixes"] } diff --git a/tests/functional/aws-node-sdk/schema/bucketV2.json b/tests/functional/aws-node-sdk/schema/bucketV2.json index c868be3318..0594061d1c 100644 --- a/tests/functional/aws-node-sdk/schema/bucketV2.json +++ b/tests/functional/aws-node-sdk/schema/bucketV2.json @@ -1,104 +1,91 @@ { - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "http://jsonschema.net", - "type": "object", - "properties": { - "Contents": { - "id": "http://jsonschema.net/Contents", - "type": "array", - "minItems": 0, - "items": { - "id": "http://jsonschema.net/Buckets/0", - "type": "object", - "properties": { - "Key": { - "id": "http://jsonschema.net/Contents/0/Key", + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "http://jsonschema.net", + "type": "object", + "properties": { + "Contents": { + "id": "http://jsonschema.net/Contents", + "type": "array", + "minItems": 0, + "items": { + "id": "http://jsonschema.net/Buckets/0", + "type": "object", + "properties": { + "Key": { + "id": "http://jsonschema.net/Contents/0/Key", + "type": "string" + }, + "LastModified": { + "id": "http://jsonschema.net/Contents/0/LastModified", + "type": "object" + }, + "ETag": { + "id": "http://jsonschema.net/Contents/0/ETag", + "type": "string" + }, + "Size": { + "id": "http://jsonschema.net/Contents/0/Size", + "type": "integer" + }, + "StorageClass": { + "id": "http://jsonschema.net/Contents/0/StorageClass", + "enum": ["STANDARD", "REDUCED_REDUNDANCY", "GLACIER"] + }, + "Owner": { + "id": "http://jsonschema.net/Contents/0/Owner", + "type": "object", + "properties": { + "DisplayName": { + "id": "http://jsonschema.net/Contents/0/Owner/DisplayName", + "type": "string" + }, + "ID": { + "id": "http://jsonschema.net/Contents/0/Owner/ID", + "type": "string" + } + }, + "required": ["DisplayName", "ID"] + } + }, + "required": ["Key", "LastModified", "ETag", "Size", "StorageClass"] + } + }, + "StartAfter": { + "id": "http://jsonschema.net/StartAfter", + "type": "string" + }, + "ContinuationToken": { + "id": "http://jsonschema.net/ContinuationToken", + "type": "string" + }, + "NextContinuationToken": { + "id": "http://jsonschema.net/NextContinuationToken", "type": "string" - }, - "LastModified": { - "id": "http://jsonschema.net/Contents/0/LastModified", - "type": "object" - }, - "ETag": { - "id": "http://jsonschema.net/Contents/0/ETag", + }, + "Name": { + "id": "http://jsonschema.net/Name", + "type": "string" + }, + 
"Prefix": { + "id": "http://jsonschema.net/Prefix", "type": "string" - }, - "Size": { - "id": "http://jsonschema.net/Contents/0/Size", + }, + "KeyCount": { + "id": "http://jsonschema.net/KeyCount", "type": "integer" - }, - "StorageClass": { - "id": "http://jsonschema.net/Contents/0/StorageClass", - "enum": [ "STANDARD", "REDUCED_REDUNDANCY", "GLACIER" ] - }, - "Owner": { - "id": "http://jsonschema.net/Contents/0/Owner", - "type": "object", - "properties": { - "DisplayName": { - "id": "http://jsonschema.net/Contents/0/Owner/DisplayName", - "type": "string" - }, - "ID": { - "id": "http://jsonschema.net/Contents/0/Owner/ID", - "type": "string" - } - }, - "required": [ "DisplayName", "ID" ] - } }, - "required": [ - "Key", - "LastModified", - "ETag", - "Size", - "StorageClass" - ] - } - }, - "StartAfter": { - "id": "http://jsonschema.net/StartAfter", - "type": "string" - }, - "ContinuationToken": { - "id": "http://jsonschema.net/ContinuationToken", - "type": "string" - }, - "NextContinuationToken": { - "id": "http://jsonschema.net/NextContinuationToken", - "type": "string" - }, - "Name": { - "id": "http://jsonschema.net/Name", - "type": "string" - }, - "Prefix": { - "id": "http://jsonschema.net/Prefix", - "type": "string" - }, - "KeyCount": { - "id": "http://jsonschema.net/KeyCount", - "type": "integer" - }, - "MaxKeys": { - "id": "http://jsonschema.net/MaxKeys", - "type": "integer" - }, - "CommonPrefixes": { - "id": "http://jsonschema.net/CommonPrefixes", - "type": "array" + "MaxKeys": { + "id": "http://jsonschema.net/MaxKeys", + "type": "integer" + }, + "CommonPrefixes": { + "id": "http://jsonschema.net/CommonPrefixes", + "type": "array" + }, + "IsTruncated": { + "id": "http://jsonschema.net/IsTruncated", + "type": "boolean" + } }, - "IsTruncated": { - "id": "http://jsonschema.net/IsTruncated", - "type": "boolean" - } - }, - "required": [ - "IsTruncated", - "Contents", - "Name", - "Prefix", - "MaxKeys", - "CommonPrefixes" - ] + "required": ["IsTruncated", "Contents", "Name", "Prefix", "MaxKeys", "CommonPrefixes"] } diff --git a/tests/functional/aws-node-sdk/schema/service.json b/tests/functional/aws-node-sdk/schema/service.json index 03b3c1daa6..8cb0a54b1c 100644 --- a/tests/functional/aws-node-sdk/schema/service.json +++ b/tests/functional/aws-node-sdk/schema/service.json @@ -1,48 +1,42 @@ { - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "http://jsonschema.net", - "type": "object", - "properties": { - "Buckets": { - "id": "http://jsonschema.net/Buckets", - "type": "array", - "minItems": 0, - "items": { - "id": "http://jsonschema.net/Buckets/0", - "type": "object", - "properties": { - "Name": { - "id": "http://jsonschema.net/Buckets/0/Name", - "type": "string" - }, - "CreationDate": { - "id": "http://jsonschema.net/Buckets/0/CreationDate", - "type": "object" - } + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "http://jsonschema.net", + "type": "object", + "properties": { + "Buckets": { + "id": "http://jsonschema.net/Buckets", + "type": "array", + "minItems": 0, + "items": { + "id": "http://jsonschema.net/Buckets/0", + "type": "object", + "properties": { + "Name": { + "id": "http://jsonschema.net/Buckets/0/Name", + "type": "string" + }, + "CreationDate": { + "id": "http://jsonschema.net/Buckets/0/CreationDate", + "type": "object" + } + }, + "required": ["Name", "CreationDate"] + } }, - "required": [ - "Name", - "CreationDate" - ] - } - }, - "Owner": { - "id": "http://jsonschema.net/Owner", - "type": "object", - "properties": { - "DisplayName": { - "id": 
"http://jsonschema.net/Owner/DisplayName", - "type": "string" - }, - "ID": { - "id": "http://jsonschema.net/Owner/ID", - "type": "string" + "Owner": { + "id": "http://jsonschema.net/Owner", + "type": "object", + "properties": { + "DisplayName": { + "id": "http://jsonschema.net/Owner/DisplayName", + "type": "string" + }, + "ID": { + "id": "http://jsonschema.net/Owner/ID", + "type": "string" + } + } } - } - } - }, - "required": [ - "Buckets", - "Owner" - ] + }, + "required": ["Buckets", "Owner"] } diff --git a/tests/functional/aws-node-sdk/test/bucket/aclUsingPredefinedGroups.js b/tests/functional/aws-node-sdk/test/bucket/aclUsingPredefinedGroups.js index d4cd4f8be2..cab5eea5b4 100644 --- a/tests/functional/aws-node-sdk/test/bucket/aclUsingPredefinedGroups.js +++ b/tests/functional/aws-node-sdk/test/bucket/aclUsingPredefinedGroups.js @@ -49,50 +49,63 @@ withV4(sigCfg => { // tests for authenticated user(signed) and anonymous user(unsigned) [true, false].forEach(auth => { const authType = auth ? 'authenticated' : 'unauthenticated'; - const grantUri = `uri=${auth ? - constants.allAuthedUsersId : constants.publicId}`; + const grantUri = `uri=${auth ? constants.allAuthedUsersId : constants.publicId}`; // TODO fix flakiness on E2E and re-enable, see CLDSRV-254 - describeSkipIfE2E('PUT Bucket ACL using predefined groups - ' + - `${authType} request`, () => { + describeSkipIfE2E('PUT Bucket ACL using predefined groups - ' + `${authType} request`, () => { const aclParam = { Bucket: testBucket, ACL: 'private', }; - beforeEach(done => s3.createBucket({ - Bucket: testBucket, - }, err => { - assert.ifError(err); - return s3.putObject({ - Bucket: testBucket, - Body: testBody, - Key: ownerObjKey, - }, done); - })); - afterEach(() => ownerAccountBucketUtil.empty(testBucket) - .then(() => ownerAccountBucketUtil.deleteOne(testBucket))); + beforeEach(done => + s3.createBucket( + { + Bucket: testBucket, + }, + err => { + assert.ifError(err); + return s3.putObject( + { + Bucket: testBucket, + Body: testBody, + Key: ownerObjKey, + }, + done + ); + } + ) + ); + afterEach(() => + ownerAccountBucketUtil.empty(testBucket).then(() => ownerAccountBucketUtil.deleteOne(testBucket)) + ); it('should grant read access', done => { - s3.putBucketAcl({ - Bucket: testBucket, - GrantRead: grantUri, - }, err => { - assert.ifError(err); - const param = { Bucket: testBucket }; - awsRequest(auth, 'listObjects', param, cbNoError(done)); - }); + s3.putBucketAcl( + { + Bucket: testBucket, + GrantRead: grantUri, + }, + err => { + assert.ifError(err); + const param = { Bucket: testBucket }; + awsRequest(auth, 'listObjects', param, cbNoError(done)); + } + ); }); it('should grant read access with grant-full-control', done => { - s3.putBucketAcl({ - Bucket: testBucket, - GrantFullControl: grantUri, - }, err => { - assert.ifError(err); - const param = { Bucket: testBucket }; - awsRequest(auth, 'listObjects', param, cbNoError(done)); - }); + s3.putBucketAcl( + { + Bucket: testBucket, + GrantFullControl: grantUri, + }, + err => { + assert.ifError(err); + const param = { Bucket: testBucket }; + awsRequest(auth, 'listObjects', param, cbNoError(done)); + } + ); }); it('should not grant read access', done => { @@ -104,34 +117,39 @@ withV4(sigCfg => { }); it('should grant write access', done => { - s3.putBucketAcl({ - Bucket: testBucket, - GrantWrite: grantUri, - }, err => { - assert.ifError(err); - const param = { + s3.putBucketAcl( + { Bucket: testBucket, - Body: testBody, - Key: testKey, - }; - awsRequest(auth, 'putObject', param, 
cbNoError(done)); - }); + GrantWrite: grantUri, + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + Body: testBody, + Key: testKey, + }; + awsRequest(auth, 'putObject', param, cbNoError(done)); + } + ); }); - it('should grant write access with ' + - 'grant-full-control', done => { - s3.putBucketAcl({ - Bucket: testBucket, - GrantFullControl: grantUri, - }, err => { - assert.ifError(err); - const param = { + it('should grant write access with ' + 'grant-full-control', done => { + s3.putBucketAcl( + { Bucket: testBucket, - Body: testBody, - Key: testKey, - }; - awsRequest(auth, 'putObject', param, cbNoError(done)); - }); + GrantFullControl: grantUri, + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + Body: testBody, + Key: testKey, + }; + awsRequest(auth, 'putObject', param, cbNoError(done)); + } + ); }); it('should not grant write access', done => { @@ -147,68 +165,75 @@ withV4(sigCfg => { }); // TODO: S3C-5656 - itSkipIfE2E('should grant write access on an object not owned ' + - 'by the grantee', done => { - s3.putBucketAcl({ - Bucket: testBucket, - GrantWrite: grantUri, - }, err => { - assert.ifError(err); - const param = { + itSkipIfE2E('should grant write access on an object not owned ' + 'by the grantee', done => { + s3.putBucketAcl( + { Bucket: testBucket, - Body: testBody, - Key: ownerObjKey, - }; - awsRequest(auth, 'putObject', param, cbNoError(done)); - }); + GrantWrite: grantUri, + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + Body: testBody, + Key: ownerObjKey, + }; + awsRequest(auth, 'putObject', param, cbNoError(done)); + } + ); }); - it(`should ${auth ? '' : 'not '}delete object not owned by the` + - 'grantee', done => { - s3.putBucketAcl({ - Bucket: testBucket, - GrantWrite: grantUri, - }, err => { - assert.ifError(err); - const param = { + it(`should ${auth ? 
'' : 'not '}delete object not owned by the` + 'grantee', done => { + s3.putBucketAcl( + { Bucket: testBucket, - Key: ownerObjKey, - }; - awsRequest(auth, 'deleteObject', param, err => { - if (auth) { - assert.ifError(err); - } else { - assert.notStrictEqual(err, null); - assert.strictEqual( - err.statusCode, - errorInstances.AccessDenied.code - ); - } - done(); - }); - }); + GrantWrite: grantUri, + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + Key: ownerObjKey, + }; + awsRequest(auth, 'deleteObject', param, err => { + if (auth) { + assert.ifError(err); + } else { + assert.notStrictEqual(err, null); + assert.strictEqual(err.statusCode, errorInstances.AccessDenied.code); + } + done(); + }); + } + ); }); it('should read bucket acl', done => { - s3.putBucketAcl({ - Bucket: testBucket, - GrantReadACP: grantUri, - }, err => { - assert.ifError(err); - const param = { Bucket: testBucket }; - awsRequest(auth, 'getBucketAcl', param, cbNoError(done)); - }); + s3.putBucketAcl( + { + Bucket: testBucket, + GrantReadACP: grantUri, + }, + err => { + assert.ifError(err); + const param = { Bucket: testBucket }; + awsRequest(auth, 'getBucketAcl', param, cbNoError(done)); + } + ); }); it('should read bucket acl with grant-full-control', done => { - s3.putBucketAcl({ - Bucket: testBucket, - GrantFullControl: grantUri, - }, err => { - assert.ifError(err); - const param = { Bucket: testBucket }; - awsRequest(auth, 'getBucketAcl', param, cbNoError(done)); - }); + s3.putBucketAcl( + { + Bucket: testBucket, + GrantFullControl: grantUri, + }, + err => { + assert.ifError(err); + const param = { Bucket: testBucket }; + awsRequest(auth, 'getBucketAcl', param, cbNoError(done)); + } + ); }); it('should not read bucket acl', done => { @@ -220,31 +245,37 @@ withV4(sigCfg => { }); it('should write bucket acl', done => { - s3.putBucketAcl({ - Bucket: testBucket, - GrantWriteACP: grantUri, - }, err => { - assert.ifError(err); - const param = { + s3.putBucketAcl( + { Bucket: testBucket, - GrantReadACP: `uri=${constants.publicId}`, - }; - awsRequest(auth, 'putBucketAcl', param, cbNoError(done)); - }); + GrantWriteACP: grantUri, + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + GrantReadACP: `uri=${constants.publicId}`, + }; + awsRequest(auth, 'putBucketAcl', param, cbNoError(done)); + } + ); }); it('should write bucket acl with grant-full-control', done => { - s3.putBucketAcl({ - Bucket: testBucket, - GrantFullControl: grantUri, - }, err => { - assert.ifError(err); - const param = { + s3.putBucketAcl( + { Bucket: testBucket, - GrantReadACP: `uri=${constants.publicId}`, - }; - awsRequest(auth, 'putBucketAcl', param, cbNoError(done)); - }); + GrantFullControl: grantUri, + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + GrantReadACP: `uri=${constants.publicId}`, + }; + awsRequest(auth, 'putBucketAcl', param, cbNoError(done)); + } + ); }); it('should not write bucket acl', done => { @@ -259,54 +290,68 @@ withV4(sigCfg => { }); }); - describe('PUT Object ACL using predefined groups - ' + - `${authType} request`, () => { + describe('PUT Object ACL using predefined groups - ' + `${authType} request`, () => { const aclParam = { Bucket: testBucket, Key: testKey, ACL: 'private', }; - beforeEach(done => s3.createBucket({ - Bucket: testBucket, - }, err => { - assert.ifError(err); - return s3.putObject({ - Bucket: testBucket, - Body: testBody, - Key: testKey, - }, done); - })); - afterEach(() => ownerAccountBucketUtil.empty(testBucket) - .then(() 
=> ownerAccountBucketUtil.deleteOne(testBucket))); + beforeEach(done => + s3.createBucket( + { + Bucket: testBucket, + }, + err => { + assert.ifError(err); + return s3.putObject( + { + Bucket: testBucket, + Body: testBody, + Key: testKey, + }, + done + ); + } + ) + ); + afterEach(() => + ownerAccountBucketUtil.empty(testBucket).then(() => ownerAccountBucketUtil.deleteOne(testBucket)) + ); it('should grant read access', done => { - s3.putObjectAcl({ - Bucket: testBucket, - GrantRead: grantUri, - Key: testKey, - }, err => { - assert.ifError(err); - const param = { + s3.putObjectAcl( + { Bucket: testBucket, + GrantRead: grantUri, Key: testKey, - }; - awsRequest(auth, 'getObject', param, cbNoError(done)); - }); + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + Key: testKey, + }; + awsRequest(auth, 'getObject', param, cbNoError(done)); + } + ); }); it('should grant read access with grant-full-control', done => { - s3.putObjectAcl({ - Bucket: testBucket, - GrantFullControl: grantUri, - Key: testKey, - }, err => { - assert.ifError(err); - const param = { + s3.putObjectAcl( + { Bucket: testBucket, + GrantFullControl: grantUri, Key: testKey, - }; - awsRequest(auth, 'getObject', param, cbNoError(done)); - }); + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + Key: testKey, + }; + awsRequest(auth, 'getObject', param, cbNoError(done)); + } + ); }); it('should not grant read access', done => { @@ -321,33 +366,39 @@ withV4(sigCfg => { }); it('should read object acl', done => { - s3.putObjectAcl({ - Bucket: testBucket, - GrantReadACP: grantUri, - Key: testKey, - }, err => { - assert.ifError(err); - const param = { + s3.putObjectAcl( + { Bucket: testBucket, + GrantReadACP: grantUri, Key: testKey, - }; - awsRequest(auth, 'getObjectAcl', param, cbNoError(done)); - }); + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + Key: testKey, + }; + awsRequest(auth, 'getObjectAcl', param, cbNoError(done)); + } + ); }); it('should read object acl with grant-full-control', done => { - s3.putObjectAcl({ - Bucket: testBucket, - GrantFullControl: grantUri, - Key: testKey, - }, err => { - assert.ifError(err); - const param = { + s3.putObjectAcl( + { Bucket: testBucket, + GrantFullControl: grantUri, Key: testKey, - }; - awsRequest(auth, 'getObjectAcl', param, cbNoError(done)); - }); + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + Key: testKey, + }; + awsRequest(auth, 'getObjectAcl', param, cbNoError(done)); + } + ); }); it('should not read object acl', done => { @@ -362,35 +413,41 @@ withV4(sigCfg => { }); it('should write object acl', done => { - s3.putObjectAcl({ - Bucket: testBucket, - GrantWriteACP: grantUri, - Key: testKey, - }, err => { - assert.ifError(err); - const param = { + s3.putObjectAcl( + { Bucket: testBucket, + GrantWriteACP: grantUri, Key: testKey, - GrantReadACP: grantUri, - }; - awsRequest(auth, 'putObjectAcl', param, cbNoError(done)); - }); + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + Key: testKey, + GrantReadACP: grantUri, + }; + awsRequest(auth, 'putObjectAcl', param, cbNoError(done)); + } + ); }); it('should write object acl with grant-full-control', done => { - s3.putObjectAcl({ - Bucket: testBucket, - GrantFullControl: grantUri, - Key: testKey, - }, err => { - assert.ifError(err); - const param = { + s3.putObjectAcl( + { Bucket: testBucket, + GrantFullControl: grantUri, Key: testKey, - GrantReadACP: `uri=${constants.publicId}`, - }; - 
awsRequest(auth, 'putObjectAcl', param, cbNoError(done)); - }); + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBucket, + Key: testKey, + GrantReadACP: `uri=${constants.publicId}`, + }; + awsRequest(auth, 'putObjectAcl', param, cbNoError(done)); + } + ); }); it('should not write object acl', done => { diff --git a/tests/functional/aws-node-sdk/test/bucket/bucketPolicyWithResourceStatements.js b/tests/functional/aws-node-sdk/test/bucket/bucketPolicyWithResourceStatements.js index b60941678b..91aa464a13 100644 --- a/tests/functional/aws-node-sdk/test/bucket/bucketPolicyWithResourceStatements.js +++ b/tests/functional/aws-node-sdk/test/bucket/bucketPolicyWithResourceStatements.js @@ -39,8 +39,9 @@ withV4(sigCfg => { describe('Bucket policies with resource statement', () => { beforeEach(() => ownerAccountBucketUtil.createMany(testBuckets)); - afterEach(() => ownerAccountBucketUtil.emptyMany(testBuckets) - .then(() => ownerAccountBucketUtil.deleteMany(testBuckets))); + afterEach(() => + ownerAccountBucketUtil.emptyMany(testBuckets).then(() => ownerAccountBucketUtil.deleteMany(testBuckets)) + ); it('should allow action on a bucket specified in the policy', done => { const statement = { @@ -54,14 +55,17 @@ withV4(sigCfg => { Version: '2012-10-17', Statement: [statement], }; - s3.putBucketPolicy({ - Bucket: testBuckets[0], - Policy: JSON.stringify(bucketPolicy), - }, err => { - assert.ifError(err); - const param = { Bucket: testBuckets[0] }; - awsRequest(true, 'listObjects', param, cbNoError(done)); - }); + s3.putBucketPolicy( + { + Bucket: testBuckets[0], + Policy: JSON.stringify(bucketPolicy), + }, + err => { + assert.ifError(err); + const param = { Bucket: testBuckets[0] }; + awsRequest(true, 'listObjects', param, cbNoError(done)); + } + ); }); it('should deny action on a bucket not specified in the policy', done => { @@ -76,14 +80,17 @@ withV4(sigCfg => { Version: '2012-10-17', Statement: [statement], }; - s3.putBucketPolicy({ - Bucket: testBuckets[0], - Policy: JSON.stringify(bucketPolicy), - }, err => { - assert.ifError(err); - const param = { Bucket: testBuckets[1] }; - awsRequest(false, 'listObjects', param, cbWithError(done)); - }); + s3.putBucketPolicy( + { + Bucket: testBuckets[0], + Policy: JSON.stringify(bucketPolicy), + }, + err => { + assert.ifError(err); + const param = { Bucket: testBuckets[1] }; + awsRequest(false, 'listObjects', param, cbWithError(done)); + } + ); }); it('should deny action on a bucket specified in the policy', done => { @@ -98,14 +105,17 @@ withV4(sigCfg => { Version: '2012-10-17', Statement: [statement], }; - s3.putBucketPolicy({ - Bucket: testBuckets[0], - Policy: JSON.stringify(bucketPolicy), - }, err => { - assert.ifError(err); - const param = { Bucket: testBuckets[0] }; - awsRequest(false, 'listObjects', param, cbWithError(done)); - }); + s3.putBucketPolicy( + { + Bucket: testBuckets[0], + Policy: JSON.stringify(bucketPolicy), + }, + err => { + assert.ifError(err); + const param = { Bucket: testBuckets[0] }; + awsRequest(false, 'listObjects', param, cbWithError(done)); + } + ); }); it('should allow action on an object specified in the policy', done => { @@ -122,24 +132,30 @@ withV4(sigCfg => { Version: '2012-10-17', Statement: [statement], }; - s3.putBucketPolicy({ - Bucket: testBuckets[0], - Policy: JSON.stringify(bucketPolicy), - }, err => { - assert.ifError(err); - s3.putObject({ + s3.putBucketPolicy( + { Bucket: testBuckets[0], - Body: testBody, - Key: testKey, - }, er => { - assert.ifError(er); - const param = { - Bucket: 
testBuckets[0], - Key: testKey, - }; - awsRequest(false, 'getObject', param, cbNoError(done)); - }); - }); + Policy: JSON.stringify(bucketPolicy), + }, + err => { + assert.ifError(err); + s3.putObject( + { + Bucket: testBuckets[0], + Body: testBody, + Key: testKey, + }, + er => { + assert.ifError(er); + const param = { + Bucket: testBuckets[0], + Key: testKey, + }; + awsRequest(false, 'getObject', param, cbNoError(done)); + } + ); + } + ); }); it('should allow action on an object satisfying the wildcard in the policy', done => { @@ -156,24 +172,30 @@ withV4(sigCfg => { Version: '2012-10-17', Statement: [statement], }; - s3.putBucketPolicy({ - Bucket: testBuckets[0], - Policy: JSON.stringify(bucketPolicy), - }, err => { - assert.ifError(err); - s3.putObject({ + s3.putBucketPolicy( + { Bucket: testBuckets[0], - Body: testBody, - Key: testKey, - }, er => { - assert.ifError(er); - const param = { - Bucket: testBuckets[0], - Key: testKey, - }; - awsRequest(false, 'getObject', param, cbNoError(done)); - }); - }); + Policy: JSON.stringify(bucketPolicy), + }, + err => { + assert.ifError(err); + s3.putObject( + { + Bucket: testBuckets[0], + Body: testBody, + Key: testKey, + }, + er => { + assert.ifError(er); + const param = { + Bucket: testBuckets[0], + Key: testKey, + }; + awsRequest(false, 'getObject', param, cbNoError(done)); + } + ); + } + ); }); it('should deny action on an object specified in the policy', done => { @@ -190,24 +212,30 @@ withV4(sigCfg => { Version: '2012-10-17', Statement: [statement], }; - s3.putBucketPolicy({ - Bucket: testBuckets[0], - Policy: JSON.stringify(bucketPolicy), - }, err => { - assert.ifError(err); - s3.putObject({ + s3.putBucketPolicy( + { Bucket: testBuckets[0], - Body: testBody, - Key: testKey, - }, er => { - assert.ifError(er); - const param = { - Bucket: testBuckets[0], - Key: testKey, - }; - awsRequest(false, 'getObject', param, cbWithError(done)); - }); - }); + Policy: JSON.stringify(bucketPolicy), + }, + err => { + assert.ifError(err); + s3.putObject( + { + Bucket: testBuckets[0], + Body: testBody, + Key: testKey, + }, + er => { + assert.ifError(er); + const param = { + Bucket: testBuckets[0], + Key: testKey, + }; + awsRequest(false, 'getObject', param, cbWithError(done)); + } + ); + } + ); }); it('should deny action on an object not specified in the policy', done => { @@ -223,17 +251,20 @@ withV4(sigCfg => { Version: '2012-10-17', Statement: [statement], }; - s3.putBucketPolicy({ - Bucket: testBuckets[0], - Policy: JSON.stringify(bucketPolicy), - }, err => { - assert.ifError(err); - const param = { + s3.putBucketPolicy( + { Bucket: testBuckets[0], - Key: 'invalidkey', - }; - awsRequest(false, 'getObject', param, cbWithError(done)); - }); + Policy: JSON.stringify(bucketPolicy), + }, + err => { + assert.ifError(err); + const param = { + Bucket: testBuckets[0], + Key: 'invalidkey', + }; + awsRequest(false, 'getObject', param, cbWithError(done)); + } + ); }); it('should deny action on a bucket and an object not specified in the policy', done => { @@ -249,17 +280,20 @@ withV4(sigCfg => { Version: '2012-10-17', Statement: [statement], }; - s3.putBucketPolicy({ - Bucket: testBuckets[0], - Policy: JSON.stringify(bucketPolicy), - }, err => { - assert.ifError(err); - const param = { - Bucket: testBuckets[1], - Key: 'invalidkey', - }; - awsRequest(false, 'getObject', param, cbWithError(done)); - }); + s3.putBucketPolicy( + { + Bucket: testBuckets[0], + Policy: JSON.stringify(bucketPolicy), + }, + err => { + assert.ifError(err); + const param = { + Bucket: 
testBuckets[1], + Key: 'invalidkey', + }; + awsRequest(false, 'getObject', param, cbWithError(done)); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/deleteBucketLifecycle.js b/tests/functional/aws-node-sdk/test/bucket/deleteBucketLifecycle.js index 76bad2ab8b..9b91469c6f 100644 --- a/tests/functional/aws-node-sdk/test/bucket/deleteBucketLifecycle.js +++ b/tests/functional/aws-node-sdk/test/bucket/deleteBucketLifecycle.js @@ -20,11 +20,16 @@ function assertError(err, expectedErr, cb) { if (expectedErr === null) { assert.strictEqual(err, null, `expected no error but got '${err}'`); } else { - assert.strictEqual(err.code, expectedErr, 'incorrect error response ' + - `code: should be '${expectedErr}' but got '${err.code}'`); - assert.strictEqual(err.statusCode, errors[expectedErr].code, - 'incorrect error status code: should be 400 but got ' + - `'${err.statusCode}'`); + assert.strictEqual( + err.code, + expectedErr, + 'incorrect error response ' + `code: should be '${expectedErr}' but got '${err.code}'` + ); + assert.strictEqual( + err.statusCode, + errors[expectedErr].code, + 'incorrect error status code: should be 400 but got ' + `'${err.statusCode}'` + ); } cb(); } @@ -41,8 +46,7 @@ describe('aws-sdk test delete bucket lifecycle', () => { }); it('should return NoSuchBucket error if bucket does not exist', done => { - s3.deleteBucketLifecycle({ Bucket: bucket }, err => - assertError(err, 'NoSuchBucket', done)); + s3.deleteBucketLifecycle({ Bucket: bucket }, err => assertError(err, 'NoSuchBucket', done)); }); describe('config rules', () => { @@ -51,25 +55,22 @@ describe('aws-sdk test delete bucket lifecycle', () => { afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); it('should return AccessDenied if user is not bucket owner', done => { - otherAccountS3.deleteBucketLifecycle({ Bucket: bucket }, - err => assertError(err, 'AccessDenied', done)); + otherAccountS3.deleteBucketLifecycle({ Bucket: bucket }, err => assertError(err, 'AccessDenied', done)); }); it('should return no error if no lifecycle config on bucket', done => { - s3.deleteBucketLifecycle({ Bucket: bucket }, err => - assertError(err, null, done)); + s3.deleteBucketLifecycle({ Bucket: bucket }, err => assertError(err, null, done)); }); it('should delete lifecycle configuration from bucket', done => { - const params = { Bucket: bucket, - LifecycleConfiguration: { Rules: [basicRule] } }; + const params = { Bucket: bucket, LifecycleConfiguration: { Rules: [basicRule] } }; s3.putBucketLifecycleConfiguration(params, err => { assert.equal(err, null); s3.deleteBucketLifecycle({ Bucket: bucket }, err => { assert.equal(err, null); - s3.getBucketLifecycleConfiguration({ Bucket: bucket }, - err => - assertError(err, 'NoSuchLifecycleConfiguration', done)); + s3.getBucketLifecycleConfiguration({ Bucket: bucket }, err => + assertError(err, 'NoSuchLifecycleConfiguration', done) + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/deleteBucketPolicy.js b/tests/functional/aws-node-sdk/test/bucket/deleteBucketPolicy.js index e7cdd2c576..16532c1f53 100644 --- a/tests/functional/aws-node-sdk/test/bucket/deleteBucketPolicy.js +++ b/tests/functional/aws-node-sdk/test/bucket/deleteBucketPolicy.js @@ -8,13 +8,15 @@ const BucketUtility = require('../../lib/utility/bucket-util'); const bucket = 'deletebucketpolicy-test-bucket'; const bucketPolicy = { Version: '2012-10-17', - Statement: [{ - Sid: 'testid', - Effect: 'Allow', - Principal: '*', - Action: 's3:putBucketPolicy', - Resource: 
`arn:aws:s3:::${bucket}`, - }], + Statement: [ + { + Sid: 'testid', + Effect: 'Allow', + Principal: '*', + Action: 's3:putBucketPolicy', + Resource: `arn:aws:s3:::${bucket}`, + }, + ], }; // Check for the expected error response code and status code. @@ -22,11 +24,16 @@ function assertError(err, expectedErr, cb) { if (expectedErr === null) { assert.strictEqual(err, null, `expected no error but got '${err}'`); } else { - assert.strictEqual(err.code, expectedErr, 'incorrect error response ' + - `code: should be '${expectedErr}' but got '${err.code}'`); - assert.strictEqual(err.statusCode, errors[expectedErr].code, - 'incorrect error status code: should be 400 but got ' + - `'${err.statusCode}'`); + assert.strictEqual( + err.code, + expectedErr, + 'incorrect error response ' + `code: should be '${expectedErr}' but got '${err.code}'` + ); + assert.strictEqual( + err.statusCode, + errors[expectedErr].code, + 'incorrect error status code: should be 400 but got ' + `'${err.statusCode}'` + ); } cb(); } @@ -43,8 +50,7 @@ describe('aws-sdk test delete bucket policy', () => { }); it('should return NoSuchBucket error if bucket does not exist', done => { - s3.deleteBucketPolicy({ Bucket: bucket }, err => - assertError(err, 'NoSuchBucket', done)); + s3.deleteBucketPolicy({ Bucket: bucket }, err => assertError(err, 'NoSuchBucket', done)); }); describe('policy rules', () => { @@ -53,13 +59,11 @@ describe('aws-sdk test delete bucket policy', () => { afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); it('should return MethodNotAllowed if user is not bucket owner', done => { - otherAccountS3.deleteBucketPolicy({ Bucket: bucket }, - err => assertError(err, 'MethodNotAllowed', done)); + otherAccountS3.deleteBucketPolicy({ Bucket: bucket }, err => assertError(err, 'MethodNotAllowed', done)); }); it('should return no error if no policy on bucket', done => { - s3.deleteBucketPolicy({ Bucket: bucket }, err => - assertError(err, null, done)); + s3.deleteBucketPolicy({ Bucket: bucket }, err => assertError(err, null, done)); }); it('should delete policy from bucket', done => { @@ -68,9 +72,7 @@ describe('aws-sdk test delete bucket policy', () => { assert.equal(err, null); s3.deleteBucketPolicy({ Bucket: bucket }, err => { assert.equal(err, null); - s3.getBucketPolicy({ Bucket: bucket }, - err => - assertError(err, 'NoSuchBucketPolicy', done)); + s3.getBucketPolicy({ Bucket: bucket }, err => assertError(err, 'NoSuchBucketPolicy', done)); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/deleteBucketReplication.js b/tests/functional/aws-node-sdk/test/bucket/deleteBucketReplication.js index 1c0eb8c1d0..fe58495d61 100644 --- a/tests/functional/aws-node-sdk/test/bucket/deleteBucketReplication.js +++ b/tests/functional/aws-node-sdk/test/bucket/deleteBucketReplication.js @@ -8,8 +8,7 @@ const BucketUtility = require('../../lib/utility/bucket-util'); const bucket = 'source-bucket'; const replicationConfig = { - Role: 'arn:aws:iam::account-id:role/src-resource,' + - 'arn:aws:iam::account-id:role/dest-resource', + Role: 'arn:aws:iam::account-id:role/src-resource,' + 'arn:aws:iam::account-id:role/dest-resource', Rules: [ { Destination: { Bucket: 'arn:aws:s3:::destination-bucket' }, @@ -26,17 +25,23 @@ describe('aws-node-sdk test deleteBucketReplication', () => { const config = getConfig('default', { signatureVersion: 'v4' }); function putVersioningOnBucket(bucket, cb) { - return s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, cb); + return 
s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + cb + ); } function putReplicationOnBucket(bucket, cb) { - return s3.putBucketReplication({ - Bucket: bucket, - ReplicationConfiguration: replicationConfig, - }, cb); + return s3.putBucketReplication( + { + Bucket: bucket, + ReplicationConfiguration: replicationConfig, + }, + cb + ); } function deleteReplicationAndCheckResponse(bucket, cb) { @@ -59,32 +64,43 @@ describe('aws-node-sdk test deleteBucketReplication', () => { deleteReplicationAndCheckResponse(bucket, done)); it('should delete a bucket replication config when it has one', done => - series([ - next => putVersioningOnBucket(bucket, next), - next => putReplicationOnBucket(bucket, next), - next => deleteReplicationAndCheckResponse(bucket, next), - ], done)); + series( + [ + next => putVersioningOnBucket(bucket, next), + next => putReplicationOnBucket(bucket, next), + next => deleteReplicationAndCheckResponse(bucket, next), + ], + done + )); - it('should return ReplicationConfigurationNotFoundError if getting ' + - 'replication config after it has been deleted', done => - series([ - next => putVersioningOnBucket(bucket, next), - next => putReplicationOnBucket(bucket, next), - next => s3.getBucketReplication({ Bucket: bucket }, (err, data) => { - if (err) { - return next(err); - } - assert.deepStrictEqual(data, { - ReplicationConfiguration: replicationConfig, - }); - return next(); - }), - next => deleteReplicationAndCheckResponse(bucket, next), - next => s3.getBucketReplication({ Bucket: bucket }, err => { - assert(errorInstances.ReplicationConfigurationNotFoundError.is[err.code]); - return next(); - }), - ], done)); + it( + 'should return ReplicationConfigurationNotFoundError if getting ' + + 'replication config after it has been deleted', + done => + series( + [ + next => putVersioningOnBucket(bucket, next), + next => putReplicationOnBucket(bucket, next), + next => + s3.getBucketReplication({ Bucket: bucket }, (err, data) => { + if (err) { + return next(err); + } + assert.deepStrictEqual(data, { + ReplicationConfiguration: replicationConfig, + }); + return next(); + }), + next => deleteReplicationAndCheckResponse(bucket, next), + next => + s3.getBucketReplication({ Bucket: bucket }, err => { + assert(errorInstances.ReplicationConfigurationNotFoundError.is[err.code]); + return next(); + }), + ], + done + ) + ); it('should return AccessDenied if user is not bucket owner', done => otherAccountS3.deleteBucketReplication({ Bucket: bucket }, err => { diff --git a/tests/functional/aws-node-sdk/test/bucket/deleteBucketTagging.js b/tests/functional/aws-node-sdk/test/bucket/deleteBucketTagging.js index 0689ca981f..38a52030d2 100644 --- a/tests/functional/aws-node-sdk/test/bucket/deleteBucketTagging.js +++ b/tests/functional/aws-node-sdk/test/bucket/deleteBucketTagging.js @@ -33,52 +33,87 @@ describe('aws-sdk test delete bucket tagging', () => { afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); it('should delete tag', done => { - async.series([ - next => s3.putBucketTagging({ - AccountId: s3.AccountId, - Tagging: validTagging, Bucket: bucket, - }, (err, res) => next(err, res)), - next => s3.getBucketTagging({ - AccountId: s3.AccountId, - Bucket: bucket, - }, (err, res) => { - assert.deepStrictEqual(res, validTagging); - next(err, res); - }), - next => s3.deleteBucketTagging({ - AccountId: s3.AccountId, - Bucket: bucket, - }, (err, res) => next(err, res)), - next => s3.getBucketTagging({ - AccountId: s3.AccountId, - Bucket: 
bucket, - }, next), - ], err => { - assertError(err, 'NoSuchTagSet'); - done(); - }); + async.series( + [ + next => + s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: validTagging, + Bucket: bucket, + }, + (err, res) => next(err, res) + ), + next => + s3.getBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + }, + (err, res) => { + assert.deepStrictEqual(res, validTagging); + next(err, res); + } + ), + next => + s3.deleteBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + }, + (err, res) => next(err, res) + ), + next => + s3.getBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + }, + next + ), + ], + err => { + assertError(err, 'NoSuchTagSet'); + done(); + } + ); }); it('should make no change when deleting tags on bucket with no tags', done => { - async.series([ - next => s3.getBucketTagging({ - AccountId: s3.AccountId, - Bucket: bucket, - }, err => { - assertError(err, 'NoSuchTagSet'); - next(); - }), - next => s3.deleteBucketTagging({ - AccountId: s3.AccountId, - Bucket: bucket, - }, (err, res) => next(err, res)), - next => s3.getBucketTagging({ - AccountId: s3.AccountId, - Bucket: bucket, - }, err => { - assertError(err, 'NoSuchTagSet'); - next(); - }), - ], done); + async.series( + [ + next => + s3.getBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + }, + err => { + assertError(err, 'NoSuchTagSet'); + next(); + } + ), + next => + s3.deleteBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + }, + (err, res) => next(err, res) + ), + next => + s3.getBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + }, + err => { + assertError(err, 'NoSuchTagSet'); + next(); + } + ), + ], + done + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/deleteCors.js b/tests/functional/aws-node-sdk/test/bucket/deleteCors.js index 16cc5de0c6..a792e60a7e 100644 --- a/tests/functional/aws-node-sdk/test/bucket/deleteCors.js +++ b/tests/functional/aws-node-sdk/test/bucket/deleteCors.js @@ -4,17 +4,18 @@ const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const bucketName = 'testdeletecorsbucket'; -const sampleCors = { CORSRules: [ - { AllowedMethods: ['PUT', 'POST', 'DELETE'], - AllowedOrigins: ['http://www.example.com'], - AllowedHeaders: ['*'], - MaxAgeSeconds: 3000, - ExposeHeaders: ['x-amz-server-side-encryption'] }, - { AllowedMethods: ['GET'], - AllowedOrigins: ['*'], - AllowedHeaders: ['*'], - MaxAgeSeconds: 3000 }, -] }; +const sampleCors = { + CORSRules: [ + { + AllowedMethods: ['PUT', 'POST', 'DELETE'], + AllowedOrigins: ['http://www.example.com'], + AllowedHeaders: ['*'], + MaxAgeSeconds: 3000, + ExposeHeaders: ['x-amz-server-side-encryption'], + }, + { AllowedMethods: ['GET'], AllowedOrigins: ['*'], AllowedHeaders: ['*'], MaxAgeSeconds: 3000 }, + ], +}; const itSkipIfAWS = process.env.AWS_ON_AIR ? 
it.skip : it; @@ -42,13 +43,10 @@ describe('DELETE bucket cors', () => { describe('without existing cors configuration', () => { it('should return a 204 response', done => { - s3.deleteBucketCors({ Bucket: bucketName }, - function deleteBucketCors(err) { + s3.deleteBucketCors({ Bucket: bucketName }, function deleteBucketCors(err) { const statusCode = this.httpResponse.statusCode; - assert.strictEqual(statusCode, 204, - `Found unexpected statusCode ${statusCode}`); - assert.strictEqual(err, null, - `Found unexpected err ${err}`); + assert.strictEqual(statusCode, 204, `Found unexpected statusCode ${statusCode}`); + assert.strictEqual(err, null, `Found unexpected err ${err}`); return done(); }); }); @@ -56,21 +54,16 @@ describe('DELETE bucket cors', () => { describe('with existing cors configuration', () => { beforeEach(done => { - s3.putBucketCors({ Bucket: bucketName, - CORSConfiguration: sampleCors }, done); + s3.putBucketCors({ Bucket: bucketName, CORSConfiguration: sampleCors }, done); }); it('should delete bucket configuration successfully', done => { - s3.deleteBucketCors({ Bucket: bucketName }, - function deleteBucketCors(err) { + s3.deleteBucketCors({ Bucket: bucketName }, function deleteBucketCors(err) { const statusCode = this.httpResponse.statusCode; - assert.strictEqual(statusCode, 204, - `Found unexpected statusCode ${statusCode}`); - assert.strictEqual(err, null, - `Found unexpected err ${err}`); + assert.strictEqual(statusCode, 204, `Found unexpected statusCode ${statusCode}`); + assert.strictEqual(err, null, `Found unexpected err ${err}`); s3.getBucketCors({ Bucket: bucketName }, err => { - assert.strictEqual(err.code, - 'NoSuchCORSConfiguration'); + assert.strictEqual(err.code, 'NoSuchCORSConfiguration'); assert.strictEqual(err.statusCode, 404); return done(); }); @@ -83,10 +76,8 @@ describe('DELETE bucket cors', () => { // to add a second set of real aws credentials under a profile // named 'lisa' in ~/.aws/scality, then rename 'itSkipIfAWS' to // 'it'. 
- itSkipIfAWS('should return AccessDenied if user is not bucket' + - 'owner', done => { - otherAccountS3.deleteBucketCors({ Bucket: bucketName }, - err => { + itSkipIfAWS('should return AccessDenied if user is not bucket' + 'owner', done => { + otherAccountS3.deleteBucketCors({ Bucket: bucketName }, err => { assert(err); assert.strictEqual(err.code, 'AccessDenied'); assert.strictEqual(err.statusCode, 403); diff --git a/tests/functional/aws-node-sdk/test/bucket/deleteWebsite.js b/tests/functional/aws-node-sdk/test/bucket/deleteWebsite.js index d221219421..d053c2fcce 100644 --- a/tests/functional/aws-node-sdk/test/bucket/deleteWebsite.js +++ b/tests/functional/aws-node-sdk/test/bucket/deleteWebsite.js @@ -30,14 +30,10 @@ describe('DELETE bucket website', () => { describe('without existing configuration', () => { it('should return a 204 response', done => { - const request = - s3.deleteBucketWebsite({ Bucket: bucketName }, err => { - const statusCode = - request.response.httpResponse.statusCode; - assert.strictEqual(statusCode, 204, - `Found unexpected statusCode ${statusCode}`); - assert.strictEqual(err, null, - `Found unexpected err ${err}`); + const request = s3.deleteBucketWebsite({ Bucket: bucketName }, err => { + const statusCode = request.response.httpResponse.statusCode; + assert.strictEqual(statusCode, 204, `Found unexpected statusCode ${statusCode}`); + assert.strictEqual(err, null, `Found unexpected err ${err}`); return done(); }); }); @@ -46,22 +42,18 @@ describe('DELETE bucket website', () => { describe('with existing configuration', () => { beforeEach(done => { const config = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucketName, - WebsiteConfiguration: config }, done); + s3.putBucketWebsite({ Bucket: bucketName, WebsiteConfiguration: config }, done); }); it('should delete bucket configuration successfully', done => { s3.deleteBucketWebsite({ Bucket: bucketName }, err => { - assert.strictEqual(err, null, - `Found unexpected err ${err}`); + assert.strictEqual(err, null, `Found unexpected err ${err}`); return done(); }); }); - it('should return AccessDenied if user is not bucket owner', - done => { - otherAccountS3.deleteBucketWebsite({ Bucket: bucketName }, - err => { + it('should return AccessDenied if user is not bucket owner', done => { + otherAccountS3.deleteBucketWebsite({ Bucket: bucketName }, err => { assert(err); assert.strictEqual(err.code, 'AccessDenied'); assert.strictEqual(err.statusCode, 403); diff --git a/tests/functional/aws-node-sdk/test/bucket/get.js b/tests/functional/aws-node-sdk/test/bucket/get.js index af78eb9a20..752621988b 100644 --- a/tests/functional/aws-node-sdk/test/bucket/get.js +++ b/tests/functional/aws-node-sdk/test/bucket/get.js @@ -5,282 +5,245 @@ const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const bucketSchema = require('../../schema/bucket'); const bucketSchemaV2 = require('../../schema/bucketV2'); -const { generateToken, decryptToken } = - require('../../../../../lib/api/apiUtils/object/continueToken'); +const { generateToken, decryptToken } = require('../../../../../lib/api/apiUtils/object/continueToken'); const tests = [ { name: 'return created objects in alphabetical order', - objectPutParams: Bucket => - [ - { Bucket, Key: 'testB/' }, - { Bucket, Key: 'testB/test.json', Body: '{}' }, - { Bucket, Key: 'testA/' }, - { Bucket, Key: 'testA/test.json', Body: '{}' }, - { Bucket, Key: 'testA/test/test.json', Body: '{}' }, - ], + objectPutParams: Bucket => [ 
+ { Bucket, Key: 'testB/' }, + { Bucket, Key: 'testB/test.json', Body: '{}' }, + { Bucket, Key: 'testA/' }, + { Bucket, Key: 'testA/test.json', Body: '{}' }, + { Bucket, Key: 'testA/test/test.json', Body: '{}' }, + ], listObjectParams: Bucket => ({ Bucket }), assertions: (data, Bucket) => { const keys = data.Contents.map(object => object.Key); // ETag should include quotes around value - const emptyObjectHash = - '"d41d8cd98f00b204e9800998ecf8427e"'; + const emptyObjectHash = '"d41d8cd98f00b204e9800998ecf8427e"'; assert.equal(data.Name, Bucket, 'Bucket name mismatch'); - assert.deepEqual(keys, [ - 'testA/', - 'testA/test.json', - 'testA/test/test.json', - 'testB/', - 'testB/test.json', - ], 'Bucket content mismatch'); - assert.deepStrictEqual(data.Contents[0].ETag, - emptyObjectHash, 'Object hash mismatch'); + assert.deepEqual( + keys, + ['testA/', 'testA/test.json', 'testA/test/test.json', 'testB/', 'testB/test.json'], + 'Bucket content mismatch' + ); + assert.deepStrictEqual(data.Contents[0].ETag, emptyObjectHash, 'Object hash mismatch'); }, }, { name: 'return multiple common prefixes', - objectPutParams: Bucket => - [ - { Bucket, Key: 'testB/' }, - { Bucket, Key: 'testB/test.json', Body: '{}' }, - { Bucket, Key: 'testA/' }, - { Bucket, Key: 'testA/test.json', Body: '{}' }, - { Bucket, Key: 'testA/test/test.json', Body: '{}' }, - ], + objectPutParams: Bucket => [ + { Bucket, Key: 'testB/' }, + { Bucket, Key: 'testB/test.json', Body: '{}' }, + { Bucket, Key: 'testA/' }, + { Bucket, Key: 'testA/test.json', Body: '{}' }, + { Bucket, Key: 'testA/test/test.json', Body: '{}' }, + ], listObjectParams: Bucket => ({ Bucket, Delimiter: '/' }), assertions: (data, Bucket) => { const prefixes = data.CommonPrefixes.map(cp => cp.Prefix); assert.equal(data.Name, Bucket, 'Bucket name mismatch'); - assert.deepEqual(prefixes, [ - 'testA/', - 'testB/', - ], 'Bucket content mismatch'); + assert.deepEqual(prefixes, ['testA/', 'testB/'], 'Bucket content mismatch'); }, }, { name: 'list objects with percentage delimiter', - objectPutParams: Bucket => - [ - { Bucket, Key: 'testB%' }, - { Bucket, Key: 'testC%test.json', Body: '{}' }, - { Bucket, Key: 'testA%' }, - ], + objectPutParams: Bucket => [ + { Bucket, Key: 'testB%' }, + { Bucket, Key: 'testC%test.json', Body: '{}' }, + { Bucket, Key: 'testA%' }, + ], listObjectParams: Bucket => ({ Bucket, Delimiter: '%' }), assertions: data => { const prefixes = data.CommonPrefixes.map(cp => cp.Prefix); - assert.deepEqual(prefixes, [ - 'testA%', - 'testB%', - 'testC%', - ], 'Bucket content mismatch'); + assert.deepEqual(prefixes, ['testA%', 'testB%', 'testC%'], 'Bucket content mismatch'); }, }, { name: 'list object titles with white spaces', - objectPutParams: Bucket => - [ - { Bucket, Key: 'whiteSpace/' }, - { Bucket, Key: 'whiteSpace/one whiteSpace', Body: '{}' }, - { Bucket, Key: 'whiteSpace/two white spaces', Body: '{}' }, - { Bucket, Key: 'white space/' }, - { Bucket, Key: 'white space/one whiteSpace', Body: '{}' }, - { Bucket, Key: 'white space/two white spaces', Body: '{}' }, - ], + objectPutParams: Bucket => [ + { Bucket, Key: 'whiteSpace/' }, + { Bucket, Key: 'whiteSpace/one whiteSpace', Body: '{}' }, + { Bucket, Key: 'whiteSpace/two white spaces', Body: '{}' }, + { Bucket, Key: 'white space/' }, + { Bucket, Key: 'white space/one whiteSpace', Body: '{}' }, + { Bucket, Key: 'white space/two white spaces', Body: '{}' }, + ], listObjectParams: Bucket => ({ Bucket }), assertions: (data, Bucket) => { const keys = data.Contents.map(object => object.Key); 
assert.equal(data.Name, Bucket, 'Bucket name mismatch'); - assert.deepEqual(keys, [ - /* These object names are intentionally listed in a + assert.deepEqual( + keys, + [ + /* These object names are intentionally listed in a different order than they were created to additionally test that they are listed alphabetically. */ - 'white space/', - 'white space/one whiteSpace', - 'white space/two white spaces', - 'whiteSpace/', - 'whiteSpace/one whiteSpace', - 'whiteSpace/two white spaces', - ], 'Bucket content mismatch'); + 'white space/', + 'white space/one whiteSpace', + 'white space/two white spaces', + 'whiteSpace/', + 'whiteSpace/one whiteSpace', + 'whiteSpace/two white spaces', + ], + 'Bucket content mismatch' + ); }, }, { name: 'list object titles that contain special chars', - objectPutParams: Bucket => - [ - { Bucket, Key: 'foo&<>\'"' }, - { Bucket, Key: '*asterixObjTitle/' }, - { Bucket, Key: '*asterixObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: '*asterixObjTitle/*asterixObjTitle', - Body: '{}' }, - { Bucket, Key: '.dotObjTitle/' }, - { Bucket, Key: '.dotObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: '.dotObjTitle/.dotObjTitle', Body: '{}' }, - { Bucket, Key: '(openParenObjTitle/' }, - { Bucket, Key: '(openParenObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: '(openParenObjTitle/(openParenObjTitle', - Body: '{}' }, - { Bucket, Key: ')closeParenObjTitle/' }, - { Bucket, Key: ')closeParenObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: ')closeParenObjTitle/)closeParenObjTitle', - Body: '{}' }, - { Bucket, Key: '!exclamationPointObjTitle/' }, - { Bucket, Key: '!exclamationPointObjTitle/objTitleA', - Body: '{}' }, - { Bucket, Key: - '!exclamationPointObjTitle/!exclamationPointObjTitle', - Body: '{}' }, - { Bucket, Key: '-dashObjTitle/' }, - { Bucket, Key: '-dashObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: '-dashObjTitle/-dashObjTitle', Body: '{}' }, - { Bucket, Key: '_underscoreObjTitle/' }, - { Bucket, Key: '_underscoreObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: '_underscoreObjTitle/_underscoreObjTitle', - Body: '{}' }, - { Bucket, Key: "'apostropheObjTitle/" }, - { Bucket, Key: "'apostropheObjTitle/objTitleA", Body: '{}' }, - { Bucket, Key: "'apostropheObjTitle/'apostropheObjTitle", - Body: '{}' }, - { Bucket, Key: 'çcedilleObjTitle' }, - { Bucket, Key: 'çcedilleObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: 'çcedilleObjTitle/çcedilleObjTitle', - Body: '{}' }, - { Bucket, Key: 'дcyrillicDObjTitle' }, - { Bucket, Key: 'дcyrillicDObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: 'дcyrillicDObjTitle/дcyrillicDObjTitle', - Body: '{}' }, - { Bucket, Key: 'ñenyeObjTitle' }, - { Bucket, Key: 'ñenyeObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: 'ñenyeObjTitle/ñenyeObjTitle', Body: '{}' }, - { Bucket, Key: '山chineseMountainObjTitle' }, - { Bucket, Key: '山chineseMountainObjTitle/objTitleA', - Body: '{}' }, - { Bucket, Key: - '山chineseMountainObjTitle/山chineseMountainObjTitle', - Body: '{}' }, - { Bucket, Key: 'àaGraveLowerCaseObjTitle' }, - { Bucket, Key: 'àaGraveLowerCaseObjTitle/objTitleA', - Body: '{}' }, - { Bucket, - Key: 'àaGraveLowerCaseObjTitle/àaGraveLowerCaseObjTitle', - Body: '{}' }, - { Bucket, Key: 'ÀaGraveUpperCaseObjTitle' }, - { Bucket, Key: 'ÀaGraveUpperCaseObjTitle/objTitleA', - Body: '{}' }, - { Bucket, - Key: 'ÀaGraveUpperCaseObjTitle/ÀaGraveUpperCaseObjTitle', - Body: '{}' }, - { Bucket, Key: 'ßscharfesSObjTitle' }, - { Bucket, Key: 'ßscharfesSObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: 
'ßscharfesSObjTitle/ßscharfesSObjTitle', - Body: '{}' }, - { Bucket, Key: '日japaneseMountainObjTitle' }, - { Bucket, Key: '日japaneseMountainObjTitle/objTitleA', - Body: '{}' }, - { Bucket, - Key: '日japaneseMountainObjTitle/日japaneseMountainObjTitle', - Body: '{}' }, - { Bucket, Key: 'بbaArabicObjTitle' }, - { Bucket, Key: 'بbaArabicObjTitle/objTitleA', Body: '{}' }, - { Bucket, Key: 'بbaArabicObjTitle/بbaArabicObjTitle', - Body: '{}' }, - { Bucket, - Key: 'अadevanagariHindiObjTitle' }, - { Bucket, - Key: 'अadevanagariHindiObjTitle/objTitleA', - Body: '{}' }, - { Bucket, - Key: 'अadevanagariHindiObjTitle/अadevanagariHindiObjTitle', - Body: '{}' }, - { Bucket, Key: 'éeacuteLowerCaseObjTitle' }, - { Bucket, Key: 'éeacuteLowerCaseObjTitle/objTitleA', - Body: '{}' }, - { Bucket, - Key: 'éeacuteLowerCaseObjTitle/éeacuteLowerCaseObjTitle', - Body: '{}' }, - ], + objectPutParams: Bucket => [ + { Bucket, Key: 'foo&<>\'"' }, + { Bucket, Key: '*asterixObjTitle/' }, + { Bucket, Key: '*asterixObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: '*asterixObjTitle/*asterixObjTitle', Body: '{}' }, + { Bucket, Key: '.dotObjTitle/' }, + { Bucket, Key: '.dotObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: '.dotObjTitle/.dotObjTitle', Body: '{}' }, + { Bucket, Key: '(openParenObjTitle/' }, + { Bucket, Key: '(openParenObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: '(openParenObjTitle/(openParenObjTitle', Body: '{}' }, + { Bucket, Key: ')closeParenObjTitle/' }, + { Bucket, Key: ')closeParenObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: ')closeParenObjTitle/)closeParenObjTitle', Body: '{}' }, + { Bucket, Key: '!exclamationPointObjTitle/' }, + { Bucket, Key: '!exclamationPointObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: '!exclamationPointObjTitle/!exclamationPointObjTitle', Body: '{}' }, + { Bucket, Key: '-dashObjTitle/' }, + { Bucket, Key: '-dashObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: '-dashObjTitle/-dashObjTitle', Body: '{}' }, + { Bucket, Key: '_underscoreObjTitle/' }, + { Bucket, Key: '_underscoreObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: '_underscoreObjTitle/_underscoreObjTitle', Body: '{}' }, + { Bucket, Key: "'apostropheObjTitle/" }, + { Bucket, Key: "'apostropheObjTitle/objTitleA", Body: '{}' }, + { Bucket, Key: "'apostropheObjTitle/'apostropheObjTitle", Body: '{}' }, + { Bucket, Key: 'çcedilleObjTitle' }, + { Bucket, Key: 'çcedilleObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: 'çcedilleObjTitle/çcedilleObjTitle', Body: '{}' }, + { Bucket, Key: 'дcyrillicDObjTitle' }, + { Bucket, Key: 'дcyrillicDObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: 'дcyrillicDObjTitle/дcyrillicDObjTitle', Body: '{}' }, + { Bucket, Key: 'ñenyeObjTitle' }, + { Bucket, Key: 'ñenyeObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: 'ñenyeObjTitle/ñenyeObjTitle', Body: '{}' }, + { Bucket, Key: '山chineseMountainObjTitle' }, + { Bucket, Key: '山chineseMountainObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: '山chineseMountainObjTitle/山chineseMountainObjTitle', Body: '{}' }, + { Bucket, Key: 'àaGraveLowerCaseObjTitle' }, + { Bucket, Key: 'àaGraveLowerCaseObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: 'àaGraveLowerCaseObjTitle/àaGraveLowerCaseObjTitle', Body: '{}' }, + { Bucket, Key: 'ÀaGraveUpperCaseObjTitle' }, + { Bucket, Key: 'ÀaGraveUpperCaseObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: 'ÀaGraveUpperCaseObjTitle/ÀaGraveUpperCaseObjTitle', Body: '{}' }, + { Bucket, Key: 'ßscharfesSObjTitle' }, + { Bucket, Key: 'ßscharfesSObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: 
'ßscharfesSObjTitle/ßscharfesSObjTitle', Body: '{}' }, + { Bucket, Key: '日japaneseMountainObjTitle' }, + { Bucket, Key: '日japaneseMountainObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: '日japaneseMountainObjTitle/日japaneseMountainObjTitle', Body: '{}' }, + { Bucket, Key: 'بbaArabicObjTitle' }, + { Bucket, Key: 'بbaArabicObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: 'بbaArabicObjTitle/بbaArabicObjTitle', Body: '{}' }, + { Bucket, Key: 'अadevanagariHindiObjTitle' }, + { Bucket, Key: 'अadevanagariHindiObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: 'अadevanagariHindiObjTitle/अadevanagariHindiObjTitle', Body: '{}' }, + { Bucket, Key: 'éeacuteLowerCaseObjTitle' }, + { Bucket, Key: 'éeacuteLowerCaseObjTitle/objTitleA', Body: '{}' }, + { Bucket, Key: 'éeacuteLowerCaseObjTitle/éeacuteLowerCaseObjTitle', Body: '{}' }, + ], listObjectParams: Bucket => ({ Bucket }), assertions: (data, Bucket) => { const keys = data.Contents.map(object => object.Key); assert.equal(data.Name, Bucket, 'Bucket name mismatch'); - assert.deepEqual(keys, [ - /* These object names are intentionally listed in a + assert.deepEqual( + keys, + [ + /* These object names are intentionally listed in a different order than they were created to additionally test that they are listed alphabetically. */ - '!exclamationPointObjTitle/', - '!exclamationPointObjTitle/!exclamationPointObjTitle', - '!exclamationPointObjTitle/objTitleA', - "'apostropheObjTitle/", - "'apostropheObjTitle/'apostropheObjTitle", - "'apostropheObjTitle/objTitleA", - '(openParenObjTitle/', - '(openParenObjTitle/(openParenObjTitle', - '(openParenObjTitle/objTitleA', - ')closeParenObjTitle/', - ')closeParenObjTitle/)closeParenObjTitle', - ')closeParenObjTitle/objTitleA', - '*asterixObjTitle/', - '*asterixObjTitle/*asterixObjTitle', - '*asterixObjTitle/objTitleA', - '-dashObjTitle/', - '-dashObjTitle/-dashObjTitle', - '-dashObjTitle/objTitleA', - '.dotObjTitle/', - '.dotObjTitle/.dotObjTitle', - '.dotObjTitle/objTitleA', - '_underscoreObjTitle/', - '_underscoreObjTitle/_underscoreObjTitle', - '_underscoreObjTitle/objTitleA', - 'foo&<>\'"', - 'ÀaGraveUpperCaseObjTitle', - 'ÀaGraveUpperCaseObjTitle/objTitleA', - 'ÀaGraveUpperCaseObjTitle/ÀaGraveUpperCaseObjTitle', - 'ßscharfesSObjTitle', - 'ßscharfesSObjTitle/objTitleA', - 'ßscharfesSObjTitle/ßscharfesSObjTitle', - 'àaGraveLowerCaseObjTitle', - 'àaGraveLowerCaseObjTitle/objTitleA', - 'àaGraveLowerCaseObjTitle/àaGraveLowerCaseObjTitle', - 'çcedilleObjTitle', - 'çcedilleObjTitle/objTitleA', - 'çcedilleObjTitle/çcedilleObjTitle', - 'éeacuteLowerCaseObjTitle', - 'éeacuteLowerCaseObjTitle/objTitleA', - 'éeacuteLowerCaseObjTitle/éeacuteLowerCaseObjTitle', - 'ñenyeObjTitle', - 'ñenyeObjTitle/objTitleA', - 'ñenyeObjTitle/ñenyeObjTitle', - 'дcyrillicDObjTitle', - 'дcyrillicDObjTitle/objTitleA', - 'дcyrillicDObjTitle/дcyrillicDObjTitle', - 'بbaArabicObjTitle', - 'بbaArabicObjTitle/objTitleA', - 'بbaArabicObjTitle/بbaArabicObjTitle', - 'अadevanagariHindiObjTitle', - 'अadevanagariHindiObjTitle/objTitleA', - 'अadevanagariHindiObjTitle/अadevanagariHindiObjTitle', - '山chineseMountainObjTitle', - '山chineseMountainObjTitle/objTitleA', - '山chineseMountainObjTitle/山chineseMountainObjTitle', - '日japaneseMountainObjTitle', - '日japaneseMountainObjTitle/objTitleA', - '日japaneseMountainObjTitle/日japaneseMountainObjTitle', - ], 'Bucket content mismatch'); + '!exclamationPointObjTitle/', + '!exclamationPointObjTitle/!exclamationPointObjTitle', + '!exclamationPointObjTitle/objTitleA', + "'apostropheObjTitle/", + 
"'apostropheObjTitle/'apostropheObjTitle", + "'apostropheObjTitle/objTitleA", + '(openParenObjTitle/', + '(openParenObjTitle/(openParenObjTitle', + '(openParenObjTitle/objTitleA', + ')closeParenObjTitle/', + ')closeParenObjTitle/)closeParenObjTitle', + ')closeParenObjTitle/objTitleA', + '*asterixObjTitle/', + '*asterixObjTitle/*asterixObjTitle', + '*asterixObjTitle/objTitleA', + '-dashObjTitle/', + '-dashObjTitle/-dashObjTitle', + '-dashObjTitle/objTitleA', + '.dotObjTitle/', + '.dotObjTitle/.dotObjTitle', + '.dotObjTitle/objTitleA', + '_underscoreObjTitle/', + '_underscoreObjTitle/_underscoreObjTitle', + '_underscoreObjTitle/objTitleA', + 'foo&<>\'"', + 'ÀaGraveUpperCaseObjTitle', + 'ÀaGraveUpperCaseObjTitle/objTitleA', + 'ÀaGraveUpperCaseObjTitle/ÀaGraveUpperCaseObjTitle', + 'ßscharfesSObjTitle', + 'ßscharfesSObjTitle/objTitleA', + 'ßscharfesSObjTitle/ßscharfesSObjTitle', + 'àaGraveLowerCaseObjTitle', + 'àaGraveLowerCaseObjTitle/objTitleA', + 'àaGraveLowerCaseObjTitle/àaGraveLowerCaseObjTitle', + 'çcedilleObjTitle', + 'çcedilleObjTitle/objTitleA', + 'çcedilleObjTitle/çcedilleObjTitle', + 'éeacuteLowerCaseObjTitle', + 'éeacuteLowerCaseObjTitle/objTitleA', + 'éeacuteLowerCaseObjTitle/éeacuteLowerCaseObjTitle', + 'ñenyeObjTitle', + 'ñenyeObjTitle/objTitleA', + 'ñenyeObjTitle/ñenyeObjTitle', + 'дcyrillicDObjTitle', + 'дcyrillicDObjTitle/objTitleA', + 'дcyrillicDObjTitle/дcyrillicDObjTitle', + 'بbaArabicObjTitle', + 'بbaArabicObjTitle/objTitleA', + 'بbaArabicObjTitle/بbaArabicObjTitle', + 'अadevanagariHindiObjTitle', + 'अadevanagariHindiObjTitle/objTitleA', + 'अadevanagariHindiObjTitle/अadevanagariHindiObjTitle', + '山chineseMountainObjTitle', + '山chineseMountainObjTitle/objTitleA', + '山chineseMountainObjTitle/山chineseMountainObjTitle', + '日japaneseMountainObjTitle', + '日japaneseMountainObjTitle/objTitleA', + '日japaneseMountainObjTitle/日japaneseMountainObjTitle', + ], + 'Bucket content mismatch' + ); }, }, { name: 'list objects with special chars in CommonPrefixes', - objectPutParams: Bucket => - [ - { Bucket, Key: '&#' }, - { Bucket, Key: '"quot#' }, { Bucket, Key: '\'apos#' }, - { Bucket, Key: ' [ + { Bucket, Key: '&#' }, + { Bucket, Key: '"quot#' }, + { Bucket, Key: "'apos#" }, + { Bucket, Key: ' ({ Bucket, Delimiter: '#' }), assertions: data => { assert.deepStrictEqual(data.CommonPrefixes, [ - { Prefix: '"quot#' }, { Prefix: '&#' }, - { Prefix: '\'apos#' }, { Prefix: ' { before(done => { bucketUtil = new BucketUtility(); - bucketUtil.createRandom(1) - .then(created => { - bucketName = created; - done(); - }) - .catch(done); + bucketUtil + .createRandom(1) + .then(created => { + bucketName = created; + done(); + }) + .catch(done); }); after(done => { - bucketUtil.deleteOne(bucketName) - .then(() => done()) - .catch(done); + bucketUtil + .deleteOne(bucketName) + .then(() => done()) + .catch(done); }); it('should return 403 and AccessDenied on a private bucket', done => { const params = { Bucket: bucketName }; - bucketUtil.s3 - .makeUnauthenticatedRequest('listObjects', params, error => { - assert(error); - assert.strictEqual(error.statusCode, 403); - assert.strictEqual(error.code, 'AccessDenied'); - done(); - }); + bucketUtil.s3.makeUnauthenticatedRequest('listObjects', params, error => { + assert(error); + assert.strictEqual(error.statusCode, 403); + assert.strictEqual(error.code, 'AccessDenied'); + done(); + }); }); }); @@ -324,20 +288,27 @@ describe('GET Bucket - AWS.S3.listObjects', () => { before(done => { bucketUtil = new BucketUtility('default', sigCfg); - bucketUtil.createRandom(1) - 
.then(created => { - bucketName = created; - done(); - }) - .catch(done); + bucketUtil + .createRandom(1) + .then(created => { + bucketName = created; + done(); + }) + .catch(done); }); after(done => { - bucketUtil.deleteOne(bucketName).then(() => done()).catch(done); + bucketUtil + .deleteOne(bucketName) + .then(() => done()) + .catch(done); }); afterEach(done => { - bucketUtil.empty(bucketName).catch(done).done(() => done()); + bucketUtil + .empty(bucketName) + .catch(done) + .done(() => done()); }); tests.forEach(test => { @@ -361,20 +332,16 @@ describe('GET Bucket - AWS.S3.listObjects', () => { const s3 = bucketUtil.s3; const Bucket = bucketName; - Promise - .mapSeries(test.objectPutParams(Bucket), - param => s3.putObject(param).promise()) - .then(() => - s3.listObjectsV2(test.listObjectParams(Bucket)) - .promise()) + Promise.mapSeries(test.objectPutParams(Bucket), param => s3.putObject(param).promise()) + .then(() => s3.listObjectsV2(test.listObjectParams(Bucket)).promise()) .then(data => { - const isValidResponse = - tv4.validate(data, bucketSchemaV2); + const isValidResponse = tv4.validate(data, bucketSchemaV2); if (!isValidResponse) { throw new Error(tv4.error); } return data; - }).then(data => { + }) + .then(data => { test.assertions(data, Bucket); done(); }) @@ -382,23 +349,22 @@ describe('GET Bucket - AWS.S3.listObjects', () => { }); }); - ['&', '"quot', '\'apos', 'gt'].forEach(k => { + ['&', '"quot', "'apos", 'gt'].forEach(k => { it(`should list objects with key ${k} as Prefix`, done => { const s3 = bucketUtil.s3; const Bucket = bucketName; const objects = [{ Bucket, Key: k }]; - Promise - .mapSeries(objects, param => s3.putObject(param).promise()) + Promise.mapSeries(objects, param => s3.putObject(param).promise()) .then(() => s3.listObjects({ Bucket, Prefix: k }).promise()) .then(data => { - const isValidResponse = tv4.validate(data, - bucketSchema); + const isValidResponse = tv4.validate(data, bucketSchema); if (!isValidResponse) { throw new Error(tv4.error); } return data; - }).then(data => { + }) + .then(data => { assert.deepStrictEqual(data.Prefix, k); done(); }) @@ -406,23 +372,22 @@ describe('GET Bucket - AWS.S3.listObjects', () => { }); }); - ['&', '"quot', '\'apos', 'gt'].forEach(k => { + ['&', '"quot', "'apos", 'gt'].forEach(k => { it(`should list objects with key ${k} as Marker`, done => { const s3 = bucketUtil.s3; const Bucket = bucketName; const objects = [{ Bucket, Key: k }]; - Promise - .mapSeries(objects, param => s3.putObject(param).promise()) + Promise.mapSeries(objects, param => s3.putObject(param).promise()) .then(() => s3.listObjects({ Bucket, Marker: k }).promise()) .then(data => { - const isValidResponse = tv4.validate(data, - bucketSchema); + const isValidResponse = tv4.validate(data, bucketSchema); if (!isValidResponse) { throw new Error(tv4.error); } return data; - }).then(data => { + }) + .then(data => { assert.deepStrictEqual(data.Marker, k); done(); }) @@ -430,24 +395,25 @@ describe('GET Bucket - AWS.S3.listObjects', () => { }); }); - ['&', '"quot', '\'apos', 'gt'].forEach(k => { + ['&', '"quot', "'apos", 'gt'].forEach(k => { it(`should list objects with key ${k} as NextMarker`, done => { const s3 = bucketUtil.s3; const Bucket = bucketName; - const objects = [{ Bucket, Key: k }, { Bucket, Key: 'zzz' }]; + const objects = [ + { Bucket, Key: k }, + { Bucket, Key: 'zzz' }, + ]; - Promise - .mapSeries(objects, param => s3.putObject(param).promise()) - .then(() => s3.listObjects({ Bucket, MaxKeys: 1, - Delimiter: 'foo' }).promise()) + 
Promise.mapSeries(objects, param => s3.putObject(param).promise()) + .then(() => s3.listObjects({ Bucket, MaxKeys: 1, Delimiter: 'foo' }).promise()) .then(data => { - const isValidResponse = tv4.validate(data, - bucketSchema); + const isValidResponse = tv4.validate(data, bucketSchema); if (!isValidResponse) { throw new Error(tv4.error); } return data; - }).then(data => { + }) + .then(data => { assert.strictEqual(data.NextMarker, k); done(); }) @@ -455,24 +421,22 @@ describe('GET Bucket - AWS.S3.listObjects', () => { }); }); - ['&', '"quot', '\'apos', 'gt'].forEach(k => { + ['&', '"quot', "'apos", 'gt'].forEach(k => { it(`should list objects with key ${k} as StartAfter`, done => { const s3 = bucketUtil.s3; const Bucket = bucketName; const objects = [{ Bucket, Key: k }]; - Promise - .mapSeries(objects, param => s3.putObject(param).promise()) - .then(() => s3.listObjectsV2( - { Bucket, StartAfter: k }).promise()) + Promise.mapSeries(objects, param => s3.putObject(param).promise()) + .then(() => s3.listObjectsV2({ Bucket, StartAfter: k }).promise()) .then(data => { - const isValidResponse = tv4.validate(data, - bucketSchemaV2); + const isValidResponse = tv4.validate(data, bucketSchemaV2); if (!isValidResponse) { throw new Error(tv4.error); } return data; - }).then(data => { + }) + .then(data => { assert.deepStrictEqual(data.StartAfter, k); done(); }) @@ -480,55 +444,55 @@ describe('GET Bucket - AWS.S3.listObjects', () => { }); }); - ['&', '"quot', '\'apos', 'gt'].forEach(k => { - it(`should list objects with key ${k} as ContinuationToken`, - done => { + ['&', '"quot', "'apos", 'gt'].forEach(k => { + it(`should list objects with key ${k} as ContinuationToken`, done => { const s3 = bucketUtil.s3; const Bucket = bucketName; const objects = [{ Bucket, Key: k }]; - Promise - .mapSeries(objects, param => s3.putObject(param).promise()) - .then(() => s3.listObjectsV2({ - Bucket, - ContinuationToken: generateToken(k), - }).promise()) + Promise.mapSeries(objects, param => s3.putObject(param).promise()) + .then(() => + s3 + .listObjectsV2({ + Bucket, + ContinuationToken: generateToken(k), + }) + .promise() + ) .then(data => { - const isValidResponse = tv4.validate(data, - bucketSchemaV2); + const isValidResponse = tv4.validate(data, bucketSchemaV2); if (!isValidResponse) { throw new Error(tv4.error); } return data; - }).then(data => { - assert.deepStrictEqual( - decryptToken(data.ContinuationToken), k); + }) + .then(data => { + assert.deepStrictEqual(decryptToken(data.ContinuationToken), k); done(); }) .catch(done); }); }); - ['&', '"quot', '\'apos', 'gt'].forEach(k => { - it(`should list objects with key ${k} as NextContinuationToken`, - done => { + ['&', '"quot', "'apos", 'gt'].forEach(k => { + it(`should list objects with key ${k} as NextContinuationToken`, done => { const s3 = bucketUtil.s3; const Bucket = bucketName; - const objects = [{ Bucket, Key: k }, { Bucket, Key: 'zzz' }]; - Promise - .mapSeries(objects, param => s3.putObject(param).promise()) - .then(() => s3.listObjectsV2({ Bucket, MaxKeys: 1, - Delimiter: 'foo' }).promise()) + const objects = [ + { Bucket, Key: k }, + { Bucket, Key: 'zzz' }, + ]; + Promise.mapSeries(objects, param => s3.putObject(param).promise()) + .then(() => s3.listObjectsV2({ Bucket, MaxKeys: 1, Delimiter: 'foo' }).promise()) .then(data => { - const isValidResponse = tv4.validate(data, - bucketSchemaV2); + const isValidResponse = tv4.validate(data, bucketSchemaV2); if (!isValidResponse) { throw new Error(tv4.error); } return data; - }).then(data => { - 
assert.strictEqual( - decryptToken(data.NextContinuationToken), k); + }) + .then(data => { + assert.strictEqual(decryptToken(data.NextContinuationToken), k); done(); }) .catch(done); diff --git a/tests/functional/aws-node-sdk/test/bucket/getBucketEncryption.js b/tests/functional/aws-node-sdk/test/bucket/getBucketEncryption.js index 5b9794f6b2..96d1826ead 100644 --- a/tests/functional/aws-node-sdk/test/bucket/getBucketEncryption.js +++ b/tests/functional/aws-node-sdk/test/bucket/getBucketEncryption.js @@ -57,27 +57,35 @@ describe('aws-sdk test get bucket encryption', () => { }); it('should include KMSMasterKeyID if user has configured a custom master key', done => { - setEncryptionInfo({ cryptoScheme: 1, algorithm: 'aws:kms', masterKeyId: '12345', - configuredMasterKeyId: '54321', mandatory: true }, err => { - assert.ifError(err); - s3.getBucketEncryption({ Bucket: bucketName }, (err, res) => { + setEncryptionInfo( + { + cryptoScheme: 1, + algorithm: 'aws:kms', + masterKeyId: '12345', + configuredMasterKeyId: '54321', + mandatory: true, + }, + err => { assert.ifError(err); - assert.deepStrictEqual(res, { - ServerSideEncryptionConfiguration: { - Rules: [ - { - ApplyServerSideEncryptionByDefault: { - SSEAlgorithm: 'aws:kms', - KMSMasterKeyID: '54321', + s3.getBucketEncryption({ Bucket: bucketName }, (err, res) => { + assert.ifError(err); + assert.deepStrictEqual(res, { + ServerSideEncryptionConfiguration: { + Rules: [ + { + ApplyServerSideEncryptionByDefault: { + SSEAlgorithm: 'aws:kms', + KMSMasterKeyID: '54321', + }, + BucketKeyEnabled: false, }, - BucketKeyEnabled: false, - }, - ], - }, + ], + }, + }); + done(); }); - done(); - }); - }); + } + ); }); it('should not include KMSMasterKeyID if no user configured master key', done => { diff --git a/tests/functional/aws-node-sdk/test/bucket/getBucketLifecycle.js b/tests/functional/aws-node-sdk/test/bucket/getBucketLifecycle.js index 73284ed0e4..bb43a54d49 100644 --- a/tests/functional/aws-node-sdk/test/bucket/getBucketLifecycle.js +++ b/tests/functional/aws-node-sdk/test/bucket/getBucketLifecycle.js @@ -12,11 +12,16 @@ function assertError(err, expectedErr, cb) { if (expectedErr === null) { assert.strictEqual(err, null, `expected no error but got '${err}'`); } else { - assert.strictEqual(err.code, expectedErr, 'incorrect error response ' + - `code: should be '${expectedErr}' but got '${err.code}'`); - assert.strictEqual(err.statusCode, errors[expectedErr].code, - 'incorrect error status code: should be 400 but got ' + - `'${err.statusCode}'`); + assert.strictEqual( + err.code, + expectedErr, + 'incorrect error response ' + `code: should be '${expectedErr}' but got '${err.code}'` + ); + assert.strictEqual( + err.statusCode, + errors[expectedErr].code, + 'incorrect error status code: should be 400 but got ' + `'${err.statusCode}'` + ); } cb(); } @@ -33,8 +38,7 @@ describe('aws-sdk test get bucket lifecycle', () => { }); it('should return NoSuchBucket error if bucket does not exist', done => { - s3.getBucketLifecycleConfiguration({ Bucket: bucket }, err => - assertError(err, 'NoSuchBucket', done)); + s3.getBucketLifecycleConfiguration({ Bucket: bucket }, err => assertError(err, 'NoSuchBucket', done)); }); describe('config rules', () => { @@ -43,126 +47,134 @@ describe('aws-sdk test get bucket lifecycle', () => { afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); it('should return AccessDenied if user is not bucket owner', done => { - otherAccountS3.getBucketLifecycleConfiguration({ Bucket: bucket }, - err => assertError(err, 
'AccessDenied', done)); + otherAccountS3.getBucketLifecycleConfiguration({ Bucket: bucket }, err => + assertError(err, 'AccessDenied', done) + ); }); - it('should return NoSuchLifecycleConfiguration error if no lifecycle ' + - 'put to bucket', done => { + it('should return NoSuchLifecycleConfiguration error if no lifecycle ' + 'put to bucket', done => { s3.getBucketLifecycleConfiguration({ Bucket: bucket }, err => { assertError(err, 'NoSuchLifecycleConfiguration', done); }); }); it('should get bucket lifecycle config with top-level prefix', done => - s3.putBucketLifecycleConfiguration({ - Bucket: bucket, - LifecycleConfiguration: { - Rules: [{ - ID: 'test-id', - Status: 'Enabled', - Prefix: '', - Expiration: { Days: 1 }, - }], + s3.putBucketLifecycleConfiguration( + { + Bucket: bucket, + LifecycleConfiguration: { + Rules: [ + { + ID: 'test-id', + Status: 'Enabled', + Prefix: '', + Expiration: { Days: 1 }, + }, + ], + }, }, - }, err => { - assert.equal(err, null, `Err putting lifecycle config: ${err}`); - s3.getBucketLifecycleConfiguration({ Bucket: bucket }, - (err, res) => { - assert.equal(err, null, 'Error getting lifecycle config: ' + - `${err}`); - assert.strictEqual(res.Rules.length, 1); - assert.deepStrictEqual(res.Rules[0], { - Expiration: { Days: 1 }, - ID: 'test-id', - Prefix: '', - Status: 'Enabled', - Transitions: [], - NoncurrentVersionTransitions: [], + err => { + assert.equal(err, null, `Err putting lifecycle config: ${err}`); + s3.getBucketLifecycleConfiguration({ Bucket: bucket }, (err, res) => { + assert.equal(err, null, 'Error getting lifecycle config: ' + `${err}`); + assert.strictEqual(res.Rules.length, 1); + assert.deepStrictEqual(res.Rules[0], { + Expiration: { Days: 1 }, + ID: 'test-id', + Prefix: '', + Status: 'Enabled', + Transitions: [], + NoncurrentVersionTransitions: [], + }); + done(); }); - done(); - }); - })); + } + )); it('should get bucket lifecycle config with filter prefix', done => - s3.putBucketLifecycleConfiguration({ - Bucket: bucket, - LifecycleConfiguration: { - Rules: [{ - ID: 'test-id', - Status: 'Enabled', - Filter: { Prefix: '' }, - Expiration: { Days: 1 }, - }], + s3.putBucketLifecycleConfiguration( + { + Bucket: bucket, + LifecycleConfiguration: { + Rules: [ + { + ID: 'test-id', + Status: 'Enabled', + Filter: { Prefix: '' }, + Expiration: { Days: 1 }, + }, + ], + }, }, - }, err => { - assert.equal(err, null, `Err putting lifecycle config: ${err}`); - s3.getBucketLifecycleConfiguration({ Bucket: bucket }, - (err, res) => { - assert.equal(err, null, 'Error getting lifecycle config: ' + - `${err}`); - assert.strictEqual(res.Rules.length, 1); - assert.deepStrictEqual(res.Rules[0], { - Expiration: { Days: 1 }, - ID: 'test-id', - Filter: { Prefix: '' }, - Status: 'Enabled', - Transitions: [], - NoncurrentVersionTransitions: [], + err => { + assert.equal(err, null, `Err putting lifecycle config: ${err}`); + s3.getBucketLifecycleConfiguration({ Bucket: bucket }, (err, res) => { + assert.equal(err, null, 'Error getting lifecycle config: ' + `${err}`); + assert.strictEqual(res.Rules.length, 1); + assert.deepStrictEqual(res.Rules[0], { + Expiration: { Days: 1 }, + ID: 'test-id', + Filter: { Prefix: '' }, + Status: 'Enabled', + Transitions: [], + NoncurrentVersionTransitions: [], + }); + done(); }); - done(); - }); - })); + } + )); - it('should get bucket lifecycle config with filter prefix and tags', - done => - s3.putBucketLifecycleConfiguration({ - Bucket: bucket, - LifecycleConfiguration: { - Rules: [{ - ID: 'test-id', - Status: 'Enabled', - Filter: { 
- And: { - Prefix: '', - Tags: [ - { - Key: 'key', - Value: 'value', + it('should get bucket lifecycle config with filter prefix and tags', done => + s3.putBucketLifecycleConfiguration( + { + Bucket: bucket, + LifecycleConfiguration: { + Rules: [ + { + ID: 'test-id', + Status: 'Enabled', + Filter: { + And: { + Prefix: '', + Tags: [ + { + Key: 'key', + Value: 'value', + }, + ], }, - ], + }, + Expiration: { Days: 1 }, }, - }, - Expiration: { Days: 1 }, - }], + ], + }, }, - }, err => { - assert.equal(err, null, `Err putting lifecycle config: ${err}`); - s3.getBucketLifecycleConfiguration({ Bucket: bucket }, - (err, res) => { - assert.equal(err, null, 'Error getting lifecycle config: ' + - `${err}`); - assert.strictEqual(res.Rules.length, 1); - assert.deepStrictEqual(res.Rules[0], { - Expiration: { Days: 1 }, - ID: 'test-id', - Filter: { - And: { - Prefix: '', - Tags: [ - { - Key: 'key', - Value: 'value', - }, - ], + err => { + assert.equal(err, null, `Err putting lifecycle config: ${err}`); + s3.getBucketLifecycleConfiguration({ Bucket: bucket }, (err, res) => { + assert.equal(err, null, 'Error getting lifecycle config: ' + `${err}`); + assert.strictEqual(res.Rules.length, 1); + assert.deepStrictEqual(res.Rules[0], { + Expiration: { Days: 1 }, + ID: 'test-id', + Filter: { + And: { + Prefix: '', + Tags: [ + { + Key: 'key', + Value: 'value', + }, + ], + }, }, - }, - Status: 'Enabled', - Transitions: [], - NoncurrentVersionTransitions: [], + Status: 'Enabled', + Transitions: [], + NoncurrentVersionTransitions: [], + }); + done(); }); - done(); - }); - })); + } + )); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/getBucketNotification.js b/tests/functional/aws-node-sdk/test/bucket/getBucketNotification.js index ce068a71f0..93f105695e 100644 --- a/tests/functional/aws-node-sdk/test/bucket/getBucketNotification.js +++ b/tests/functional/aws-node-sdk/test/bucket/getBucketNotification.js @@ -7,11 +7,13 @@ const BucketUtility = require('../../lib/utility/bucket-util'); const bucket = 'notificationtestbucket'; const notificationConfig = { - QueueConfigurations: [{ - Events: ['s3:ObjectCreated:*'], - QueueArn: 'arn:scality:bucketnotif:::target1', - Id: 'test-id', - }], + QueueConfigurations: [ + { + Events: ['s3:ObjectCreated:*'], + QueueArn: 'arn:scality:bucketnotif:::target1', + Id: 'test-id', + }, + ], }; // Check for the expected error response code and status code. 
@@ -47,15 +49,13 @@ describe('aws-sdk test get bucket notification', () => { afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); it('should return AccessDenied if user is not bucket owner', done => { - otherAccountS3.getBucketNotificationConfiguration({ Bucket: bucket }, - err => { + otherAccountS3.getBucketNotificationConfiguration({ Bucket: bucket }, err => { assertError(err, 'AccessDenied'); done(); }); }); - it('should not return an error if no notification configuration ' + - 'put to bucket', done => { + it('should not return an error if no notification configuration ' + 'put to bucket', done => { s3.getBucketNotificationConfiguration({ Bucket: bucket }, err => { assert.ifError(err); done(); @@ -63,18 +63,20 @@ describe('aws-sdk test get bucket notification', () => { }); it('should get bucket notification config', done => { - s3.putBucketNotificationConfiguration({ - Bucket: bucket, - NotificationConfiguration: notificationConfig, - }, err => { - assert.equal(err, null, `Err putting notification config: ${err}`); - s3.getBucketNotificationConfiguration({ Bucket: bucket }, - (err, res) => { - assert.equal(err, null, `Error getting notification config: ${err}`); - assert.deepStrictEqual(res.QueueConfigurations, notificationConfig.QueueConfigurations); - done(); - }); - }); + s3.putBucketNotificationConfiguration( + { + Bucket: bucket, + NotificationConfiguration: notificationConfig, + }, + err => { + assert.equal(err, null, `Err putting notification config: ${err}`); + s3.getBucketNotificationConfiguration({ Bucket: bucket }, (err, res) => { + assert.equal(err, null, `Error getting notification config: ${err}`); + assert.deepStrictEqual(res.QueueConfigurations, notificationConfig.QueueConfigurations); + done(); + }); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/getBucketObjectLock.js b/tests/functional/aws-node-sdk/test/bucket/getBucketObjectLock.js index c88f1db485..7534e3c9dd 100644 --- a/tests/functional/aws-node-sdk/test/bucket/getBucketObjectLock.js +++ b/tests/functional/aws-node-sdk/test/bucket/getBucketObjectLock.js @@ -49,10 +49,15 @@ describe('aws-sdk test get bucket object lock', () => { }); describe('config rules', () => { - beforeEach(done => s3.createBucket({ - Bucket: bucket, - ObjectLockEnabledForBucket: true, - }, done)); + beforeEach(done => + s3.createBucket( + { + Bucket: bucket, + ObjectLockEnabledForBucket: true, + }, + done + ) + ); afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); @@ -64,19 +69,22 @@ describe('aws-sdk test get bucket object lock', () => { }); it('should get bucket object lock config', done => { - s3.putObjectLockConfiguration({ - Bucket: bucket, - ObjectLockConfiguration: objectLockConfig, - }, err => { - assert.ifError(err); - s3.getObjectLockConfiguration({ Bucket: bucket }, (err, res) => { + s3.putObjectLockConfiguration( + { + Bucket: bucket, + ObjectLockConfiguration: objectLockConfig, + }, + err => { assert.ifError(err); - assert.deepStrictEqual(res, { - ObjectLockConfiguration: objectLockConfig, + s3.getObjectLockConfiguration({ Bucket: bucket }, (err, res) => { + assert.ifError(err); + assert.deepStrictEqual(res, { + ObjectLockConfiguration: objectLockConfig, + }); + done(); }); - done(); - }); - }); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/getBucketPolicy.js b/tests/functional/aws-node-sdk/test/bucket/getBucketPolicy.js index e9253aa5dc..ec8fac6bf6 100644 --- a/tests/functional/aws-node-sdk/test/bucket/getBucketPolicy.js +++ 
b/tests/functional/aws-node-sdk/test/bucket/getBucketPolicy.js @@ -8,13 +8,15 @@ const BucketUtility = require('../../lib/utility/bucket-util'); const bucket = 'getbucketpolicy-testbucket'; const bucketPolicy = { Version: '2012-10-17', - Statement: [{ - Sid: 'testid', - Effect: 'Allow', - Principal: '*', - Action: 's3:putBucketPolicy', - Resource: `arn:aws:s3:::${bucket}`, - }], + Statement: [ + { + Sid: 'testid', + Effect: 'Allow', + Principal: '*', + Action: 's3:putBucketPolicy', + Resource: `arn:aws:s3:::${bucket}`, + }, + ], }; const expectedPolicy = { Sid: 'testid', @@ -29,11 +31,16 @@ function assertError(err, expectedErr, cb) { if (expectedErr === null) { assert.strictEqual(err, null, `expected no error but got '${err}'`); } else { - assert.strictEqual(err.code, expectedErr, 'incorrect error response ' + - `code: should be '${expectedErr}' but got '${err.code}'`); - assert.strictEqual(err.statusCode, errors[expectedErr].code, - 'incorrect error status code: should be 400 but got ' + - `'${err.statusCode}'`); + assert.strictEqual( + err.code, + expectedErr, + 'incorrect error response ' + `code: should be '${expectedErr}' but got '${err.code}'` + ); + assert.strictEqual( + err.statusCode, + errors[expectedErr].code, + 'incorrect error status code: should be 400 but got ' + `'${err.statusCode}'` + ); } cb(); } @@ -44,8 +51,7 @@ describe('aws-sdk test get bucket policy', () => { const otherAccountS3 = new BucketUtility('lisa', {}).s3; it('should return NoSuchBucket error if bucket does not exist', done => { - s3.getBucketPolicy({ Bucket: bucket }, err => - assertError(err, 'NoSuchBucket', done)); + s3.getBucketPolicy({ Bucket: bucket }, err => assertError(err, 'NoSuchBucket', done)); }); describe('policy rules', () => { @@ -54,32 +60,31 @@ describe('aws-sdk test get bucket policy', () => { afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); it('should return MethodNotAllowed if user is not bucket owner', done => { - otherAccountS3.getBucketPolicy({ Bucket: bucket }, - err => assertError(err, 'MethodNotAllowed', done)); + otherAccountS3.getBucketPolicy({ Bucket: bucket }, err => assertError(err, 'MethodNotAllowed', done)); }); - it('should return NoSuchBucketPolicy error if no policy put to bucket', - done => { + it('should return NoSuchBucketPolicy error if no policy put to bucket', done => { s3.getBucketPolicy({ Bucket: bucket }, err => { assertError(err, 'NoSuchBucketPolicy', done); }); }); it('should get bucket policy', done => { - s3.putBucketPolicy({ - Bucket: bucket, - Policy: JSON.stringify(bucketPolicy), - }, err => { - assert.equal(err, null, `Err putting bucket policy: ${err}`); - s3.getBucketPolicy({ Bucket: bucket }, - (err, res) => { - const parsedRes = JSON.parse(res.Policy); - assert.equal(err, null, 'Error getting bucket policy: ' + - `${err}`); - assert.deepStrictEqual(parsedRes.Statement[0], expectedPolicy); - done(); - }); - }); + s3.putBucketPolicy( + { + Bucket: bucket, + Policy: JSON.stringify(bucketPolicy), + }, + err => { + assert.equal(err, null, `Err putting bucket policy: ${err}`); + s3.getBucketPolicy({ Bucket: bucket }, (err, res) => { + const parsedRes = JSON.parse(res.Policy); + assert.equal(err, null, 'Error getting bucket policy: ' + `${err}`); + assert.deepStrictEqual(parsedRes.Statement[0], expectedPolicy); + done(); + }); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/getBucketReplication.js b/tests/functional/aws-node-sdk/test/bucket/getBucketReplication.js index 66e97181c7..7312808f97 100644 --- 
a/tests/functional/aws-node-sdk/test/bucket/getBucketReplication.js +++ b/tests/functional/aws-node-sdk/test/bucket/getBucketReplication.js @@ -9,8 +9,7 @@ const BucketUtility = require('../../lib/utility/bucket-util'); const bucket = 'source-bucket'; const replicationConfig = { - Role: 'arn:aws:iam::account-id:role/src-resource,' + - 'arn:aws:iam::account-id:role/dest-resource', + Role: 'arn:aws:iam::account-id:role/src-resource,' + 'arn:aws:iam::account-id:role/dest-resource', Rules: [ { Destination: { Bucket: 'arn:aws:s3:::destination-bucket' }, @@ -29,45 +28,58 @@ describe('aws-node-sdk test getBucketReplication', () => { const config = getConfig('default', { signatureVersion: 'v4' }); s3 = new S3(config); otherAccountS3 = new BucketUtility('lisa', {}).s3; - return series([ - next => s3.createBucket({ Bucket: bucket }, next), - next => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: { - Status: 'Enabled', - }, - }, next), - ], done); + return series( + [ + next => s3.createBucket({ Bucket: bucket }, next), + next => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: { + Status: 'Enabled', + }, + }, + next + ), + ], + done + ); }); afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); - it("should return 'ReplicationConfigurationNotFoundError' if bucket does " + - 'not have a replication configuration', done => - s3.getBucketReplication({ Bucket: bucket }, err => { - assert(errorInstances.ReplicationConfigurationNotFoundError.is[err.code]); - return done(); - })); + it( + "should return 'ReplicationConfigurationNotFoundError' if bucket does " + + 'not have a replication configuration', + done => + s3.getBucketReplication({ Bucket: bucket }, err => { + assert(errorInstances.ReplicationConfigurationNotFoundError.is[err.code]); + return done(); + }) + ); - it('should get the replication configuration that was put on a bucket', - done => s3.putBucketReplication({ - Bucket: bucket, - ReplicationConfiguration: replicationConfig, - }, err => { - if (err) { - return done(err); - } - return s3.getBucketReplication({ Bucket: bucket }, (err, data) => { + it('should get the replication configuration that was put on a bucket', done => + s3.putBucketReplication( + { + Bucket: bucket, + ReplicationConfiguration: replicationConfig, + }, + err => { if (err) { return done(err); } - const expectedObj = { - ReplicationConfiguration: replicationConfig, - }; - assert.deepStrictEqual(data, expectedObj); - return done(); - }); - })); + return s3.getBucketReplication({ Bucket: bucket }, (err, data) => { + if (err) { + return done(err); + } + const expectedObj = { + ReplicationConfiguration: replicationConfig, + }; + assert.deepStrictEqual(data, expectedObj); + return done(); + }); + } + )); it('should return AccessDenied if user is not bucket owner', done => otherAccountS3.getBucketReplication({ Bucket: bucket }, err => { diff --git a/tests/functional/aws-node-sdk/test/bucket/getBucketTagging.js b/tests/functional/aws-node-sdk/test/bucket/getBucketTagging.js index 18938aa5b9..d745b31189 100644 --- a/tests/functional/aws-node-sdk/test/bucket/getBucketTagging.js +++ b/tests/functional/aws-node-sdk/test/bucket/getBucketTagging.js @@ -19,31 +19,43 @@ describe('aws-sdk test get bucket tagging', () => { afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); it('should return accessDenied if expected bucket owner does not match', done => { - async.waterfall([ - next => s3.getBucketTagging({ - AccountId: s3.AccountId, - Bucket: bucket, - ExpectedBucketOwner: 
'944690102203', - }, - (err, res) => { - next(err, res); - }), - ], err => { - assertError(err, 'AccessDenied'); - done(); - }); + async.waterfall( + [ + next => + s3.getBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + ExpectedBucketOwner: '944690102203', + }, + (err, res) => { + next(err, res); + } + ), + ], + err => { + assertError(err, 'AccessDenied'); + done(); + } + ); }); it('should not return accessDenied if expected bucket owner matches', done => { - async.series([ - next => s3.getBucketTagging({ AccountId: s3.AccountId, Bucket: bucket, ExpectedBucketOwner: s3.AccountId }, - (err, res) => { - next(err, res); - }), - ], err => { - assertError(err, 'NoSuchTagSet'); - done(); - }); + async.series( + [ + next => + s3.getBucketTagging( + { AccountId: s3.AccountId, Bucket: bucket, ExpectedBucketOwner: s3.AccountId }, + (err, res) => { + next(err, res); + } + ), + ], + err => { + assertError(err, 'NoSuchTagSet'); + done(); + } + ); }); it('should return the TagSet', done => { @@ -55,21 +67,32 @@ describe('aws-sdk test get bucket tagging', () => { }, ], }; - async.series([ - next => s3.putBucketTagging({ - AccountId: s3.AccountId, - Tagging: tagSet, - Bucket: bucket, - ExpectedBucketOwner: s3.AccountId - }, next), - next => s3.getBucketTagging({ - AccountId: s3.AccountId, - Bucket: bucket, - ExpectedBucketOwner: s3.AccountId - }, next), - ], (err, data) => { - assert.deepStrictEqual(data[1], tagSet); - done(); - }); + async.series( + [ + next => + s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: tagSet, + Bucket: bucket, + ExpectedBucketOwner: s3.AccountId, + }, + next + ), + next => + s3.getBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + ExpectedBucketOwner: s3.AccountId, + }, + next + ), + ], + (err, data) => { + assert.deepStrictEqual(data[1], tagSet); + done(); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/getCors.js b/tests/functional/aws-node-sdk/test/bucket/getCors.js index 12973c203f..93ed1bdfb2 100644 --- a/tests/functional/aws-node-sdk/test/bucket/getCors.js +++ b/tests/functional/aws-node-sdk/test/bucket/getCors.js @@ -13,31 +13,42 @@ describe('GET bucket cors', () => { afterEach(() => bucketUtil.deleteOne(bucketName)); describe('on bucket with existing cors configuration', () => { - const sampleCors = { CORSRules: [ - { AllowedMethods: ['PUT', 'POST', 'DELETE'], - AllowedOrigins: ['http://www.example.com'], - AllowedHeaders: ['*'], - MaxAgeSeconds: 3000, - ExposeHeaders: ['x-amz-server-side-encryption'] }, - { AllowedMethods: ['GET'], - AllowedOrigins: ['*'], - ExposeHeaders: [], - AllowedHeaders: ['*'], - MaxAgeSeconds: 3000 }, - ] }; + const sampleCors = { + CORSRules: [ + { + AllowedMethods: ['PUT', 'POST', 'DELETE'], + AllowedOrigins: ['http://www.example.com'], + AllowedHeaders: ['*'], + MaxAgeSeconds: 3000, + ExposeHeaders: ['x-amz-server-side-encryption'], + }, + { + AllowedMethods: ['GET'], + AllowedOrigins: ['*'], + ExposeHeaders: [], + AllowedHeaders: ['*'], + MaxAgeSeconds: 3000, + }, + ], + }; before(() => - s3.createBucket({ Bucket: bucketName }).promise() - .then(() => s3.putBucketCors({ - Bucket: bucketName, - CORSConfiguration: sampleCors, - }).promise())); + s3 + .createBucket({ Bucket: bucketName }) + .promise() + .then(() => + s3 + .putBucketCors({ + Bucket: bucketName, + CORSConfiguration: sampleCors, + }) + .promise() + ) + ); it('should return cors configuration successfully', done => { s3.getBucketCors({ Bucket: bucketName }, (err, data) => { - assert.strictEqual(err, null, - 
`Found unexpected err ${err}`); - assert.deepStrictEqual(data.CORSRules, - sampleCors.CORSRules); + assert.strictEqual(err, null, `Found unexpected err ${err}`); + assert.deepStrictEqual(data.CORSRules, sampleCors.CORSRules); return done(); }); }); @@ -45,49 +56,60 @@ describe('GET bucket cors', () => { describe('mixed case for AllowedHeader', () => { const testValue = 'tEsTvAlUe'; - const sampleCors = { CORSRules: [ - { AllowedMethods: ['PUT', 'POST', 'DELETE'], - AllowedOrigins: ['http://www.example.com'], - AllowedHeaders: [testValue] }, - ] }; + const sampleCors = { + CORSRules: [ + { + AllowedMethods: ['PUT', 'POST', 'DELETE'], + AllowedOrigins: ['http://www.example.com'], + AllowedHeaders: [testValue], + }, + ], + }; before(() => - s3.createBucket({ Bucket: bucketName }).promise() - .then(() => s3.putBucketCors({ - Bucket: bucketName, - CORSConfiguration: sampleCors, - }).promise())); + s3 + .createBucket({ Bucket: bucketName }) + .promise() + .then(() => + s3 + .putBucketCors({ + Bucket: bucketName, + CORSConfiguration: sampleCors, + }) + .promise() + ) + ); - it('should be preserved when putting / getting cors resource', - done => { + it('should be preserved when putting / getting cors resource', done => { s3.getBucketCors({ Bucket: bucketName }, (err, data) => { - assert.strictEqual(err, null, - `Found unexpected err ${err}`); - assert.deepStrictEqual(data.CORSRules[0].AllowedHeaders, - sampleCors.CORSRules[0].AllowedHeaders); + assert.strictEqual(err, null, `Found unexpected err ${err}`); + assert.deepStrictEqual(data.CORSRules[0].AllowedHeaders, sampleCors.CORSRules[0].AllowedHeaders); return done(); }); }); }); describe('uppercase for AllowedMethod', () => { - const sampleCors = { CORSRules: [ - { AllowedMethods: ['PUT', 'POST', 'DELETE'], - AllowedOrigins: ['http://www.example.com'] }, - ] }; + const sampleCors = { + CORSRules: [{ AllowedMethods: ['PUT', 'POST', 'DELETE'], AllowedOrigins: ['http://www.example.com'] }], + }; before(() => - s3.createBucket({ Bucket: bucketName }).promise() - .then(() => s3.putBucketCors({ - Bucket: bucketName, - CORSConfiguration: sampleCors, - }).promise())); + s3 + .createBucket({ Bucket: bucketName }) + .promise() + .then(() => + s3 + .putBucketCors({ + Bucket: bucketName, + CORSConfiguration: sampleCors, + }) + .promise() + ) + ); - it('should be preserved when retrieving cors resource', - done => { + it('should be preserved when retrieving cors resource', done => { s3.getBucketCors({ Bucket: bucketName }, (err, data) => { - assert.strictEqual(err, null, - `Found unexpected err ${err}`); - assert.deepStrictEqual(data.CORSRules[0].AllowedMethods, - sampleCors.CORSRules[0].AllowedMethods); + assert.strictEqual(err, null, `Found unexpected err ${err}`); + assert.deepStrictEqual(data.CORSRules[0].AllowedMethods, sampleCors.CORSRules[0].AllowedMethods); return done(); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/getLocation.js b/tests/functional/aws-node-sdk/test/bucket/getLocation.js index 6bc83d398c..d928682d80 100644 --- a/tests/functional/aws-node-sdk/test/bucket/getLocation.js +++ b/tests/functional/aws-node-sdk/test/bucket/getLocation.js @@ -15,8 +15,7 @@ describeSkipAWS('GET bucket location ', () => { const otherAccountBucketUtility = new BucketUtility('lisa', {}); const otherAccountS3 = otherAccountBucketUtility.s3; const locationConstraints = config.locationConstraints; - Object.keys(locationConstraints).forEach( - location => { + Object.keys(locationConstraints).forEach(location => { if (location === 'us-east-1') { // 
if location is us-east-1 should return empty string // see next test. @@ -27,24 +26,22 @@ describeSkipAWS('GET bucket location ', () => { return; } describe(`with location: ${location}`, () => { - before(() => s3.createBucket( - { - Bucket: bucketName, - CreateBucketConfiguration: { - LocationConstraint: location, - }, - }).promise()); + before(() => + s3 + .createBucket({ + Bucket: bucketName, + CreateBucketConfiguration: { + LocationConstraint: location, + }, + }) + .promise() + ); after(() => bucketUtil.deleteOne(bucketName)); - it(`should return location configuration: ${location} ` + - 'successfully', - done => { - s3.getBucketLocation({ Bucket: bucketName }, - (err, data) => { - assert.strictEqual(err, null, - `Found unexpected err ${err}`); - assert.deepStrictEqual(data.LocationConstraint, - location); + it(`should return location configuration: ${location} ` + 'successfully', done => { + s3.getBucketLocation({ Bucket: bucketName }, (err, data) => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + assert.deepStrictEqual(data.LocationConstraint, location); return done(); }); }); @@ -52,20 +49,20 @@ describeSkipAWS('GET bucket location ', () => { }); describe('with location us-east-1', () => { - before(() => s3.createBucket( - { - Bucket: bucketName, - CreateBucketConfiguration: { - LocationConstraint: 'us-east-1', - }, - }).promise()); + before(() => + s3 + .createBucket({ + Bucket: bucketName, + CreateBucketConfiguration: { + LocationConstraint: 'us-east-1', + }, + }) + .promise() + ); afterEach(() => bucketUtil.deleteOne(bucketName)); - it('should return empty location', - done => { - s3.getBucketLocation({ Bucket: bucketName }, - (err, data) => { - assert.strictEqual(err, null, - `Found unexpected err ${err}`); + it('should return empty location', done => { + s3.getBucketLocation({ Bucket: bucketName }, (err, data) => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); assert.deepStrictEqual(data.LocationConstraint, ''); return done(); }); @@ -75,8 +72,7 @@ describeSkipAWS('GET bucket location ', () => { describe('without location configuration', () => { after(() => { process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucketName) - .catch(err => { + return bucketUtil.deleteOne(bucketName).catch(err => { process.stdout.write(`Error in after: ${err}\n`); throw err; }); @@ -89,18 +85,15 @@ describeSkipAWS('GET bucket location ', () => { request.httpRequest.body = ''; }); request.send(err => { - assert.strictEqual(err, null, 'Error creating bucket: ' + - `${err}`); + assert.strictEqual(err, null, 'Error creating bucket: ' + `${err}`); const host = request.service.endpoint.hostname; let endpoint = config.restEndpoints[host]; // s3 actually returns '' for us-east-1 if (endpoint === 'us-east-1') { endpoint = ''; } - s3.getBucketLocation({ Bucket: bucketName }, - (err, data) => { - assert.strictEqual(err, null, 'Expected succes, ' + - `got error ${JSON.stringify(err)}`); + s3.getBucketLocation({ Bucket: bucketName }, (err, data) => { + assert.strictEqual(err, null, 'Expected succes, ' + `got error ${JSON.stringify(err)}`); assert.strictEqual(data.LocationConstraint, endpoint); done(); }); @@ -109,19 +102,20 @@ describeSkipAWS('GET bucket location ', () => { }); describe('with location configuration', () => { - before(() => s3.createBucket( - { - Bucket: bucketName, - CreateBucketConfiguration: { - LocationConstraint: 'us-east-1', - }, - }).promise()); + before(() => + s3 + .createBucket({ + Bucket: bucketName, + 
CreateBucketConfiguration: { + LocationConstraint: 'us-east-1', + }, + }) + .promise() + ); after(() => bucketUtil.deleteOne(bucketName)); - it('should return AccessDenied if user is not bucket owner', - done => { - otherAccountS3.getBucketLocation({ Bucket: bucketName }, - err => { + it('should return AccessDenied if user is not bucket owner', done => { + otherAccountS3.getBucketLocation({ Bucket: bucketName }, err => { assert(err); assert.strictEqual(err.code, 'AccessDenied'); assert.strictEqual(err.statusCode, 403); diff --git a/tests/functional/aws-node-sdk/test/bucket/getWebsite.js b/tests/functional/aws-node-sdk/test/bucket/getWebsite.js index 290b719d88..61e3376e41 100644 --- a/tests/functional/aws-node-sdk/test/bucket/getWebsite.js +++ b/tests/functional/aws-node-sdk/test/bucket/getWebsite.js @@ -31,16 +31,22 @@ describe('GET bucket website', () => { describe('with existing bucket configuration', () => { before(() => - s3.createBucket({ Bucket: bucketName }).promise() - .then(() => s3.putBucketWebsite({ - Bucket: bucketName, - WebsiteConfiguration: config, - }).promise())); + s3 + .createBucket({ Bucket: bucketName }) + .promise() + .then(() => + s3 + .putBucketWebsite({ + Bucket: bucketName, + WebsiteConfiguration: config, + }) + .promise() + ) + ); it('should return bucket website xml successfully', done => { s3.getBucketWebsite({ Bucket: bucketName }, (err, data) => { - assert.strictEqual(err, null, - `Found unexpected err ${err}`); + assert.strictEqual(err, null, `Found unexpected err ${err}`); const configObject = Object.assign({}, config); assert.deepStrictEqual(data, configObject); return done(); diff --git a/tests/functional/aws-node-sdk/test/bucket/head.js b/tests/functional/aws-node-sdk/test/bucket/head.js index 79c0cc0c51..df35b3b542 100644 --- a/tests/functional/aws-node-sdk/test/bucket/head.js +++ b/tests/functional/aws-node-sdk/test/bucket/head.js @@ -3,7 +3,6 @@ const assert = require('assert'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); - describe('HEAD bucket', () => { withV4(sigCfg => { let bucketUtil; @@ -15,15 +14,12 @@ describe('HEAD bucket', () => { }); // aws-sdk now (v2.363.0) returns 'UriParameterError' error - it.skip('should return an error to a head request without a ' + - 'bucket name', - done => { - s3.headBucket({ Bucket: '' }, err => { - assert.notEqual(err, null, - 'Expected failure but got success'); - assert.strictEqual(err.code, 405); - done(); - }); + it.skip('should return an error to a head request without a ' + 'bucket name', done => { + s3.headBucket({ Bucket: '' }, err => { + assert.notEqual(err, null, 'Expected failure but got success'); + assert.strictEqual(err.code, 405); + done(); }); + }); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/listingCornerCases.js b/tests/functional/aws-node-sdk/test/bucket/listingCornerCases.js index f73e0348ef..d066857afd 100644 --- a/tests/functional/aws-node-sdk/test/bucket/listingCornerCases.js +++ b/tests/functional/aws-node-sdk/test/bucket/listingCornerCases.js @@ -51,26 +51,32 @@ describe('Listing corner cases tests', () => { before(done => { const config = getConfig('default', { signatureVersion: 'v4' }); s3 = new AWS.S3(config); - s3.createBucket( - { Bucket }, (err, data) => { - if (err) { - done(err, data); - } - async.each( - objects, (o, next) => { - s3.putObject(o, (err, data) => { - next(err, data); - }); - }, done); - }); + s3.createBucket({ Bucket }, (err, data) => { + if (err) { + done(err, data); + } + 
async.each( + objects, + (o, next) => { + s3.putObject(o, (err, data) => { + next(err, data); + }); + }, + done + ); + }); }); after(done => { s3.listObjects({ Bucket }, (err, data) => { - async.each(data.Contents, (o, next) => { - s3.deleteObject({ Bucket, Key: o.Key }, next); - }, () => { - s3.deleteBucket({ Bucket }, done); - }); + async.each( + data.Contents, + (o, next) => { + s3.deleteObject({ Bucket, Key: o.Key }, next); + }, + () => { + s3.deleteBucket({ Bucket }, done); + } + ); }); }); it('should list everything', done => { @@ -101,257 +107,194 @@ describe('Listing corner cases tests', () => { }); }); it('should list with valid marker', done => { - s3.listObjects( - { Bucket, - Delimiter: '/', + s3.listObjects({ Bucket, Delimiter: '/', Marker: 'notes/summer/1.txt' }, (err, data) => { + assert.strictEqual(err, null); + cutAttributes(data); + assert.deepStrictEqual(data, { + IsTruncated: false, Marker: 'notes/summer/1.txt', - }, - (err, data) => { - assert.strictEqual(err, null); - cutAttributes(data); - assert.deepStrictEqual(data, { - IsTruncated: false, - Marker: 'notes/summer/1.txt', - Contents: [], - Name: Bucket, - Prefix: '', - Delimiter: '/', - MaxKeys: 1000, - CommonPrefixes: [], - }); - done(); + Contents: [], + Name: Bucket, + Prefix: '', + Delimiter: '/', + MaxKeys: 1000, + CommonPrefixes: [], }); + done(); + }); }); it('should list with unexpected marker', done => { - s3.listObjects( - { Bucket, - Delimiter: '/', + s3.listObjects({ Bucket, Delimiter: '/', Marker: 'zzzz' }, (err, data) => { + assert.strictEqual(err, null); + assert.deepStrictEqual(data, { + IsTruncated: false, Marker: 'zzzz', - }, - (err, data) => { - assert.strictEqual(err, null); - assert.deepStrictEqual(data, { - IsTruncated: false, - Marker: 'zzzz', - Contents: [], - Name: Bucket, - Prefix: '', - Delimiter: '/', - MaxKeys: 1000, - CommonPrefixes: [], - }); - done(); + Contents: [], + Name: Bucket, + Prefix: '', + Delimiter: '/', + MaxKeys: 1000, + CommonPrefixes: [], }); + done(); + }); }); it('should list with unexpected marker and prefix', done => { - s3.listObjects( - { Bucket, - Delimiter: '/', + s3.listObjects({ Bucket, Delimiter: '/', Marker: 'notes/summer0', Prefix: 'notes/summer/' }, (err, data) => { + assert.strictEqual(err, null); + assert.deepStrictEqual(data, { + IsTruncated: false, Marker: 'notes/summer0', + Contents: [], + Name: Bucket, Prefix: 'notes/summer/', - }, - (err, data) => { - assert.strictEqual(err, null); - assert.deepStrictEqual(data, { - IsTruncated: false, - Marker: 'notes/summer0', - Contents: [], - Name: Bucket, - Prefix: 'notes/summer/', - Delimiter: '/', - MaxKeys: 1000, - CommonPrefixes: [], - }); - done(); + Delimiter: '/', + MaxKeys: 1000, + CommonPrefixes: [], }); + done(); + }); }); it('should list with MaxKeys', done => { - s3.listObjects( - { Bucket, + s3.listObjects({ Bucket, MaxKeys: 3 }, (err, data) => { + assert.strictEqual(err, null); + cutAttributes(data); + assert.deepStrictEqual(data, { + Marker: '', + IsTruncated: true, + Contents: [objects[0].Key, objects[1].Key, objects[2].Key], + Name: Bucket, + Prefix: '', MaxKeys: 3, - }, - (err, data) => { - assert.strictEqual(err, null); - cutAttributes(data); - assert.deepStrictEqual(data, { - Marker: '', - IsTruncated: true, - Contents: [objects[0].Key, - objects[1].Key, - objects[2].Key, - ], - Name: Bucket, - Prefix: '', - MaxKeys: 3, - CommonPrefixes: [], - }); - done(); + CommonPrefixes: [], }); + done(); + }); }); it('should list with big MaxKeys', done => { - s3.listObjects( - { Bucket, + 
s3.listObjects({ Bucket, MaxKeys: 15000 }, (err, data) => { + assert.strictEqual(err, null); + cutAttributes(data); + assert.deepStrictEqual(data, { + Marker: '', + IsTruncated: false, + Contents: [ + objects[0].Key, + objects[1].Key, + objects[2].Key, + objects[3].Key, + objects[4].Key, + objects[5].Key, + objects[6].Key, + objects[7].Key, + objects[8].Key, + objects[9].Key, + ], + Name: Bucket, + Prefix: '', MaxKeys: 15000, - }, - (err, data) => { - assert.strictEqual(err, null); - cutAttributes(data); - assert.deepStrictEqual(data, { - Marker: '', - IsTruncated: false, - Contents: [objects[0].Key, - objects[1].Key, - objects[2].Key, - objects[3].Key, - objects[4].Key, - objects[5].Key, - objects[6].Key, - objects[7].Key, - objects[8].Key, - objects[9].Key, - ], - Name: Bucket, - Prefix: '', - MaxKeys: 15000, - CommonPrefixes: [], - }); - done(); + CommonPrefixes: [], }); + done(); + }); }); it('should list with delimiter', done => { - s3.listObjects( - { Bucket, + s3.listObjects({ Bucket, Delimiter: '/' }, (err, data) => { + assert.strictEqual(err, null); + cutAttributes(data); + assert.deepStrictEqual(data, { + Marker: '', + IsTruncated: false, + Contents: [objects[0].Key], + Name: Bucket, + Prefix: '', Delimiter: '/', - }, - (err, data) => { - assert.strictEqual(err, null); - cutAttributes(data); - assert.deepStrictEqual(data, { - Marker: '', - IsTruncated: false, - Contents: [objects[0].Key], - Name: Bucket, - Prefix: '', - Delimiter: '/', - MaxKeys: 1000, - CommonPrefixes: ['notes/'], - }); - done(); + MaxKeys: 1000, + CommonPrefixes: ['notes/'], }); + done(); + }); }); it('should list with long delimiter', done => { - s3.listObjects( - { Bucket, + s3.listObjects({ Bucket, Delimiter: 'notes/summer' }, (err, data) => { + assert.strictEqual(err, null); + cutAttributes(data); + assert.deepStrictEqual(data, { + Marker: '', + IsTruncated: false, + Contents: [ + objects[0].Key, + objects[1].Key, + objects[2].Key, + objects[3].Key, + objects[7].Key, + objects[8].Key, + objects[9].Key, + ], + Name: Bucket, + Prefix: '', Delimiter: 'notes/summer', - }, - (err, data) => { - assert.strictEqual(err, null); - cutAttributes(data); - assert.deepStrictEqual(data, { - Marker: '', - IsTruncated: false, - Contents: [objects[0].Key, - objects[1].Key, - objects[2].Key, - objects[3].Key, - objects[7].Key, - objects[8].Key, - objects[9].Key, - ], - Name: Bucket, - Prefix: '', - Delimiter: 'notes/summer', - MaxKeys: 1000, - CommonPrefixes: ['notes/summer'], - }); - done(); + MaxKeys: 1000, + CommonPrefixes: ['notes/summer'], }); + done(); + }); }); it('should list with delimiter and prefix related to #147', done => { - s3.listObjects( - { Bucket, - Delimiter: '/', + s3.listObjects({ Bucket, Delimiter: '/', Prefix: 'notes/' }, (err, data) => { + assert.strictEqual(err, null); + cutAttributes(data); + assert.deepStrictEqual(data, { + Marker: '', + IsTruncated: false, + Contents: [objects[7].Key, objects[8].Key], + Name: Bucket, Prefix: 'notes/', - }, - (err, data) => { - assert.strictEqual(err, null); - cutAttributes(data); - assert.deepStrictEqual(data, { - Marker: '', - IsTruncated: false, - Contents: [ - objects[7].Key, - objects[8].Key, - ], - Name: Bucket, - Prefix: 'notes/', - Delimiter: '/', - MaxKeys: 1000, - CommonPrefixes: [ - 'notes/spring/', - 'notes/summer/', - 'notes/zaphod/', - ], - }); - done(); + Delimiter: '/', + MaxKeys: 1000, + CommonPrefixes: ['notes/spring/', 'notes/summer/', 'notes/zaphod/'], }); + done(); + }); }); it('should list with prefix and marker related to #147', done => { - 
s3.listObjects( - { Bucket, - Delimiter: '/', - Prefix: 'notes/', + s3.listObjects({ Bucket, Delimiter: '/', Prefix: 'notes/', Marker: 'notes/year.txt' }, (err, data) => { + assert.strictEqual(err, null); + cutAttributes(data); + assert.deepStrictEqual(data, { Marker: 'notes/year.txt', - }, - (err, data) => { - assert.strictEqual(err, null); - cutAttributes(data); - assert.deepStrictEqual(data, { - Marker: 'notes/year.txt', - IsTruncated: false, - Contents: [objects[8].Key], - Name: Bucket, - Prefix: 'notes/', - Delimiter: '/', - MaxKeys: 1000, - CommonPrefixes: ['notes/zaphod/'], - }); - done(); + IsTruncated: false, + Contents: [objects[8].Key], + Name: Bucket, + Prefix: 'notes/', + Delimiter: '/', + MaxKeys: 1000, + CommonPrefixes: ['notes/zaphod/'], }); + done(); + }); }); it('should list with all parameters 1 of 5', done => { - s3.listObjects( - { Bucket, - Delimiter: '/', - Prefix: 'notes/', + s3.listObjects({ Bucket, Delimiter: '/', Prefix: 'notes/', Marker: 'notes/', MaxKeys: 1 }, (err, data) => { + assert.strictEqual(err, null); + cutAttributes(data); + assert.deepStrictEqual(data, { Marker: 'notes/', + NextMarker: 'notes/spring/', + IsTruncated: true, + Contents: [], + Name: Bucket, + Prefix: 'notes/', + Delimiter: '/', MaxKeys: 1, - }, - (err, data) => { - assert.strictEqual(err, null); - cutAttributes(data); - assert.deepStrictEqual(data, { - Marker: 'notes/', - NextMarker: 'notes/spring/', - IsTruncated: true, - Contents: [], - Name: Bucket, - Prefix: 'notes/', - Delimiter: '/', - MaxKeys: 1, - CommonPrefixes: ['notes/spring/'], - }); - done(); + CommonPrefixes: ['notes/spring/'], }); + done(); + }); }); it('should list with all parameters 2 of 5', done => { s3.listObjects( - { Bucket, - Delimiter: '/', - Prefix: 'notes/', - Marker: 'notes/spring/', - MaxKeys: 1, - }, + { Bucket, Delimiter: '/', Prefix: 'notes/', Marker: 'notes/spring/', MaxKeys: 1 }, (err, data) => { assert.strictEqual(err, null); cutAttributes(data); @@ -367,16 +310,12 @@ describe('Listing corner cases tests', () => { CommonPrefixes: ['notes/summer/'], }); done(); - }); + } + ); }); it('should list with all parameters 3 of 5', done => { s3.listObjects( - { Bucket, - Delimiter: '/', - Prefix: 'notes/', - Marker: 'notes/summer/', - MaxKeys: 1, - }, + { Bucket, Delimiter: '/', Prefix: 'notes/', Marker: 'notes/summer/', MaxKeys: 1 }, (err, data) => { assert.strictEqual(err, null); cutAttributes(data); @@ -392,16 +331,12 @@ describe('Listing corner cases tests', () => { CommonPrefixes: [], }); done(); - }); + } + ); }); it('should list with all parameters 4 of 5', done => { s3.listObjects( - { Bucket, - Delimiter: '/', - Prefix: 'notes/', - Marker: 'notes/year.txt', - MaxKeys: 1, - }, + { Bucket, Delimiter: '/', Prefix: 'notes/', Marker: 'notes/year.txt', MaxKeys: 1 }, (err, data) => { assert.strictEqual(err, null); cutAttributes(data); @@ -417,16 +352,12 @@ describe('Listing corner cases tests', () => { CommonPrefixes: [], }); done(); - }); + } + ); }); it('should list with all parameters 5 of 5', done => { s3.listObjects( - { Bucket, - Delimiter: '/', - Prefix: 'notes/', - Marker: 'notes/yore.rs', - MaxKeys: 1, - }, + { Bucket, Delimiter: '/', Prefix: 'notes/', Marker: 'notes/yore.rs', MaxKeys: 1 }, (err, data) => { assert.strictEqual(err, null); cutAttributes(data); @@ -441,92 +372,106 @@ describe('Listing corner cases tests', () => { CommonPrefixes: ['notes/zaphod/'], }); done(); - }); + } + ); }); it('should ends listing on last common prefix', done => { - s3.putObject({ - Bucket, - Key: 
'notes/zaphod/TheFourth.txt', - Body: '', - }, err => { - if (!err) { - s3.listObjects( - { Bucket, - Delimiter: '/', - Prefix: 'notes/', - Marker: 'notes/yore.rs', - MaxKeys: 1, - }, - (err, data) => { - assert.strictEqual(err, null); - cutAttributes(data); - assert.deepStrictEqual(data, { - IsTruncated: false, - Marker: 'notes/yore.rs', - Contents: [], - Name: Bucket, - Prefix: 'notes/', - Delimiter: '/', - MaxKeys: 1, - CommonPrefixes: ['notes/zaphod/'], - }); - done(); - }); + s3.putObject( + { + Bucket, + Key: 'notes/zaphod/TheFourth.txt', + Body: '', + }, + err => { + if (!err) { + s3.listObjects( + { Bucket, Delimiter: '/', Prefix: 'notes/', Marker: 'notes/yore.rs', MaxKeys: 1 }, + (err, data) => { + assert.strictEqual(err, null); + cutAttributes(data); + assert.deepStrictEqual(data, { + IsTruncated: false, + Marker: 'notes/yore.rs', + Contents: [], + Name: Bucket, + Prefix: 'notes/', + Delimiter: '/', + MaxKeys: 1, + CommonPrefixes: ['notes/zaphod/'], + }); + done(); + } + ); + } } - }); + ); }); it('should not list DeleteMarkers for version suspended buckets', done => { const obj = { name: 'testDeleteMarker.txt', value: 'foo' }; const bucketName = `bucket-test-delete-markers-not-listed${Date.now()}`; let objectCount = 0; - return async.waterfall([ - next => s3.createBucket({ Bucket: bucketName }, err => next(err)), - next => { - const params = { - Bucket: bucketName, - VersioningConfiguration: { - Status: 'Suspended', - }, - }; - return s3.putBucketVersioning(params, err => - next(err)); - }, - next => s3.putObject({ - Bucket: bucketName, - Key: obj.name, - Body: obj.value, - }, err => - next(err)), - next => s3.listObjectsV2({ Bucket: bucketName }, - (err, res) => { - if (err) { - return next(err); - } - objectCount = res.Contents.length; - assert.strictEqual(res.Contents.some(c => c.Key === obj.name), true); - return next(); - }), - next => s3.deleteObject({ - Bucket: bucketName, - Key: obj.name, - }, function test(err) { - const headers = this.httpResponse.headers; - assert.strictEqual( - headers['x-amz-delete-marker'], 'true'); - return next(err); - }), - next => s3.listObjectsV2({ Bucket: bucketName }, - (err, res) => { - if (err) { - return next(err); - } - assert.strictEqual(res.Contents.length, objectCount - 1); - assert.strictEqual(res.Contents.some(c => c.Key === obj.name), false); - return next(); - }), - next => s3.deleteObject({ Bucket: bucketName, Key: obj.name, VersionId: 'null' }, err => next(err)), - next => s3.deleteBucket({ Bucket: bucketName }, err => next(err)) - ], err => done(err)); + return async.waterfall( + [ + next => s3.createBucket({ Bucket: bucketName }, err => next(err)), + next => { + const params = { + Bucket: bucketName, + VersioningConfiguration: { + Status: 'Suspended', + }, + }; + return s3.putBucketVersioning(params, err => next(err)); + }, + next => + s3.putObject( + { + Bucket: bucketName, + Key: obj.name, + Body: obj.value, + }, + err => next(err) + ), + next => + s3.listObjectsV2({ Bucket: bucketName }, (err, res) => { + if (err) { + return next(err); + } + objectCount = res.Contents.length; + assert.strictEqual( + res.Contents.some(c => c.Key === obj.name), + true + ); + return next(); + }), + next => + s3.deleteObject( + { + Bucket: bucketName, + Key: obj.name, + }, + function test(err) { + const headers = this.httpResponse.headers; + assert.strictEqual(headers['x-amz-delete-marker'], 'true'); + return next(err); + } + ), + next => + s3.listObjectsV2({ Bucket: bucketName }, (err, res) => { + if (err) { + return next(err); + } + 
assert.strictEqual(res.Contents.length, objectCount - 1); + assert.strictEqual( + res.Contents.some(c => c.Key === obj.name), + false + ); + return next(); + }), + next => s3.deleteObject({ Bucket: bucketName, Key: obj.name, VersionId: 'null' }, err => next(err)), + next => s3.deleteBucket({ Bucket: bucketName }, err => next(err)), + ], + err => done(err) + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/put.js b/tests/functional/aws-node-sdk/test/bucket/put.js index 6bf63335f2..ca7e5a0c80 100644 --- a/tests/functional/aws-node-sdk/test/bucket/put.js +++ b/tests/functional/aws-node-sdk/test/bucket/put.js @@ -46,43 +46,51 @@ describe('PUT Bucket - AWS.S3.createBucket', () => { }); describe('create bucket twice', () => { - beforeEach(done => bucketUtil.s3.createBucket({ Bucket: - bucketName, - CreateBucketConfiguration: { - LocationConstraint: 'us-east-1', - }, - }, done)); - afterEach(done => bucketUtil.s3.deleteBucket({ Bucket: bucketName }, - done)); + beforeEach(done => + bucketUtil.s3.createBucket( + { + Bucket: bucketName, + CreateBucketConfiguration: { + LocationConstraint: 'us-east-1', + }, + }, + done + ) + ); + afterEach(done => bucketUtil.s3.deleteBucket({ Bucket: bucketName }, done)); // AWS JS SDK sends a request with locationConstraint us-east-1 if // no locationConstraint provided. // Skip this test on E2E because it is making the asumption that the // default region is us-east-1 which is not the case for the E2E - itSkipIfE2E('should return a 200 if no locationConstraints ' + - 'provided.', done => { + itSkipIfE2E('should return a 200 if no locationConstraints ' + 'provided.', done => { bucketUtil.s3.createBucket({ Bucket: bucketName }, done); }); it('should return a 200 if us-east behavior', done => { - bucketUtil.s3.createBucket({ - Bucket: bucketName, - CreateBucketConfiguration: { - LocationConstraint: 'us-east-1', + bucketUtil.s3.createBucket( + { + Bucket: bucketName, + CreateBucketConfiguration: { + LocationConstraint: 'us-east-1', + }, }, - }, done); + done + ); }); it('should return a 409 if us-west behavior', done => { - bucketUtil.s3.createBucket({ - Bucket: bucketName, - CreateBucketConfiguration: { - LocationConstraint: 'scality-us-west-1', + bucketUtil.s3.createBucket( + { + Bucket: bucketName, + CreateBucketConfiguration: { + LocationConstraint: 'scality-us-west-1', + }, }, - }, error => { - assert.notEqual(error, null, - 'Expected failure but got success'); - assert.strictEqual(error.code, 'BucketAlreadyOwnedByYou'); - assert.strictEqual(error.statusCode, 409); - done(); - }); + error => { + assert.notEqual(error, null, 'Expected failure but got success'); + assert.strictEqual(error.code, 'BucketAlreadyOwnedByYou'); + assert.strictEqual(error.statusCode, 409); + done(); + } + ); }); }); @@ -96,15 +104,13 @@ describe('PUT Bucket - AWS.S3.createBucket', () => { bucketUtil .createOne(bucketName) .then(() => { - const e = new Error('Expect failure in creation, ' + - 'but it succeeded'); + const e = new Error('Expect failure in creation, ' + 'but it succeeded'); return done(e); }) .catch(error => { assert.strictEqual(error.code, expectedCode); - assert.strictEqual(error.statusCode, - expectedStatus); + assert.strictEqual(error.statusCode, expectedStatus); done(); }); }; @@ -128,45 +134,35 @@ describe('PUT Bucket - AWS.S3.createBucket', () => { testFn(shortName, done); }); - it('should return 403 if name is reserved (e.g., METADATA)', - done => { - const reservedName = 'METADATA'; - testFn(reservedName, done, 403, 'AccessDenied'); - }); + 
it('should return 403 if name is reserved (e.g., METADATA)', done => { + const reservedName = 'METADATA'; + testFn(reservedName, done, 403, 'AccessDenied'); + }); - itSkipIfAWS('should return 400 if name is longer than 63 chars', - done => { - const longName = 'x'.repeat(64); - testFn(longName, done); - } - ); + itSkipIfAWS('should return 400 if name is longer than 63 chars', done => { + const longName = 'x'.repeat(64); + testFn(longName, done); + }); - itSkipIfAWS('should return 400 if name is formatted as IP address', - done => { - const ipAddress = '192.168.5.4'; - testFn(ipAddress, done); - } - ); + itSkipIfAWS('should return 400 if name is formatted as IP address', done => { + const ipAddress = '192.168.5.4'; + testFn(ipAddress, done); + }); - itSkipIfAWS('should return 400 if name starts with period', - done => { - const invalidName = '.myawsbucket'; - testFn(invalidName, done); - } - ); + itSkipIfAWS('should return 400 if name starts with period', done => { + const invalidName = '.myawsbucket'; + testFn(invalidName, done); + }); it('should return 400 if name ends with period', done => { const invalidName = 'myawsbucket.'; testFn(invalidName, done); }); - itSkipIfAWS( - 'should return 400 if name has two period between labels', - done => { - const invalidName = 'my..examplebucket'; - testFn(invalidName, done); - } - ); + itSkipIfAWS('should return 400 if name has two period between labels', done => { + const invalidName = 'my..examplebucket'; + testFn(invalidName, done); + }); it('should return 400 if name has special chars', done => { const invalidName = 'my.#s3bucket'; @@ -179,72 +175,86 @@ describe('PUT Bucket - AWS.S3.createBucket', () => { bucketUtil.s3.createBucket({ Bucket: name }, (err, res) => { assert.ifError(err); assert(res.Location, 'No Location in response'); - assert.deepStrictEqual(res.Location, `/${name}`, - 'Wrong Location header'); - bucketUtil.deleteOne(name).then(() => done()).catch(done); + assert.deepStrictEqual(res.Location, `/${name}`, 'Wrong Location header'); + bucketUtil + .deleteOne(name) + .then(() => done()) + .catch(done); }); } - it('should create bucket if name is valid', done => - _test('scality-very-valid-bucket-name', done)); + it('should create bucket if name is valid', done => _test('scality-very-valid-bucket-name', done)); - it('should create bucket if name is some prefix and an IP address', - done => _test('prefix-192.168.5.4', done)); + it('should create bucket if name is some prefix and an IP address', done => + _test('prefix-192.168.5.4', done)); - it('should create bucket if name is an IP address with some suffix', - done => _test('192.168.5.4-suffix', done)); + it('should create bucket if name is an IP address with some suffix', done => + _test('192.168.5.4-suffix', done)); }); describe('bucket creation success with object lock', () => { function _testObjectLockEnabled(name, done) { - bucketUtil.s3.createBucket({ - Bucket: name, - ObjectLockEnabledForBucket: true, - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.Location, `/${name}`, - 'Wrong Location header'); - bucketUtil.s3.getObjectLockConfiguration({ Bucket: name }, (err, res) => { + bucketUtil.s3.createBucket( + { + Bucket: name, + ObjectLockEnabledForBucket: true, + }, + (err, res) => { assert.ifError(err); - assert.deepStrictEqual(res.ObjectLockConfiguration, - { ObjectLockEnabled: 'Enabled' }); - }); - bucketUtil.deleteOne(name).then(() => done()).catch(done); - }); + assert.strictEqual(res.Location, `/${name}`, 'Wrong Location header'); + 
bucketUtil.s3.getObjectLockConfiguration({ Bucket: name }, (err, res) => { + assert.ifError(err); + assert.deepStrictEqual(res.ObjectLockConfiguration, { ObjectLockEnabled: 'Enabled' }); + }); + bucketUtil + .deleteOne(name) + .then(() => done()) + .catch(done); + } + ); } function _testObjectLockDisabled(name, done) { - bucketUtil.s3.createBucket({ - Bucket: name, - ObjectLockEnabledForBucket: false, - }, (err, res) => { - assert.ifError(err); - assert(res.Location, 'No Location in response'); - assert.strictEqual(res.Location, `/${name}`, - 'Wrong Location header'); - bucketUtil.s3.getObjectLockConfiguration({ Bucket: name }, err => { - assert.strictEqual(err.code, 'ObjectLockConfigurationNotFoundError'); - }); - bucketUtil.deleteOne(name).then(() => done()).catch(done); - }); + bucketUtil.s3.createBucket( + { + Bucket: name, + ObjectLockEnabledForBucket: false, + }, + (err, res) => { + assert.ifError(err); + assert(res.Location, 'No Location in response'); + assert.strictEqual(res.Location, `/${name}`, 'Wrong Location header'); + bucketUtil.s3.getObjectLockConfiguration({ Bucket: name }, err => { + assert.strictEqual(err.code, 'ObjectLockConfigurationNotFoundError'); + }); + bucketUtil + .deleteOne(name) + .then(() => done()) + .catch(done); + } + ); } function _testVersioning(name, done) { - bucketUtil.s3.createBucket({ - Bucket: name, - ObjectLockEnabledForBucket: true, - }, (err, res) => { - assert.ifError(err); - assert(res.Location, 'No Location in response'); - assert.strictEqual(res.Location, `/${name}`, - 'Wrong Location header'); - bucketUtil.s3.getBucketVersioning({ Bucket: name }, (err, res) => { + bucketUtil.s3.createBucket( + { + Bucket: name, + ObjectLockEnabledForBucket: true, + }, + (err, res) => { assert.ifError(err); - assert.strictEqual(res.Status, 'Enabled'); - assert.strictEqual(res.MFADelete, 'Disabled'); - }); - bucketUtil.deleteOne(name).then(() => done()).catch(done); - }); + assert(res.Location, 'No Location in response'); + assert.strictEqual(res.Location, `/${name}`, 'Wrong Location header'); + bucketUtil.s3.getBucketVersioning({ Bucket: name }, (err, res) => { + assert.ifError(err); + assert.strictEqual(res.Status, 'Enabled'); + assert.strictEqual(res.MFADelete, 'Disabled'); + }); + bucketUtil + .deleteOne(name) + .then(() => done()) + .catch(done); + } + ); } - it('should create bucket without error', done => - _testObjectLockEnabled('bucket-with-object-lock', done)); + it('should create bucket without error', done => _testObjectLockEnabled('bucket-with-object-lock', done)); it('should create bucket with versioning enabled by default', done => _testVersioning('bucket-with-object-lock', done)); @@ -253,13 +263,14 @@ describe('PUT Bucket - AWS.S3.createBucket', () => { _testObjectLockDisabled('bucket-without-object-lock', done)); }); - Object.keys(locationConstraints).forEach( - location => { - describeSkipAWS(`bucket creation with location: ${location}`, - () => { + Object.keys(locationConstraints).forEach(location => { + describeSkipAWS(`bucket creation with location: ${location}`, () => { after(done => - bucketUtil.deleteOne(bucketName) - .then(() => done()).catch(() => done())); + bucketUtil + .deleteOne(bucketName) + .then(() => done()) + .catch(() => done()) + ); it(`should create bucket with location: ${location}`, done => { bucketUtil.s3.createBucket( { @@ -267,16 +278,15 @@ describe('PUT Bucket - AWS.S3.createBucket', () => { CreateBucketConfiguration: { LocationConstraint: location, }, - }, err => { + }, + err => { if (location === 
'location-dmf-v1') { - assert.strictEqual( - err.code, - 'InvalidLocationConstraint' - ); + assert.strictEqual(err.code, 'InvalidLocationConstraint'); assert.strictEqual(err.statusCode, 400); } return done(); - }); + } + ); }); }); }); @@ -289,14 +299,13 @@ describe('PUT Bucket - AWS.S3.createBucket', () => { CreateBucketConfiguration: { LocationConstraint: 'coco', }, - }, err => { - assert.strictEqual( - err.code, - 'InvalidLocationConstraint' - ); - assert.strictEqual(err.statusCode, 400); - done(); - }); + }, + err => { + assert.strictEqual(err.code, 'InvalidLocationConstraint'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); }); it('should return error InvalidLocationConstraint for location constraint dmf', done => { @@ -306,48 +315,55 @@ describe('PUT Bucket - AWS.S3.createBucket', () => { CreateBucketConfiguration: { LocationConstraint: 'location-dmf-v1', }, - }, err => { - assert.strictEqual( - err.code, - 'InvalidLocationConstraint', - ); + }, + err => { + assert.strictEqual(err.code, 'InvalidLocationConstraint'); assert.strictEqual(err.statusCode, 400); done(); - }); + } + ); }); }); describe('bucket creation with ingestion location', () => { - after(done => - bucketUtil.s3.deleteBucket({ Bucket: bucketName }, done)); + after(done => bucketUtil.s3.deleteBucket({ Bucket: bucketName }, done)); it('should create bucket with location and ingestion', done => { - async.waterfall([ - next => bucketUtil.s3.createBucket( - { - Bucket: bucketName, - CreateBucketConfiguration: { - LocationConstraint: 'us-east-2:ingest', - }, - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.Location, `/${bucketName}`); - return next(); - }), - next => bucketUtil.s3.getBucketLocation( - { - Bucket: bucketName, - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.LocationConstraint, 'us-east-2'); - return next(); - }), - next => bucketUtil.s3.getBucketVersioning( - { Bucket: bucketName }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.Status, 'Enabled'); - return next(); - }), - ], done); + async.waterfall( + [ + next => + bucketUtil.s3.createBucket( + { + Bucket: bucketName, + CreateBucketConfiguration: { + LocationConstraint: 'us-east-2:ingest', + }, + }, + (err, res) => { + assert.ifError(err); + assert.strictEqual(res.Location, `/${bucketName}`); + return next(); + } + ), + next => + bucketUtil.s3.getBucketLocation( + { + Bucket: bucketName, + }, + (err, res) => { + assert.ifError(err); + assert.strictEqual(res.LocationConstraint, 'us-east-2'); + return next(); + } + ), + next => + bucketUtil.s3.getBucketVersioning({ Bucket: bucketName }, (err, res) => { + assert.ifError(err); + assert.strictEqual(res.Status, 'Enabled'); + return next(); + }), + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/putAcl.js b/tests/functional/aws-node-sdk/test/bucket/putAcl.js index ef875a3511..d0f372ad9b 100644 --- a/tests/functional/aws-node-sdk/test/bucket/putAcl.js +++ b/tests/functional/aws-node-sdk/test/bucket/putAcl.js @@ -49,8 +49,7 @@ describe('aws-node-sdk test bucket put acl', () => { s3.putBucketAcl(params, error => { if (error) { assert.strictEqual(error.statusCode, 400); - assert.strictEqual( - error.code, 'InvalidRequest'); + assert.strictEqual(error.code, 'InvalidRequest'); done(); } else { done('accepted xml body larger than 512 KB'); @@ -80,60 +79,70 @@ describe('PUT Bucket ACL', () => { }); }); - it('should set multiple ACL permissions with same grantee specified' + - 'using email', done => { - 
s3.putBucketAcl({ - Bucket: bucketName, - GrantRead: 'emailAddress=sampleaccount1@sampling.com', - GrantWrite: 'emailAddress=sampleaccount1@sampling.com', - }, err => { - assert(!err); - s3.getBucketAcl({ + it('should set multiple ACL permissions with same grantee specified' + 'using email', done => { + s3.putBucketAcl( + { Bucket: bucketName, - }, (err, res) => { + GrantRead: 'emailAddress=sampleaccount1@sampling.com', + GrantWrite: 'emailAddress=sampleaccount1@sampling.com', + }, + err => { assert(!err); - // expect both READ and WRITE grants to exist - assert.strictEqual(res.Grants.length, 2); - return done(); - }); - }); + s3.getBucketAcl( + { + Bucket: bucketName, + }, + (err, res) => { + assert(!err); + // expect both READ and WRITE grants to exist + assert.strictEqual(res.Grants.length, 2); + return done(); + } + ); + } + ); }); - it('should return InvalidArgument if invalid grantee ' + - 'user ID provided in ACL header request', done => { - s3.putBucketAcl({ - Bucket: bucketName, - GrantRead: 'id=invalidUserID' }, err => { - assert.strictEqual(err.statusCode, 400); - assert.strictEqual(err.code, 'InvalidArgument'); - done(); - }); + it('should return InvalidArgument if invalid grantee ' + 'user ID provided in ACL header request', done => { + s3.putBucketAcl( + { + Bucket: bucketName, + GrantRead: 'id=invalidUserID', + }, + err => { + assert.strictEqual(err.statusCode, 400); + assert.strictEqual(err.code, 'InvalidArgument'); + done(); + } + ); }); - it('should return InvalidArgument if invalid grantee ' + - 'user ID provided in ACL request body', done => { - s3.putBucketAcl({ - Bucket: bucketName, - AccessControlPolicy: { - Grants: [ - { - Grantee: { - Type: 'CanonicalUser', - ID: 'invalidUserID', + it('should return InvalidArgument if invalid grantee ' + 'user ID provided in ACL request body', done => { + s3.putBucketAcl( + { + Bucket: bucketName, + AccessControlPolicy: { + Grants: [ + { + Grantee: { + Type: 'CanonicalUser', + ID: 'invalidUserID', + }, + Permission: 'WRITE_ACP', }, - Permission: 'WRITE_ACP', - }], - Owner: { - DisplayName: 'Bart', - ID: '79a59df900b949e55d96a1e698fbace' + - 'dfd6e09d98eacf8f8d5218e7cd47ef2be', + ], + Owner: { + DisplayName: 'Bart', + ID: '79a59df900b949e55d96a1e698fbace' + 'dfd6e09d98eacf8f8d5218e7cd47ef2be', + }, }, }, - }, err => { - assert.strictEqual(err.statusCode, 400); - assert.strictEqual(err.code, 'InvalidArgument'); - done(); - }); + err => { + assert.strictEqual(err.statusCode, 400); + assert.strictEqual(err.code, 'InvalidArgument'); + done(); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/putBucketLifecycle.js b/tests/functional/aws-node-sdk/test/bucket/putBucketLifecycle.js index 22983dfc75..31219b6e1c 100644 --- a/tests/functional/aws-node-sdk/test/bucket/putBucketLifecycle.js +++ b/tests/functional/aws-node-sdk/test/bucket/putBucketLifecycle.js @@ -22,11 +22,16 @@ function assertError(err, expectedErr, cb) { if (expectedErr === null) { assert.strictEqual(err, null, `expected no error but got '${err}'`); } else { - assert.strictEqual(err.code, expectedErr, 'incorrect error response ' + - `code: should be '${expectedErr}' but got '${err.code}'`); - assert.strictEqual(err.statusCode, errors[expectedErr].code, - 'incorrect error status code: should be ' + - `${errors[expectedErr].code}, but got '${err.statusCode}'`); + assert.strictEqual( + err.code, + expectedErr, + 'incorrect error response ' + `code: should be '${expectedErr}' but got '${err.code}'` + ); + assert.strictEqual( + err.statusCode, + 
errors[expectedErr].code, + 'incorrect error status code: should be ' + `${errors[expectedErr].code}, but got '${err.statusCode}'` + ); } cb(); } @@ -59,8 +64,7 @@ describe('aws-sdk test put bucket lifecycle', () => { it('should return NoSuchBucket error if bucket does not exist', done => { const params = getLifecycleParams(); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'NoSuchBucket', done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'NoSuchBucket', done)); }); describe('config rules', () => { @@ -70,18 +74,15 @@ describe('aws-sdk test put bucket lifecycle', () => { it('should return AccessDenied if user is not bucket owner', done => { const params = getLifecycleParams(); - otherAccountS3.putBucketLifecycleConfiguration(params, - err => assertError(err, 'AccessDenied', done)); + otherAccountS3.putBucketLifecycleConfiguration(params, err => assertError(err, 'AccessDenied', done)); }); it('should put lifecycle configuration on bucket', done => { const params = getLifecycleParams(); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); - it('should not allow lifecycle configuration with duplicated rule id ' + - 'and with Origin header set', done => { + it('should not allow lifecycle configuration with duplicated rule id ' + 'and with Origin header set', done => { const origin = 'http://www.allowedwebsite.com'; const lifecycleConfig = { @@ -109,67 +110,49 @@ describe('aws-sdk test put bucket lifecycle', () => { it('should not allow lifecycle config with no Status', done => { const params = getLifecycleParams({ key: 'Status', value: '' }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'MalformedXML', done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'MalformedXML', done)); }); - it('should not allow lifecycle config with no Prefix or Filter', - done => { + it('should not allow lifecycle config with no Prefix or Filter', done => { const params = getLifecycleParams({ key: 'Prefix', value: null }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'MalformedXML', done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'MalformedXML', done)); }); it('should not allow lifecycle config with empty action', done => { const params = getLifecycleParams({ key: 'Expiration', value: {} }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'MalformedXML', done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'MalformedXML', done)); }); - it('should not allow lifecycle config with ID longer than 255 char', - done => { - const params = - getLifecycleParams({ key: 'ID', value: 'a'.repeat(256) }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'InvalidArgument', done)); + it('should not allow lifecycle config with ID longer than 255 char', done => { + const params = getLifecycleParams({ key: 'ID', value: 'a'.repeat(256) }); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'InvalidArgument', done)); }); it('should allow lifecycle config with Prefix length < 1024', done => { - const params = - getLifecycleParams({ key: 'Prefix', value: 'a'.repeat(1023) }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + const params = getLifecycleParams({ key: 'Prefix', value: 'a'.repeat(1023) }); + s3.putBucketLifecycleConfiguration(params, 
err => assertError(err, null, done)); }); - it('should allow lifecycle config with Prefix length === 1024', - done => { - const params = - getLifecycleParams({ key: 'Prefix', value: 'a'.repeat(1024) }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + it('should allow lifecycle config with Prefix length === 1024', done => { + const params = getLifecycleParams({ key: 'Prefix', value: 'a'.repeat(1024) }); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); - it('should not allow lifecycle config with Prefix length > 1024', - done => { - const params = - getLifecycleParams({ key: 'Prefix', value: 'a'.repeat(1025) }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'InvalidRequest', done)); + it('should not allow lifecycle config with Prefix length > 1024', done => { + const params = getLifecycleParams({ key: 'Prefix', value: 'a'.repeat(1025) }); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'InvalidRequest', done)); }); - it('should not allow lifecycle config with Filter.Prefix length > 1024', - done => { + it('should not allow lifecycle config with Filter.Prefix length > 1024', done => { const params = getLifecycleParams({ key: 'Filter', value: { Prefix: 'a'.repeat(1025) }, }); delete params.LifecycleConfiguration.Rules[0].Prefix; - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'InvalidRequest', done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'InvalidRequest', done)); }); - it('should not allow lifecycle config with Filter.And.Prefix length ' + - '> 1024', done => { + it('should not allow lifecycle config with Filter.And.Prefix length ' + '> 1024', done => { const params = getLifecycleParams({ key: 'Filter', value: { @@ -180,8 +163,7 @@ describe('aws-sdk test put bucket lifecycle', () => { }, }); delete params.LifecycleConfiguration.Rules[0].Prefix; - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'InvalidRequest', done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'InvalidRequest', done)); }); it('should allow lifecycle config with Tag.Key length < 128', done => { @@ -190,76 +172,62 @@ describe('aws-sdk test put bucket lifecycle', () => { value: { Tag: { Key: 'a'.repeat(127), Value: 'bar' } }, }); delete params.LifecycleConfiguration.Rules[0].Prefix; - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); - it('should allow lifecycle config with Tag.Key length === 128', - done => { + it('should allow lifecycle config with Tag.Key length === 128', done => { const params = getLifecycleParams({ key: 'Filter', value: { Tag: { Key: 'a'.repeat(128), Value: 'bar' } }, }); delete params.LifecycleConfiguration.Rules[0].Prefix; - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); - it('should not allow lifecycle config with Tag.Key length > 128', - done => { + it('should not allow lifecycle config with Tag.Key length > 128', done => { const params = getLifecycleParams({ key: 'Filter', value: { Tag: { Key: 'a'.repeat(129), Value: 'bar' } }, }); delete params.LifecycleConfiguration.Rules[0].Prefix; - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'InvalidRequest', done)); + s3.putBucketLifecycleConfiguration(params, err => 
assertError(err, 'InvalidRequest', done)); }); - it('should allow lifecycle config with Tag.Value length < 256', - done => { + it('should allow lifecycle config with Tag.Value length < 256', done => { const params = getLifecycleParams({ key: 'Filter', value: { Tag: { Key: 'a', Value: 'b'.repeat(255) } }, }); delete params.LifecycleConfiguration.Rules[0].Prefix; - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); - it('should allow lifecycle config with Tag.Value length === 256', - done => { + it('should allow lifecycle config with Tag.Value length === 256', done => { const params = getLifecycleParams({ key: 'Filter', value: { Tag: { Key: 'a', Value: 'b'.repeat(256) } }, }); delete params.LifecycleConfiguration.Rules[0].Prefix; - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); - it('should not allow lifecycle config with Tag.Value length > 256', - done => { + it('should not allow lifecycle config with Tag.Value length > 256', done => { const params = getLifecycleParams({ key: 'Filter', value: { Tag: { Key: 'a', Value: 'b'.repeat(257) } }, }); delete params.LifecycleConfiguration.Rules[0].Prefix; - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'InvalidRequest', done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'InvalidRequest', done)); }); it('should not allow lifecycle config with Prefix and Filter', done => { - const params = getLifecycleParams( - { key: 'Filter', value: { Prefix: 'foo' } }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'MalformedXML', done)); + const params = getLifecycleParams({ key: 'Filter', value: { Prefix: 'foo' } }); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'MalformedXML', done)); }); it('should allow lifecycle config without ID', done => { const params = getLifecycleParams({ key: 'ID', value: '' }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); it('should allow lifecycle config with multiple actions', done => { @@ -267,11 +235,9 @@ describe('aws-sdk test put bucket lifecycle', () => { key: 'NoncurrentVersionExpiration', value: { NoncurrentDays: 1 }, }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); - describe('with Rule.Filter not Rule.Prefix', () => { before(done => { expirationRule.Prefix = null; @@ -280,15 +246,12 @@ describe('aws-sdk test put bucket lifecycle', () => { it('should allow config with empty Filter', done => { const params = getLifecycleParams({ key: 'Filter', value: {} }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); it('should not allow config with And & Prefix', done => { - const params = getLifecycleParams( - { key: 'Filter', value: { Prefix: 'foo', And: {} } }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'MalformedXML', done)); + const params = getLifecycleParams({ key: 'Filter', value: { Prefix: 'foo', And: {} } }); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'MalformedXML', 
done)); }); it('should not allow config with And & Tag', done => { @@ -296,8 +259,7 @@ describe('aws-sdk test put bucket lifecycle', () => { key: 'Filter', value: { Tag: { Key: 'foo', Value: 'bar' }, And: {} }, }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'MalformedXML', done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'MalformedXML', done)); }); it('should not allow config with Prefix & Tag', done => { @@ -305,15 +267,12 @@ describe('aws-sdk test put bucket lifecycle', () => { key: 'Filter', value: { Tag: { Key: 'foo', Value: 'bar' }, Prefix: 'foo' }, }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'MalformedXML', done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'MalformedXML', done)); }); it('should allow config with only Prefix', done => { - const params = getLifecycleParams( - { key: 'Filter', value: { Prefix: 'foo' } }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + const params = getLifecycleParams({ key: 'Filter', value: { Prefix: 'foo' } }); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); it('should allow config with only Tag', done => { @@ -321,16 +280,12 @@ describe('aws-sdk test put bucket lifecycle', () => { key: 'Filter', value: { Tag: { Key: 'foo', Value: 'ba' } }, }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); - it('should not allow config with And.Prefix & no And.Tags', - done => { - const params = getLifecycleParams( - { key: 'Filter', value: { And: { Prefix: 'foo' } } }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'MalformedXML', done)); + it('should not allow config with And.Prefix & no And.Tags', done => { + const params = getLifecycleParams({ key: 'Filter', value: { And: { Prefix: 'foo' } } }); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'MalformedXML', done)); }); it('should not allow config with only one And.Tags', done => { @@ -338,34 +293,38 @@ describe('aws-sdk test put bucket lifecycle', () => { key: 'Filter', value: { And: { Tags: [{ Key: 'f', Value: 'b' }] } }, }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, 'MalformedXML', done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, 'MalformedXML', done)); }); - it('should allow config with And.Tags & no And.Prefix', - done => { + it('should allow config with And.Tags & no And.Prefix', done => { const params = getLifecycleParams({ key: 'Filter', - value: { And: { Tags: - [{ Key: 'foo', Value: 'bar' }, - { Key: 'foo2', Value: 'bar2' }], - } }, + value: { + And: { + Tags: [ + { Key: 'foo', Value: 'bar' }, + { Key: 'foo2', Value: 'bar2' }, + ], + }, + }, }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); it('should allow config with And.Prefix & And.Tags', done => { const params = getLifecycleParams({ key: 'Filter', - value: { And: { Prefix: 'foo', - Tags: [ - { Key: 'foo', Value: 'bar' }, - { Key: 'foo2', Value: 'bar2' }], - } }, + value: { + And: { + Prefix: 'foo', + Tags: [ + { Key: 'foo', Value: 'bar' }, + { Key: 'foo2', Value: 'bar2' }, + ], + }, + }, }); - s3.putBucketLifecycleConfiguration(params, err => - assertError(err, null, done)); + 
s3.putBucketLifecycleConfiguration(params, err => assertError(err, null, done)); }); }); @@ -386,10 +345,12 @@ describe('aws-sdk test put bucket lifecycle', () => { } it('should allow NoncurrentDays and StorageClass', done => { - const noncurrentVersionTransitions = [{ - NoncurrentDays: 0, - StorageClass: 'us-east-2', - }]; + const noncurrentVersionTransitions = [ + { + NoncurrentDays: 0, + StorageClass: 'us-east-2', + }, + ]; const params = getParams(noncurrentVersionTransitions); s3.putBucketLifecycleConfiguration(params, err => { assert.ifError(err); @@ -398,30 +359,36 @@ describe('aws-sdk test put bucket lifecycle', () => { }); it('should not allow duplicate StorageClass', done => { - const noncurrentVersionTransitions = [{ - NoncurrentDays: 1, - StorageClass: 'us-east-2', - }, { - NoncurrentDays: 2, - StorageClass: 'us-east-2', - }]; + const noncurrentVersionTransitions = [ + { + NoncurrentDays: 1, + StorageClass: 'us-east-2', + }, + { + NoncurrentDays: 2, + StorageClass: 'us-east-2', + }, + ]; const params = getParams(noncurrentVersionTransitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'InvalidRequest'); - assert.strictEqual(err.message, - "'StorageClass' must be different for " + - "'NoncurrentVersionTransition' actions in same " + - "'Rule' with prefix ''"); + assert.strictEqual( + err.message, + "'StorageClass' must be different for " + + "'NoncurrentVersionTransition' actions in same " + + "'Rule' with prefix ''" + ); done(); }); }); - it('should not allow unknown StorageClass', - done => { - const noncurrentVersionTransitions = [{ - NoncurrentDays: 1, - StorageClass: 'unknown', - }]; + it('should not allow unknown StorageClass', done => { + const noncurrentVersionTransitions = [ + { + NoncurrentDays: 1, + StorageClass: 'unknown', + }, + ]; const params = getParams(noncurrentVersionTransitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'MalformedXML'); @@ -429,12 +396,13 @@ describe('aws-sdk test put bucket lifecycle', () => { }); }); - it(`should not allow NoncurrentDays value exceeding ${MAX_DAYS}`, - done => { - const noncurrentVersionTransitions = [{ - NoncurrentDays: MAX_DAYS + 1, - StorageClass: 'us-east-2', - }]; + it(`should not allow NoncurrentDays value exceeding ${MAX_DAYS}`, done => { + const noncurrentVersionTransitions = [ + { + NoncurrentDays: MAX_DAYS + 1, + StorageClass: 'us-east-2', + }, + ]; const params = getParams(noncurrentVersionTransitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'MalformedXML'); @@ -442,27 +410,30 @@ describe('aws-sdk test put bucket lifecycle', () => { }); }); - it('should not allow negative NoncurrentDays', - done => { - const noncurrentVersionTransitions = [{ - NoncurrentDays: -1, - StorageClass: 'us-east-2', - }]; + it('should not allow negative NoncurrentDays', done => { + const noncurrentVersionTransitions = [ + { + NoncurrentDays: -1, + StorageClass: 'us-east-2', + }, + ]; const params = getParams(noncurrentVersionTransitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.message, - "'NoncurrentDays' in NoncurrentVersionTransition " + - 'action must be nonnegative'); + assert.strictEqual( + err.message, + "'NoncurrentDays' in NoncurrentVersionTransition " + 'action must be nonnegative' + ); done(); }); }); - it('should not allow config missing NoncurrentDays', - done => { - const noncurrentVersionTransitions = [{ - StorageClass: 
'us-east-2', - }]; + it('should not allow config missing NoncurrentDays', done => { + const noncurrentVersionTransitions = [ + { + StorageClass: 'us-east-2', + }, + ]; const params = getParams(noncurrentVersionTransitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'MalformedXML'); @@ -470,11 +441,12 @@ describe('aws-sdk test put bucket lifecycle', () => { }); }); - it('should not allow config missing StorageClass', - done => { - const noncurrentVersionTransitions = [{ - NoncurrentDays: 1, - }]; + it('should not allow config missing StorageClass', done => { + const noncurrentVersionTransitions = [ + { + NoncurrentDays: 1, + }, + ]; const params = getParams(noncurrentVersionTransitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'MalformedXML'); @@ -499,10 +471,12 @@ describe('aws-sdk test put bucket lifecycle', () => { } it('should allow Days', done => { - const transitions = [{ - Days: 0, - StorageClass: 'us-east-2', - }]; + const transitions = [ + { + Days: 0, + StorageClass: 'us-east-2', + }, + ]; const params = getParams(transitions); s3.putBucketLifecycleConfiguration(params, err => { assert.ifError(err); @@ -511,10 +485,12 @@ describe('aws-sdk test put bucket lifecycle', () => { }); it(`should not allow Days value exceeding ${MAX_DAYS}`, done => { - const transitions = [{ - Days: MAX_DAYS + 1, - StorageClass: 'us-east-2', - }]; + const transitions = [ + { + Days: MAX_DAYS + 1, + StorageClass: 'us-east-2', + }, + ]; const params = getParams(transitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'MalformedXML'); @@ -523,43 +499,50 @@ describe('aws-sdk test put bucket lifecycle', () => { }); it('should not allow negative Days value', done => { - const transitions = [{ - Days: -1, - StorageClass: 'us-east-2', - }]; + const transitions = [ + { + Days: -1, + StorageClass: 'us-east-2', + }, + ]; const params = getParams(transitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.message, - "'Days' in Transition action must be nonnegative"); + assert.strictEqual(err.message, "'Days' in Transition action must be nonnegative"); done(); }); }); it('should not allow duplicate StorageClass', done => { - const transitions = [{ - Days: 1, - StorageClass: 'us-east-2', - }, { - Days: 2, - StorageClass: 'us-east-2', - }]; + const transitions = [ + { + Days: 1, + StorageClass: 'us-east-2', + }, + { + Days: 2, + StorageClass: 'us-east-2', + }, + ]; const params = getParams(transitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'InvalidRequest'); - assert.strictEqual(err.message, - "'StorageClass' must be different for 'Transition' " + - "actions in same 'Rule' with prefix ''"); + assert.strictEqual( + err.message, + "'StorageClass' must be different for 'Transition' " + "actions in same 'Rule' with prefix ''" + ); done(); }); }); // TODO: Upgrade to aws-sdk >= 2.60.0 for correct Date field support it.skip('should allow Date', done => { - const transitions = [{ - Date: '2016-01-01T00:00:00.000Z', - StorageClass: 'us-east-2', - }]; + const transitions = [ + { + Date: '2016-01-01T00:00:00.000Z', + StorageClass: 'us-east-2', + }, + ]; const params = getParams(transitions); s3.putBucketLifecycleConfiguration(params, err => { assert.ifError(err); @@ -568,13 +551,14 @@ describe('aws-sdk test put bucket lifecycle', () => { }); // TODO: Upgrade to aws-sdk >= 2.60.0 for correct 
Date field support - it.skip('should not allow speficying both Days and Date value', - done => { - const transitions = [{ - Date: '2016-01-01T00:00:00.000Z', - Days: 1, - StorageClass: 'us-east-2', - }]; + it.skip('should not allow speficying both Days and Date value', done => { + const transitions = [ + { + Date: '2016-01-01T00:00:00.000Z', + Days: 1, + StorageClass: 'us-east-2', + }, + ]; const params = getParams(transitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'MalformedXML'); @@ -583,64 +567,78 @@ describe('aws-sdk test put bucket lifecycle', () => { }); // TODO: Upgrade to aws-sdk >= 2.60.0 for correct Date field support - it.skip('should not allow speficying both Days and Date value ' + - 'across transitions', done => { - const transitions = [{ - Date: '2016-01-01T00:00:00.000Z', - StorageClass: 'us-east-2', - }, { - Days: 1, - StorageClass: 'zenko', - }]; + it.skip('should not allow speficying both Days and Date value ' + 'across transitions', done => { + const transitions = [ + { + Date: '2016-01-01T00:00:00.000Z', + StorageClass: 'us-east-2', + }, + { + Days: 1, + StorageClass: 'zenko', + }, + ]; const params = getParams(transitions); s3.putBucketLifecycleConfiguration(params, err => { assert.strictEqual(err.code, 'InvalidRequest'); - assert.strictEqual(err.message, - "Found mixed 'Date' and 'Days' based Transition " + - "actions in lifecycle rule for prefix ''"); + assert.strictEqual( + err.message, + "Found mixed 'Date' and 'Days' based Transition " + "actions in lifecycle rule for prefix ''" + ); done(); }); }); // TODO: Upgrade to aws-sdk >= 2.60.0 for correct Date field support - it.skip('should not allow speficying both Days and Date value ' + - 'across transitions and expiration', done => { - const transitions = [{ - Days: 1, - StorageClass: 'us-east-2', - }]; - const params = getParams(transitions); - params.LifecycleConfiguration.Rules[0].Expiration = { Date: 0 }; - s3.putBucketLifecycleConfiguration(params, err => { - assert.strictEqual(err.code, 'InvalidRequest'); - assert.strictEqual(err.message, - "Found mixed 'Date' and 'Days' based Expiration and " + - "Transition actions in lifecycle rule for prefix ''"); - done(); - }); - }); + it.skip( + 'should not allow speficying both Days and Date value ' + 'across transitions and expiration', + done => { + const transitions = [ + { + Days: 1, + StorageClass: 'us-east-2', + }, + ]; + const params = getParams(transitions); + params.LifecycleConfiguration.Rules[0].Expiration = { Date: 0 }; + s3.putBucketLifecycleConfiguration(params, err => { + assert.strictEqual(err.code, 'InvalidRequest'); + assert.strictEqual( + err.message, + "Found mixed 'Date' and 'Days' based Expiration and " + + "Transition actions in lifecycle rule for prefix ''" + ); + done(); + }); + } + ); }); // NoncurrentVersionTransitions not implemented - describe.skip('with NoncurrentVersionTransitions and Transitions', - () => { + describe.skip('with NoncurrentVersionTransitions and Transitions', () => { it('should allow config', done => { const params = { Bucket: bucket, LifecycleConfiguration: { - Rules: [{ - ID: 'test', - Status: 'Enabled', - Prefix: '', - NoncurrentVersionTransitions: [{ - NoncurrentDays: 1, - StorageClass: 'us-east-2', - }], - Transitions: [{ - Days: 1, - StorageClass: 'us-east-2', - }], - }], + Rules: [ + { + ID: 'test', + Status: 'Enabled', + Prefix: '', + NoncurrentVersionTransitions: [ + { + NoncurrentDays: 1, + StorageClass: 'us-east-2', + }, + ], + Transitions: [ + { + Days: 1, + 
StorageClass: 'us-east-2', + }, + ], + }, + ], }, }; s3.putBucketLifecycleConfiguration(params, err => { @@ -650,20 +648,23 @@ describe('aws-sdk test put bucket lifecycle', () => { }); }); - it.skip('should not allow config when specifying ' + - 'NoncurrentVersionTransitions', done => { + it.skip('should not allow config when specifying ' + 'NoncurrentVersionTransitions', done => { const params = { Bucket: bucket, LifecycleConfiguration: { - Rules: [{ - ID: 'test', - Status: 'Enabled', - Prefix: '', - NoncurrentVersionTransitions: [{ - NoncurrentDays: 1, - StorageClass: 'us-east-2', - }], - }], + Rules: [ + { + ID: 'test', + Status: 'Enabled', + Prefix: '', + NoncurrentVersionTransitions: [ + { + NoncurrentDays: 1, + StorageClass: 'us-east-2', + }, + ], + }, + ], }, }; s3.putBucketLifecycleConfiguration(params, err => { diff --git a/tests/functional/aws-node-sdk/test/bucket/putBucketNotification.js b/tests/functional/aws-node-sdk/test/bucket/putBucketNotification.js index f2650f13c7..fb407aea36 100644 --- a/tests/functional/aws-node-sdk/test/bucket/putBucketNotification.js +++ b/tests/functional/aws-node-sdk/test/bucket/putBucketNotification.js @@ -47,9 +47,14 @@ describe('aws-sdk test put notification configuration', () => { }); describe('config rules', () => { - beforeEach(done => s3.createBucket({ - Bucket: bucket, - }, done)); + beforeEach(done => + s3.createBucket( + { + Bucket: bucket, + }, + done + ) + ); afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); @@ -61,24 +66,20 @@ describe('aws-sdk test put notification configuration', () => { }); }); - it('should put notification configuration on bucket with basic config', - done => { - const params = getNotificationParams(); - s3.putBucketNotificationConfiguration(params, done); - }); + it('should put notification configuration on bucket with basic config', done => { + const params = getNotificationParams(); + s3.putBucketNotificationConfiguration(params, done); + }); - it('should put notification configuration on bucket with multiple events', - done => { - const params = getNotificationParams( - ['s3:ObjectCreated:*', 's3:ObjectRemoved:*']); - s3.putBucketNotificationConfiguration(params, done); - }); + it('should put notification configuration on bucket with multiple events', done => { + const params = getNotificationParams(['s3:ObjectCreated:*', 's3:ObjectRemoved:*']); + s3.putBucketNotificationConfiguration(params, done); + }); - it('should put notification configuration on bucket with id', - done => { - const params = getNotificationParams(null, null, 'notification-id'); - s3.putBucketNotificationConfiguration(params, done); - }); + it('should put notification configuration on bucket with id', done => { + const params = getNotificationParams(null, null, 'notification-id'); + s3.putBucketNotificationConfiguration(params, done); + }); it('should put empty notification configuration', done => { const params = { @@ -88,38 +89,40 @@ describe('aws-sdk test put notification configuration', () => { s3.putBucketNotificationConfiguration(params, done); }); - it('should not allow notification config request with invalid arn', - done => { - const params = getNotificationParams(null, 'invalidArn'); - s3.putBucketNotificationConfiguration(params, err => { - checkError(err, 'MalformedXML', 400); - done(); - }); + it('should not allow notification config request with invalid arn', done => { + const params = getNotificationParams(null, 'invalidArn'); + s3.putBucketNotificationConfiguration(params, err => { + checkError(err, 
'MalformedXML', 400); + done(); }); + }); - it('should not allow notification config request with invalid event', - done => { - const params = getNotificationParams(['s3:NotAnEvent']); - s3.putBucketNotificationConfiguration(params, err => { - checkError(err, 'MalformedXML', 400); - done(); - }); + it('should not allow notification config request with invalid event', done => { + const params = getNotificationParams(['s3:NotAnEvent']); + s3.putBucketNotificationConfiguration(params, err => { + checkError(err, 'MalformedXML', 400); + done(); }); + }); - it('should not allow notification config request with unsupported destination', - done => { - const params = getNotificationParams(null, 'arn:scality:bucketnotif:::target100'); - s3.putBucketNotificationConfiguration(params, err => { - checkError(err, 'InvalidArgument', 400); - done(); - }); + it('should not allow notification config request with unsupported destination', done => { + const params = getNotificationParams(null, 'arn:scality:bucketnotif:::target100'); + s3.putBucketNotificationConfiguration(params, err => { + checkError(err, 'InvalidArgument', 400); + done(); }); + }); }); describe('cross origin requests', () => { - beforeEach(done => s3.createBucket({ - Bucket: bucket, - }, done)); + beforeEach(done => + s3.createBucket( + { + Bucket: bucket, + }, + done + ) + ); afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); @@ -128,11 +131,13 @@ describe('aws-sdk test put notification configuration', () => { it: 'return valid error with invalid arn', param: getNotificationParams(null, 'invalidArn'), error: 'MalformedXML', - }, { + }, + { it: 'return valid error with unknown/unsupported destination', param: getNotificationParams(null, 'arn:scality:bucketnotif:::target100'), error: 'InvalidArgument', - }, { + }, + { it: 'save notification configuration with correct arn', param: getNotificationParams(), }, diff --git a/tests/functional/aws-node-sdk/test/bucket/putBucketObjectLock.js b/tests/functional/aws-node-sdk/test/bucket/putBucketObjectLock.js index 8aa9b6d2be..b8f77aac47 100644 --- a/tests/functional/aws-node-sdk/test/bucket/putBucketObjectLock.js +++ b/tests/functional/aws-node-sdk/test/bucket/putBucketObjectLock.js @@ -48,9 +48,14 @@ describe('aws-sdk test put object lock configuration', () => { }); describe('on object lock disabled bucket', () => { - beforeEach(done => s3.createBucket({ - Bucket: bucket, - }, done)); + beforeEach(done => + s3.createBucket( + { + Bucket: bucket, + }, + done + ) + ); afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); @@ -77,10 +82,15 @@ describe('aws-sdk test put object lock configuration', () => { }); describe('config rules', () => { - beforeEach(done => s3.createBucket({ - Bucket: bucket, - ObjectLockEnabledForBucket: true, - }, done)); + beforeEach(done => + s3.createBucket( + { + Bucket: bucket, + ObjectLockEnabledForBucket: true, + }, + done + ) + ); afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); @@ -92,85 +102,76 @@ describe('aws-sdk test put object lock configuration', () => { }); }); - it('should put object lock configuration on bucket with Governance mode', - done => { - const params = getObjectLockParams('Enabled', 'GOVERNANCE', 30); - s3.putObjectLockConfiguration(params, err => { - assert.ifError(err); - done(); - }); + it('should put object lock configuration on bucket with Governance mode', done => { + const params = getObjectLockParams('Enabled', 'GOVERNANCE', 30); + s3.putObjectLockConfiguration(params, err => { + assert.ifError(err); + done(); }); 
+ }); - it('should put object lock configuration on bucket with Compliance mode', - done => { - const params = getObjectLockParams('Enabled', 'COMPLIANCE', 30); - s3.putObjectLockConfiguration(params, err => { - assert.ifError(err); - done(); - }); + it('should put object lock configuration on bucket with Compliance mode', done => { + const params = getObjectLockParams('Enabled', 'COMPLIANCE', 30); + s3.putObjectLockConfiguration(params, err => { + assert.ifError(err); + done(); }); + }); - it('should put object lock configuration on bucket with year retention type', - done => { - const params = getObjectLockParams('Enabled', 'COMPLIANCE', null, 2); - s3.putObjectLockConfiguration(params, err => { - assert.ifError(err); - done(); - }); + it('should put object lock configuration on bucket with year retention type', done => { + const params = getObjectLockParams('Enabled', 'COMPLIANCE', null, 2); + s3.putObjectLockConfiguration(params, err => { + assert.ifError(err); + done(); }); + }); - it('should not allow object lock config request with zero day retention', - done => { - const params = getObjectLockParams('Enabled', 'GOVERNANCE', null, 0); - s3.putObjectLockConfiguration(params, err => { - checkError(err, 'MalformedXML', 400); - done(); - }); + it('should not allow object lock config request with zero day retention', done => { + const params = getObjectLockParams('Enabled', 'GOVERNANCE', null, 0); + s3.putObjectLockConfiguration(params, err => { + checkError(err, 'MalformedXML', 400); + done(); }); + }); - it('should not allow object lock config request with negative retention', - done => { - const params = getObjectLockParams('Enabled', 'GOVERNANCE', -1); - s3.putObjectLockConfiguration(params, err => { - checkError(err, 'InvalidArgument', 400); - done(); - }); + it('should not allow object lock config request with negative retention', done => { + const params = getObjectLockParams('Enabled', 'GOVERNANCE', -1); + s3.putObjectLockConfiguration(params, err => { + checkError(err, 'InvalidArgument', 400); + done(); }); + }); - it('should not allow object lock config request with both Days and Years', - done => { - const params = getObjectLockParams('Enabled', 'GOVERNANCE', 1, 1); - s3.putObjectLockConfiguration(params, err => { - checkError(err, 'MalformedXML', 400); - done(); - }); + it('should not allow object lock config request with both Days and Years', done => { + const params = getObjectLockParams('Enabled', 'GOVERNANCE', 1, 1); + s3.putObjectLockConfiguration(params, err => { + checkError(err, 'MalformedXML', 400); + done(); }); + }); - it('should not allow object lock config request without days or years', - done => { - const params = getObjectLockParams('Enabled', 'GOVERNANCE'); - s3.putObjectLockConfiguration(params, err => { - checkError(err, 'MalformedXML', 400); - done(); - }); + it('should not allow object lock config request without days or years', done => { + const params = getObjectLockParams('Enabled', 'GOVERNANCE'); + s3.putObjectLockConfiguration(params, err => { + checkError(err, 'MalformedXML', 400); + done(); }); + }); - it('should not allow object lock config request with invalid ObjectLockEnabled', - done => { - const params = getObjectLockParams('enabled', 'GOVERNANCE', 10); - s3.putObjectLockConfiguration(params, err => { - checkError(err, 'MalformedXML', 400); - done(); - }); + it('should not allow object lock config request with invalid ObjectLockEnabled', done => { + const params = getObjectLockParams('enabled', 'GOVERNANCE', 10); + 
s3.putObjectLockConfiguration(params, err => { + checkError(err, 'MalformedXML', 400); + done(); }); + }); - it('should not allow object lock config request with invalid mode', - done => { - const params = getObjectLockParams('Enabled', 'Governance', 10); - s3.putObjectLockConfiguration(params, err => { - checkError(err, 'MalformedXML', 400); - done(); - }); + it('should not allow object lock config request with invalid mode', done => { + const params = getObjectLockParams('Enabled', 'Governance', 10); + s3.putObjectLockConfiguration(params, err => { + checkError(err, 'MalformedXML', 400); + done(); }); + }); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/putBucketPolicy.js b/tests/functional/aws-node-sdk/test/bucket/putBucketPolicy.js index d4489d224d..cd988ac0e2 100644 --- a/tests/functional/aws-node-sdk/test/bucket/putBucketPolicy.js +++ b/tests/functional/aws-node-sdk/test/bucket/putBucketPolicy.js @@ -52,9 +52,7 @@ function generateRandomString(length) { const allowedCharacters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+=,.@ -/'; const allowedCharactersLength = allowedCharacters.length; - return [...Array(length)] - .map(() => allowedCharacters[~~(Math.random() * allowedCharactersLength)]) - .join(''); + return [...Array(length)].map(() => allowedCharacters[~~(Math.random() * allowedCharactersLength)]).join(''); } // Check for the expected error response code and status code. @@ -62,16 +60,20 @@ function assertError(err, expectedErr, cb) { if (expectedErr === null) { assert.strictEqual(err, null, `expected no error but got '${err}'`); } else { - assert.strictEqual(err.code, expectedErr, 'incorrect error response ' + - `code: should be '${expectedErr}' but got '${err.code}'`); - assert.strictEqual(err.statusCode, errors[expectedErr].code, - 'incorrect error status code: should be ' + - `${errors[expectedErr].code}, but got '${err.statusCode}'`); + assert.strictEqual( + err.code, + expectedErr, + 'incorrect error response ' + `code: should be '${expectedErr}' but got '${err.code}'` + ); + assert.strictEqual( + err.statusCode, + errors[expectedErr].code, + 'incorrect error status code: should be ' + `${errors[expectedErr].code}, but got '${err.statusCode}'` + ); } cb(); } - describe('aws-sdk test put bucket policy', () => { let s3; let otherAccountS3; @@ -85,8 +87,7 @@ describe('aws-sdk test put bucket policy', () => { it('should return NoSuchBucket error if bucket does not exist', done => { const params = getPolicyParams(); - s3.putBucketPolicy(params, err => - assertError(err, 'NoSuchBucket', done)); + s3.putBucketPolicy(params, err => assertError(err, 'NoSuchBucket', done)); }); describe('config rules', () => { @@ -96,70 +97,64 @@ describe('aws-sdk test put bucket policy', () => { it('should return MethodNotAllowed if user is not bucket owner', done => { const params = getPolicyParams(); - otherAccountS3.putBucketPolicy(params, - err => assertError(err, 'MethodNotAllowed', done)); + otherAccountS3.putBucketPolicy(params, err => assertError(err, 'MethodNotAllowed', done)); }); it('should put a bucket policy on bucket', done => { const params = getPolicyParams(); - s3.putBucketPolicy(params, err => - assertError(err, null, done)); + s3.putBucketPolicy(params, err => assertError(err, null, done)); }); it('should not allow bucket policy with no Action', done => { const params = getPolicyParams({ key: 'Action', value: '' }); - s3.putBucketPolicy(params, err => - assertError(err, 'MalformedPolicy', done)); + s3.putBucketPolicy(params, err => 
assertError(err, 'MalformedPolicy', done)); }); it('should not allow bucket policy with no Effect', done => { const params = getPolicyParams({ key: 'Effect', value: '' }); - s3.putBucketPolicy(params, err => - assertError(err, 'MalformedPolicy', done)); + s3.putBucketPolicy(params, err => assertError(err, 'MalformedPolicy', done)); }); it('should not allow bucket policy with no Resource', done => { const params = getPolicyParams({ key: 'Resource', value: '' }); - s3.putBucketPolicy(params, err => - assertError(err, 'MalformedPolicy', done)); + s3.putBucketPolicy(params, err => assertError(err, 'MalformedPolicy', done)); }); - it('should not allow bucket policy with no Principal', - done => { + it('should not allow bucket policy with no Principal', done => { const params = getPolicyParams({ key: 'Principal', value: '' }); - s3.putBucketPolicy(params, err => - assertError(err, 'MalformedPolicy', done)); + s3.putBucketPolicy(params, err => assertError(err, 'MalformedPolicy', done)); }); - it('should return MalformedPolicy because Id is not a string', - done => { + it('should return MalformedPolicy because Id is not a string', done => { const params = getPolicyParamsWithId(null, 59); - s3.putBucketPolicy(params, err => - assertError(err, 'MalformedPolicy', done)); + s3.putBucketPolicy(params, err => assertError(err, 'MalformedPolicy', done)); }); - it('should put a bucket policy on bucket since Id is a string', - done => { + it('should put a bucket policy on bucket since Id is a string', done => { const params = getPolicyParamsWithId(null, 'cd3ad3d9-2776-4ef1-a904-4c229d1642e'); - s3.putBucketPolicy(params, err => - assertError(err, null, done)); + s3.putBucketPolicy(params, err => assertError(err, null, done)); }); it('should allow bucket policy with pincipal arn less than 2048 characters', done => { - const params = getPolicyParams({ key: 'Principal', value: { AWS: `arn:aws:iam::767707094035:user/${generateRandomString(150)}` } }); // eslint-disable-line max-len - s3.putBucketPolicy(params, err => - assertError(err, null, done)); + const params = getPolicyParams({ + key: 'Principal', + value: { AWS: `arn:aws:iam::767707094035:user/${generateRandomString(150)}` }, + }); + s3.putBucketPolicy(params, err => assertError(err, null, done)); }); it('should not allow bucket policy with pincipal arn more than 2048 characters', done => { - const params = getPolicyParams({ key: 'Principal', value: { AWS: `arn:aws:iam::767707094035:user/${generateRandomString(2020)}` } }); // eslint-disable-line max-len - s3.putBucketPolicy(params, err => - assertError(err, 'MalformedPolicy', done)); + const params = getPolicyParams({ + key: 'Principal', + value: { AWS: `arn:aws:iam::767707094035:user/${generateRandomString(2020)}` }, + }); + s3.putBucketPolicy(params, err => assertError(err, 'MalformedPolicy', done)); }); it('should allow bucket policy with valid SourceIp condition', done => { const params = getPolicyParams({ - key: 'Condition', value: { + key: 'Condition', + value: { IpAddress: { 'aws:SourceIp': '192.168.100.0/24', }, @@ -170,7 +165,8 @@ describe('aws-sdk test put bucket policy', () => { it('should not allow bucket policy with invalid SourceIp format', done => { const params = getPolicyParams({ - key: 'Condition', value: { + key: 'Condition', + value: { IpAddress: { 'aws:SourceIp': '192.168.100', // Invalid IP format }, @@ -181,7 +177,8 @@ describe('aws-sdk test put bucket policy', () => { it('should allow bucket policy with valid s3:object-lock-remaining-retention-days condition', done => { const 
params = getPolicyParams({ - key: 'Condition', value: { + key: 'Condition', + value: { NumericGreaterThanEquals: { 's3:object-lock-remaining-retention-days': '30', }, @@ -193,7 +190,8 @@ describe('aws-sdk test put bucket policy', () => { // yep, this is the expected behaviour it('should not reject policy with invalid s3:object-lock-remaining-retention-days value', done => { const params = getPolicyParams({ - key: 'Condition', value: { + key: 'Condition', + value: { NumericGreaterThanEquals: { 's3:object-lock-remaining-retention-days': '-1', // Invalid value }, @@ -205,7 +203,8 @@ describe('aws-sdk test put bucket policy', () => { // this too ¯\_(ツ)_/¯ it('should not reject policy with a key starting with aws:', done => { const params = getPolicyParams({ - key: 'Condition', value: { + key: 'Condition', + value: { NumericGreaterThanEquals: { 'aws:have-a-nice-day': 'blabla', // Invalid value }, @@ -216,7 +215,8 @@ describe('aws-sdk test put bucket policy', () => { it('should reject policy with a key that does not exist that does not start with aws:', done => { const params = getPolicyParams({ - key: 'Condition', value: { + key: 'Condition', + value: { NumericGreaterThanEquals: { 'have-a-nice-day': 'blabla', // Invalid value }, @@ -227,7 +227,8 @@ describe('aws-sdk test put bucket policy', () => { it('should enforce policies with both SourceIp and s3:object-lock conditions together', done => { const params = getPolicyParams({ - key: 'Condition', value: { + key: 'Condition', + value: { IpAddress: { 'aws:SourceIp': '192.168.100.0/24', }, @@ -241,7 +242,8 @@ describe('aws-sdk test put bucket policy', () => { it('should return error if a condition one of the condition values is invalid', done => { const params = getPolicyParams({ - key: 'Condition', value: { + key: 'Condition', + value: { IpAddress: { 'aws:SourceIp': '192.168.100', }, diff --git a/tests/functional/aws-node-sdk/test/bucket/putBucketReplication.js b/tests/functional/aws-node-sdk/test/bucket/putBucketReplication.js index f13ef62049..74b17ace95 100644 --- a/tests/functional/aws-node-sdk/test/bucket/putBucketReplication.js +++ b/tests/functional/aws-node-sdk/test/bucket/putBucketReplication.js @@ -8,7 +8,6 @@ const replicationUtils = require('../../lib/utility/replication'); const BucketUtility = require('../../lib/utility/bucket-util'); const itSkipIfE2E = process.env.S3_END_TO_END ? it.skip : it; - const sourceBucket = 'source-bucket'; const destinationBucket = 'destination-bucket'; @@ -17,11 +16,16 @@ function assertError(err, expectedErr) { if (expectedErr === null) { assert.strictEqual(err, null, `expected no error but got '${err}'`); } else { - assert.strictEqual(err.code, expectedErr, 'incorrect error response ' + - `code: should be '${expectedErr}' but got '${err.code}'`); - assert.strictEqual(err.statusCode, errors[expectedErr].code, - 'incorrect error status code: should be 400 but got ' + - `'${err.statusCode}'`); + assert.strictEqual( + err.code, + expectedErr, + 'incorrect error response ' + `code: should be '${expectedErr}' but got '${err.code}'` + ); + assert.strictEqual( + err.statusCode, + errors[expectedErr].code, + 'incorrect error status code: should be 400 but got ' + `'${err.statusCode}'` + ); } } @@ -45,8 +49,7 @@ function getVersioningParams(status) { // Get a complete replication configuration, or remove the specified property. 
const replicationConfig = { - Role: 'arn:aws:iam::account-id:role/src-resource,' + - 'arn:aws:iam::account-id:role/dest-resource', + Role: 'arn:aws:iam::account-id:role/src-resource,' + 'arn:aws:iam::account-id:role/dest-resource', Rules: [ { Destination: { @@ -63,8 +66,7 @@ const replicationConfig = { // Set the rules array of a configuration or a property of the first rule. function setConfigRules(val) { const config = Object.assign({}, replicationConfig); - config.Rules = Array.isArray(val) ? val : - [Object.assign({}, config.Rules[0], val)]; + config.Rules = Array.isArray(val) ? val : [Object.assign({}, config.Rules[0], val)]; return config; } @@ -76,13 +78,16 @@ describe('aws-node-sdk test putBucketReplication bucket status', () => { function checkVersioningError(s3Client, versioningStatus, expectedErr, cb) { const versioningParams = getVersioningParams(versioningStatus); - return series([ - next => s3Client.putBucketVersioning(versioningParams, next), - next => s3Client.putBucketReplication(replicationParams, next), - ], err => { - assertError(err, expectedErr); - return cb(); - }); + return series( + [ + next => s3Client.putBucketVersioning(versioningParams, next), + next => s3Client.putBucketReplication(replicationParams, next), + ], + err => { + assertError(err, expectedErr); + return cb(); + } + ); } before(done => { @@ -93,7 +98,7 @@ describe('aws-node-sdk test putBucketReplication bucket status', () => { return done(); }); - it('should return \'NoSuchBucket\' error if bucket does not exist', done => + it("should return 'NoSuchBucket' error if bucket does not exist", done => s3.putBucketReplication(replicationParams, err => { assertError(err, 'NoSuchBucket'); return done(); @@ -105,8 +110,7 @@ describe('aws-node-sdk test putBucketReplication bucket status', () => { afterEach(done => s3.deleteBucket({ Bucket: sourceBucket }, done)); it('should return AccessDenied if user is not bucket owner', done => - otherAccountS3.putBucketReplication(replicationParams, - err => { + otherAccountS3.putBucketReplication(replicationParams, err => { assert(err); assert.strictEqual(err.code, 'AccessDenied'); assert.strictEqual(err.statusCode, 403); @@ -119,16 +123,16 @@ describe('aws-node-sdk test putBucketReplication bucket status', () => { return done(); })); - it('should not put configuration on bucket with \'Suspended\'' + - 'versioning', done => - checkVersioningError(s3, 'Suspended', 'InvalidRequest', done)); + it("should not put configuration on bucket with 'Suspended'" + 'versioning', done => + checkVersioningError(s3, 'Suspended', 'InvalidRequest', done) + ); it('should put configuration on a bucket with versioning', done => checkVersioningError(s3, 'Enabled', null, done)); - it('should put configuration on a bucket with versioning if ' + - 'user is a replication user', done => - checkVersioningError(replicationAccountS3, 'Enabled', null, done)); + it('should put configuration on a bucket with versioning if ' + 'user is a replication user', done => + checkVersioningError(replicationAccountS3, 'Enabled', null, done) + ); }); }); @@ -146,11 +150,13 @@ describe('aws-node-sdk test putBucketReplication configuration rules', () => { beforeEach(done => { const config = getConfig('default', { signatureVersion: 'v4' }); s3 = new S3(config); - return series([ - next => s3.createBucket({ Bucket: sourceBucket }, next), - next => - s3.putBucketVersioning(getVersioningParams('Enabled'), next), - ], err => done(err)); + return series( + [ + next => s3.createBucket({ Bucket: sourceBucket }, next), + next 
=> s3.putBucketVersioning(getVersioningParams('Enabled'), next), + ], + err => done(err) + ); }); afterEach(done => s3.deleteBucket({ Bucket: sourceBucket }, done)); @@ -159,21 +165,26 @@ describe('aws-node-sdk test putBucketReplication configuration rules', () => { const Role = ARN === '' || ARN === ',' ? ARN : `${ARN},${ARN}`; const config = Object.assign({}, replicationConfig, { Role }); - it('should not accept configuration when \'Role\' is not a ' + - 'comma-separated list of two valid Amazon Resource Names: ' + - `'${Role}'`, done => - checkError(config, 'InvalidArgument', done)); + it( + "should not accept configuration when 'Role' is not a " + + 'comma-separated list of two valid Amazon Resource Names: ' + + `'${Role}'`, + done => checkError(config, 'InvalidArgument', done) + ); }); - it('should not accept configuration when \'Role\' is a comma-separated ' + - 'list of more than two valid Amazon Resource Names', + it( + "should not accept configuration when 'Role' is a comma-separated " + + 'list of more than two valid Amazon Resource Names', done => { - const Role = 'arn:aws:iam::account-id:role/resource-1,' + + const Role = + 'arn:aws:iam::account-id:role/resource-1,' + 'arn:aws:iam::account-id:role/resource-2,' + 'arn:aws:iam::account-id:role/resource-3'; const config = Object.assign({}, replicationConfig, { Role }); checkError(config, 'InvalidArgument', done); - }); + } + ); replicationUtils.validRoleARNs.forEach(ARN => { const config = setConfigRules({ @@ -189,61 +200,67 @@ describe('aws-node-sdk test putBucketReplication configuration rules', () => { }); it('should allow a combination of storageClasses across rules', done => { - const config = setConfigRules([replicationConfig.Rules[0], { - Destination: { - Bucket: `arn:aws:s3:::${destinationBucket}`, - StorageClass: 'us-east-2', + const config = setConfigRules([ + replicationConfig.Rules[0], + { + Destination: { + Bucket: `arn:aws:s3:::${destinationBucket}`, + StorageClass: 'us-east-2', + }, + Prefix: 'bar', + Status: 'Enabled', }, - Prefix: 'bar', - Status: 'Enabled', - }]); - config.Role = 'arn:aws:iam::account-id:role/resource,' + - 'arn:aws:iam::account-id:role/resource1'; + ]); + config.Role = 'arn:aws:iam::account-id:role/resource,' + 'arn:aws:iam::account-id:role/resource1'; checkError(config, null, done); }); - itSkipIfE2E('should not allow a comma separated list of roles when' + - ' a rule storageClass defines an external location', done => { - const config = { - Role: 'arn:aws:iam::account-id:role/src-resource,' + - 'arn:aws:iam::account-id:role/dest-resource', - Rules: [ - { - Destination: { - Bucket: `arn:aws:s3:::${destinationBucket}`, - StorageClass: 'us-east-2', + itSkipIfE2E( + 'should not allow a comma separated list of roles when' + ' a rule storageClass defines an external location', + done => { + const config = { + Role: 'arn:aws:iam::account-id:role/src-resource,' + 'arn:aws:iam::account-id:role/dest-resource', + Rules: [ + { + Destination: { + Bucket: `arn:aws:s3:::${destinationBucket}`, + StorageClass: 'us-east-2', + }, + Prefix: 'test-prefix', + Status: 'Enabled', }, - Prefix: 'test-prefix', - Status: 'Enabled', - }, - ], - }; - checkError(config, 'InvalidArgument', done); - }); + ], + }; + checkError(config, 'InvalidArgument', done); + } + ); replicationUtils.validRoleARNs.forEach(ARN => { const Role = `${ARN},${ARN}`; const config = Object.assign({}, replicationConfig, { Role }); - it('should accept configuration when \'Role\' is a comma-separated ' + - `list of two valid Amazon Resource Names: 
'${Role}'`, done => - checkError(config, null, done)); + it( + "should accept configuration when 'Role' is a comma-separated " + + `list of two valid Amazon Resource Names: '${Role}'`, + done => checkError(config, null, done) + ); }); replicationUtils.invalidBucketARNs.forEach(ARN => { const config = setConfigRules({ Destination: { Bucket: ARN } }); - it('should not accept configuration when \'Bucket\' is not a ' + - `valid Amazon Resource Name format: '${ARN}'`, done => - checkError(config, 'InvalidArgument', done)); + it( + "should not accept configuration when 'Bucket' is not a " + `valid Amazon Resource Name format: '${ARN}'`, + done => checkError(config, 'InvalidArgument', done) + ); }); - it('should not accept configuration when \'Rules\' is empty ', done => { + it("should not accept configuration when 'Rules' is empty ", done => { const config = Object.assign({}, replicationConfig, { Rules: [] }); return checkError(config, 'MalformedXML', done); }); - it('should not accept configuration when \'Rules\' is > 1000', done => { + it("should not accept configuration when 'Rules' is > 1000", done => { const arr = []; for (let i = 0; i < 1001; i++) { arr.push({ @@ -256,13 +273,13 @@ describe('aws-node-sdk test putBucketReplication configuration rules', () => { return checkError(config, 'InvalidRequest', done); }); - it('should not accept configuration when \'ID\' length is > 255', done => { + it("should not accept configuration when 'ID' length is > 255", done => { // Set ID to a string of length 256. const config = setConfigRules({ ID: new Array(257).join('x') }); return checkError(config, 'InvalidArgument', done); }); - it('should not accept configuration when \'ID\' is not unique', done => { + it("should not accept configuration when 'ID' is not unique", done => { const rule1 = replicationConfig.Rules[0]; // Prefix is unique, but not the ID. const rule2 = Object.assign({}, rule1, { Prefix: 'bar' }); @@ -270,8 +287,7 @@ describe('aws-node-sdk test putBucketReplication configuration rules', () => { return checkError(config, 'InvalidRequest', done); }); - it('should accept configuration when \'ID\' is not provided for multiple ' + - 'rules', done => { + it("should accept configuration when 'ID' is not provided for multiple " + 'rules', done => { const replicationConfigWithoutID = Object.assign({}, replicationConfig); const rule1 = replicationConfigWithoutID.Rules[0]; delete rule1.ID; @@ -283,60 +299,75 @@ describe('aws-node-sdk test putBucketReplication configuration rules', () => { replicationUtils.validStatuses.forEach(status => { const config = setConfigRules({ Status: status }); - it(`should accept configuration when 'Role' is ${status}`, done => - checkError(config, null, done)); + it(`should accept configuration when 'Role' is ${status}`, done => checkError(config, null, done)); }); - it('should not accept configuration when \'Status\' is invalid', done => { + it("should not accept configuration when 'Status' is invalid", done => { // Status must either be 'Enabled' or 'Disabled'. 
const config = setConfigRules({ Status: 'Invalid' }); return checkError(config, 'MalformedXML', done); }); - it('should accept configuration when \'Prefix\' is \'\'', - done => { - const config = setConfigRules({ Prefix: '' }); - return checkError(config, null, done); - }); + it("should accept configuration when 'Prefix' is ''", done => { + const config = setConfigRules({ Prefix: '' }); + return checkError(config, null, done); + }); - it('should not accept configuration when \'Prefix\' length is > 1024', - done => { - // Set Prefix to a string of length of 1025. - const config = setConfigRules({ - Prefix: new Array(1026).join('x'), - }); - return checkError(config, 'InvalidArgument', done); + it("should not accept configuration when 'Prefix' length is > 1024", done => { + // Set Prefix to a string of length of 1025. + const config = setConfigRules({ + Prefix: new Array(1026).join('x'), }); - - it('should not accept configuration when rules contain overlapping ' + - '\'Prefix\' values: new prefix starts with used prefix', done => { - const config = setConfigRules([replicationConfig.Rules[0], { - Destination: { Bucket: `arn:aws:s3:::${destinationBucket}` }, - Prefix: 'test-prefix/more-content', - Status: 'Enabled', - }]); - return checkError(config, 'InvalidRequest', done); + return checkError(config, 'InvalidArgument', done); }); - it('should not accept configuration when rules contain overlapping ' + - '\'Prefix\' values: used prefix starts with new prefix', done => { - const config = setConfigRules([replicationConfig.Rules[0], { - Destination: { Bucket: `arn:aws:s3:::${destinationBucket}` }, - Prefix: 'test', - Status: 'Enabled', - }]); - return checkError(config, 'InvalidRequest', done); - }); + it( + 'should not accept configuration when rules contain overlapping ' + + "'Prefix' values: new prefix starts with used prefix", + done => { + const config = setConfigRules([ + replicationConfig.Rules[0], + { + Destination: { Bucket: `arn:aws:s3:::${destinationBucket}` }, + Prefix: 'test-prefix/more-content', + Status: 'Enabled', + }, + ]); + return checkError(config, 'InvalidRequest', done); + } + ); - it('should not accept configuration when \'Destination\' properties of ' + - 'two or more rules specify different buckets', done => { - const config = setConfigRules([replicationConfig.Rules[0], { - Destination: { Bucket: `arn:aws:s3:::${destinationBucket}-1` }, - Prefix: 'bar', - Status: 'Enabled', - }]); - return checkError(config, 'InvalidRequest', done); - }); + it( + 'should not accept configuration when rules contain overlapping ' + + "'Prefix' values: used prefix starts with new prefix", + done => { + const config = setConfigRules([ + replicationConfig.Rules[0], + { + Destination: { Bucket: `arn:aws:s3:::${destinationBucket}` }, + Prefix: 'test', + Status: 'Enabled', + }, + ]); + return checkError(config, 'InvalidRequest', done); + } + ); + + it( + "should not accept configuration when 'Destination' properties of " + + 'two or more rules specify different buckets', + done => { + const config = setConfigRules([ + replicationConfig.Rules[0], + { + Destination: { Bucket: `arn:aws:s3:::${destinationBucket}-1` }, + Prefix: 'bar', + Status: 'Enabled', + }, + ]); + return checkError(config, 'InvalidRequest', done); + } + ); replicationUtils.validStorageClasses.forEach(storageClass => { const config = setConfigRules({ @@ -346,8 +377,9 @@ describe('aws-node-sdk test putBucketReplication configuration rules', () => { }, }); - it('should accept configuration when \'StorageClass\' is ' + - 
`${storageClass}`, done => checkError(config, null, done)); + it("should accept configuration when 'StorageClass' is " + `${storageClass}`, done => + checkError(config, null, done) + ); }); // A combination of external destination storage classes. @@ -359,18 +391,18 @@ describe('aws-node-sdk test putBucketReplication configuration rules', () => { }, }); - itSkipIfE2E('should accept configuration when \'StorageClass\' is ' + - `${storageClass}`, done => checkError(config, null, done)); + itSkipIfE2E("should accept configuration when 'StorageClass' is " + `${storageClass}`, done => + checkError(config, null, done) + ); }); - it('should not accept configuration when \'StorageClass\' is invalid', - done => { - const config = setConfigRules({ - Destination: { - Bucket: `arn:aws:s3:::${destinationBucket}`, - StorageClass: 'INVALID', - }, - }); - return checkError(config, 'MalformedXML', done); + it("should not accept configuration when 'StorageClass' is invalid", done => { + const config = setConfigRules({ + Destination: { + Bucket: `arn:aws:s3:::${destinationBucket}`, + StorageClass: 'INVALID', + }, }); + return checkError(config, 'MalformedXML', done); + }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/putBucketTagging.js b/tests/functional/aws-node-sdk/test/bucket/putBucketTagging.js index 4233be419f..ad127857ac 100644 --- a/tests/functional/aws-node-sdk/test/bucket/putBucketTagging.js +++ b/tests/functional/aws-node-sdk/test/bucket/putBucketTagging.js @@ -49,7 +49,8 @@ const validEmptyTagging = { const taggingKeyNotValid = { TagSet: [ { - Key: 'stringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaastringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + + Key: + 'stringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaastringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + 'astringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaastringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + 'stringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', Value: 'string', @@ -69,7 +70,8 @@ const taggingValueNotValid = { }, { Key: 'string', - Value: 'stringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaastringaaaaaaaaaaaaaaaaaaaaaa' + + Value: + 'stringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaastringaaaaaaaaaaaaaaaaaaaaaa' + 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaastringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + 'aaaaaaastringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaastringaaaaaaaaaaaaaaaaaa' + 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaastringaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + @@ -92,128 +94,210 @@ describe('aws-sdk test put bucket tagging', () => { afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); it('should not add tag if tagKey not unique', done => { - async.waterfall([ - next => s3.putBucketTagging({ - AccountId: s3.AccountId, - Tagging: taggingNotUnique, Bucket: bucket, - }, (err, res) => { - next(err, res); - }), - ], err => { - assertError(err, 'InvalidTag'); - done(); - }); + async.waterfall( + [ + next => + s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: taggingNotUnique, + Bucket: bucket, + }, + (err, res) => { + next(err, res); + } + ), + ], + err => { + assertError(err, 'InvalidTag'); + done(); + } + ); }); it('should not add tag if tagKey not valid', done => { - async.waterfall([ - next => s3.putBucketTagging({ - AccountId: s3.AccountId, - Tagging: taggingKeyNotValid, Bucket: bucket, - }, (err, res) => { - next(err, res); - }), - ], err => { - assertError(err, 'InvalidTag'); - done(); - }); + async.waterfall( + [ + next => + 
s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: taggingKeyNotValid, + Bucket: bucket, + }, + (err, res) => { + next(err, res); + } + ), + ], + err => { + assertError(err, 'InvalidTag'); + done(); + } + ); }); it('should not add tag if tagValue not valid', done => { - async.waterfall([ - next => s3.putBucketTagging({ - AccountId: s3.AccountId, - Tagging: taggingValueNotValid, Bucket: bucket, - }, (err, res) => { - next(err, res); - }), - ], err => { - assertError(err, 'InvalidTag'); - done(); - }); + async.waterfall( + [ + next => + s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: taggingValueNotValid, + Bucket: bucket, + }, + (err, res) => { + next(err, res); + } + ), + ], + err => { + assertError(err, 'InvalidTag'); + done(); + } + ); }); it('should add tag', done => { - async.series([ - next => s3.putBucketTagging({ - AccountId: s3.AccountId, - Tagging: validTagging, Bucket: bucket, - }, (err, res) => { - next(err, res); - }), - next => s3.getBucketTagging({ - AccountId: s3.AccountId, - Bucket: bucket, - }, (err, res) => { - assert.deepStrictEqual(res, validTagging); - next(err, res); - }), - ], err => { - assert.ifError(err); - done(err); - }); + async.series( + [ + next => + s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: validTagging, + Bucket: bucket, + }, + (err, res) => { + next(err, res); + } + ), + next => + s3.getBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + }, + (err, res) => { + assert.deepStrictEqual(res, validTagging); + next(err, res); + } + ), + ], + err => { + assert.ifError(err); + done(err); + } + ); }); it('should be able to put single tag', done => { - async.series([ - next => s3.putBucketTagging({ - AccountId: s3.AccountId, - Tagging: validSingleTagging, Bucket: bucket, - }, (err, res) => { - next(err, res, next); - }), - next => s3.getBucketTagging({ - AccountId: s3.AccountId, - Bucket: bucket, - }, (err, res) => { - assert.deepStrictEqual(res, validSingleTagging); - next(err, res); - }), - ], err => { - assert.ifError(err); - done(err); - }); + async.series( + [ + next => + s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: validSingleTagging, + Bucket: bucket, + }, + (err, res) => { + next(err, res, next); + } + ), + next => + s3.getBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + }, + (err, res) => { + assert.deepStrictEqual(res, validSingleTagging); + next(err, res); + } + ), + ], + err => { + assert.ifError(err); + done(err); + } + ); }); it('should be able to put empty tag array', done => { - async.series([ - next => s3.putBucketTagging({ - AccountId: s3.AccountId, - Tagging: validEmptyTagging, Bucket: bucket, - }, next), - next => s3.getBucketTagging({ - AccountId: s3.AccountId, - Bucket: bucket, - }, next), - ], err => { - assertError(err, 'NoSuchTagSet'); - done(); - }); + async.series( + [ + next => + s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: validEmptyTagging, + Bucket: bucket, + }, + next + ), + next => + s3.getBucketTagging( + { + AccountId: s3.AccountId, + Bucket: bucket, + }, + next + ), + ], + err => { + assertError(err, 'NoSuchTagSet'); + done(); + } + ); }); it('should return accessDenied if expected bucket owner does not match', done => { - async.waterfall([ - next => s3.putBucketTagging({ AccountId: s3.AccountId, - Tagging: validEmptyTagging, Bucket: bucket, ExpectedBucketOwner: '944690102203' }, (err, res) => { - next(err, res); - }), - ], err => { - assertError(err, 'AccessDenied'); - done(); - }); + async.waterfall( + [ + next => + 
s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: validEmptyTagging, + Bucket: bucket, + ExpectedBucketOwner: '944690102203', + }, + (err, res) => { + next(err, res); + } + ), + ], + err => { + assertError(err, 'AccessDenied'); + done(); + } + ); }); it('should not return accessDenied if expected bucket owner matches', done => { - async.series([ - next => s3.putBucketTagging({ AccountId: s3.AccountId, - Tagging: validEmptyTagging, Bucket: bucket, ExpectedBucketOwner: s3.AccountId }, (err, res) => { - next(err, res); - }), - next => s3.getBucketTagging({ AccountId: s3.AccountId, Bucket: bucket }, next), - ], err => { - assertError(err, 'NoSuchTagSet'); - done(); - }); + async.series( + [ + next => + s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: validEmptyTagging, + Bucket: bucket, + ExpectedBucketOwner: s3.AccountId, + }, + (err, res) => { + next(err, res); + } + ), + next => s3.getBucketTagging({ AccountId: s3.AccountId, Bucket: bucket }, next), + ], + err => { + assertError(err, 'NoSuchTagSet'); + done(); + } + ); }); it('should put 50 tags', done => { @@ -223,15 +307,18 @@ describe('aws-sdk test put bucket tagging', () => { Value: `value_${index}`, })), }; - s3.putBucketTagging({ - AccountId: s3.AccountId, - Tagging: tags, - Bucket: bucket, - ExpectedBucketOwner: s3.AccountId - }, err => { - assert.ifError(err); - done(err); - }); + s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: tags, + Bucket: bucket, + ExpectedBucketOwner: s3.AccountId, + }, + err => { + assert.ifError(err); + done(err); + } + ); }); it('should not put more than 50 tags', done => { @@ -241,14 +328,17 @@ describe('aws-sdk test put bucket tagging', () => { Value: `value_${index}`, })), }; - s3.putBucketTagging({ - AccountId: s3.AccountId, - Tagging: tags, - Bucket: bucket, - ExpectedBucketOwner: s3.AccountId - }, err => { - assertError(err, 'BadRequest'); - done(); - }); + s3.putBucketTagging( + { + AccountId: s3.AccountId, + Tagging: tags, + Bucket: bucket, + ExpectedBucketOwner: s3.AccountId, + }, + err => { + assertError(err, 'BadRequest'); + done(); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/putCors.js b/tests/functional/aws-node-sdk/test/bucket/putCors.js index f8f030c707..6f5900b18f 100644 --- a/tests/functional/aws-node-sdk/test/bucket/putCors.js +++ b/tests/functional/aws-node-sdk/test/bucket/putCors.js @@ -5,17 +5,18 @@ const BucketUtility = require('../../lib/utility/bucket-util'); const bucketName = 'testcorsbucket'; -const sampleCors = { CORSRules: [ - { AllowedMethods: ['PUT', 'POST', 'DELETE'], - AllowedOrigins: ['http://www.example.com'], - AllowedHeaders: ['*'], - MaxAgeSeconds: 3000, - ExposeHeaders: ['x-amz-server-side-encryption'] }, - { AllowedMethods: ['GET'], - AllowedOrigins: ['*'], - AllowedHeaders: ['*'], - MaxAgeSeconds: 3000 }, -] }; +const sampleCors = { + CORSRules: [ + { + AllowedMethods: ['PUT', 'POST', 'DELETE'], + AllowedOrigins: ['http://www.example.com'], + AllowedHeaders: ['*'], + MaxAgeSeconds: 3000, + ExposeHeaders: ['x-amz-server-side-encryption'], + }, + { AllowedMethods: ['GET'], AllowedOrigins: ['*'], AllowedHeaders: ['*'], MaxAgeSeconds: 3000 }, + ], +}; function _corsTemplate(params) { const sampleRule = { @@ -25,12 +26,11 @@ function _corsTemplate(params) { MaxAgeSeconds: 3000, ExposeHeaders: ['x-amz-server-side-encryption'], }; - ['AllowedMethods', 'AllowedOrigins', 'AllowedHeaders', 'MaxAgeSeconds', - 'ExposeHeaders'].forEach(prop => { - if (params[prop]) { - sampleRule[prop] = params[prop]; - } - }); 
+ ['AllowedMethods', 'AllowedOrigins', 'AllowedHeaders', 'MaxAgeSeconds', 'ExposeHeaders'].forEach(prop => { + if (params[prop]) { + sampleRule[prop] = params[prop]; + } + }); return { CORSRules: [sampleRule] }; } @@ -40,8 +40,7 @@ describe('PUT bucket cors', () => { const s3 = bucketUtil.s3; function _testPutBucketCors(rules, statusCode, errMsg, cb) { - s3.putBucketCors({ Bucket: bucketName, - CORSConfiguration: rules }, err => { + s3.putBucketCors({ Bucket: bucketName, CORSConfiguration: rules }, err => { assert(err, 'Expected err but found none'); assert.strictEqual(err.code, errMsg); assert.strictEqual(err.statusCode, statusCode); @@ -54,8 +53,7 @@ describe('PUT bucket cors', () => { afterEach(() => bucketUtil.deleteOne(bucketName)); it('should put a bucket cors successfully', done => { - s3.putBucketCors({ Bucket: bucketName, - CORSConfiguration: sampleCors }, err => { + s3.putBucketCors({ Bucket: bucketName, CORSConfiguration: sampleCors }, err => { assert.strictEqual(err, null, `Found unexpected err ${err}`); done(); }); @@ -81,10 +79,8 @@ describe('PUT bucket cors', () => { _testPutBucketCors(testCors, 400, 'MalformedXML', done); }); - it('should return InvalidRequest if more than one asterisk in ' + - 'AllowedOrigin', done => { - const testCors = - _corsTemplate({ AllowedOrigins: ['http://*.*.com'] }); + it('should return InvalidRequest if more than one asterisk in ' + 'AllowedOrigin', done => { + const testCors = _corsTemplate({ AllowedOrigins: ['http://*.*.com'] }); _testPutBucketCors(testCors, 400, 'InvalidRequest', done); }); @@ -93,33 +89,27 @@ describe('PUT bucket cors', () => { _testPutBucketCors(testCors, 400, 'MalformedXML', done); }); - it('should return InvalidRequest if AllowedMethod is not a valid ' + - 'method', done => { + it('should return InvalidRequest if AllowedMethod is not a valid ' + 'method', done => { const testCors = _corsTemplate({ AllowedMethods: ['test'] }); _testPutBucketCors(testCors, 400, 'InvalidRequest', done); }); - it('should return InvalidRequest for lowercase value for ' + - 'AllowedMethod', done => { + it('should return InvalidRequest for lowercase value for ' + 'AllowedMethod', done => { const testCors = _corsTemplate({ AllowedMethods: ['put', 'get'] }); _testPutBucketCors(testCors, 400, 'InvalidRequest', done); }); - it('should return InvalidRequest if more than one asterisk in ' + - 'AllowedHeader', done => { + it('should return InvalidRequest if more than one asterisk in ' + 'AllowedHeader', done => { const testCors = _corsTemplate({ AllowedHeaders: ['*-amz-*'] }); _testPutBucketCors(testCors, 400, 'InvalidRequest', done); }); - it('should return InvalidRequest if ExposeHeader has character ' + - 'that is not dash or alphanumeric', - done => { + it('should return InvalidRequest if ExposeHeader has character ' + 'that is not dash or alphanumeric', done => { const testCors = _corsTemplate({ ExposeHeaders: ['test header'] }); _testPutBucketCors(testCors, 400, 'InvalidRequest', done); }); - it('should return InvalidRequest if ExposeHeader has wildcard', - done => { + it('should return InvalidRequest if ExposeHeader has wildcard', done => { const testCors = _corsTemplate({ ExposeHeaders: ['x-amz-*'] }); _testPutBucketCors(testCors, 400, 'InvalidRequest', done); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/putWebsite.js b/tests/functional/aws-node-sdk/test/bucket/putWebsite.js index bcb4f2b7bb..fd3dcca2a1 100644 --- a/tests/functional/aws-node-sdk/test/bucket/putWebsite.js +++ 
b/tests/functional/aws-node-sdk/test/bucket/putWebsite.js @@ -12,8 +12,7 @@ describe('PUT bucket website', () => { const s3 = bucketUtil.s3; function _testPutBucketWebsite(config, statusCode, errMsg, cb) { - s3.putBucketWebsite({ Bucket: bucketName, - WebsiteConfiguration: config }, err => { + s3.putBucketWebsite({ Bucket: bucketName, WebsiteConfiguration: config }, err => { assert(err, 'Expected err but found none'); assert.strictEqual(err.code, errMsg); assert.strictEqual(err.statusCode, statusCode); @@ -33,74 +32,78 @@ describe('PUT bucket website', () => { afterEach(() => { process.stdout.write('about to empty bucket\n'); - return bucketUtil.empty(bucketName).then(() => { - process.stdout.write('about to delete bucket\n'); - return bucketUtil.deleteOne(bucketName); - }).catch(err => { - if (err) { - process.stdout.write('error in afterEach', err); - throw err; - } - }); + return bucketUtil + .empty(bucketName) + .then(() => { + process.stdout.write('about to delete bucket\n'); + return bucketUtil.deleteOne(bucketName); + }) + .catch(err => { + if (err) { + process.stdout.write('error in afterEach', err); + throw err; + } + }); }); it('should put a bucket website successfully', done => { const config = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucketName, - WebsiteConfiguration: config }, err => { + s3.putBucketWebsite({ Bucket: bucketName, WebsiteConfiguration: config }, err => { assert.strictEqual(err, null, `Found unexpected err ${err}`); done(); }); }); - it('should return InvalidArgument if IndexDocument or ' + - 'RedirectAllRequestsTo is not provided', done => { + it('should return InvalidArgument if IndexDocument or ' + 'RedirectAllRequestsTo is not provided', done => { const config = new WebsiteConfigTester(); _testPutBucketWebsite(config, 400, 'InvalidArgument', done); }); - it('should return an InvalidRequest if both ' + - 'RedirectAllRequestsTo and IndexDocument are provided', done => { - const redirectAllTo = { - HostName: 'test', - Protocol: 'http', - }; - const config = new WebsiteConfigTester(null, null, - redirectAllTo); - config.addRoutingRule({ Protocol: 'http' }); - _testPutBucketWebsite(config, 400, 'InvalidRequest', done); - }); + it( + 'should return an InvalidRequest if both ' + 'RedirectAllRequestsTo and IndexDocument are provided', + done => { + const redirectAllTo = { + HostName: 'test', + Protocol: 'http', + }; + const config = new WebsiteConfigTester(null, null, redirectAllTo); + config.addRoutingRule({ Protocol: 'http' }); + _testPutBucketWebsite(config, 400, 'InvalidRequest', done); + } + ); it('should return InvalidArgument if index has slash', done => { const config = new WebsiteConfigTester('in/dex.html'); _testPutBucketWebsite(config, 400, 'InvalidArgument', done); }); - it('should return InvalidRequest if both ReplaceKeyWith and ' + - 'ReplaceKeyPrefixWith are present in same rule', done => { - const config = new WebsiteConfigTester('index.html'); - config.addRoutingRule({ ReplaceKeyPrefixWith: 'test', - ReplaceKeyWith: 'test' }); - _testPutBucketWebsite(config, 400, 'InvalidRequest', done); - }); - - it('should return InvalidRequest if both ReplaceKeyWith and ' + - 'ReplaceKeyPrefixWith are present in same rule', done => { - const config = new WebsiteConfigTester('index.html'); - config.addRoutingRule({ ReplaceKeyPrefixWith: 'test', - ReplaceKeyWith: 'test' }); - _testPutBucketWebsite(config, 400, 'InvalidRequest', done); - }); - - it('should return InvalidRequest if Redirect Protocol is ' + - 'not http or https', done => 
{ + it( + 'should return InvalidRequest if both ReplaceKeyWith and ' + + 'ReplaceKeyPrefixWith are present in same rule', + done => { + const config = new WebsiteConfigTester('index.html'); + config.addRoutingRule({ ReplaceKeyPrefixWith: 'test', ReplaceKeyWith: 'test' }); + _testPutBucketWebsite(config, 400, 'InvalidRequest', done); + } + ); + + it( + 'should return InvalidRequest if both ReplaceKeyWith and ' + + 'ReplaceKeyPrefixWith are present in same rule', + done => { + const config = new WebsiteConfigTester('index.html'); + config.addRoutingRule({ ReplaceKeyPrefixWith: 'test', ReplaceKeyWith: 'test' }); + _testPutBucketWebsite(config, 400, 'InvalidRequest', done); + } + ); + + it('should return InvalidRequest if Redirect Protocol is ' + 'not http or https', done => { const config = new WebsiteConfigTester('index.html'); config.addRoutingRule({ Protocol: 'notvalidprotocol' }); _testPutBucketWebsite(config, 400, 'InvalidRequest', done); }); - it('should return InvalidRequest if RedirectAllRequestsTo Protocol ' + - 'is not http or https', done => { + it('should return InvalidRequest if RedirectAllRequestsTo Protocol ' + 'is not http or https', done => { const redirectAllTo = { HostName: 'test', Protocol: 'notvalidprotocol', @@ -109,36 +112,47 @@ describe('PUT bucket website', () => { _testPutBucketWebsite(config, 400, 'InvalidRequest', done); }); - it('should return MalformedXML if Redirect HttpRedirectCode ' + - 'is a string that does not contains a number', done => { - const config = new WebsiteConfigTester('index.html'); - config.addRoutingRule({ HttpRedirectCode: 'notvalidhttpcode' }); - _testPutBucketWebsite(config, 400, 'MalformedXML', done); - }); - - it('should return InvalidRequest if Redirect HttpRedirectCode ' + - 'is not a valid http redirect code (3XX excepting 300)', done => { - const config = new WebsiteConfigTester('index.html'); - config.addRoutingRule({ HttpRedirectCode: '400' }); - _testPutBucketWebsite(config, 400, 'InvalidRequest', done); - }); - - it('should return InvalidRequest if Condition ' + - 'HttpErrorCodeReturnedEquals is a string that does ' + - ' not contain a number', done => { - const condition = { HttpErrorCodeReturnedEquals: 'notvalidcode' }; - const config = new WebsiteConfigTester('index.html'); - config.addRoutingRule({ HostName: 'test' }, condition); - _testPutBucketWebsite(config, 400, 'MalformedXML', done); - }); - - it('should return InvalidRequest if Condition ' + - 'HttpErrorCodeReturnedEquals is not a valid http' + - 'error code (4XX or 5XX)', done => { - const condition = { HttpErrorCodeReturnedEquals: '300' }; - const config = new WebsiteConfigTester('index.html'); - config.addRoutingRule({ HostName: 'test' }, condition); - _testPutBucketWebsite(config, 400, 'InvalidRequest', done); - }); + it( + 'should return MalformedXML if Redirect HttpRedirectCode ' + 'is a string that does not contains a number', + done => { + const config = new WebsiteConfigTester('index.html'); + config.addRoutingRule({ HttpRedirectCode: 'notvalidhttpcode' }); + _testPutBucketWebsite(config, 400, 'MalformedXML', done); + } + ); + + it( + 'should return InvalidRequest if Redirect HttpRedirectCode ' + + 'is not a valid http redirect code (3XX excepting 300)', + done => { + const config = new WebsiteConfigTester('index.html'); + config.addRoutingRule({ HttpRedirectCode: '400' }); + _testPutBucketWebsite(config, 400, 'InvalidRequest', done); + } + ); + + it( + 'should return InvalidRequest if Condition ' + + 'HttpErrorCodeReturnedEquals is a string that does ' + + ' 
not contain a number', + done => { + const condition = { HttpErrorCodeReturnedEquals: 'notvalidcode' }; + const config = new WebsiteConfigTester('index.html'); + config.addRoutingRule({ HostName: 'test' }, condition); + _testPutBucketWebsite(config, 400, 'MalformedXML', done); + } + ); + + it( + 'should return InvalidRequest if Condition ' + + 'HttpErrorCodeReturnedEquals is not a valid http' + + 'error code (4XX or 5XX)', + done => { + const condition = { HttpErrorCodeReturnedEquals: '300' }; + const config = new WebsiteConfigTester('index.html'); + config.addRoutingRule({ HostName: 'test' }, condition); + _testPutBucketWebsite(config, 400, 'InvalidRequest', done); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/skipScan.js b/tests/functional/aws-node-sdk/test/bucket/skipScan.js index 57269c265e..1b89148a79 100644 --- a/tests/functional/aws-node-sdk/test/bucket/skipScan.js +++ b/tests/functional/aws-node-sdk/test/bucket/skipScan.js @@ -38,35 +38,41 @@ describe('Skip scan cases tests', () => { before(done => { const config = getConfig('default', { signatureVersion: 'v4' }); s3 = new AWS.S3(config); - s3.createBucket( - { Bucket }, (err, data) => { - if (err) { - done(err, data); - } - /* generating different prefixes every x > STREAK_LENGTH + s3.createBucket({ Bucket }, (err, data) => { + if (err) { + done(err, data); + } + /* generating different prefixes every x > STREAK_LENGTH to force the metadata backends to skip */ - const x = 120; - async.timesLimit(500, 10, - (n, next) => { - const o = {}; - o.Bucket = Bucket; - // eslint-disable-next-line - o.Key = String.fromCharCode(65 + n / x) + - '/' + n % x; - o.Body = ''; - s3.putObject(o, (err, data) => { - next(err, data); - }); - }, done); - }); + const x = 120; + async.timesLimit( + 500, + 10, + (n, next) => { + const o = {}; + o.Bucket = Bucket; + // eslint-disable-next-line + o.Key = String.fromCharCode(65 + n / x) + '/' + (n % x); + o.Body = ''; + s3.putObject(o, (err, data) => { + next(err, data); + }); + }, + done + ); + }); }); after(done => { s3.listObjects({ Bucket }, (err, data) => { - async.each(data.Contents, (o, next) => { - s3.deleteObject({ Bucket, Key: o.Key }, next); - }, () => { - s3.deleteBucket({ Bucket }, done); - }); + async.each( + data.Contents, + (o, next) => { + s3.deleteObject({ Bucket, Key: o.Key }, next); + }, + () => { + s3.deleteBucket({ Bucket }, done); + } + ); }); }); it('should find all common prefixes in one shot', done => { @@ -81,13 +87,7 @@ describe('Skip scan cases tests', () => { Name: Bucket, Prefix: '', MaxKeys: 1000, - CommonPrefixes: [ - 'A/', - 'B/', - 'C/', - 'D/', - 'E/', - ], + CommonPrefixes: ['A/', 'B/', 'C/', 'D/', 'E/'], }); done(); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/testBucketStress.js b/tests/functional/aws-node-sdk/test/bucket/testBucketStress.js index fa3bf3c9b1..426c45b90d 100644 --- a/tests/functional/aws-node-sdk/test/bucket/testBucketStress.js +++ b/tests/functional/aws-node-sdk/test/bucket/testBucketStress.js @@ -9,17 +9,25 @@ const objectCount = 100; const loopCount = 10; function putObjects(s3, loopId, cb) { - times(objectCount, (i, next) => { - const params = { Bucket: bucket, Key: `foo${loopId}_${i}`, Body: text }; - s3.putObject(params, next); - }, cb); + times( + objectCount, + (i, next) => { + const params = { Bucket: bucket, Key: `foo${loopId}_${i}`, Body: text }; + s3.putObject(params, next); + }, + cb + ); } function deleteObjects(s3, loopId, cb) { - times(objectCount, (i, next) => { - const params = { Bucket: bucket, 
Key: `foo${loopId}_${i}` }; - s3.deleteObject(params, next); - }, cb); + times( + objectCount, + (i, next) => { + const params = { Bucket: bucket, Key: `foo${loopId}_${i}` }; + s3.deleteObject(params, next); + }, + cb + ); } describe('aws-node-sdk stress test bucket', function testSuite() { @@ -31,11 +39,18 @@ describe('aws-node-sdk stress test bucket', function testSuite() { }); it('createBucket-putObject-deleteObject-deleteBucket loop', done => - timesSeries(loopCount, (loopId, next) => waterfall([ - next => s3.createBucket({ Bucket: bucket }, err => next(err)), - next => putObjects(s3, loopId, err => next(err)), - next => deleteObjects(s3, loopId, err => next(err)), - next => s3.deleteBucket({ Bucket: bucket }, err => next(err)), - ], err => next(err)), done) - ); + timesSeries( + loopCount, + (loopId, next) => + waterfall( + [ + next => s3.createBucket({ Bucket: bucket }, err => next(err)), + next => putObjects(s3, loopId, err => next(err)), + next => deleteObjects(s3, loopId, err => next(err)), + next => s3.deleteBucket({ Bucket: bucket }, err => next(err)), + ], + err => next(err) + ), + done + )); }); diff --git a/tests/functional/aws-node-sdk/test/bucket/testBucketVersioning.js b/tests/functional/aws-node-sdk/test/bucket/testBucketVersioning.js index 91b5610d8d..a7fc763693 100644 --- a/tests/functional/aws-node-sdk/test/bucket/testBucketVersioning.js +++ b/tests/functional/aws-node-sdk/test/bucket/testBucketVersioning.js @@ -5,8 +5,7 @@ const getConfig = require('../support/config'); const bucket = `versioning-bucket-${Date.now()}`; const config = getConfig('default', { signatureVersion: 'v4' }); -const configReplication = getConfig('replication', - { signatureVersion: 'v4' }); +const configReplication = getConfig('replication', { signatureVersion: 'v4' }); const s3 = new S3(config); describe('aws-node-sdk test bucket versioning', function testSuite() { this.timeout(60000); @@ -29,8 +28,7 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { s3.putBucketVersioning(params, error => { if (error) { assert.strictEqual(error.statusCode, 400); - assert.strictEqual( - error.code, 'IllegalVersioningConfigurationException'); + assert.strictEqual(error.code, 'IllegalVersioningConfigurationException'); done(); } else { done('accepted empty versioning configuration'); @@ -57,8 +55,7 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { s3.putBucketVersioning(params, error => { if (error) { assert.strictEqual(error.statusCode, 400); - assert.strictEqual( - error.code, 'IllegalVersioningConfigurationException'); + assert.strictEqual(error.code, 'IllegalVersioningConfigurationException'); done(); } else { done('accepted empty versioning configuration'); @@ -80,14 +77,13 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { Bucket: bucket, VersioningConfiguration: { MFADelete: 'fun', - Status: 'let\'s do it', + Status: "let's do it", }, }; s3.putBucketVersioning(params, error => { if (error) { assert.strictEqual(error.statusCode, 400); - assert.strictEqual( - error.code, 'IllegalVersioningConfigurationException'); + assert.strictEqual(error.code, 'IllegalVersioningConfigurationException'); done(); } else { done('accepted empty versioning configuration'); @@ -129,8 +125,7 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { const params = { Bucket: bucket }; s3.getBucketVersioning(params, (error, data) => { assert.strictEqual(error, null); - assert.deepStrictEqual(data, { MFADelete: 'Disabled', - Status: 'Enabled' }); 
+ assert.deepStrictEqual(data, { MFADelete: 'Disabled', Status: 'Enabled' }); done(); }); }); @@ -145,8 +140,7 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { s3.putBucketVersioning(params, done); }); - it('should accept valid versioning configuration if user is a ' + - 'replication user', done => { + it('should accept valid versioning configuration if user is a ' + 'replication user', done => { const params = { Bucket: bucket, VersioningConfiguration: { @@ -166,26 +160,36 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { }); }); - describe('bucket versioning for ingestion buckets', () => { const Bucket = `ingestion-bucket-${Date.now()}`; - before(done => s3.createBucket({ - Bucket, - CreateBucketConfiguration: { - LocationConstraint: 'us-east-2:ingest', + before(done => + s3.createBucket( + { + Bucket, + CreateBucketConfiguration: { + LocationConstraint: 'us-east-2:ingest', + }, }, - }, done)); + done + ) + ); after(done => s3.deleteBucket({ Bucket }, done)); it('should not allow suspending versioning for ingestion buckets', done => { - s3.putBucketVersioning({ Bucket, VersioningConfiguration: { - Status: 'Suspended' - } }, err => { - assert(err, 'Expected error but got success'); - assert.strictEqual(err.code, 'InvalidBucketState'); - done(); - }); + s3.putBucketVersioning( + { + Bucket, + VersioningConfiguration: { + Status: 'Suspended', + }, + }, + err => { + assert(err, 'Expected error but got success'); + assert.strictEqual(err.code, 'InvalidBucketState'); + done(); + } + ); }); }); @@ -196,10 +200,13 @@ describe('aws-node-sdk test bucket versioning with object lock', () => { before(done => { const config = getConfig('default', { signatureVersion: 'v4' }); s3 = new S3(config); - s3.createBucket({ - Bucket: bucket, - ObjectLockEnabledForBucket: true, - }, done); + s3.createBucket( + { + Bucket: bucket, + ObjectLockEnabledForBucket: true, + }, + done + ); }); // delete bucket after testing @@ -218,4 +225,3 @@ describe('aws-node-sdk test bucket versioning with object lock', () => { }); }); }); - diff --git a/tests/functional/aws-node-sdk/test/legacy/authV2QueryTests.js b/tests/functional/aws-node-sdk/test/legacy/authV2QueryTests.js index 4a2b7b0567..832bf69e20 100644 --- a/tests/functional/aws-node-sdk/test/legacy/authV2QueryTests.js +++ b/tests/functional/aws-node-sdk/test/legacy/authV2QueryTests.js @@ -8,10 +8,7 @@ const provideRawOutput = require('../../lib/utility/provideRawOutput'); const random = Math.round(Math.random() * 100).toString(); const bucket = `mybucket-${random}`; const almostOutsideTime = 99990; -const itSkipAWS = process.env.AWS_ON_AIR - ? it.skip - : it; - +const itSkipAWS = process.env.AWS_ON_AIR ? 
it.skip : it; function diff(putFile, receivedFile, done) { process.stdout.write(`diff ${putFile} ${receivedFile}\n`); @@ -41,8 +38,7 @@ describe('aws-node-sdk v2auth query tests', function testSuite() { // AWS allows an expiry further in the future // 604810 seconds is higher that the Expires time limit: 604800 seconds // ( seven days) - itSkipAWS('should return an error code if expires header is too far ' + - 'in the future', done => { + itSkipAWS('should return an error code if expires header is too far ' + 'in the future', done => { const params = { Bucket: bucket, Expires: 604810 }; const url = s3.getSignedUrl('createBucket', params); provideRawOutput(['-verbose', '-X', 'PUT', url], httpCode => { @@ -51,17 +47,16 @@ describe('aws-node-sdk v2auth query tests', function testSuite() { }); }); - it('should return an error code if request occurs after expiry', - done => { - const params = { Bucket: bucket, Expires: 1 }; - const url = s3.getSignedUrl('createBucket', params); - setTimeout(() => { - provideRawOutput(['-verbose', '-X', 'PUT', url], httpCode => { - assert.strictEqual(httpCode, '403 FORBIDDEN'); - done(); - }); - }, 1500); - }); + it('should return an error code if request occurs after expiry', done => { + const params = { Bucket: bucket, Expires: 1 }; + const url = s3.getSignedUrl('createBucket', params); + setTimeout(() => { + provideRawOutput(['-verbose', '-X', 'PUT', url], httpCode => { + assert.strictEqual(httpCode, '403 FORBIDDEN'); + done(); + }); + }, 1500); + }); it('should create a bucket', done => { const params = { Bucket: bucket, Expires: almostOutsideTime }; @@ -72,39 +67,31 @@ describe('aws-node-sdk v2auth query tests', function testSuite() { }); }); - it('should put an object', done => { - const params = { Bucket: bucket, Key: 'key', Expires: - almostOutsideTime }; + const params = { Bucket: bucket, Key: 'key', Expires: almostOutsideTime }; const url = s3.getSignedUrl('putObject', params); - provideRawOutput(['-verbose', '-X', 'PUT', url, - '--upload-file', 'uploadFile'], httpCode => { + provideRawOutput(['-verbose', '-X', 'PUT', url, '--upload-file', 'uploadFile'], httpCode => { assert.strictEqual(httpCode, '200 OK'); done(); }); }); - it('should put an object with an acl setting and a storage class setting', - done => { - // This will test that upper case query parameters and lowercase - // query parameters (i.e., 'x-amz-acl') are being sorted properly. - // This will also test that query params that contain "x-amz-" - // are being added to the canonical headers list in our string - // to sign. - const params = { Bucket: bucket, Key: 'key', - ACL: 'public-read', StorageClass: 'STANDARD' }; - const url = s3.getSignedUrl('putObject', params); - provideRawOutput(['-verbose', '-X', 'PUT', url, - '--upload-file', 'uploadFile'], httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); - }); - + it('should put an object with an acl setting and a storage class setting', done => { + // This will test that upper case query parameters and lowercase + // query parameters (i.e., 'x-amz-acl') are being sorted properly. + // This will also test that query params that contain "x-amz-" + // are being added to the canonical headers list in our string + // to sign. 
+ const params = { Bucket: bucket, Key: 'key', ACL: 'public-read', StorageClass: 'STANDARD' }; + const url = s3.getSignedUrl('putObject', params); + provideRawOutput(['-verbose', '-X', 'PUT', url, '--upload-file', 'uploadFile'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + }); + }); it('should get an object', done => { - const params = { Bucket: bucket, Key: 'key', Expires: - almostOutsideTime }; + const params = { Bucket: bucket, Key: 'key', Expires: almostOutsideTime }; const url = s3.getSignedUrl('getObject', params); provideRawOutput(['-verbose', '-o', 'download', url], httpCode => { assert.strictEqual(httpCode, '200 OK'); @@ -119,24 +106,20 @@ describe('aws-node-sdk v2auth query tests', function testSuite() { }); it('should delete an object', done => { - const params = { Bucket: bucket, Key: 'key', Expires: - almostOutsideTime }; + const params = { Bucket: bucket, Key: 'key', Expires: almostOutsideTime }; const url = s3.getSignedUrl('deleteObject', params); - provideRawOutput(['-verbose', '-X', 'DELETE', url], - httpCode => { - assert.strictEqual(httpCode, '204 NO CONTENT'); - done(); - }); + provideRawOutput(['-verbose', '-X', 'DELETE', url], httpCode => { + assert.strictEqual(httpCode, '204 NO CONTENT'); + done(); + }); }); - it('should delete a bucket', done => { const params = { Bucket: bucket, Expires: almostOutsideTime }; const url = s3.getSignedUrl('deleteBucket', params); - provideRawOutput(['-verbose', '-X', 'DELETE', url], - httpCode => { - assert.strictEqual(httpCode, '204 NO CONTENT'); - done(); - }); + provideRawOutput(['-verbose', '-X', 'DELETE', url], httpCode => { + assert.strictEqual(httpCode, '204 NO CONTENT'); + done(); + }); }); }); diff --git a/tests/functional/aws-node-sdk/test/legacy/authV4QueryTests.js b/tests/functional/aws-node-sdk/test/legacy/authV4QueryTests.js index 5e23e8284f..8741a24a06 100644 --- a/tests/functional/aws-node-sdk/test/legacy/authV4QueryTests.js +++ b/tests/functional/aws-node-sdk/test/legacy/authV4QueryTests.js @@ -10,7 +10,6 @@ const provideRawOutput = require('../../lib/utility/provideRawOutput'); const random = Math.round(Math.random() * 100).toString(); const bucket = `mybucket-${random}`; - function diff(putFile, receivedFile, done) { process.stdout.write(`diff ${putFile} ${receivedFile}\n`); cp.spawn('diff', [putFile, receivedFile]).on('exit', code => { @@ -65,8 +64,7 @@ describe('aws-node-sdk v4auth query tests', function testSuite() { if (err) { assert.ifError(err); } - const bucketNames = xml.ListAllMyBucketsResult - .Buckets[0].Bucket.map(item => item.Name[0]); + const bucketNames = xml.ListAllMyBucketsResult.Buckets[0].Bucket.map(item => item.Name[0]); const whereIsMyBucket = bucketNames.indexOf(bucket); assert(whereIsMyBucket > -1); done(); @@ -78,38 +76,39 @@ describe('aws-node-sdk v4auth query tests', function testSuite() { it('should put an object', done => { const params = { Bucket: bucket, Key: 'key' }; const url = s3.getSignedUrl('putObject', params); - provideRawOutput(['-verbose', '-X', 'PUT', url, - '--upload-file', 'uploadFile'], httpCode => { + provideRawOutput(['-verbose', '-X', 'PUT', url, '--upload-file', 'uploadFile'], httpCode => { assert.strictEqual(httpCode, '200 OK'); done(); }); }); - it('should put an object with an acl setting and a storage class setting', - done => { - // This will test that upper case query parameters and lowercase - // query parameters (i.e., 'x-amz-acl') are being sorted properly. 
- // This will also test that query params that contain "x-amz-" - // are being added to the canonical headers list in our string - // to sign. - const params = { Bucket: bucket, Key: 'key', - ACL: 'public-read', StorageClass: 'STANDARD', - ContentType: 'text/plain' }; - const url = s3.getSignedUrl('putObject', params); - provideRawOutput(['-verbose', '-X', 'PUT', url, - '--upload-file', 'uploadFile'], httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + it('should put an object with an acl setting and a storage class setting', done => { + // This will test that upper case query parameters and lowercase + // query parameters (i.e., 'x-amz-acl') are being sorted properly. + // This will also test that query params that contain "x-amz-" + // are being added to the canonical headers list in our string + // to sign. + const params = { + Bucket: bucket, + Key: 'key', + ACL: 'public-read', + StorageClass: 'STANDARD', + ContentType: 'text/plain', + }; + const url = s3.getSignedUrl('putObject', params); + provideRawOutput(['-verbose', '-X', 'PUT', url, '--upload-file', 'uploadFile'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); }); + }); it('should put an object with native characters', done => { - const Key = 'key-pâtisserie-中文-español-English-हिन्दी-العربية-' + - 'português-বাংলা-русский-日本語-ਪੰਜਾਬੀ-한국어-தமிழ்'; + const Key = + 'key-pâtisserie-中文-español-English-हिन्दी-العربية-' + + 'português-বাংলা-русский-日本語-ਪੰਜਾਬੀ-한국어-தமிழ்'; const params = { Bucket: bucket, Key }; const url = s3.getSignedUrl('putObject', params); - provideRawOutput(['-verbose', '-X', 'PUT', url, - '--upload-file', 'uploadFile'], httpCode => { + provideRawOutput(['-verbose', '-X', 'PUT', url, '--upload-file', 'uploadFile'], httpCode => { assert.strictEqual(httpCode, '200 OK'); done(); }); @@ -125,8 +124,7 @@ describe('aws-node-sdk v4auth query tests', function testSuite() { if (err) { assert.ifError(err); } - assert.strictEqual(result.ListBucketResult - .Contents[0].Key[0], 'key'); + assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], 'key'); done(); }); }); @@ -152,36 +150,34 @@ describe('aws-node-sdk v4auth query tests', function testSuite() { it('should delete an object', done => { const params = { Bucket: bucket, Key: 'key' }; const url = s3.getSignedUrl('deleteObject', params); - provideRawOutput(['-verbose', '-X', 'DELETE', url], - httpCode => { - assert.strictEqual(httpCode, '204 NO CONTENT'); - done(); - }); + provideRawOutput(['-verbose', '-X', 'DELETE', url], httpCode => { + assert.strictEqual(httpCode, '204 NO CONTENT'); + done(); + }); }); it('should return a 204 on delete of an already deleted object', done => { const params = { Bucket: bucket, Key: 'key' }; const url = s3.getSignedUrl('deleteObject', params); - provideRawOutput(['-verbose', '-X', 'DELETE', url], - httpCode => { - assert.strictEqual(httpCode, '204 NO CONTENT'); - done(); - }); + provideRawOutput(['-verbose', '-X', 'DELETE', url], httpCode => { + assert.strictEqual(httpCode, '204 NO CONTENT'); + done(); + }); }); it('should return 204 on delete of non-existing object', done => { const params = { Bucket: bucket, Key: 'randomObject' }; const url = s3.getSignedUrl('deleteObject', params); - provideRawOutput(['-verbose', '-X', 'DELETE', url], - httpCode => { - assert.strictEqual(httpCode, '204 NO CONTENT'); - done(); - }); + provideRawOutput(['-verbose', '-X', 'DELETE', url], httpCode => { + assert.strictEqual(httpCode, '204 NO CONTENT'); + done(); + }); }); it('should delete an object with native 
characters', done => { - const Key = 'key-pâtisserie-中文-español-English-हिन्दी-العربية-' + - 'português-বাংলা-русский-日本語-ਪੰਜਾਬੀ-한국어-தமிழ்'; + const Key = + 'key-pâtisserie-中文-español-English-हिन्दी-العربية-' + + 'português-বাংলা-русский-日本語-ਪੰਜਾਬੀ-한국어-தமிழ்'; const params = { Bucket: bucket, Key }; const url = s3.getSignedUrl('deleteObject', params); provideRawOutput(['-verbose', '-X', 'DELETE', url], httpCode => { @@ -194,10 +190,9 @@ describe('aws-node-sdk v4auth query tests', function testSuite() { it('should delete a bucket', done => { const params = { Bucket: bucket }; const url = s3.getSignedUrl('deleteBucket', params); - provideRawOutput(['-verbose', '-X', 'DELETE', url], - httpCode => { - assert.strictEqual(httpCode, '204 NO CONTENT'); - done(); - }); + provideRawOutput(['-verbose', '-X', 'DELETE', url], httpCode => { + assert.strictEqual(httpCode, '204 NO CONTENT'); + done(); + }); }); }); diff --git a/tests/functional/aws-node-sdk/test/legacy/tests.js b/tests/functional/aws-node-sdk/test/legacy/tests.js index eb8322b745..50115def2f 100644 --- a/tests/functional/aws-node-sdk/test/legacy/tests.js +++ b/tests/functional/aws-node-sdk/test/legacy/tests.js @@ -66,38 +66,35 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { // createMPU test it('should create a multipart upload', done => { - s3.createMultipartUpload({ Bucket: bucket, Key: objectKey }, - (err, data) => { - if (err) { - return done(new Error( - `error initiating multipart upload: ${err}`)); - } - assert.strictEqual(data.Bucket, bucket); - assert.strictEqual(data.Key, objectKey); - assert.ok(data.UploadId); - multipartUploadData.firstUploadId = data.UploadId; - return done(); - }); + s3.createMultipartUpload({ Bucket: bucket, Key: objectKey }, (err, data) => { + if (err) { + return done(new Error(`error initiating multipart upload: ${err}`)); + } + assert.strictEqual(data.Bucket, bucket); + assert.strictEqual(data.Key, objectKey); + assert.ok(data.UploadId); + multipartUploadData.firstUploadId = data.UploadId; + return done(); + }); }); - it('should upload a part of a multipart upload to be aborted', + it('should upload a part of a multipart upload to be aborted', done => { // uploadpart test - done => { - const params = { - Bucket: bucket, - Key: objectKey, - PartNumber: 1, - UploadId: multipartUploadData.firstUploadId, - Body: firstBufferBody, - }; - s3.uploadPart(params, (err, data) => { - if (err) { - return done(new Error(`error uploading a part: ${err}`)); - } - assert.strictEqual(data.ETag, `"${calculatedFirstPartHash}"`); - return done(); - }); + const params = { + Bucket: bucket, + Key: objectKey, + PartNumber: 1, + UploadId: multipartUploadData.firstUploadId, + Body: firstBufferBody, + }; + s3.uploadPart(params, (err, data) => { + if (err) { + return done(new Error(`error uploading a part: ${err}`)); + } + assert.strictEqual(data.ETag, `"${calculatedFirstPartHash}"`); + return done(); }); + }); // abortMPU test it('should abort a multipart upload', done => { @@ -108,8 +105,7 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { }; s3.abortMultipartUpload(params, (err, data) => { if (err) { - return done(new Error( - `error aborting multipart upload: ${err}`)); + return done(new Error(`error aborting multipart upload: ${err}`)); } assert.ok(data); return done(); @@ -118,52 +114,47 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { // createMPU test it('should upload a part of a multipart upload', done => { - 
s3.createMultipartUpload({ Bucket: bucket, Key: 'toComplete' }, - (err, data) => { - if (err) { - return done(new Error( - `error initiating multipart upload: ${err}`)); - } - const uploadId = data.UploadId; - multipartUploadData.secondUploadId = data.UploadId; - const params = { - Bucket: bucket, - Key: 'toComplete', - PartNumber: 1, - UploadId: uploadId, - Body: firstBufferBody, - }; - s3.uploadPart(params, (err, data) => { - if (err) { - return done( - new Error(`error uploading a part: ${err}`)); - } - assert.strictEqual(data.ETag, - `"${calculatedFirstPartHash}"`); - return done(); - }); - return undefined; - }); - }); - - it('should upload a second part of a multipart upload', - // createMPU test - done => { + s3.createMultipartUpload({ Bucket: bucket, Key: 'toComplete' }, (err, data) => { + if (err) { + return done(new Error(`error initiating multipart upload: ${err}`)); + } + const uploadId = data.UploadId; + multipartUploadData.secondUploadId = data.UploadId; const params = { Bucket: bucket, Key: 'toComplete', - PartNumber: 2, - UploadId: multipartUploadData.secondUploadId, - Body: secondBufferBody, + PartNumber: 1, + UploadId: uploadId, + Body: firstBufferBody, }; s3.uploadPart(params, (err, data) => { if (err) { return done(new Error(`error uploading a part: ${err}`)); } - assert.strictEqual(data.ETag, `"${calculatedSecondPartHash}"`); + assert.strictEqual(data.ETag, `"${calculatedFirstPartHash}"`); return done(); }); + return undefined; + }); + }); + + it('should upload a second part of a multipart upload', done => { + // createMPU test + const params = { + Bucket: bucket, + Key: 'toComplete', + PartNumber: 2, + UploadId: multipartUploadData.secondUploadId, + Body: secondBufferBody, + }; + s3.uploadPart(params, (err, data) => { + if (err) { + return done(new Error(`error uploading a part: ${err}`)); + } + assert.strictEqual(data.ETag, `"${calculatedSecondPartHash}"`); + return done(); }); + }); // listparts test it('should list the parts of a multipart upload', done => { @@ -178,16 +169,13 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { } assert.strictEqual(data.Bucket, bucket); assert.strictEqual(data.Key, 'toComplete'); - assert.strictEqual(data.UploadId, multipartUploadData - .secondUploadId); + assert.strictEqual(data.UploadId, multipartUploadData.secondUploadId); assert.strictEqual(data.IsTruncated, false); assert.strictEqual(data.Parts[0].PartNumber, 1); - assert.strictEqual(data.Parts[0].ETag, - `"${calculatedFirstPartHash}"`); + assert.strictEqual(data.Parts[0].ETag, `"${calculatedFirstPartHash}"`); assert.strictEqual(data.Parts[0].Size, 5242880); assert.strictEqual(data.Parts[1].PartNumber, 2); - assert.strictEqual(data.Parts[1].ETag, - `"${calculatedSecondPartHash}"`); + assert.strictEqual(data.Parts[1].ETag, `"${calculatedSecondPartHash}"`); assert.strictEqual(data.Parts[1].Size, 5242880); // Must disable for now when running with Vault // since will need to pull actual ARN and canonicalId @@ -203,19 +191,22 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { return done(); }); - it('should return an error if do not provide correct ' + - // completempu test - 'xml when completing a multipart upload', done => { - const params = { - Bucket: bucket, - Key: 'toComplete', - UploadId: multipartUploadData.secondUploadId, - }; - s3.completeMultipartUpload(params, err => { - assert.strictEqual(err.code, 'MalformedXML'); - return done(); - }); - }); + it( + 'should return an error if do not provide correct ' + + // 
completempu test + 'xml when completing a multipart upload', + done => { + const params = { + Bucket: bucket, + Key: 'toComplete', + UploadId: multipartUploadData.secondUploadId, + }; + s3.completeMultipartUpload(params, err => { + assert.strictEqual(err.code, 'MalformedXML'); + return done(); + }); + } + ); // completempu test it('should complete a multipart upload', done => { @@ -254,21 +245,18 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { }; s3.getObject(params, (err, data) => { if (err) { - return done(new Error( - `error getting object put by mpu: ${err}`)); + return done(new Error(`error getting object put by mpu: ${err}`)); } - assert.strictEqual(data.ETag, - combinedETag); - const uploadedObj = Buffer.concat([firstBufferBody, - secondBufferBody]); + assert.strictEqual(data.ETag, combinedETag); + const uploadedObj = Buffer.concat([firstBufferBody, secondBufferBody]); assert.deepStrictEqual(data.Body, uploadedObj); return done(); }); }); const mpuRangeGetTests = [ - { it: 'should get a range from the first part of an object ' + - 'put by multipart upload', + { + it: 'should get a range from the first part of an object ' + 'put by multipart upload', range: 'bytes=0-9', contentLength: 10, contentRange: 'bytes 0-9/10485760', @@ -277,8 +265,8 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { // first part should just contain 0 expectedBuff: Buffer.alloc(10, 0), }, - { it: 'should get a range from the second part of an object ' + - 'put by multipart upload', + { + it: 'should get a range from the second part of an object ' + 'put by multipart upload', // The completed MPU byte count starts at 0, so the first part ends // at byte 5242879 and the second part begins at byte 5242880 range: 'bytes=5242880-5242889', @@ -287,8 +275,8 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { // A range from the second part should just contain 1 expectedBuff: Buffer.alloc(10, 1), }, - { it: 'should get a range that spans both parts of an object put ' + - 'by multipart upload', + { + it: 'should get a range that spans both parts of an object put ' + 'by multipart upload', range: 'bytes=5242875-5242884', contentLength: 10, contentRange: 'bytes 5242875-5242884/10485760', @@ -296,9 +284,11 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { // of 0 and 5 bytes of 1 expectedBuff: Buffer.allocUnsafe(10).fill(0, 0, 5).fill(1, 5, 10), }, - { it: 'should get a range from the second part of an object put by ' + - 'multipart upload and include the end even if the range ' + - 'requested goes beyond the actual object end', + { + it: + 'should get a range from the second part of an object put by ' + + 'multipart upload and include the end even if the range ' + + 'requested goes beyond the actual object end', // End is actually 10485759 since size is 10485760 range: 'bytes=10485750-10485790', contentLength: 10, @@ -324,35 +314,32 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { }; s3.getObject(params, (err, data) => { if (err) { - return done(new Error( - `error getting object range put by mpu: ${err}`)); + return done(new Error(`error getting object range put by mpu: ${err}`)); } assert.strictEqual(data.ContentLength, test.contentLength); assert.strictEqual(data.AcceptRanges, 'bytes'); assert.strictEqual(data.ContentRange, test.contentRange); - assert.strictEqual(data.ETag, - combinedETag); + assert.strictEqual(data.ETag, combinedETag); 
assert.deepStrictEqual(data.Body, test.expectedBuff); return done(); }); }); }); - it('should delete object created by multipart upload', + it('should delete object created by multipart upload', done => { // deleteObject test - done => { - const params = { - Bucket: bucket, - Key: 'toComplete', - }; - s3.deleteObject(params, (err, data) => { - if (err) { - return done(new Error(`error deleting object: ${err}`)); - } - assert.ok(data); - return done(); - }); + const params = { + Bucket: bucket, + Key: 'toComplete', + }; + s3.deleteObject(params, (err, data) => { + if (err) { + return done(new Error(`error deleting object: ${err}`)); + } + assert.ok(data); + return done(); }); + }); it('should put an object regularly (non-MPU)', done => { const params = { @@ -362,17 +349,14 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { }; s3.putObject(params, (err, data) => { if (err) { - return done(new Error( - `error putting object regularly: ${err}`)); + return done(new Error(`error putting object regularly: ${err}`)); } assert.ok(data); return done(); }); }); - it('should return InvalidRange if the range of the resource does ' + - 'not cover the byte range', - done => { + it('should return InvalidRange if the range of the resource does ' + 'not cover the byte range', done => { const params = { Bucket: bucket, Key: 'normalput', @@ -397,8 +381,7 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { } return s3.putObject(params, err => { if (err) { - return done(new Error( - `error putting object regularly: ${err}`)); + return done(new Error(`error putting object regularly: ${err}`)); } return done(); }); @@ -407,8 +390,7 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { afterEach(done => { s3.deleteObject(params, err => { if (err) { - return done(new Error( - `error deletting object regularly: ${err}`)); + return done(new Error(`error deletting object regularly: ${err}`)); } return s3.deleteBucket({ Bucket: bucketEmptyObj }, err => { if (err) { @@ -420,9 +402,7 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { }); testsRangeOnEmptyFile.forEach(test => { const validText = test.valid ? 
'InvalidRange error' : 'empty file'; - it(`should return ${validText} if get range ${test.range} on ` + - 'empty object', - done => { + it(`should return ${validText} if get range ${test.range} on ` + 'empty object', done => { const params = { Bucket: bucketEmptyObj, Key: 'emptyobj', @@ -430,12 +410,10 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { }; s3.getObject(params, (err, data) => { if (test.valid) { - assert.notEqual(err, null, 'Expected failure but ' + - 'got success'); + assert.notEqual(err, null, 'Expected failure but ' + 'got success'); assert.strictEqual(err.code, 'InvalidRange'); } else { - assert.equal(err, null, 'Expected success but ' + - `got failure: ${err}`); + assert.equal(err, null, 'Expected success but ' + `got failure: ${err}`); assert.strictEqual(data.Body.toString(), ''); } return done(); @@ -445,28 +423,30 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { }); const regularObjectRangeGetTests = [ - { it: 'should get a range for an object put without MPU', + { + it: 'should get a range for an object put without MPU', range: 'bytes=10-99', contentLength: 90, contentRange: 'bytes 10-99/200', // Buffer.fill(value, offset, end) expectedBuff: Buffer.allocUnsafe(90).fill(0, 0, 40).fill(1, 40), }, - { it: 'should get a range for an object using only an end ' + - 'offset in the request', + { + it: 'should get a range for an object using only an end ' + 'offset in the request', range: 'bytes=-10', contentLength: 10, contentRange: 'bytes 190-199/200', expectedBuff: Buffer.alloc(10, 1), }, - { it: 'should get a range for an object using only a start offset ' + - 'in the request', + { + it: 'should get a range for an object using only a start offset ' + 'in the request', range: 'bytes=190-', contentLength: 10, contentRange: 'bytes 190-199/200', expectedBuff: Buffer.alloc(10, 1), }, - { it: 'should get full object if range header is invalid', + { + it: 'should get full object if range header is invalid', range: 'bytes=-', contentLength: 200, // Since range header is invalid full object should be returned @@ -485,8 +465,7 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { }; s3.getObject(params, (err, data) => { if (err) { - return done(new Error( - `error getting object range: ${err}`)); + return done(new Error(`error getting object range: ${err}`)); } assert.strictEqual(data.AcceptRanges, 'bytes'); assert.strictEqual(data.ContentLength, test.contentLength); @@ -497,21 +476,20 @@ describe('aws-node-sdk test suite as registered user', function testSuite() { }); }); - it('should delete an object put without MPU', + it('should delete an object put without MPU', done => { // deleteObject test - done => { - const params = { - Bucket: bucket, - Key: 'normalput', - }; - s3.deleteObject(params, (err, data) => { - if (err) { - return done(new Error(`error deleting object: ${err}`)); - } - assert.ok(data); - return done(); - }); + const params = { + Bucket: bucket, + Key: 'normalput', + }; + s3.deleteObject(params, (err, data) => { + if (err) { + return done(new Error(`error deleting object: ${err}`)); + } + assert.ok(data); + return done(); }); + }); // deletebucket test it('should delete a bucket', done => { diff --git a/tests/functional/aws-node-sdk/test/mdSearch/basicSearch.js b/tests/functional/aws-node-sdk/test/mdSearch/basicSearch.js index 81f5ef02c7..088ea01eb2 100644 --- a/tests/functional/aws-node-sdk/test/mdSearch/basicSearch.js +++ 
b/tests/functional/aws-node-sdk/test/mdSearch/basicSearch.js @@ -15,90 +15,74 @@ runIfMongo('Basic search', () => { if (err) { return done(err); } - return s3Client.putObject({ Bucket: bucketName, Key: objectKey, - Metadata: userMetadata, Tagging: objectTagData }, err => { - if (err) { - return done(err); + return s3Client.putObject( + { Bucket: bucketName, Key: objectKey, Metadata: userMetadata, Tagging: objectTagData }, + err => { + if (err) { + return done(err); + } + return s3Client.putObject({ Bucket: bucketName, Key: hiddenKey, Tagging: hiddenTagData }, done); } - return s3Client.putObject({ Bucket: bucketName, - Key: hiddenKey, Tagging: hiddenTagData }, done); - }); + ); }); }); after(done => { - s3Client.deleteObjects({ Bucket: bucketName, Delete: { Objects: [ - { Key: objectKey }, - { Key: hiddenKey }], - } }, + s3Client.deleteObjects( + { Bucket: bucketName, Delete: { Objects: [{ Key: objectKey }, { Key: hiddenKey }] } }, err => { if (err) { return done(err); } return s3Client.deleteBucket({ Bucket: bucketName }, done); - }); + } + ); }); it('should list object with searched for system metadata', done => { const encodedSearch = encodeURIComponent(`key="${objectKey}"`); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, objectKey, done); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, objectKey, done); }); it('should list object with regex searched for system metadata', done => { const encodedSearch = encodeURIComponent('key LIKE "find.*"'); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, objectKey, done); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, objectKey, done); }); - it('should list object with regex searched for system metadata with flags', - done => { + it('should list object with regex searched for system metadata with flags', done => { const encodedSearch = encodeURIComponent('key LIKE "/FIND.*/i"'); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, objectKey, done); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, objectKey, done); }); it('should return empty when no object match regex', done => { const encodedSearch = encodeURIComponent('key LIKE "/NOTFOUND.*/i"'); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, null, done); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, null, done); }); it('should list object with searched for user metadata', done => { - const encodedSearch = - encodeURIComponent(`x-amz-meta-food="${userMetadata.food}"`); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, objectKey, done); + const encodedSearch = encodeURIComponent(`x-amz-meta-food="${userMetadata.food}"`); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, objectKey, done); }); it('should list object with searched for tag metadata', done => { - const encodedSearch = - encodeURIComponent('tags.item-type="main"'); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, objectKey, done); + const encodedSearch = encodeURIComponent('tags.item-type="main"'); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, objectKey, done); }); it('should return empty listing when no object has user md', done => { - const encodedSearch = - encodeURIComponent('x-amz-meta-food="nosuchfood"'); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, null, done); + const encodedSearch = 
encodeURIComponent('x-amz-meta-food="nosuchfood"'); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, null, done); }); describe('search when overwrite object', () => { before(done => { - s3Client.putObject({ Bucket: bucketName, Key: objectKey, - Metadata: updatedUserMetadata }, done); + s3Client.putObject({ Bucket: bucketName, Key: objectKey, Metadata: updatedUserMetadata }, done); }); - it('should list object with searched for updated user metadata', - done => { - const encodedSearch = - encodeURIComponent('x-amz-meta-food' + - `="${updatedUserMetadata.food}"`); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, objectKey, done); - }); + it('should list object with searched for updated user metadata', done => { + const encodedSearch = encodeURIComponent('x-amz-meta-food' + `="${updatedUserMetadata.food}"`); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, objectKey, done); + }); }); }); @@ -114,8 +98,7 @@ runIfMongo('Search when no objects in bucket', () => { it('should return empty listing when no objects in bucket', done => { const encodedSearch = encodeURIComponent(`key="${objectKey}"`); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, null, done); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, null, done); }); }); @@ -135,7 +118,6 @@ runIfMongo('Invalid regular expression searches', () => { code: 'InvalidArgument', message: 'Invalid sql where clause sent as search query', }; - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, testError, done); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, testError, done); }); }); diff --git a/tests/functional/aws-node-sdk/test/mdSearch/utils/helpers.js b/tests/functional/aws-node-sdk/test/mdSearch/utils/helpers.js index 0afdd361fa..a1bff32450 100644 --- a/tests/functional/aws-node-sdk/test/mdSearch/utils/helpers.js +++ b/tests/functional/aws-node-sdk/test/mdSearch/utils/helpers.js @@ -8,7 +8,9 @@ function _deleteVersionList(s3Client, versionList, bucket, callback) { const params = { Bucket: bucket, Delete: { Objects: [] } }; versionList.forEach(version => { params.Delete.Objects.push({ - Key: version.Key, VersionId: version.VersionId }); + Key: version.Key, + VersionId: version.VersionId, + }); }); return s3Client.deleteObjects(params, callback); @@ -16,17 +18,14 @@ function _deleteVersionList(s3Client, versionList, bucket, callback) { const testUtils = {}; -testUtils.runIfMongo = process.env.S3METADATA === 'mongodb' ? - describe : describe.skip; +testUtils.runIfMongo = process.env.S3METADATA === 'mongodb' ? 
describe : describe.skip; -testUtils.runAndCheckSearch = (s3Client, bucketName, encodedSearch, listVersions, - testResult, done) => { +testUtils.runAndCheckSearch = (s3Client, bucketName, encodedSearch, listVersions, testResult, done) => { let searchRequest; if (listVersions) { searchRequest = s3Client.listObjectVersions({ Bucket: bucketName }); searchRequest.on('build', () => { - searchRequest.httpRequest.path = - `/${bucketName}?search=${encodedSearch}&&versions`; + searchRequest.httpRequest.path = `/${bucketName}?search=${encodedSearch}&&versions`; }); searchRequest.on('success', res => { if (testResult) { @@ -50,8 +49,7 @@ testUtils.runAndCheckSearch = (s3Client, bucketName, encodedSearch, listVersions } else { searchRequest = s3Client.listObjects({ Bucket: bucketName }); searchRequest.on('build', () => { - searchRequest.httpRequest.path = - `/${bucketName}?search=${encodedSearch}`; + searchRequest.httpRequest.path = `/${bucketName}?search=${encodedSearch}`; }); searchRequest.on('success', res => { if (testResult) { @@ -75,24 +73,25 @@ testUtils.runAndCheckSearch = (s3Client, bucketName, encodedSearch, listVersions }; testUtils.removeAllVersions = (s3Client, bucket, callback) => { - async.waterfall([ - cb => s3Client.listObjectVersions({ Bucket: bucket }, cb), - (data, cb) => _deleteVersionList(s3Client, data.DeleteMarkers, bucket, - err => cb(err, data)), - (data, cb) => _deleteVersionList(s3Client, data.Versions, bucket, - err => cb(err, data)), - (data, cb) => { - if (data.IsTruncated) { - const params = { - Bucket: bucket, - KeyMarker: data.NextKeyMarker, - VersionIdMarker: data.NextVersionIdMarker, - }; - return this.removeAllVersions(params, cb); - } - return cb(); - }, - ], callback); + async.waterfall( + [ + cb => s3Client.listObjectVersions({ Bucket: bucket }, cb), + (data, cb) => _deleteVersionList(s3Client, data.DeleteMarkers, bucket, err => cb(err, data)), + (data, cb) => _deleteVersionList(s3Client, data.Versions, bucket, err => cb(err, data)), + (data, cb) => { + if (data.IsTruncated) { + const params = { + Bucket: bucket, + KeyMarker: data.NextKeyMarker, + VersionIdMarker: data.NextVersionIdMarker, + }; + return this.removeAllVersions(params, cb); + } + return cb(); + }, + ], + callback + ); }; module.exports = testUtils; diff --git a/tests/functional/aws-node-sdk/test/mdSearch/versionEnabledSearch.js b/tests/functional/aws-node-sdk/test/mdSearch/versionEnabledSearch.js index 82381abf08..c4391ac167 100644 --- a/tests/functional/aws-node-sdk/test/mdSearch/versionEnabledSearch.js +++ b/tests/functional/aws-node-sdk/test/mdSearch/versionEnabledSearch.js @@ -1,6 +1,5 @@ const s3Client = require('./utils/s3SDK'); -const { runAndCheckSearch, removeAllVersions, runIfMongo } = - require('./utils/helpers'); +const { runAndCheckSearch, removeAllVersions, runIfMongo } = require('./utils/helpers'); const userMetadata = { food: 'pizza' }; const updatedMetadata = { food: 'pineapple' }; @@ -17,52 +16,42 @@ runIfMongo('Search in version enabled bucket', () => { if (err) { return done(err); } - return s3Client.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration }, err => { + return s3Client.putBucketVersioning({ Bucket: bucketName, VersioningConfiguration }, err => { if (err) { return done(err); } - return s3Client.putObject({ Bucket: bucketName, - Key: masterKey, Metadata: userMetadata }, done); + return s3Client.putObject({ Bucket: bucketName, Key: masterKey, Metadata: userMetadata }, done); }); }); }); after(done => { - removeAllVersions(s3Client, bucketName, - err => 
{ - if (err) { - return done(err); - } - return s3Client.deleteBucket({ Bucket: bucketName }, done); - }); + removeAllVersions(s3Client, bucketName, err => { + if (err) { + return done(err); + } + return s3Client.deleteBucket({ Bucket: bucketName }, done); + }); }); it('should list just master object with searched for metadata by default', done => { - const encodedSearch = - encodeURIComponent(`x-amz-meta-food="${userMetadata.food}"`); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, masterKey, done); + const encodedSearch = encodeURIComponent(`x-amz-meta-food="${userMetadata.food}"`); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, masterKey, done); }); describe('New version overwrite', () => { before(done => { - s3Client.putObject({ Bucket: bucketName, - Key: masterKey, Metadata: updatedMetadata }, done); + s3Client.putObject({ Bucket: bucketName, Key: masterKey, Metadata: updatedMetadata }, done); }); it('should list just master object with updated metadata by default', done => { - const encodedSearch = - encodeURIComponent(`x-amz-meta-food="${updatedMetadata.food}"`); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, false, masterKey, done); + const encodedSearch = encodeURIComponent(`x-amz-meta-food="${updatedMetadata.food}"`); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, false, masterKey, done); }); it('should list all object versions that met search query while specifying versions param', done => { - const encodedSearch = - encodeURIComponent('x-amz-meta-food LIKE "pi.*"'); - return runAndCheckSearch(s3Client, bucketName, - encodedSearch, true, [masterKey, masterKey], done); + const encodedSearch = encodeURIComponent('x-amz-meta-food LIKE "pi.*"'); + return runAndCheckSearch(s3Client, bucketName, encodedSearch, true, [masterKey, masterKey], done); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/acl/aclAwsVersioning.js b/tests/functional/aws-node-sdk/test/multipleBackend/acl/aclAwsVersioning.js index 840051d9ab..58e5d085a7 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/acl/aclAwsVersioning.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/acl/aclAwsVersioning.js @@ -56,21 +56,16 @@ const testAcp = new _AccessControlPolicy(ownerParams); testAcp.addGrantee('Group', constants.publicId, 'READ'); function putObjectAcl(s3, key, versionId, acp, cb) { - s3.putObjectAcl({ Bucket: bucket, Key: key, AccessControlPolicy: acp, - VersionId: versionId }, err => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object acl, got error ${err}`); + s3.putObjectAcl({ Bucket: bucket, Key: key, AccessControlPolicy: acp, VersionId: versionId }, err => { + assert.strictEqual(err, null, 'Expected success ' + `putting object acl, got error ${err}`); cb(); }); } function putObjectAndAcl(s3, key, body, acp, cb) { - s3.putObject({ Bucket: bucket, Key: key, Body: body }, - (err, putData) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); - putObjectAcl(s3, key, putData.VersionId, acp, () => - cb(null, putData.VersionId)); + s3.putObject({ Bucket: bucket, Key: key, Body: body }, (err, putData) => { + assert.strictEqual(err, null, 'Expected success ' + `putting object, got error ${err}`); + putObjectAcl(s3, key, putData.VersionId, acp, () => cb(null, putData.VersionId)); }); } @@ -88,29 +83,29 @@ function putVersionsWithAclToAws(s3, key, data, acps, cb) { throw new Error('length of data and acp arrays must be the 
same'); } enableVersioning(s3, bucket, () => { - async.timesLimit(data.length, 1, (i, next) => { - putObjectAndAcl(s3, key, data[i], acps[i], next); - }, (err, results) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting versions with acl, got error ${err}`); - cb(null, results); - }); + async.timesLimit( + data.length, + 1, + (i, next) => { + putObjectAndAcl(s3, key, data[i], acps[i], next); + }, + (err, results) => { + assert.strictEqual(err, null, 'Expected success ' + `putting versions with acl, got error ${err}`); + cb(null, results); + } + ); }); } function getObjectAndAssertAcl(s3, params, cb) { - const { bucket, key, versionId, body, expectedVersionId, expectedResult } - = params; - getAndAssertResult(s3, { bucket, key, versionId, expectedVersionId, body }, - () => { - s3.getObjectAcl({ Bucket: bucket, Key: key, VersionId: versionId }, - (err, data) => { - assert.strictEqual(err, null, 'Expected success ' + - `getting object acl, got error ${err}`); - assert.deepEqual(data, expectedResult); - cb(); - }); + const { bucket, key, versionId, body, expectedVersionId, expectedResult } = params; + getAndAssertResult(s3, { bucket, key, versionId, expectedVersionId, body }, () => { + s3.getObjectAcl({ Bucket: bucket, Key: key, VersionId: versionId }, (err, data) => { + assert.strictEqual(err, null, 'Expected success ' + `getting object acl, got error ${err}`); + assert.deepEqual(data, expectedResult); + cb(); }); + }); } /** getObjectsAndAssertAcls - enable versioning and put multiple versions @@ -124,23 +119,28 @@ function getObjectAndAssertAcl(s3, params, cb) { * @param {function} cb - callback * @return {undefined} - and call cb */ -function getObjectsAndAssertAcls(s3, key, versionIds, expectedData, - expectedAcps, cb) { - async.timesLimit(versionIds.length, 1, (i, next) => { - const versionId = versionIds[i]; - const body = expectedData[i]; - const expectedResult = expectedAcps[i]; - getObjectAndAssertAcl(s3, { bucket, key, versionId, body, - expectedResult, expectedVersionId: versionId }, next); - }, err => { - assert.strictEqual(err, null, 'Expected success ' + - `getting object acls, got error ${err}`); - cb(); - }); +function getObjectsAndAssertAcls(s3, key, versionIds, expectedData, expectedAcps, cb) { + async.timesLimit( + versionIds.length, + 1, + (i, next) => { + const versionId = versionIds[i]; + const body = expectedData[i]; + const expectedResult = expectedAcps[i]; + getObjectAndAssertAcl( + s3, + { bucket, key, versionId, body, expectedResult, expectedVersionId: versionId }, + next + ); + }, + err => { + assert.strictEqual(err, null, 'Expected success ' + `getting object acls, got error ${err}`); + cb(); + } + ); } -describeSkipIfNotMultiple('AWS backend put/get object acl with versioning', -function testSuite() { +describeSkipIfNotMultiple('AWS backend put/get object acl with versioning', function testSuite() { this.timeout(30000); withV4(sigCfg => { let bucketUtil; @@ -150,106 +150,120 @@ function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: awsLocation, - }, - }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: awsLocation, + }, + }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + 
throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting bucket: ' + - `${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw err; + }); }); - it('versioning not configured: should put/get acl successfully when ' + - 'versioning not configured', done => { + it('versioning not configured: should put/get acl successfully when ' + 'versioning not configured', done => { const key = `somekey-${genUniqID()}`; putObjectAndAcl(s3, key, someBody, testAcp, (err, versionId) => { assert.strictEqual(versionId, undefined); - getObjectAndAssertAcl(s3, { bucket, key, body: someBody, - expectedResult: testAcp }, done); + getObjectAndAssertAcl(s3, { bucket, key, body: someBody, expectedResult: testAcp }, done); }); }); - it('versioning suspended then enabled: should put/get acl on null ' + - 'version successfully even when latest version is not null version', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, [undefined], - err => next(err)), - next => putVersionsToAws(s3, bucket, key, [someBody], - err => next(err)), - next => putObjectAcl(s3, key, 'null', testAcp, next), - next => getObjectAndAssertAcl(s3, { bucket, key, body: '', - versionId: 'null', expectedResult: testAcp, - expectedVersionId: 'null' }, next), - ], done); - }); + it( + 'versioning suspended then enabled: should put/get acl on null ' + + 'version successfully even when latest version is not null version', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, [undefined], err => next(err)), + next => putVersionsToAws(s3, bucket, key, [someBody], err => next(err)), + next => putObjectAcl(s3, key, 'null', testAcp, next), + next => + getObjectAndAssertAcl( + s3, + { + bucket, + key, + body: '', + versionId: 'null', + expectedResult: testAcp, + expectedVersionId: 'null', + }, + next + ), + ], + done + ); + } + ); - it('versioning enabled: should get correct acl using version IDs', - done => { + it('versioning enabled: should get correct acl using version IDs', done => { const key = `somekey-${genUniqID()}`; - const acps = ['READ', 'FULL_CONTROL', 'READ_ACP', 'WRITE_ACP'] - .map(perm => { + const acps = ['READ', 'FULL_CONTROL', 'READ_ACP', 'WRITE_ACP'].map(perm => { const acp = new _AccessControlPolicy(ownerParams); acp.addGrantee('Group', constants.publicId, perm); return acp; }); const data = [...Array(acps.length).keys()].map(i => i.toString()); const versionIds = ['null']; - async.waterfall([ - next => putObjectAndAcl(s3, key, data[0], acps[0], - () => next()), - next => putVersionsWithAclToAws(s3, key, data.slice(1), - acps.slice(1), next), - (ids, next) => { - versionIds.push(...ids); - next(); - }, - next => getObjectsAndAssertAcls(s3, key, versionIds, data, acps, - next), - ], done); + async.waterfall( + [ + next => putObjectAndAcl(s3, key, data[0], acps[0], () => next()), + next => putVersionsWithAclToAws(s3, key, data.slice(1), acps.slice(1), next), + (ids, next) => { + versionIds.push(...ids); + next(); + }, + next => getObjectsAndAssertAcls(s3, key, 
versionIds, data, acps, next), + ], + done + ); }); - it('versioning enabled: should get correct acl when getting ' + - 'without version ID', done => { + it('versioning enabled: should get correct acl when getting ' + 'without version ID', done => { const key = `somekey-${genUniqID()}`; - const acps = ['READ', 'FULL_CONTROL', 'READ_ACP', 'WRITE_ACP'] - .map(perm => { + const acps = ['READ', 'FULL_CONTROL', 'READ_ACP', 'WRITE_ACP'].map(perm => { const acp = new _AccessControlPolicy(ownerParams); acp.addGrantee('Group', constants.publicId, perm); return acp; }); const data = [...Array(acps.length).keys()].map(i => i.toString()); const versionIds = ['null']; - async.waterfall([ - next => putObjectAndAcl(s3, key, data[0], acps[0], - () => next()), - next => putVersionsWithAclToAws(s3, key, data.slice(1), - acps.slice(1), next), - (ids, next) => { - versionIds.push(...ids); - next(); - }, - next => getObjectAndAssertAcl(s3, { bucket, key, - expectedVersionId: versionIds[3], - expectedResult: acps[3], body: data[3] }, next), - ], done); + async.waterfall( + [ + next => putObjectAndAcl(s3, key, data[0], acps[0], () => next()), + next => putVersionsWithAclToAws(s3, key, data.slice(1), acps.slice(1), next), + (ids, next) => { + versionIds.push(...ids); + next(); + }, + next => + getObjectAndAssertAcl( + s3, + { bucket, key, expectedVersionId: versionIds[3], expectedResult: acps[3], body: data[3] }, + next + ), + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js index 4cea3c7eb4..86ae108643 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/delete.js @@ -30,58 +30,80 @@ describeSkipIfNotMultiple('Multiple backend delete', () => { process.stdout.write('Creating bucket\n'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }) - .then(() => { - process.stdout.write('Putting object to mem\n'); - const params = { Bucket: bucket, Key: memObject, Body: body, - Metadata: { 'scal-location-constraint': memLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting object to file\n'); - const params = { Bucket: bucket, Key: fileObject, Body: body, - Metadata: { 'scal-location-constraint': fileLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting object to AWS\n'); - const params = { Bucket: bucket, Key: awsObject, Body: body, - Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting 0-byte object to AWS\n'); - const params = { Bucket: bucket, Key: emptyObject, - Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting large object to AWS\n'); - const params = { Bucket: bucket, Key: bigObject, - Body: bigBody, - Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting object to AWS\n'); - const params = { Bucket: bucket, Key: mismatchObject, - Body: body, Metadata: - { 'scal-location-constraint': awsLocationMismatch } }; - return 
s3.putObject(params).promise(); - }) - .catch(err => { - process.stdout.write(`Error putting objects: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }) + .then(() => { + process.stdout.write('Putting object to mem\n'); + const params = { + Bucket: bucket, + Key: memObject, + Body: body, + Metadata: { 'scal-location-constraint': memLocation }, + }; + return s3.putObject(params).promise(); + }) + .then(() => { + process.stdout.write('Putting object to file\n'); + const params = { + Bucket: bucket, + Key: fileObject, + Body: body, + Metadata: { 'scal-location-constraint': fileLocation }, + }; + return s3.putObject(params).promise(); + }) + .then(() => { + process.stdout.write('Putting object to AWS\n'); + const params = { + Bucket: bucket, + Key: awsObject, + Body: body, + Metadata: { 'scal-location-constraint': awsLocation }, + }; + return s3.putObject(params).promise(); + }) + .then(() => { + process.stdout.write('Putting 0-byte object to AWS\n'); + const params = { + Bucket: bucket, + Key: emptyObject, + Metadata: { 'scal-location-constraint': awsLocation }, + }; + return s3.putObject(params).promise(); + }) + .then(() => { + process.stdout.write('Putting large object to AWS\n'); + const params = { + Bucket: bucket, + Key: bigObject, + Body: bigBody, + Metadata: { 'scal-location-constraint': awsLocation }, + }; + return s3.putObject(params).promise(); + }) + .then(() => { + process.stdout.write('Putting object to AWS\n'); + const params = { + Bucket: bucket, + Key: mismatchObject, + Body: body, + Metadata: { 'scal-location-constraint': awsLocationMismatch }, + }; + return s3.putObject(params).promise(); + }) + .catch(err => { + process.stdout.write(`Error putting objects: ${err}\n`); + throw err; + }); }); after(() => { process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket) - .catch(err => { + return bucketUtil.deleteOne(bucket).catch(err => { process.stdout.write(`Error deleting bucket: ${err}\n`); throw err; }); @@ -89,67 +111,54 @@ describeSkipIfNotMultiple('Multiple backend delete', () => { it('should delete object from mem', done => { s3.deleteObject({ Bucket: bucket, Key: memObject }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.strictEqual(err, null, `Expected success, got error ${JSON.stringify(err)}`); s3.getObject({ Bucket: bucket, Key: memObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); + assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + 'error but got success'); done(); }); }); }); it('should delete object from file', done => { s3.deleteObject({ Bucket: bucket, Key: fileObject }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.strictEqual(err, null, `Expected success, got error ${JSON.stringify(err)}`); s3.getObject({ Bucket: bucket, Key: fileObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); + assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + 'error but got success'); done(); }); }); }); it('should delete object from AWS', done => { s3.deleteObject({ Bucket: bucket, Key: awsObject }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.strictEqual(err, null, `Expected success, got error ${JSON.stringify(err)}`); 
s3.getObject({ Bucket: bucket, Key: awsObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); + assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + 'error but got success'); done(); }); }); }); it('should delete 0-byte object from AWS', done => { s3.deleteObject({ Bucket: bucket, Key: emptyObject }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.strictEqual(err, null, `Expected success, got error ${JSON.stringify(err)}`); s3.getObject({ Bucket: bucket, Key: emptyObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); + assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + 'error but got success'); done(); }); }); }); it('should delete large object from AWS', done => { s3.deleteObject({ Bucket: bucket, Key: bigObject }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.strictEqual(err, null, `Expected success, got error ${JSON.stringify(err)}`); s3.getObject({ Bucket: bucket, Key: bigObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); + assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + 'error but got success'); done(); }); }); }); - it('should delete object from AWS location with bucketMatch set to ' + - 'false', done => { + it('should delete object from AWS location with bucketMatch set to ' + 'false', done => { s3.deleteObject({ Bucket: bucket, Key: mismatchObject }, err => { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); s3.getObject({ Bucket: bucket, Key: mismatchObject }, err => { - assert.strictEqual(err.code, 'NoSuchKey', - 'Expected error but got success'); + assert.strictEqual(err.code, 'NoSuchKey', 'Expected error but got success'); done(); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAwsVersioning.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAwsVersioning.js index 9445721922..7886b2ea1a 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAwsVersioning.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAwsVersioning.js @@ -37,22 +37,23 @@ const _deleteResultSchema = { deleteDeleteMarker: [true, true, true], }; -const [nonVersionedDelete, newDeleteMarker, deleteVersion, deleteDeleteMarker] - = Object.keys(_deleteResultSchema); +const [nonVersionedDelete, newDeleteMarker, deleteVersion, deleteDeleteMarker] = Object.keys(_deleteResultSchema); function _assertDeleteResult(result, resultType, requestVersionId) { if (!_deleteResultSchema[resultType]) { throw new Error(`undefined result type "${resultType}"`); } - const [expectVersionId, matchReqVersionId, expectDeleteMarker] = - _deleteResultSchema[resultType]; + const [expectVersionId, matchReqVersionId, expectDeleteMarker] = _deleteResultSchema[resultType]; if (expectVersionId && matchReqVersionId) { assert.strictEqual(result.VersionId, requestVersionId); } else if (expectVersionId) { assert(result.VersionId, 'expected version id in result'); } else { - assert.strictEqual(result.VersionId, undefined, - `did not expect version id in result, got "${result.VersionId}"`); + assert.strictEqual( + result.VersionId, + undefined, + `did not expect version id in result, got "${result.VersionId}"` + ); } if (expectDeleteMarker) { 
assert.strictEqual(result.DeleteMarker, true); @@ -63,16 +64,14 @@ function _assertDeleteResult(result, resultType, requestVersionId) { function delAndAssertResult(s3, params, cb) { const { bucket, key, versionId, resultType, resultError } = params; - return s3.deleteObject({ Bucket: bucket, Key: key, VersionId: - versionId }, (err, result) => { + return s3.deleteObject({ Bucket: bucket, Key: key, VersionId: versionId }, (err, result) => { if (resultError) { assert(err, `expected ${resultError} but found no error`); assert.strictEqual(err.code, resultError); assert.strictEqual(err.statusCode, errors[resultError].code); return cb(null); } - assert.strictEqual(err, null, 'Expected success ' + - `deleting object, got error ${err}`); + assert.strictEqual(err, null, 'Expected success ' + `deleting object, got error ${err}`); _assertDeleteResult(result, resultType, versionId); return cb(null, result.VersionId); }); @@ -96,8 +95,7 @@ function delObjectsAndAssertResult(s3, params, cb) { assert.strictEqual(err.statusCode, errors[resultError].code); return cb(null); } - assert.strictEqual(err, null, 'Expected success ' + - `deleting object, got error ${err}`); + assert.strictEqual(err, null, 'Expected success ' + `deleting object, got error ${err}`); const result = res.Deleted[0]; _assertDeleteResult(result, resultType, versionId); return cb(null, result.VersionId); @@ -105,27 +103,30 @@ function delObjectsAndAssertResult(s3, params, cb) { } function _createDeleteMarkers(s3, bucket, key, count, cb) { - return async.timesSeries(count, - (i, next) => delAndAssertResult(s3, { bucket, key, - resultType: newDeleteMarker }, next), - cb); + return async.timesSeries( + count, + (i, next) => delAndAssertResult(s3, { bucket, key, resultType: newDeleteMarker }, next), + cb + ); } function _deleteDeleteMarkers(s3, bucket, key, deleteMarkerVids, cb) { - return async.mapSeries(deleteMarkerVids, (versionId, next) => { - delAndAssertResult(s3, { bucket, key, versionId, - resultType: deleteDeleteMarker }, next); - }, () => cb()); + return async.mapSeries( + deleteMarkerVids, + (versionId, next) => { + delAndAssertResult(s3, { bucket, key, versionId, resultType: deleteDeleteMarker }, next); + }, + () => cb() + ); } function _getAssertDeleted(s3, params, cb) { const { key, versionId, errorCode } = params; - return s3.getObject({ Bucket: bucket, Key: key, VersionId: versionId }, - err => { - assert.strictEqual(err.code, errorCode); - assert.strictEqual(err.statusCode, 404); - return cb(); - }); + return s3.getObject({ Bucket: bucket, Key: key, VersionId: versionId }, err => { + assert.strictEqual(err.code, errorCode); + assert.strictEqual(err.statusCode, 404); + return cb(); + }); } function _awsGetAssertDeleted(params, cb) { @@ -137,552 +138,686 @@ function _awsGetAssertDeleted(params, cb) { }); } -describeSkipIfNotMultiple('AWS backend delete object w. versioning: ' + - 'using object location constraint', function testSuite() { - this.timeout(120000); - withV4(sigCfg => { - let bucketUtil; - let s3; - beforeEach(() => { - process.stdout.write('Creating bucket\n'); - bucketUtil = new BucketUtility('default', sigCfg); - s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; +describeSkipIfNotMultiple( + 'AWS backend delete object w. 
versioning: ' + 'using object location constraint', + function testSuite() { + this.timeout(120000); + withV4(sigCfg => { + let bucketUtil; + let s3; + beforeEach(() => { + process.stdout.write('Creating bucket\n'); + bucketUtil = new BucketUtility('default', sigCfg); + s3 = bucketUtil.s3; + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); - }); - afterEach(() => { - process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting bucket: ' + - `${err}\n`); - throw err; + afterEach(() => { + process.stdout.write('Emptying bucket\n'); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw err; + }); }); - }); - - it('versioning not configured: if specifying "null" version, should ' + - 'delete specific version in AWS backend', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putToAwsBackend(s3, bucket, key, someBody, - err => next(err)), - next => awsGetLatestVerId(key, someBody, next), - (awsVerId, next) => delAndAssertResult(s3, { bucket, - key, versionId: 'null', resultType: deleteVersion }, - err => next(err, awsVerId)), - (awsVerId, next) => { - const wanted = isCEPH ? 'NoSuchKey' : 'NoSuchVersion'; - _awsGetAssertDeleted({ key, - versionId: awsVerId, errorCode: wanted }, next); - }, - ], done); - }); - - it('versioning not configured: specifying any version id other ' + - 'than null should not result in its deletion in AWS backend', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putToAwsBackend(s3, bucket, key, someBody, - err => next(err)), - next => awsGetLatestVerId(key, someBody, next), - (awsVerId, next) => delAndAssertResult(s3, { bucket, - key, versionId: 'awsVerIdWhichIsLongerThan40BytesButNotLongEnough', - resultError: 'InvalidArgument' }, err => next(err, awsVerId)), - (awsVerId, next) => awsGetLatestVerId(key, someBody, - (err, resultVid) => { - assert.strictEqual(resultVid, awsVerId); - next(); - }), - ], done); - }); - - it('versioning suspended: should delete a specific version in AWS ' + - 'backend successfully', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, [someBody], - err => next(err)), - next => awsGetLatestVerId(key, someBody, next), - (awsVerId, next) => delAndAssertResult(s3, { bucket, - key, versionId: 'null', resultType: deleteVersion }, - err => next(err, awsVerId)), - (awsVerId, next) => { - const wanted = isCEPH ? 
'NoSuchKey' : 'NoSuchVersion'; - _awsGetAssertDeleted({ key, - versionId: awsVerId, errorCode: wanted }, next); - }, - ], done); - }); - - it('versioning enabled: should delete a specific version in AWS ' + - 'backend successfully', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putVersionsToAws(s3, bucket, key, [someBody], - (err, versionIds) => next(err, versionIds[0])), - (s3vid, next) => awsGetLatestVerId(key, someBody, - (err, awsVid) => next(err, s3vid, awsVid)), - (s3VerId, awsVerId, next) => delAndAssertResult(s3, { bucket, - key, versionId: s3VerId, resultType: deleteVersion }, - err => next(err, awsVerId)), - (awsVerId, next) => { - const wanted = isCEPH ? 'NoSuchKey' : 'NoSuchVersion'; - _awsGetAssertDeleted({ key, - versionId: awsVerId, errorCode: wanted }, next); - }, - ], done); - }); - - it('versioning not configured: deleting existing object should ' + - 'not return version id or x-amz-delete-marker: true but should ' + - 'create a delete marker in aws ', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putToAwsBackend(s3, bucket, key, someBody, - err => next(err)), - next => delAndAssertResult(s3, { bucket, key, - resultType: nonVersionedDelete }, err => next(err)), - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, - next), - ], done); - }); - - it('versioning suspended: should create a delete marker in s3 ' + - 'and aws successfully when deleting existing object', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, [someBody], - err => next(err)), - next => delAndAssertResult(s3, { bucket, key, resultType: - newDeleteMarker }, err => next(err)), - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, - next), - ], done); - }); - - // NOTE: Normal deletes when versioning is suspended create a - // delete marker with the version id "null", which overwrites an - // existing null version in s3 metadata. - it('versioning suspended: creating a delete marker will overwrite an ' + - 'existing null version that is the latest version in s3 metadata,' + - ' but the data of the first null version will remain in AWS', - function itF(done) { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, [someBody], - err => next(err)), - next => awsGetLatestVerId(key, someBody, next), - (awsNullVid, next) => { - this.test.awsNullVid = awsNullVid; - next(); - }, - // following call should generate a delete marker - next => delAndAssertResult(s3, { bucket, key, resultType: - newDeleteMarker }, next), - // delete delete marker - (dmVid, next) => delAndAssertResult(s3, { bucket, key, - versionId: dmVid, resultType: deleteDeleteMarker }, - err => next(err)), - // should get no such object even after deleting del marker - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - // get directly to aws however will give us first null version - next => awsGetLatestVerId(key, someBody, next), - (awsLatestVid, next) => { - assert.strictEqual(awsLatestVid, this.test.awsNullVid); - next(); - }, - ], done); - }); - - // NOTE: Normal deletes when versioning is suspended create a - // delete marker with the version id "null" which is supposed to - // overwrite any existing null version. 
- it('versioning suspended: creating a delete marker will overwrite an ' + - 'existing null version that is not the latest version in s3 metadata,' + - ' but the data of the first null version will remain in AWS', - function itF(done) { - const key = `somekey-${genUniqID()}`; - const data = [undefined, 'data1']; - async.waterfall([ - // put null version - next => putToAwsBackend(s3, bucket, key, data[0], - err => next(err)), - next => awsGetLatestVerId(key, '', next), - (awsNullVid, next) => { - this.test.awsNullVid = awsNullVid; - next(); - }, - // enable versioning and put another version - next => putVersionsToAws(s3, bucket, key, [data[1]], next), - (versions, next) => { - this.test.s3vid = versions[0]; - next(); - }, - next => suspendVersioning(s3, bucket, next), - // overwrites null version in s3 metadata but does not send - // additional delete to AWS to clean up previous "null" version - next => delAndAssertResult(s3, { bucket, key, - resultType: newDeleteMarker }, next), - (s3dmVid, next) => { - this.test.s3DeleteMarkerId = s3dmVid; - next(); - }, - // delete delete marker - next => delAndAssertResult(s3, { bucket, key, - versionId: this.test.s3DeleteMarkerId, - resultType: deleteDeleteMarker }, err => next(err)), - // deleting latest version after del marker - next => delAndAssertResult(s3, { bucket, key, - versionId: this.test.s3vid, resultType: deleteVersion }, - err => next(err)), - // should get no such object instead of null version - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - // we get the null version that should have been "overwritten" - // when getting the latest version in AWS now - next => awsGetLatestVerId(key, '', next), - (awsLatestVid, next) => { - assert.strictEqual(awsLatestVid, this.test.awsNullVid); - next(); - }, - ], done); - }); - - it('versioning enabled: should create a delete marker in s3 and ' + - 'aws successfully when deleting existing object', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putVersionsToAws(s3, bucket, key, [someBody], - err => next(err)), - next => delAndAssertResult(s3, { bucket, key, resultType: - newDeleteMarker }, err => next(err)), - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, - next), - ], done); - }); - - it('versioning enabled: should delete a delete marker in s3 and ' + - 'aws successfully', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putVersionsToAws(s3, bucket, key, [someBody], - (err, versionIds) => next(err, versionIds[0])), - // create a delete marker - (s3vid, next) => delAndAssertResult(s3, { bucket, key, - resultType: newDeleteMarker }, (err, delMarkerVid) => - next(err, s3vid, delMarkerVid)), - // delete delete marker - (s3vid, dmVid, next) => delAndAssertResult(s3, { bucket, key, - versionId: dmVid, resultType: deleteDeleteMarker }, - err => next(err, s3vid)), - // should be able to get object originally put from s3 - (s3vid, next) => getAndAssertResult(s3, { bucket, key, - body: someBody, expectedVersionId: s3vid }, next), - // latest version in aws should now be object originally put - next => awsGetLatestVerId(key, someBody, next), - ], done); - }); - - it('multiple delete markers: should be able to get pre-existing ' + - 'versions after creating and deleting several delete markers', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putVersionsToAws(s3, bucket, key, [someBody], - (err, 
versionIds) => next(err, versionIds[0])), - (s3vid, next) => _createDeleteMarkers(s3, bucket, key, 3, - (err, dmVids) => next(err, s3vid, dmVids)), - (s3vid, dmVids, next) => _deleteDeleteMarkers(s3, bucket, key, - dmVids, () => next(null, s3vid)), - // should be able to get object originally put from s3 - (s3vid, next) => getAndAssertResult(s3, { bucket, key, - body: someBody, expectedVersionId: s3vid }, next), - // latest version in aws should now be object originally put - next => awsGetLatestVerId(key, someBody, next), - ], done); - }); - - it('multiple delete markers: should get NoSuchObject if only ' + - 'one of the delete markers is deleted', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putVersionsToAws(s3, bucket, key, [someBody], - err => next(err)), - next => _createDeleteMarkers(s3, bucket, key, 3, - (err, dmVids) => next(err, dmVids[2])), - (lastDmVid, next) => delAndAssertResult(s3, { bucket, - key, versionId: lastDmVid, resultType: deleteDeleteMarker }, - err => next(err)), - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, - next), - ], done); - }); - it('should get the new latest version after deleting the latest' + - 'specific version', done => { - const key = `somekey-${genUniqID()}`; - const data = [...Array(4).keys()].map(i => i.toString()); - async.waterfall([ - // put 3 null versions - next => mapToAwsPuts(s3, bucket, key, data.slice(0, 3), - err => next(err)), - // put one version - next => putVersionsToAws(s3, bucket, key, [data[3]], - (err, versionIds) => next(err, versionIds[0])), - // delete the latest version - (versionId, next) => delAndAssertResult(s3, { bucket, - key, versionId, resultType: deleteVersion }, - err => next(err)), - // should get the last null version - next => getAndAssertResult(s3, { bucket, key, - body: data[2], expectedVersionId: 'null' }, next), - next => awsGetLatestVerId(key, data[2], - err => next(err)), - // delete the null version - next => delAndAssertResult(s3, { bucket, - key, versionId: 'null', resultType: deleteVersion }, - err => next(err)), - // s3 metadata should report no existing versions for keyname - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - // NOTE: latest version in aws will be the second null version - next => awsGetLatestVerId(key, data[1], - err => next(err)), - ], done); - }); + it( + 'versioning not configured: if specifying "null" version, should ' + + 'delete specific version in AWS backend', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putToAwsBackend(s3, bucket, key, someBody, err => next(err)), + next => awsGetLatestVerId(key, someBody, next), + (awsVerId, next) => + delAndAssertResult( + s3, + { bucket, key, versionId: 'null', resultType: deleteVersion }, + err => next(err, awsVerId) + ), + (awsVerId, next) => { + const wanted = isCEPH ? 
'NoSuchKey' : 'NoSuchVersion'; + _awsGetAssertDeleted({ key, versionId: awsVerId, errorCode: wanted }, next); + }, + ], + done + ); + } + ); + + it( + 'versioning not configured: specifying any version id other ' + + 'than null should not result in its deletion in AWS backend', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putToAwsBackend(s3, bucket, key, someBody, err => next(err)), + next => awsGetLatestVerId(key, someBody, next), + (awsVerId, next) => + delAndAssertResult( + s3, + { + bucket, + key, + versionId: 'awsVerIdWhichIsLongerThan40BytesButNotLongEnough', + resultError: 'InvalidArgument', + }, + err => next(err, awsVerId) + ), + (awsVerId, next) => + awsGetLatestVerId(key, someBody, (err, resultVid) => { + assert.strictEqual(resultVid, awsVerId); + next(); + }), + ], + done + ); + } + ); + + it('versioning suspended: should delete a specific version in AWS ' + 'backend successfully', done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, [someBody], err => next(err)), + next => awsGetLatestVerId(key, someBody, next), + (awsVerId, next) => + delAndAssertResult(s3, { bucket, key, versionId: 'null', resultType: deleteVersion }, err => + next(err, awsVerId) + ), + (awsVerId, next) => { + const wanted = isCEPH ? 'NoSuchKey' : 'NoSuchVersion'; + _awsGetAssertDeleted({ key, versionId: awsVerId, errorCode: wanted }, next); + }, + ], + done + ); + }); - it('should delete the correct version even if other versions or ' + - 'delete markers put directly on aws', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putVersionsToAws(s3, bucket, key, [someBody], - (err, versionIds) => next(err, versionIds[0])), - (s3vid, next) => awsGetLatestVerId(key, someBody, - (err, awsVid) => next(err, s3vid, awsVid)), - // put an object in AWS - (s3vid, awsVid, next) => awsS3.putObject({ Bucket: awsBucket, - Key: key }, err => next(err, s3vid, awsVid)), - // create a delete marker in AWS - (s3vid, awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key }, err => next(err, s3vid, awsVid)), - // delete original version in s3 - (s3vid, awsVid, next) => delAndAssertResult(s3, { bucket, key, - versionId: s3vid, resultType: deleteVersion }, - err => next(err, awsVid)), - (awsVid, next) => _getAssertDeleted(s3, { key, - errorCode: 'NoSuchKey' }, () => next(null, awsVid)), - (awsVerId, next) => { - const wanted = isCEPH ? 'NoSuchKey' : 'NoSuchVersion'; - _awsGetAssertDeleted({ key, - versionId: awsVerId, errorCode: wanted }, next); - }, - ], done); - }); + it('versioning enabled: should delete a specific version in AWS ' + 'backend successfully', done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => + putVersionsToAws(s3, bucket, key, [someBody], (err, versionIds) => + next(err, versionIds[0]) + ), + (s3vid, next) => awsGetLatestVerId(key, someBody, (err, awsVid) => next(err, s3vid, awsVid)), + (s3VerId, awsVerId, next) => + delAndAssertResult( + s3, + { bucket, key, versionId: s3VerId, resultType: deleteVersion }, + err => next(err, awsVerId) + ), + (awsVerId, next) => { + const wanted = isCEPH ? 
'NoSuchKey' : 'NoSuchVersion'; + _awsGetAssertDeleted({ key, versionId: awsVerId, errorCode: wanted }, next); + }, + ], + done + ); + }); - it('should not return an error deleting a version that was already ' + - 'deleted directly from AWS backend', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putVersionsToAws(s3, bucket, key, [someBody], - (err, versionIds) => next(err, versionIds[0])), - (s3vid, next) => awsGetLatestVerId(key, someBody, - (err, awsVid) => next(err, s3vid, awsVid)), - // delete the object in AWS - (s3vid, awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, err => next(err, s3vid)), - // then try to delete in S3 - (s3vid, next) => delAndAssertResult(s3, { bucket, key, - versionId: s3vid, resultType: deleteVersion }, - err => next(err)), - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - ], done); - }); - }); -}); - -describeSkipIfNotMultiple('AWS backend delete object w. versioning: ' + - 'using bucket location constraint', function testSuite() { - this.timeout(120000); - const createBucketParams = { - Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: awsLocation, - }, - }; - withV4(sigCfg => { - let bucketUtil; - let s3; - beforeEach(() => { - process.stdout.write('Creating bucket\n'); - bucketUtil = new BucketUtility('default', sigCfg); - s3 = bucketUtil.s3; - return s3.createBucket(createBucketParams).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; + it( + 'versioning not configured: deleting existing object should ' + + 'not return version id or x-amz-delete-marker: true but should ' + + 'create a delete marker in aws ', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putToAwsBackend(s3, bucket, key, someBody, err => next(err)), + next => + delAndAssertResult(s3, { bucket, key, resultType: nonVersionedDelete }, err => + next(err) + ), + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, next), + ], + done + ); + } + ); + + it( + 'versioning suspended: should create a delete marker in s3 ' + + 'and aws successfully when deleting existing object', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, [someBody], err => next(err)), + next => + delAndAssertResult(s3, { bucket, key, resultType: newDeleteMarker }, err => next(err)), + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, next), + ], + done + ); + } + ); + + // NOTE: Normal deletes when versioning is suspended create a + // delete marker with the version id "null", which overwrites an + // existing null version in s3 metadata. 
+ it( + 'versioning suspended: creating a delete marker will overwrite an ' + + 'existing null version that is the latest version in s3 metadata,' + + ' but the data of the first null version will remain in AWS', + function itF(done) { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, [someBody], err => next(err)), + next => awsGetLatestVerId(key, someBody, next), + (awsNullVid, next) => { + this.test.awsNullVid = awsNullVid; + next(); + }, + // following call should generate a delete marker + next => delAndAssertResult(s3, { bucket, key, resultType: newDeleteMarker }, next), + // delete delete marker + (dmVid, next) => + delAndAssertResult( + s3, + { bucket, key, versionId: dmVid, resultType: deleteDeleteMarker }, + err => next(err) + ), + // should get no such object even after deleting del marker + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + // get directly to aws however will give us first null version + next => awsGetLatestVerId(key, someBody, next), + (awsLatestVid, next) => { + assert.strictEqual(awsLatestVid, this.test.awsNullVid); + next(); + }, + ], + done + ); + } + ); + + // NOTE: Normal deletes when versioning is suspended create a + // delete marker with the version id "null" which is supposed to + // overwrite any existing null version. + it( + 'versioning suspended: creating a delete marker will overwrite an ' + + 'existing null version that is not the latest version in s3 metadata,' + + ' but the data of the first null version will remain in AWS', + function itF(done) { + const key = `somekey-${genUniqID()}`; + const data = [undefined, 'data1']; + async.waterfall( + [ + // put null version + next => putToAwsBackend(s3, bucket, key, data[0], err => next(err)), + next => awsGetLatestVerId(key, '', next), + (awsNullVid, next) => { + this.test.awsNullVid = awsNullVid; + next(); + }, + // enable versioning and put another version + next => putVersionsToAws(s3, bucket, key, [data[1]], next), + (versions, next) => { + this.test.s3vid = versions[0]; + next(); + }, + next => suspendVersioning(s3, bucket, next), + // overwrites null version in s3 metadata but does not send + // additional delete to AWS to clean up previous "null" version + next => delAndAssertResult(s3, { bucket, key, resultType: newDeleteMarker }, next), + (s3dmVid, next) => { + this.test.s3DeleteMarkerId = s3dmVid; + next(); + }, + // delete delete marker + next => + delAndAssertResult( + s3, + { + bucket, + key, + versionId: this.test.s3DeleteMarkerId, + resultType: deleteDeleteMarker, + }, + err => next(err) + ), + // deleting latest version after del marker + next => + delAndAssertResult( + s3, + { bucket, key, versionId: this.test.s3vid, resultType: deleteVersion }, + err => next(err) + ), + // should get no such object instead of null version + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + // we get the null version that should have been "overwritten" + // when getting the latest version in AWS now + next => awsGetLatestVerId(key, '', next), + (awsLatestVid, next) => { + assert.strictEqual(awsLatestVid, this.test.awsNullVid); + next(); + }, + ], + done + ); + } + ); + + it( + 'versioning enabled: should create a delete marker in s3 and ' + + 'aws successfully when deleting existing object', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putVersionsToAws(s3, bucket, key, [someBody], err => next(err)), + next => + delAndAssertResult(s3, { bucket, key, 
resultType: newDeleteMarker }, err => next(err)), + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, next), + ], + done + ); + } + ); + + it('versioning enabled: should delete a delete marker in s3 and ' + 'aws successfully', done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => + putVersionsToAws(s3, bucket, key, [someBody], (err, versionIds) => + next(err, versionIds[0]) + ), + // create a delete marker + (s3vid, next) => + delAndAssertResult(s3, { bucket, key, resultType: newDeleteMarker }, (err, delMarkerVid) => + next(err, s3vid, delMarkerVid) + ), + // delete delete marker + (s3vid, dmVid, next) => + delAndAssertResult( + s3, + { bucket, key, versionId: dmVid, resultType: deleteDeleteMarker }, + err => next(err, s3vid) + ), + // should be able to get object originally put from s3 + (s3vid, next) => + getAndAssertResult(s3, { bucket, key, body: someBody, expectedVersionId: s3vid }, next), + // latest version in aws should now be object originally put + next => awsGetLatestVerId(key, someBody, next), + ], + done + ); }); - }); - afterEach(() => { - process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting bucket: ' + - `${err}\n`); - throw err; + it( + 'multiple delete markers: should be able to get pre-existing ' + + 'versions after creating and deleting several delete markers', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => + putVersionsToAws(s3, bucket, key, [someBody], (err, versionIds) => + next(err, versionIds[0]) + ), + (s3vid, next) => + _createDeleteMarkers(s3, bucket, key, 3, (err, dmVids) => next(err, s3vid, dmVids)), + (s3vid, dmVids, next) => + _deleteDeleteMarkers(s3, bucket, key, dmVids, () => next(null, s3vid)), + // should be able to get object originally put from s3 + (s3vid, next) => + getAndAssertResult(s3, { bucket, key, body: someBody, expectedVersionId: s3vid }, next), + // latest version in aws should now be object originally put + next => awsGetLatestVerId(key, someBody, next), + ], + done + ); + } + ); + + it( + 'multiple delete markers: should get NoSuchObject if only ' + 'one of the delete markers is deleted', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putVersionsToAws(s3, bucket, key, [someBody], err => next(err)), + next => _createDeleteMarkers(s3, bucket, key, 3, (err, dmVids) => next(err, dmVids[2])), + (lastDmVid, next) => + delAndAssertResult( + s3, + { bucket, key, versionId: lastDmVid, resultType: deleteDeleteMarker }, + err => next(err) + ), + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, next), + ], + done + ); + } + ); + + it('should get the new latest version after deleting the latest' + 'specific version', done => { + const key = `somekey-${genUniqID()}`; + const data = [...Array(4).keys()].map(i => i.toString()); + async.waterfall( + [ + // put 3 null versions + next => mapToAwsPuts(s3, bucket, key, data.slice(0, 3), err => next(err)), + // put one version + next => + putVersionsToAws(s3, bucket, key, [data[3]], (err, versionIds) => next(err, versionIds[0])), + // delete the latest version + (versionId, next) => + delAndAssertResult(s3, { bucket, key, versionId, resultType: 
deleteVersion }, err => + next(err) + ), + // should get the last null version + next => getAndAssertResult(s3, { bucket, key, body: data[2], expectedVersionId: 'null' }, next), + next => awsGetLatestVerId(key, data[2], err => next(err)), + // delete the null version + next => + delAndAssertResult(s3, { bucket, key, versionId: 'null', resultType: deleteVersion }, err => + next(err) + ), + // s3 metadata should report no existing versions for keyname + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + // NOTE: latest version in aws will be the second null version + next => awsGetLatestVerId(key, data[1], err => next(err)), + ], + done + ); }); - }); - it('versioning not configured: deleting non-existing object should ' + - 'not return version id or x-amz-delete-marker: true nor create a ' + - 'delete marker in aws ', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => delAndAssertResult(s3, { bucket, key, - resultType: nonVersionedDelete }, err => next(err)), - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, - next), - ], done); + it( + 'should delete the correct version even if other versions or ' + 'delete markers put directly on aws', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => + putVersionsToAws(s3, bucket, key, [someBody], (err, versionIds) => + next(err, versionIds[0]) + ), + (s3vid, next) => + awsGetLatestVerId(key, someBody, (err, awsVid) => next(err, s3vid, awsVid)), + // put an object in AWS + (s3vid, awsVid, next) => + awsS3.putObject({ Bucket: awsBucket, Key: key }, err => next(err, s3vid, awsVid)), + // create a delete marker in AWS + (s3vid, awsVid, next) => + awsS3.deleteObject({ Bucket: awsBucket, Key: key }, err => next(err, s3vid, awsVid)), + // delete original version in s3 + (s3vid, awsVid, next) => + delAndAssertResult( + s3, + { bucket, key, versionId: s3vid, resultType: deleteVersion }, + err => next(err, awsVid) + ), + (awsVid, next) => + _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, () => next(null, awsVid)), + (awsVerId, next) => { + const wanted = isCEPH ? 'NoSuchKey' : 'NoSuchVersion'; + _awsGetAssertDeleted({ key, versionId: awsVerId, errorCode: wanted }, next); + }, + ], + done + ); + } + ); + + it( + 'should not return an error deleting a version that was already ' + 'deleted directly from AWS backend', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => + putVersionsToAws(s3, bucket, key, [someBody], (err, versionIds) => + next(err, versionIds[0]) + ), + (s3vid, next) => + awsGetLatestVerId(key, someBody, (err, awsVid) => next(err, s3vid, awsVid)), + // delete the object in AWS + (s3vid, awsVid, next) => + awsS3.deleteObject({ Bucket: awsBucket, Key: key, VersionId: awsVid }, err => + next(err, s3vid) + ), + // then try to delete in S3 + (s3vid, next) => + delAndAssertResult( + s3, + { bucket, key, versionId: s3vid, resultType: deleteVersion }, + err => next(err) + ), + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + ], + done + ); + } + ); }); + } +); + +describeSkipIfNotMultiple( + 'AWS backend delete object w. 
versioning: ' + 'using bucket location constraint', + function testSuite() { + this.timeout(120000); + const createBucketParams = { + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: awsLocation, + }, + }; + withV4(sigCfg => { + let bucketUtil; + let s3; + beforeEach(() => { + process.stdout.write('Creating bucket\n'); + bucketUtil = new BucketUtility('default', sigCfg); + s3 = bucketUtil.s3; + return s3 + .createBucket(createBucketParams) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); + }); - it('versioning suspended: should create a delete marker in s3 ' + - 'and aws successfully when deleting non-existing object', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => suspendVersioning(s3, bucket, next), - next => delAndAssertResult(s3, { bucket, key, resultType: - newDeleteMarker }, err => next(err)), - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, - next), - ], done); - }); + afterEach(() => { + process.stdout.write('Emptying bucket\n'); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw err; + }); + }); - it('versioning enabled: should create a delete marker in s3 and ' + - 'aws successfully when deleting non-existing object', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => delAndAssertResult(s3, { bucket, key, resultType: - newDeleteMarker }, err => next(err)), - next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, - next), - next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, - next), - ], done); + it( + 'versioning not configured: deleting non-existing object should ' + + 'not return version id or x-amz-delete-marker: true nor create a ' + + 'delete marker in aws ', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => + delAndAssertResult(s3, { bucket, key, resultType: nonVersionedDelete }, err => + next(err) + ), + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, next), + ], + done + ); + } + ); + + it( + 'versioning suspended: should create a delete marker in s3 ' + + 'and aws successfully when deleting non-existing object', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => suspendVersioning(s3, bucket, next), + next => + delAndAssertResult(s3, { bucket, key, resultType: newDeleteMarker }, err => next(err)), + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, next), + ], + done + ); + } + ); + + it( + 'versioning enabled: should create a delete marker in s3 and ' + + 'aws successfully when deleting non-existing object', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => + delAndAssertResult(s3, { bucket, key, resultType: newDeleteMarker }, err => next(err)), + next => _getAssertDeleted(s3, { key, errorCode: 'NoSuchKey' }, next), + next => _awsGetAssertDeleted({ key, errorCode: 'NoSuchKey' }, next), + ], + done + ); + } + ); }); - }); -}); - - -describeSkipIfNotMultiple('AWS backend delete 
multiple objects w. versioning: ' + - 'using object location constraint', function testSuite() { - this.timeout(120000); - withV4(sigCfg => { - let bucketUtil; - let s3; - beforeEach(() => { - process.stdout.write('Creating bucket\n'); - bucketUtil = new BucketUtility('default', sigCfg); - s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; + } +); + +describeSkipIfNotMultiple( + 'AWS backend delete multiple objects w. versioning: ' + 'using object location constraint', + function testSuite() { + this.timeout(120000); + withV4(sigCfg => { + let bucketUtil; + let s3; + beforeEach(() => { + process.stdout.write('Creating bucket\n'); + bucketUtil = new BucketUtility('default', sigCfg); + s3 = bucketUtil.s3; + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); - }); - afterEach(() => { - process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting bucket: ' + - `${err}\n`); - throw err; + afterEach(() => { + process.stdout.write('Emptying bucket\n'); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw err; + }); }); - }); - it('versioning not configured: if specifying "null" version, should ' + - 'delete specific version in AWS backend', done => { - const key = `somekey-${Date.now()}`; - async.waterfall([ - next => putToAwsBackend(s3, bucket, key, someBody, - err => next(err)), - next => awsGetLatestVerId(key, someBody, next), - (awsVerId, next) => delObjectsAndAssertResult(s3, { bucket, - key, versionId: 'null', resultType: deleteVersion }, - err => next(err, awsVerId)), - (awsVerId, next) => { - const wanted = isCEPH ? 'NoSuchKey' : 'NoSuchVersion'; - _awsGetAssertDeleted({ key, - versionId: awsVerId, errorCode: wanted }, next); - }, - ], done); - }); - - it('versioning suspended: should delete a specific version in AWS ' + - 'backend successfully', done => { - const key = `somekey-${Date.now()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, [someBody], - err => next(err)), - next => awsGetLatestVerId(key, someBody, next), - (awsVerId, next) => delObjectsAndAssertResult(s3, { bucket, - key, versionId: 'null', resultType: deleteVersion }, - err => next(err, awsVerId)), - (awsVerId, next) => { - const wanted = isCEPH ? 'NoSuchKey' : 'NoSuchVersion'; - _awsGetAssertDeleted({ key, - versionId: awsVerId, errorCode: wanted }, next); - }, - ], done); - }); + it( + 'versioning not configured: if specifying "null" version, should ' + + 'delete specific version in AWS backend', + done => { + const key = `somekey-${Date.now()}`; + async.waterfall( + [ + next => putToAwsBackend(s3, bucket, key, someBody, err => next(err)), + next => awsGetLatestVerId(key, someBody, next), + (awsVerId, next) => + delObjectsAndAssertResult( + s3, + { bucket, key, versionId: 'null', resultType: deleteVersion }, + err => next(err, awsVerId) + ), + (awsVerId, next) => { + const wanted = isCEPH ? 
'NoSuchKey' : 'NoSuchVersion'; + _awsGetAssertDeleted({ key, versionId: awsVerId, errorCode: wanted }, next); + }, + ], + done + ); + } + ); + + it('versioning suspended: should delete a specific version in AWS ' + 'backend successfully', done => { + const key = `somekey-${Date.now()}`; + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, [someBody], err => next(err)), + next => awsGetLatestVerId(key, someBody, next), + (awsVerId, next) => + delObjectsAndAssertResult( + s3, + { bucket, key, versionId: 'null', resultType: deleteVersion }, + err => next(err, awsVerId) + ), + (awsVerId, next) => { + const wanted = isCEPH ? 'NoSuchKey' : 'NoSuchVersion'; + _awsGetAssertDeleted({ key, versionId: awsVerId, errorCode: wanted }, next); + }, + ], + done + ); + }); - it('versioning enabled: should delete a specific version in AWS ' + - 'backend successfully', done => { - const key = `somekey-${Date.now()}`; - async.waterfall([ - next => putVersionsToAws(s3, bucket, key, [someBody], - (err, versionIds) => next(err, versionIds[0])), - (s3vid, next) => awsGetLatestVerId(key, someBody, - (err, awsVid) => next(err, s3vid, awsVid)), - (s3VerId, awsVerId, next) => delObjectsAndAssertResult(s3, { bucket, - key, versionId: s3VerId, resultType: deleteVersion }, - err => next(err, awsVerId)), - (awsVerId, next) => { - const wanted = isCEPH ? 'NoSuchKey' : 'NoSuchVersion'; - _awsGetAssertDeleted({ key, - versionId: awsVerId, errorCode: wanted }, next); - }, - ], done); + it('versioning enabled: should delete a specific version in AWS ' + 'backend successfully', done => { + const key = `somekey-${Date.now()}`; + async.waterfall( + [ + next => + putVersionsToAws(s3, bucket, key, [someBody], (err, versionIds) => + next(err, versionIds[0]) + ), + (s3vid, next) => awsGetLatestVerId(key, someBody, (err, awsVid) => next(err, s3vid, awsVid)), + (s3VerId, awsVerId, next) => + delObjectsAndAssertResult( + s3, + { bucket, key, versionId: s3VerId, resultType: deleteVersion }, + err => next(err, awsVerId) + ), + (awsVerId, next) => { + const wanted = isCEPH ? 'NoSuchKey' : 'NoSuchVersion'; + _awsGetAssertDeleted({ key, versionId: awsVerId, errorCode: wanted }, next); + }, + ], + done + ); + }); }); - }); -}); + } +); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js index 22e9d150fb..3a855f0f3b 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js @@ -21,12 +21,11 @@ const azureClient = getAzureClient(); const normalBody = Buffer.from('I am a body', 'utf8'); const azureTimeout = 20000; -const nonExistingId = process.env.AWS_ON_AIR ? - 'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' : - '3939393939393939393936493939393939393939756e6437'; +const nonExistingId = process.env.AWS_ON_AIR + ? 
'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' + : '3939393939393939393936493939393939393939756e6437'; -describeSkipIfNotMultipleOrCeph('Multiple backend delete object from Azure', -function testSuite() { +describeSkipIfNotMultipleOrCeph('Multiple backend delete object from Azure', function testSuite() { this.timeout(250000); withV4(sigCfg => { let bucketUtil; @@ -36,169 +35,219 @@ function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: azureContainerName }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: azureContainerName }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); after(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(azureContainerName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(azureContainerName); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting bucket: ' + - `${err}\n`); - throw err; - }); + return bucketUtil + .empty(azureContainerName) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(azureContainerName); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw err; + }); }); keys.forEach(key => { const keyName = uniqName(keyObject); describe(`${key.describe} size`, () => { before(done => { - s3.putObject({ - Bucket: azureContainerName, - Key: keyName, - Body: key.body, - Metadata: { - 'scal-location-constraint': azureLocation, + s3.putObject( + { + Bucket: azureContainerName, + Key: keyName, + Body: key.body, + Metadata: { + 'scal-location-constraint': azureLocation, + }, }, - }, done); + done + ); }); - it(`should delete an ${key.describe} object from Azure`, - done => { - s3.deleteObject({ - Bucket: azureContainerName, - Key: keyName, - }, err => { - assert.equal(err, null, 'Expected success ' + - `but got error ${err}`); - setTimeout(() => azureClient.getContainerClient(azureContainerName) - .getProperties(keyName) - .then(() => assert.fail('Expected error'), err => { - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NotFound'); - return done(); - }), azureTimeout); - }); + it(`should delete an ${key.describe} object from Azure`, done => { + s3.deleteObject( + { + Bucket: azureContainerName, + Key: keyName, + }, + err => { + assert.equal(err, null, 'Expected success ' + `but got error ${err}`); + setTimeout( + () => + azureClient + .getContainerClient(azureContainerName) + .getProperties(keyName) + .then( + () => assert.fail('Expected error'), + err => { + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NotFound'); + return done(); + } + ), + azureTimeout + ); + } + ); }); }); }); - describe('delete from Azure location with bucketMatch set to false', - () => { + describe('delete from Azure location with bucketMatch set to false', () => { beforeEach(function beforeF(done) { this.currentTest.azureObject = uniqName(keyObject); - s3.putObject({ - Bucket: azureContainerName, - Key: this.currentTest.azureObject, - Body: normalBody, - Metadata: { - 'scal-location-constraint': azureLocationMismatch, + s3.putObject( + { + Bucket: azureContainerName, + Key: this.currentTest.azureObject, + Body: normalBody, + Metadata: { + 'scal-location-constraint': azureLocationMismatch, + }, }, - }, 
done); + done + ); }); it('should delete object', function itF(done) { - s3.deleteObject({ - Bucket: azureContainerName, - Key: this.test.azureObject, - }, err => { - assert.equal(err, null, 'Expected success ' + - `but got error ${err}`); - setTimeout(() => - azureClient.getContainerClient(azureContainerName) - .getProperties(`${azureContainerName}/${this.test.azureObject}`) - .then(() => assert.fail('Expected error'), err => { - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NotFound'); - return done(); - }), azureTimeout); - }); + s3.deleteObject( + { + Bucket: azureContainerName, + Key: this.test.azureObject, + }, + err => { + assert.equal(err, null, 'Expected success ' + `but got error ${err}`); + setTimeout( + () => + azureClient + .getContainerClient(azureContainerName) + .getProperties(`${azureContainerName}/${this.test.azureObject}`) + .then( + () => assert.fail('Expected error'), + err => { + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NotFound'); + return done(); + } + ), + azureTimeout + ); + } + ); }); }); describe('returning no error', () => { beforeEach(function beF(done) { this.currentTest.azureObject = uniqName(keyObject); - s3.putObject({ - Bucket: azureContainerName, - Key: this.currentTest.azureObject, - Body: normalBody, - Metadata: { - 'scal-location-constraint': azureLocation, + s3.putObject( + { + Bucket: azureContainerName, + Key: this.currentTest.azureObject, + Body: normalBody, + Metadata: { + 'scal-location-constraint': azureLocation, + }, }, - }, err => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - azureClient.getContainerClient(azureContainerName) - .deleteBlob(this.currentTest.azureObject).then(done, err => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - done(err); - }); - }); + err => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + azureClient + .getContainerClient(azureContainerName) + .deleteBlob(this.currentTest.azureObject) + .then(done, err => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + done(err); + }); + } + ); }); - it('should return no error on deleting an object deleted ' + - 'from Azure', function itF(done) { - s3.deleteObject({ - Bucket: azureContainerName, - Key: this.test.azureObject, - }, err => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - done(); - }); + it('should return no error on deleting an object deleted ' + 'from Azure', function itF(done) { + s3.deleteObject( + { + Bucket: azureContainerName, + Key: this.test.azureObject, + }, + err => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + done(); + } + ); }); }); describe('Versioning:: ', () => { beforeEach(function beF(done) { this.currentTest.azureObject = uniqName(keyObject); - s3.putObject({ - Bucket: azureContainerName, - Key: this.currentTest.azureObject, - Body: normalBody, - Metadata: { - 'scal-location-constraint': azureLocation, + s3.putObject( + { + Bucket: azureContainerName, + Key: this.currentTest.azureObject, + Body: normalBody, + Metadata: { + 'scal-location-constraint': azureLocation, + }, }, - }, done); + done + ); }); - it('should not delete object when deleting a non-existing ' + - 'version from Azure', function itF(done) { - async.waterfall([ - next => s3.deleteObject({ - Bucket: azureContainerName, - Key: this.test.azureObject, - VersionId: nonExistingId, - }, err => next(err)), - next => s3.getObject({ - Bucket: 
azureContainerName, - Key: this.test.azureObject, - }, (err, res) => { - assert.equal(err, null, 'getObject: Expected success ' + - `but got error ${err}`); - assert.deepStrictEqual(res.Body, normalBody); - return next(err); - }), - next => azureClient.getContainerClient(azureContainerName) - .getBlobClient(this.test.azureObject) - .downloadToBuffer().then(res => { - assert.deepStrictEqual(res, normalBody); - return next(); - }, err => { - assert.equal(err, null, 'getBlobToText: Expected ' + - `successbut got error ${err}`); - return next(); - }), - ], done); + it('should not delete object when deleting a non-existing ' + 'version from Azure', function itF(done) { + async.waterfall( + [ + next => + s3.deleteObject( + { + Bucket: azureContainerName, + Key: this.test.azureObject, + VersionId: nonExistingId, + }, + err => next(err) + ), + next => + s3.getObject( + { + Bucket: azureContainerName, + Key: this.test.azureObject, + }, + (err, res) => { + assert.equal(err, null, 'getObject: Expected success ' + `but got error ${err}`); + assert.deepStrictEqual(res.Body, normalBody); + return next(err); + } + ), + next => + azureClient + .getContainerClient(azureContainerName) + .getBlobClient(this.test.azureObject) + .downloadToBuffer() + .then( + res => { + assert.deepStrictEqual(res, normalBody); + return next(); + }, + err => { + assert.equal( + err, + null, + 'getBlobToText: Expected ' + `successbut got error ${err}` + ); + return next(); + } + ), + ], + done + ); }); }); @@ -212,16 +261,14 @@ function testSuite() { Metadata: { 'scal-location-constraint': azureLocation }, }; s3.putObject(params, err => { - assert.equal(err, null, 'Err putting object to Azure: ' + - `${err}`); + assert.equal(err, null, 'Err putting object to Azure: ' + `${err}`); const params = { Bucket: azureContainerName, Key: this.currentTest.key, Metadata: { 'scal-location-constraint': azureLocation }, }; s3.createMultipartUpload(params, (err, res) => { - assert.equal(err, null, 'Err initiating MPU on ' + - `Azure: ${err}`); + assert.equal(err, null, 'Err initiating MPU on ' + `Azure: ${err}`); this.currentTest.uploadId = res.UploadId; setTimeout(() => done(), azureTimeout); }); @@ -229,24 +276,30 @@ function testSuite() { }); afterEach(function afF(done) { - s3.abortMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.key, - UploadId: this.currentTest.uploadId, - }, err => { - assert.equal(err, null, `Err aborting MPU: ${err}`); - setTimeout(() => done(), azureTimeout); - }); + s3.abortMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + UploadId: this.currentTest.uploadId, + }, + err => { + assert.equal(err, null, `Err aborting MPU: ${err}`); + setTimeout(() => done(), azureTimeout); + } + ); }); it('should return InternalError', function itFn(done) { - s3.deleteObject({ - Bucket: azureContainerName, - Key: this.test.key, - }, err => { - assert.strictEqual(err.code, 'MPUinProgress'); - done(); - }); + s3.deleteObject( + { + Bucket: azureContainerName, + Key: this.test.key, + }, + err => { + assert.strictEqual(err.code, 'MPUinProgress'); + done(); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteGcp.js index d8efe4c81e..fb15bf6f91 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteGcp.js @@ -2,12 +2,7 @@ const assert = require('assert'); const withV4 = 
require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { - describeSkipIfNotMultipleOrCeph, - gcpLocation, - gcpLocationMismatch, - genUniqID, -} = require('../utils'); +const { describeSkipIfNotMultipleOrCeph, gcpLocation, gcpLocationMismatch, genUniqID } = require('../utils'); const bucket = `deletegcp${genUniqID()}`; const gcpObject = `gcpObject-${genUniqID()}`; @@ -17,8 +12,7 @@ const mismatchObject = `mismatchObject-${genUniqID()}`; const body = Buffer.from('I am a body', 'utf8'); const bigBody = Buffer.alloc(10485760); -describeSkipIfNotMultipleOrCeph('Multiple backend delete', -function testSuite() { +describeSkipIfNotMultipleOrCeph('Multiple backend delete', function testSuite() { this.timeout(120000); withV4(sigCfg => { let bucketUtil; @@ -28,45 +22,60 @@ function testSuite() { process.stdout.write('Creating bucket\n'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }).then(() => { - process.stdout.write('Putting object to GCP\n'); - const params = { Bucket: bucket, Key: gcpObject, Body: body, - Metadata: { 'scal-location-constraint': gcpLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting 0-byte object to GCP\n'); - const params = { Bucket: bucket, Key: emptyObject, - Metadata: { 'scal-location-constraint': gcpLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting large object to GCP\n'); - const params = { Bucket: bucket, Key: bigObject, - Body: bigBody, - Metadata: { 'scal-location-constraint': gcpLocation } }; - return s3.putObject(params).promise(); - }) - .then(() => { - process.stdout.write('Putting object to GCP\n'); - const params = { Bucket: bucket, Key: mismatchObject, - Body: body, Metadata: - { 'scal-location-constraint': gcpLocationMismatch } }; - return s3.putObject(params).promise(); - }) - .catch(err => { - process.stdout.write(`Error putting objects: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }) + .then(() => { + process.stdout.write('Putting object to GCP\n'); + const params = { + Bucket: bucket, + Key: gcpObject, + Body: body, + Metadata: { 'scal-location-constraint': gcpLocation }, + }; + return s3.putObject(params).promise(); + }) + .then(() => { + process.stdout.write('Putting 0-byte object to GCP\n'); + const params = { + Bucket: bucket, + Key: emptyObject, + Metadata: { 'scal-location-constraint': gcpLocation }, + }; + return s3.putObject(params).promise(); + }) + .then(() => { + process.stdout.write('Putting large object to GCP\n'); + const params = { + Bucket: bucket, + Key: bigObject, + Body: bigBody, + Metadata: { 'scal-location-constraint': gcpLocation }, + }; + return s3.putObject(params).promise(); + }) + .then(() => { + process.stdout.write('Putting object to GCP\n'); + const params = { + Bucket: bucket, + Key: mismatchObject, + Body: body, + Metadata: { 'scal-location-constraint': gcpLocationMismatch }, + }; + return s3.putObject(params).promise(); + }) + .catch(err => { + process.stdout.write(`Error putting objects: ${err}\n`); + throw err; + }); }); after(() => { process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket) - .catch(err => { + return 
bucketUtil.deleteOne(bucket).catch(err => { process.stdout.write(`Error deleting bucket: ${err}\n`); throw err; }); @@ -75,39 +84,41 @@ function testSuite() { const deleteTests = [ { msg: 'should delete object from GCP', - Bucket: bucket, Key: gcpObject, + Bucket: bucket, + Key: gcpObject, }, { msg: 'should delete 0-byte object from GCP', - Bucket: bucket, Key: emptyObject, + Bucket: bucket, + Key: emptyObject, }, { msg: 'should delete large object from GCP', - Bucket: bucket, Key: bigObject, + Bucket: bucket, + Key: bigObject, }, { - msg: 'should delete object from GCP location with ' + - 'bucketMatch set to false', - Bucket: bucket, Key: mismatchObject, + msg: 'should delete object from GCP location with ' + 'bucketMatch set to false', + Bucket: bucket, + Key: mismatchObject, }, ]; deleteTests.forEach(test => { const { msg, Bucket, Key } = test; - it(msg, done => s3.deleteObject({ Bucket, Key }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket, Key }, err => { - assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + - 'error but got success'); - done(); - }); - })); + it(msg, done => + s3.deleteObject({ Bucket, Key }, err => { + assert.strictEqual(err, null, `Expected success, got error ${JSON.stringify(err)}`); + s3.getObject({ Bucket, Key }, err => { + assert.strictEqual(err.code, 'NoSuchKey', 'Expected ' + 'error but got success'); + done(); + }); + }) + ); }); - it('should return success if the object does not exist', - done => s3.deleteObject({ Bucket: bucket, Key: 'noop' }, err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + it('should return success if the object does not exist', done => + s3.deleteObject({ Bucket: bucket, Key: 'noop' }, err => { + assert.strictEqual(err, null, `Expected success, got error ${JSON.stringify(err)}`); done(); })); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js b/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js index 23d8688b49..2fdf195d3d 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/get/get.js @@ -35,286 +35,323 @@ describe('Multiple backend get object', function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); after(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting bucket: ' + - `${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw err; + }); }); // aws-sdk now (v2.363.0) returns 'UriParameterError' error - it.skip('should return an error to get request without a valid ' + - 'bucket name', - done => { - s3.getObject({ Bucket: '', Key: 'somekey' }, err => { - assert.notEqual(err, null, - 'Expected 
failure but got success'); - assert.strictEqual(err.code, 'MethodNotAllowed'); - done(); - }); + it.skip('should return an error to get request without a valid ' + 'bucket name', done => { + s3.getObject({ Bucket: '', Key: 'somekey' }, err => { + assert.notEqual(err, null, 'Expected failure but got success'); + assert.strictEqual(err.code, 'MethodNotAllowed'); + done(); }); - it('should return NoSuchKey error when no such object', - done => { - s3.getObject({ Bucket: bucket, Key: 'nope' }, err => { - assert.notEqual(err, null, - 'Expected failure but got success'); - assert.strictEqual(err.code, 'NoSuchKey'); - done(); - }); + }); + it('should return NoSuchKey error when no such object', done => { + s3.getObject({ Bucket: bucket, Key: 'nope' }, err => { + assert.notEqual(err, null, 'Expected failure but got success'); + assert.strictEqual(err.code, 'NoSuchKey'); + done(); }); + }); - describeSkipIfNotMultiple('Complete MPU then get object on AWS ' + - 'location with bucketMatch: true ', () => { + describeSkipIfNotMultiple('Complete MPU then get object on AWS ' + 'location with bucketMatch: true ', () => { beforeEach(function beforeEachFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - async.waterfall([ - next => s3.createMultipartUpload({ - Bucket: bucket, Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': awsLocation, - } }, (err, res) => next(err, res.UploadId)), - (uploadId, next) => s3.uploadPart({ - Bucket: bucket, - Key: this.currentTest.key, - PartNumber: 1, - UploadId: uploadId, - Body: 'helloworld' }, (err, res) => next(err, uploadId, - res.ETag)), - (uploadId, eTag, next) => s3.completeMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - MultipartUpload: { - Parts: [ + async.waterfall( + [ + next => + s3.createMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': awsLocation }, + }, + (err, res) => next(err, res.UploadId) + ), + (uploadId, next) => + s3.uploadPart( { - ETag: eTag, + Bucket: bucket, + Key: this.currentTest.key, PartNumber: 1, + UploadId: uploadId, + Body: 'helloworld', + }, + (err, res) => next(err, uploadId, res.ETag) + ), + (uploadId, eTag, next) => + s3.completeMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + MultipartUpload: { + Parts: [ + { + ETag: eTag, + PartNumber: 1, + }, + ], + }, + UploadId: uploadId, }, - ], - }, - UploadId: uploadId, - }, err => next(err)), - ], done); + err => next(err) + ), + ], + done + ); }); - it('should get object from MPU on AWS ' + - 'location with bucketMatch: true ', function it(done) { - s3.getObject({ - Bucket: bucket, - Key: this.test.key, - }, (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.strictEqual(res.ContentLength, 10); - assert.strictEqual(res.Body.toString(), 'helloworld'); - assert.deepStrictEqual(res.Metadata, - { 'scal-location-constraint': awsLocation }); - return done(err); - }); + it('should get object from MPU on AWS ' + 'location with bucketMatch: true ', function it(done) { + s3.getObject( + { + Bucket: bucket, + Key: this.test.key, + }, + (err, res) => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + assert.strictEqual(res.ContentLength, 10); + assert.strictEqual(res.Body.toString(), 'helloworld'); + assert.deepStrictEqual(res.Metadata, { 'scal-location-constraint': awsLocation }); + return done(err); + } + ); }); }); - 
describeSkipIfNotMultiple('Complete MPU then get object on AWS ' + - 'location with bucketMatch: false ', () => { + describeSkipIfNotMultiple('Complete MPU then get object on AWS ' + 'location with bucketMatch: false ', () => { beforeEach(function beforeEachFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - async.waterfall([ - next => s3.createMultipartUpload({ - Bucket: bucket, Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': - awsLocationMismatch, - } }, (err, res) => next(err, res.UploadId)), - (uploadId, next) => s3.uploadPart({ - Bucket: bucket, - Key: this.currentTest.key, - PartNumber: 1, - UploadId: uploadId, - Body: 'helloworld' }, (err, res) => next(err, uploadId, - res.ETag)), - (uploadId, eTag, next) => s3.completeMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - MultipartUpload: { - Parts: [ + async.waterfall( + [ + next => + s3.createMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': awsLocationMismatch }, + }, + (err, res) => next(err, res.UploadId) + ), + (uploadId, next) => + s3.uploadPart( { - ETag: eTag, + Bucket: bucket, + Key: this.currentTest.key, PartNumber: 1, + UploadId: uploadId, + Body: 'helloworld', + }, + (err, res) => next(err, uploadId, res.ETag) + ), + (uploadId, eTag, next) => + s3.completeMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + MultipartUpload: { + Parts: [ + { + ETag: eTag, + PartNumber: 1, + }, + ], + }, + UploadId: uploadId, }, - ], - }, - UploadId: uploadId, - }, err => next(err)), - ], done); + err => next(err) + ), + ], + done + ); }); - it('should get object from MPU on AWS ' + - 'location with bucketMatch: false ', function it(done) { - s3.getObject({ - Bucket: bucket, - Key: this.test.key, - }, (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.strictEqual(res.ContentLength, 10); - assert.strictEqual(res.Body.toString(), 'helloworld'); - assert.deepStrictEqual(res.Metadata, - { 'scal-location-constraint': awsLocationMismatch }); - return done(err); - }); + it('should get object from MPU on AWS ' + 'location with bucketMatch: false ', function it(done) { + s3.getObject( + { + Bucket: bucket, + Key: this.test.key, + }, + (err, res) => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + assert.strictEqual(res.ContentLength, 10); + assert.strictEqual(res.Body.toString(), 'helloworld'); + assert.deepStrictEqual(res.Metadata, { 'scal-location-constraint': awsLocationMismatch }); + return done(err); + } + ); }); }); - describeSkipIfNotMultiple('with objects in all available backends ' + - '(mem/file/AWS)', () => { + describeSkipIfNotMultiple('with objects in all available backends ' + '(mem/file/AWS)', () => { before(() => { process.stdout.write('Putting object to mem\n'); - return s3.putObject({ Bucket: bucket, Key: memObject, - Body: body, - Metadata: { 'scal-location-constraint': memLocation }, - }).promise() - .then(() => { - process.stdout.write('Putting object to file\n'); - return s3.putObject({ Bucket: bucket, - Key: fileObject, - Body: body, - Metadata: - { 'scal-location-constraint': fileLocation }, - }).promise(); - }) - .then(() => { - process.stdout.write('Putting object to AWS\n'); - return s3.putObject({ Bucket: bucket, Key: awsObject, + return s3 + .putObject({ + Bucket: bucket, + Key: memObject, Body: body, - Metadata: { - 'scal-location-constraint': 
awsLocation }, - }).promise(); - }) - .then(() => { - process.stdout.write('Putting 0-byte object to mem\n'); - return s3.putObject({ Bucket: bucket, - Key: emptyObject, - Metadata: - { 'scal-location-constraint': memLocation }, - }).promise(); - }) - .then(() => { - process.stdout.write('Putting 0-byte object to AWS\n'); - return s3.putObject({ Bucket: bucket, - Key: emptyAwsObject, - Metadata: { - 'scal-location-constraint': awsLocation }, - }).promise(); - }) - .then(() => { - process.stdout.write('Putting large object to AWS\n'); - return s3.putObject({ Bucket: bucket, - Key: bigObject, Body: bigBody, - Metadata: { - 'scal-location-constraint': awsLocation }, - }).promise(); - }) - .catch(err => { - process.stdout.write(`Error putting objects: ${err}\n`); - throw err; - }); + Metadata: { 'scal-location-constraint': memLocation }, + }) + .promise() + .then(() => { + process.stdout.write('Putting object to file\n'); + return s3 + .putObject({ + Bucket: bucket, + Key: fileObject, + Body: body, + Metadata: { 'scal-location-constraint': fileLocation }, + }) + .promise(); + }) + .then(() => { + process.stdout.write('Putting object to AWS\n'); + return s3 + .putObject({ + Bucket: bucket, + Key: awsObject, + Body: body, + Metadata: { + 'scal-location-constraint': awsLocation, + }, + }) + .promise(); + }) + .then(() => { + process.stdout.write('Putting 0-byte object to mem\n'); + return s3 + .putObject({ + Bucket: bucket, + Key: emptyObject, + Metadata: { 'scal-location-constraint': memLocation }, + }) + .promise(); + }) + .then(() => { + process.stdout.write('Putting 0-byte object to AWS\n'); + return s3 + .putObject({ + Bucket: bucket, + Key: emptyAwsObject, + Metadata: { + 'scal-location-constraint': awsLocation, + }, + }) + .promise(); + }) + .then(() => { + process.stdout.write('Putting large object to AWS\n'); + return s3 + .putObject({ + Bucket: bucket, + Key: bigObject, + Body: bigBody, + Metadata: { + 'scal-location-constraint': awsLocation, + }, + }) + .promise(); + }) + .catch(err => { + process.stdout.write(`Error putting objects: ${err}\n`); + throw err; + }); }); it('should get an object from mem', done => { s3.getObject({ Bucket: bucket, Key: memObject }, (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); assert.strictEqual(res.ETag, `"${correctMD5}"`); done(); }); }); it('should get a 0-byte object from mem', done => { - s3.getObject({ Bucket: bucket, Key: emptyObject }, - (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); + s3.getObject({ Bucket: bucket, Key: emptyObject }, (err, res) => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); assert.strictEqual(res.ETag, `"${emptyMD5}"`); done(); }); }); it('should get a 0-byte object from AWS', done => { - s3.getObject({ Bucket: bucket, Key: emptyAwsObject }, - (err, res) => { - assert.equal(err, null, 'Expected success but got error ' + - `error ${err}`); + s3.getObject({ Bucket: bucket, Key: emptyAwsObject }, (err, res) => { + assert.equal(err, null, 'Expected success but got error ' + `error ${err}`); assert.strictEqual(res.ETag, `"${emptyMD5}"`); done(); }); }); it('should get an object from file', done => { - s3.getObject({ Bucket: bucket, Key: fileObject }, - (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); - }); + s3.getObject({ Bucket: bucket, Key: 
fileObject }, (err, res) => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + done(); + }); }); it('should get an object from AWS', done => { - s3.getObject({ Bucket: bucket, Key: awsObject }, - (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); - }); + s3.getObject({ Bucket: bucket, Key: awsObject }, (err, res) => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + done(); + }); }); it('should get a large object from AWS', done => { - s3.getObject({ Bucket: bucket, Key: bigObject }, - (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.strictEqual(res.ETag, `"${bigMD5}"`); - done(); - }); + s3.getObject({ Bucket: bucket, Key: bigObject }, (err, res) => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + assert.strictEqual(res.ETag, `"${bigMD5}"`); + done(); + }); }); it('should get an object using range query from AWS', done => { - s3.getObject({ Bucket: bucket, Key: bigObject, - Range: 'bytes=0-9' }, - (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.strictEqual(res.ContentLength, 10); - assert.strictEqual(res.ContentRange, - `bytes 0-9/${bigBodyLen}`); - assert.strictEqual(res.ETag, `"${bigMD5}"`); - done(); - }); + s3.getObject({ Bucket: bucket, Key: bigObject, Range: 'bytes=0-9' }, (err, res) => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + assert.strictEqual(res.ContentLength, 10); + assert.strictEqual(res.ContentRange, `bytes 0-9/${bigBodyLen}`); + assert.strictEqual(res.ETag, `"${bigMD5}"`); + done(); + }); }); }); describeSkipIfNotMultiple('with bucketMatch set to false', () => { beforeEach(done => { - s3.putObject({ Bucket: bucket, Key: mismatchObject, Body: body, - Metadata: { 'scal-location-constraint': awsLocationMismatch } }, - err => { - assert.equal(err, null, `Err putting object: ${err}`); - done(); - }); + s3.putObject( + { + Bucket: bucket, + Key: mismatchObject, + Body: body, + Metadata: { 'scal-location-constraint': awsLocationMismatch }, + }, + err => { + assert.equal(err, null, `Err putting object: ${err}`); + done(); + } + ); }); it('should get an object from AWS', done => { - s3.getObject({ Bucket: bucket, Key: mismatchObject }, - (err, res) => { + s3.getObject({ Bucket: bucket, Key: mismatchObject }, (err, res) => { assert.equal(err, null, `Error getting object: ${err}`); assert.strictEqual(res.ETag, `"${correctMD5}"`); done(); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/get/getAwsVersioning.js b/tests/functional/aws-node-sdk/test/multipleBackend/get/getAwsVersioning.js index fd17cef45e..e9fd2b8e3b 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/get/getAwsVersioning.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/get/getAwsVersioning.js @@ -19,25 +19,24 @@ const { const someBody = 'testbody'; const bucket = `getawsversioning${genUniqID()}`; -function getAndAssertVersions(s3, bucket, key, versionIds, expectedData, - cb) { - async.mapSeries(versionIds, (versionId, next) => { - s3.getObject({ Bucket: bucket, Key: key, - VersionId: versionId }, next); - }, (err, results) => { - assert.strictEqual(err, null, 'Expected success ' + - `getting object, got error ${err}`); - const resultIds = results.map(result => 
result.VersionId); - const resultData = results.map(result => - result.Body.toString()); - assert.deepStrictEqual(resultIds, versionIds); - assert.deepStrictEqual(resultData, expectedData); - cb(); - }); +function getAndAssertVersions(s3, bucket, key, versionIds, expectedData, cb) { + async.mapSeries( + versionIds, + (versionId, next) => { + s3.getObject({ Bucket: bucket, Key: key, VersionId: versionId }, next); + }, + (err, results) => { + assert.strictEqual(err, null, 'Expected success ' + `getting object, got error ${err}`); + const resultIds = results.map(result => result.VersionId); + const resultData = results.map(result => result.Body.toString()); + assert.deepStrictEqual(resultIds, versionIds); + assert.deepStrictEqual(resultData, expectedData); + cb(); + } + ); } -describeSkipIfNotMultiple('AWS backend get object with versioning', -function testSuite() { +describeSkipIfNotMultiple('AWS backend get object with versioning', function testSuite() { this.timeout(30000); withV4(sigCfg => { let bucketUtil; @@ -47,309 +46,485 @@ function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting bucket: ' + - `${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw err; + }); }); - it('should not return version ids when versioning has not been ' + - 'configured via CloudServer', done => { + it('should not return version ids when versioning has not been ' + 'configured via CloudServer', done => { const key = `somekey-${genUniqID()}`; - s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, data) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); - assert.strictEqual(data.VersionId, undefined); - getAndAssertResult(s3, { bucket, key, body: someBody, - expectedVersionId: false }, done); - }); + s3.putObject( + { Bucket: bucket, Key: key, Body: someBody, Metadata: { 'scal-location-constraint': awsLocation } }, + (err, data) => { + assert.strictEqual(err, null, 'Expected success ' + `putting object, got error ${err}`); + assert.strictEqual(data.VersionId, undefined); + getAndAssertResult(s3, { bucket, key, body: someBody, expectedVersionId: false }, done); + } + ); }); - it('should not return version ids when versioning has not been ' + - 'configured via CloudServer, even when version id specified', done => { - const key = `somekey-${genUniqID()}`; - s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, data) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); - assert.strictEqual(data.VersionId, undefined); - 
getAndAssertResult(s3, { bucket, key, body: someBody, - versionId: 'null', expectedVersionId: false }, done); - }); - }); + it( + 'should not return version ids when versioning has not been ' + + 'configured via CloudServer, even when version id specified', + done => { + const key = `somekey-${genUniqID()}`; + s3.putObject( + { Bucket: bucket, Key: key, Body: someBody, Metadata: { 'scal-location-constraint': awsLocation } }, + (err, data) => { + assert.strictEqual(err, null, 'Expected success ' + `putting object, got error ${err}`); + assert.strictEqual(data.VersionId, undefined); + getAndAssertResult( + s3, + { bucket, key, body: someBody, versionId: 'null', expectedVersionId: false }, + done + ); + } + ); + } + ); - it('should return version id for null version when versioning ' + - 'has been configured via CloudServer', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), - next => enableVersioning(s3, bucket, next), - // get with version id specified - next => getAndAssertResult(s3, { bucket, key, body: someBody, - versionId: 'null', expectedVersionId: 'null' }, next), - // get without version id specified - next => getAndAssertResult(s3, { bucket, key, body: someBody, - expectedVersionId: 'null' }, next), - ], done); - }); + it( + 'should return version id for null version when versioning ' + 'has been configured via CloudServer', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => next(err) + ), + next => enableVersioning(s3, bucket, next), + // get with version id specified + next => + getAndAssertResult( + s3, + { bucket, key, body: someBody, versionId: 'null', expectedVersionId: 'null' }, + next + ), + // get without version id specified + next => + getAndAssertResult(s3, { bucket, key, body: someBody, expectedVersionId: 'null' }, next), + ], + done + ); + } + ); - it('should overwrite the null version if putting object twice ' + - 'before versioning is configured', done => { + it('should overwrite the null version if putting object twice ' + 'before versioning is configured', done => { const key = `somekey-${genUniqID()}`; const data = ['data1', 'data2']; - async.waterfall([ - next => mapToAwsPuts(s3, bucket, key, data, err => next(err)), - // get latest version - next => getAndAssertResult(s3, { bucket, key, body: data[1], - expectedVersionId: false }, next), - // get specific version - next => getAndAssertResult(s3, { bucket, key, body: data[1], - versionId: 'null', expectedVersionId: false }, next), - ], done); + async.waterfall( + [ + next => mapToAwsPuts(s3, bucket, key, data, err => next(err)), + // get latest version + next => getAndAssertResult(s3, { bucket, key, body: data[1], expectedVersionId: false }, next), + // get specific version + next => + getAndAssertResult( + s3, + { bucket, key, body: data[1], versionId: 'null', expectedVersionId: false }, + next + ), + ], + done + ); }); - it('should overwrite existing null version if putting object ' + - 'after suspending versioning', done => { + it('should overwrite existing null version if putting object ' + 'after suspending versioning', done => { const key = `somekey-${genUniqID()}`; const data = ['data1', 'data2']; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key, 
Body: data[0], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), - next => suspendVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[1], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), - // get latest version - next => getAndAssertResult(s3, { bucket, key, body: data[1], - expectedVersionId: 'null' }, next), - // get specific version - next => getAndAssertResult(s3, { bucket, key, body: data[1], - versionId: 'null', expectedVersionId: 'null' }, next), - ], done); + async.waterfall( + [ + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: data[0], + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => next(err) + ), + next => suspendVersioning(s3, bucket, next), + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: data[1], + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => next(err) + ), + // get latest version + next => getAndAssertResult(s3, { bucket, key, body: data[1], expectedVersionId: 'null' }, next), + // get specific version + next => + getAndAssertResult( + s3, + { bucket, key, body: data[1], versionId: 'null', expectedVersionId: 'null' }, + next + ), + ], + done + ); }); - it('should overwrite null version if putting object when ' + - 'versioning is suspended after versioning enabled', done => { - const key = `somekey-${genUniqID()}`; - const data = [...Array(3).keys()].map(i => `data${i}`); - let firstVersionId; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[0], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[1], - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, result) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); - assert.notEqual(result.VersionId, 'null'); - firstVersionId = result.VersionId; - next(); - }), - next => suspendVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[3], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), - // get latest version - next => getAndAssertResult(s3, { bucket, key, body: data[3], - expectedVersionId: 'null' }, next), - // get specific version (null) - next => getAndAssertResult(s3, { bucket, key, body: data[3], - versionId: 'null', expectedVersionId: 'null' }, next), - // assert getting first version put for good measure - next => getAndAssertResult(s3, { bucket, key, body: data[1], - versionId: firstVersionId, - expectedVersionId: firstVersionId }, next), - ], done); - }); + it( + 'should overwrite null version if putting object when ' + + 'versioning is suspended after versioning enabled', + done => { + const key = `somekey-${genUniqID()}`; + const data = [...Array(3).keys()].map(i => `data${i}`); + let firstVersionId; + async.waterfall( + [ + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: data[0], + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => next(err) + ), + next => enableVersioning(s3, bucket, next), + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: data[1], + Metadata: { 'scal-location-constraint': awsLocation }, + }, + (err, result) => { + assert.strictEqual( + err, + null, + 'Expected success ' + `putting object, got error ${err}` + ); + assert.notEqual(result.VersionId, 'null'); + firstVersionId = 
result.VersionId; + next(); + } + ), + next => suspendVersioning(s3, bucket, next), + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: data[3], + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => next(err) + ), + // get latest version + next => getAndAssertResult(s3, { bucket, key, body: data[3], expectedVersionId: 'null' }, next), + // get specific version (null) + next => + getAndAssertResult( + s3, + { bucket, key, body: data[3], versionId: 'null', expectedVersionId: 'null' }, + next + ), + // assert getting first version put for good measure + next => + getAndAssertResult( + s3, + { + bucket, + key, + body: data[1], + versionId: firstVersionId, + expectedVersionId: firstVersionId, + }, + next + ), + ], + done + ); + } + ); - it('should get correct data from aws backend using version IDs', - done => { + it('should get correct data from aws backend using version IDs', done => { const key = `somekey-${genUniqID()}`; const data = [...Array(5).keys()].map(i => i.toString()); const versionIds = ['null']; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[0], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), - next => putVersionsToAws(s3, bucket, key, data.slice(1), next), - (ids, next) => { - versionIds.push(...ids); - next(); - }, - next => getAndAssertVersions(s3, bucket, key, versionIds, data, - next), - ], done); + async.waterfall( + [ + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: data[0], + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => next(err) + ), + next => putVersionsToAws(s3, bucket, key, data.slice(1), next), + (ids, next) => { + versionIds.push(...ids); + next(); + }, + next => getAndAssertVersions(s3, bucket, key, versionIds, data, next), + ], + done + ); }); - it('should get correct version when getting without version ID', - done => { + it('should get correct version when getting without version ID', done => { const key = `somekey-${genUniqID()}`; const data = [...Array(5).keys()].map(i => i.toString()); const versionIds = ['null']; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key, Body: data[0], - Metadata: { 'scal-location-constraint': awsLocation } }, - err => next(err)), - next => putVersionsToAws(s3, bucket, key, data.slice(1), next), - (ids, next) => { - versionIds.push(...ids); - next(); - }, - next => getAndAssertResult(s3, { bucket, key, body: data[4], - expectedVersionId: versionIds[4] }, next), - ], done); + async.waterfall( + [ + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: data[0], + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => next(err) + ), + next => putVersionsToAws(s3, bucket, key, data.slice(1), next), + (ids, next) => { + versionIds.push(...ids); + next(); + }, + next => + getAndAssertResult(s3, { bucket, key, body: data[4], expectedVersionId: versionIds[4] }, next), + ], + done + ); }); - it('should get correct data from aws backend using version IDs ' + - 'after putting null versions, putting versions, putting more null ' + - 'versions and then putting more versions', - done => { - const key = `somekey-${genUniqID()}`; - const data = [...Array(16).keys()].map(i => i.toString()); - // put three null versions, - // 5 real versions, - // three null versions, - // 5 versions again - const firstThreeNullVersions = data.slice(0, 3); - const firstFiveVersions = data.slice(3, 8); - const secondThreeNullVersions = data.slice(8, 11); - const secondFiveVersions = 
data.slice(11, 16); - const versionIds = []; - const lastNullVersion = secondThreeNullVersions[2]; - const finalDataArr = firstFiveVersions.concat([lastNullVersion]) - .concat(secondFiveVersions); - async.waterfall([ - next => mapToAwsPuts(s3, bucket, key, firstThreeNullVersions, - err => next(err)), - next => putVersionsToAws(s3, bucket, key, firstFiveVersions, - next), - (ids, next) => { - versionIds.push(...ids); - next(); - }, - next => putNullVersionsToAws(s3, bucket, key, - secondThreeNullVersions, err => next(err)), - next => putVersionsToAws(s3, bucket, key, secondFiveVersions, - next), - (ids, next) => { - versionIds.push('null'); - versionIds.push(...ids); - next(); - }, - // get versions by id - next => getAndAssertVersions(s3, bucket, key, versionIds, - finalDataArr, next), - // get and assert latest version - next => getAndAssertResult(s3, { bucket, key, body: data[16], - versionId: versionIds[versionIds.length - 1], - expectedVersionId: versionIds[versionIds.length - 1] }, - next), - ], done); - }); + it( + 'should get correct data from aws backend using version IDs ' + + 'after putting null versions, putting versions, putting more null ' + + 'versions and then putting more versions', + done => { + const key = `somekey-${genUniqID()}`; + const data = [...Array(16).keys()].map(i => i.toString()); + // put three null versions, + // 5 real versions, + // three null versions, + // 5 versions again + const firstThreeNullVersions = data.slice(0, 3); + const firstFiveVersions = data.slice(3, 8); + const secondThreeNullVersions = data.slice(8, 11); + const secondFiveVersions = data.slice(11, 16); + const versionIds = []; + const lastNullVersion = secondThreeNullVersions[2]; + const finalDataArr = firstFiveVersions.concat([lastNullVersion]).concat(secondFiveVersions); + async.waterfall( + [ + next => mapToAwsPuts(s3, bucket, key, firstThreeNullVersions, err => next(err)), + next => putVersionsToAws(s3, bucket, key, firstFiveVersions, next), + (ids, next) => { + versionIds.push(...ids); + next(); + }, + next => putNullVersionsToAws(s3, bucket, key, secondThreeNullVersions, err => next(err)), + next => putVersionsToAws(s3, bucket, key, secondFiveVersions, next), + (ids, next) => { + versionIds.push('null'); + versionIds.push(...ids); + next(); + }, + // get versions by id + next => getAndAssertVersions(s3, bucket, key, versionIds, finalDataArr, next), + // get and assert latest version + next => + getAndAssertResult( + s3, + { + bucket, + key, + body: data[16], + versionId: versionIds[versionIds.length - 1], + expectedVersionId: versionIds[versionIds.length - 1], + }, + next + ), + ], + done + ); + } + ); - it('should return the correct data getting versioned object ' + - 'even if object was deleted from AWS (creating a delete marker)', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, res) => next(err, res.VersionId)), - // create a delete marker in AWS - (versionId, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key }, err => next(err, versionId)), - (versionId, next) => getAndAssertResult(s3, { bucket, key, - body: someBody, expectedVersionId: versionId }, next), - ], done); - }); + it( + 'should return the correct data getting versioned object ' + + 'even if object was deleted from AWS (creating a delete marker)', + done => { + const key = `somekey-${genUniqID()}`; + 
async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation }, + }, + (err, res) => next(err, res.VersionId) + ), + // create a delete marker in AWS + (versionId, next) => + awsS3.deleteObject({ Bucket: awsBucket, Key: key }, err => next(err, versionId)), + (versionId, next) => + getAndAssertResult(s3, { bucket, key, body: someBody, expectedVersionId: versionId }, next), + ], + done + ); + } + ); - it('should return the correct data getting versioned object ' + - 'even if object is put directly to AWS (creating new version)', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, res) => next(err, res.VersionId)), - // put an object in AWS - (versionId, next) => awsS3.putObject({ Bucket: awsBucket, - Key: key }, err => next(err, versionId)), - (versionId, next) => getAndAssertResult(s3, { bucket, key, - body: someBody, expectedVersionId: versionId }, next), - ], done); - }); + it( + 'should return the correct data getting versioned object ' + + 'even if object is put directly to AWS (creating new version)', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation }, + }, + (err, res) => next(err, res.VersionId) + ), + // put an object in AWS + (versionId, next) => + awsS3.putObject({ Bucket: awsBucket, Key: key }, err => next(err, versionId)), + (versionId, next) => + getAndAssertResult(s3, { bucket, key, body: someBody, expectedVersionId: versionId }, next), + ], + done + ); + } + ); - it('should return a LocationNotFound if trying to get an object ' + - 'that was deleted in AWS but exists in s3 metadata', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, res) => next(err, res.VersionId)), - // get the latest version id in aws - (s3vid, next) => awsS3.getObject({ Bucket: awsBucket, - Key: key }, (err, res) => next(err, s3vid, res.VersionId)), - (s3VerId, awsVerId, next) => awsS3.deleteObject({ - Bucket: awsBucket, Key: key, VersionId: awsVerId }, - err => next(err, s3VerId)), - (s3VerId, next) => s3.getObject({ Bucket: bucket, Key: key }, - err => { - assert.strictEqual(err.code, 'LocationNotFound'); - assert.strictEqual(err.statusCode, 424); - next(); - }), - ], done); - }); + it( + 'should return a LocationNotFound if trying to get an object ' + + 'that was deleted in AWS but exists in s3 metadata', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation }, + }, + (err, res) => next(err, res.VersionId) + ), + // get the latest version id in aws + (s3vid, next) => + awsS3.getObject({ Bucket: awsBucket, Key: key }, (err, res) => + next(err, s3vid, res.VersionId) + ), + (s3VerId, awsVerId, next) => + awsS3.deleteObject( + { + Bucket: awsBucket, + Key: key, + VersionId: 
awsVerId, + }, + err => next(err, s3VerId) + ), + (s3VerId, next) => + s3.getObject({ Bucket: bucket, Key: key }, err => { + assert.strictEqual(err.code, 'LocationNotFound'); + assert.strictEqual(err.statusCode, 424); + next(); + }), + ], + done + ); + } + ); - it('should return a LocationNotFound if trying to get a version ' + - 'that was deleted in AWS but exists in s3 metadata', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key, Body: someBody, - Metadata: { 'scal-location-constraint': awsLocation } }, - (err, res) => next(err, res.VersionId)), - // get the latest version id in aws - (s3vid, next) => awsS3.getObject({ Bucket: awsBucket, - Key: key }, (err, res) => next(err, s3vid, res.VersionId)), - (s3VerId, awsVerId, next) => awsS3.deleteObject({ - Bucket: awsBucket, Key: key, VersionId: awsVerId }, - err => next(err, s3VerId)), - (s3VerId, next) => s3.getObject({ Bucket: bucket, Key: key, - VersionId: s3VerId }, err => { - assert.strictEqual(err.code, 'LocationNotFound'); - assert.strictEqual(err.statusCode, 424); - next(); - }), - ], done); - }); + it( + 'should return a LocationNotFound if trying to get a version ' + + 'that was deleted in AWS but exists in s3 metadata', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: someBody, + Metadata: { 'scal-location-constraint': awsLocation }, + }, + (err, res) => next(err, res.VersionId) + ), + // get the latest version id in aws + (s3vid, next) => + awsS3.getObject({ Bucket: awsBucket, Key: key }, (err, res) => + next(err, s3vid, res.VersionId) + ), + (s3VerId, awsVerId, next) => + awsS3.deleteObject( + { + Bucket: awsBucket, + Key: key, + VersionId: awsVerId, + }, + err => next(err, s3VerId) + ), + (s3VerId, next) => + s3.getObject({ Bucket: bucket, Key: key, VersionId: s3VerId }, err => { + assert.strictEqual(err.code, 'LocationNotFound'); + assert.strictEqual(err.statusCode, 424); + next(); + }), + ], + done + ); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/get/getAzure.js b/tests/functional/aws-node-sdk/test/multipleBackend/get/getAzure.js index d38a1e7069..112cf8efc7 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/get/getAzure.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/get/getAzure.js @@ -3,13 +3,7 @@ const assert = require('assert'); const BucketUtility = require('../../../lib/utility/bucket-util'); const withV4 = require('../../support/withV4'); -const { - uniqName, - getAzureClient, - getAzureContainerName, - getAzureKeys, - azureLocation, -} = require('../utils'); +const { uniqName, getAzureClient, getAzureContainerName, getAzureKeys, azureLocation } = require('../utils'); const azureClient = getAzureClient(); const azureContainerName = getAzureContainerName(azureLocation); @@ -20,8 +14,7 @@ const normalBody = Buffer.from('I am a body', 'utf8'); const azureTimeout = 10000; -describe.skip('Multiple backend get object from Azure', -function testSuite() { +describe.skip('Multiple backend get object from Azure', function testSuite() { this.timeout(30000); withV4(sigCfg => { let bucketUtil; @@ -31,51 +24,53 @@ function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: azureContainerName }).promise() - 
.catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: azureContainerName }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); after(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(azureContainerName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(azureContainerName); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting bucket: ' + - `${err}\n`); - throw err; - }); + return bucketUtil + .empty(azureContainerName) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(azureContainerName); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw err; + }); }); keys.forEach(key => { describe(`${key.describe} size`, () => { const testKey = `${key.name}-${Date.now()}`; before(done => { setTimeout(() => { - s3.putObject({ - Bucket: azureContainerName, - Key: testKey, - Body: key.body, - Metadata: { - 'scal-location-constraint': azureLocation, + s3.putObject( + { + Bucket: azureContainerName, + Key: testKey, + Body: key.body, + Metadata: { + 'scal-location-constraint': azureLocation, + }, }, - }, done); + done + ); }, azureTimeout); }); it(`should get an ${key.describe} object from Azure`, done => { - s3.getObject({ Bucket: azureContainerName, Key: - testKey }, - (err, res) => { - assert.equal(err, null, 'Expected success ' + - `but got error ${err}`); - assert.strictEqual(res.ETag, `"${key.MD5}"`); - done(); - }); + s3.getObject({ Bucket: azureContainerName, Key: testKey }, (err, res) => { + assert.equal(err, null, 'Expected success ' + `but got error ${err}`); + assert.strictEqual(res.ETag, `"${key.MD5}"`); + done(); + }); }); }); }); @@ -83,79 +78,89 @@ function testSuite() { describe('with range', () => { const azureObject = uniqName(keyObject); before(done => { - s3.putObject({ - Bucket: azureContainerName, - Key: azureObject, - Body: '0123456789', - Metadata: { - 'scal-location-constraint': azureLocation, + s3.putObject( + { + Bucket: azureContainerName, + Key: azureObject, + Body: '0123456789', + Metadata: { + 'scal-location-constraint': azureLocation, + }, }, - }, done); + done + ); }); - it('should get an object with body 012345 with "bytes=0-5"', - done => { - s3.getObject({ - Bucket: azureContainerName, - Key: azureObject, - Range: 'bytes=0-5', - }, (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.equal(res.ContentLength, 6); - assert.strictEqual(res.ContentRange, 'bytes 0-5/10'); - assert.strictEqual(res.Body.toString(), '012345'); - done(); - }); + it('should get an object with body 012345 with "bytes=0-5"', done => { + s3.getObject( + { + Bucket: azureContainerName, + Key: azureObject, + Range: 'bytes=0-5', + }, + (err, res) => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + assert.equal(res.ContentLength, 6); + assert.strictEqual(res.ContentRange, 'bytes 0-5/10'); + assert.strictEqual(res.Body.toString(), '012345'); + done(); + } + ); }); - it('should get an object with body 456789 with "bytes=4-"', - done => { - s3.getObject({ - Bucket: azureContainerName, - Key: azureObject, - Range: 'bytes=4-', - }, (err, res) => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - assert.equal(res.ContentLength, 6); - assert.strictEqual(res.ContentRange, 'bytes 4-9/10'); - 
assert.strictEqual(res.Body.toString(), '456789'); - done(); - }); + it('should get an object with body 456789 with "bytes=4-"', done => { + s3.getObject( + { + Bucket: azureContainerName, + Key: azureObject, + Range: 'bytes=4-', + }, + (err, res) => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + assert.equal(res.ContentLength, 6); + assert.strictEqual(res.ContentRange, 'bytes 4-9/10'); + assert.strictEqual(res.Body.toString(), '456789'); + done(); + } + ); }); }); describe('returning error', () => { const azureObject = uniqName(keyObject); before(done => { - s3.putObject({ - Bucket: azureContainerName, - Key: azureObject, - Body: normalBody, - Metadata: { - 'scal-location-constraint': azureLocation, + s3.putObject( + { + Bucket: azureContainerName, + Key: azureObject, + Body: normalBody, + Metadata: { + 'scal-location-constraint': azureLocation, + }, }, - }, err => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - azureClient.getContainerClient(azureContainerName) - .deleteBlob(azureObject).then(done, err => { - assert.equal(err, null, 'Expected success but got ' + - `error ${err}`); - done(err); - }); - }); + err => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + azureClient + .getContainerClient(azureContainerName) + .deleteBlob(azureObject) + .then(done, err => { + assert.equal(err, null, 'Expected success but got ' + `error ${err}`); + done(err); + }); + } + ); }); - it('should return an error on get done to object deleted ' + - 'from Azure', done => { - s3.getObject({ - Bucket: azureContainerName, - Key: azureObject, - }, err => { - assert.strictEqual(err.code, 'LocationNotFound'); - done(); - }); + it('should return an error on get done to object deleted ' + 'from Azure', done => { + s3.getObject( + { + Bucket: azureContainerName, + Key: azureObject, + }, + err => { + assert.strictEqual(err.code, 'LocationNotFound'); + done(); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/get/getGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/get/getGcp.js index 28234c78e9..5e665adc0a 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/get/getGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/get/getGcp.js @@ -1,12 +1,7 @@ const assert = require('assert'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { - describeSkipIfNotMultipleOrCeph, - gcpLocation, - gcpLocationMismatch, - genUniqID, -} = require('../utils'); +const { describeSkipIfNotMultipleOrCeph, gcpLocation, gcpLocationMismatch, genUniqID } = require('../utils'); const bucket = `getgcp${genUniqID()}`; const gcpObject = `gcpobject-${genUniqID()}`; @@ -29,89 +24,95 @@ describe('Multiple backend get object', function testSuite() { process.stdout.write('Creating bucket'); bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); after(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting 
bucket: ' + - `${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw err; + }); }); describeSkipIfNotMultipleOrCeph('with objects in GCP', () => { before(() => { process.stdout.write('Putting object to GCP\n'); - return s3.putObject({ Bucket: bucket, Key: gcpObject, - Body: body, - Metadata: { 'scal-location-constraint': gcpLocation }, - }).promise() - .then(() => { - process.stdout.write('Putting 0-byte object to GCP\n'); - return s3.putObject({ Bucket: bucket, - Key: emptyGcpObject, + return s3 + .putObject({ + Bucket: bucket, + Key: gcpObject, + Body: body, Metadata: { 'scal-location-constraint': gcpLocation }, - }).promise(); - }) - .then(() => { - process.stdout.write('Putting large object to GCP\n'); - return s3.putObject({ Bucket: bucket, - Key: bigObject, Body: bigBody, - Metadata: { 'scal-location-constraint': gcpLocation }, - }).promise(); - }) - .catch(err => { - process.stdout.write(`Error putting objects: ${err}\n`); - throw err; - }); + }) + .promise() + .then(() => { + process.stdout.write('Putting 0-byte object to GCP\n'); + return s3 + .putObject({ + Bucket: bucket, + Key: emptyGcpObject, + Metadata: { 'scal-location-constraint': gcpLocation }, + }) + .promise(); + }) + .then(() => { + process.stdout.write('Putting large object to GCP\n'); + return s3 + .putObject({ + Bucket: bucket, + Key: bigObject, + Body: bigBody, + Metadata: { 'scal-location-constraint': gcpLocation }, + }) + .promise(); + }) + .catch(err => { + process.stdout.write(`Error putting objects: ${err}\n`); + throw err; + }); }); const getTests = [ { msg: 'should get a 0-byte object from GCP', - input: { Bucket: bucket, Key: emptyGcpObject, - range: null, size: null }, + input: { Bucket: bucket, Key: emptyGcpObject, range: null, size: null }, output: { MD5: emptyMD5, contentRange: null }, }, { msg: 'should get an object from GCP', - input: { Bucket: bucket, Key: gcpObject, - range: null, size: null }, + input: { Bucket: bucket, Key: gcpObject, range: null, size: null }, output: { MD5: correctMD5, contentRange: null }, }, { msg: 'should get a large object from GCP', - input: { Bucket: bucket, Key: bigObject, - range: null, size: null }, + input: { Bucket: bucket, Key: bigObject, range: null, size: null }, output: { MD5: bigMD5, contentRange: null }, }, { msg: 'should get an object using range query from GCP', - input: { Bucket: bucket, Key: bigObject, - range: 'bytes=0-9', size: 10 }, - output: { MD5: bigMD5, - contentRange: `bytes 0-9/${bigBodyLen}` }, + input: { Bucket: bucket, Key: bigObject, range: 'bytes=0-9', size: 10 }, + output: { MD5: bigMD5, contentRange: `bytes 0-9/${bigBodyLen}` }, }, ]; getTests.forEach(test => { const { Bucket, Key, range, size } = test.input; const { MD5, contentRange } = test.output; it(test.msg, done => { - s3.getObject({ Bucket, Key, Range: range }, - (err, res) => { - assert.equal(err, null, - `Expected success but got error ${err}`); + s3.getObject({ Bucket, Key, Range: range }, (err, res) => { + assert.equal(err, null, `Expected success but got error ${err}`); if (range) { assert.strictEqual(res.ContentLength, size); assert.strictEqual(res.ContentRange, contentRange); @@ -125,17 +126,22 @@ describe('Multiple backend get object', function testSuite() { describeSkipIfNotMultipleOrCeph('with bucketMatch set to false', () => { beforeEach(done => { - 
s3.putObject({ Bucket: bucket, Key: mismatchObject, Body: body, - Metadata: { 'scal-location-constraint': gcpLocationMismatch } }, - err => { - assert.equal(err, null, `Err putting object: ${err}`); - done(); - }); + s3.putObject( + { + Bucket: bucket, + Key: mismatchObject, + Body: body, + Metadata: { 'scal-location-constraint': gcpLocationMismatch }, + }, + err => { + assert.equal(err, null, `Err putting object: ${err}`); + done(); + } + ); }); it('should get an object from GCP', done => { - s3.getObject({ Bucket: bucket, Key: mismatchObject }, - (err, res) => { + s3.getObject({ Bucket: bucket, Key: mismatchObject }, (err, res) => { assert.equal(err, null, `Error getting object: ${err}`); assert.strictEqual(res.ETag, `"${correctMD5}"`); done(); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/initMPU/initMPUAzure.js b/tests/functional/aws-node-sdk/test/multipleBackend/initMPU/initMPUAzure.js index 968251346a..4d326c34fd 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/initMPU/initMPUAzure.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/initMPU/initMPUAzure.js @@ -3,8 +3,7 @@ const assert = require('assert'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, azureLocation, getAzureContainerName, - genUniqID } = require('../utils'); +const { describeSkipIfNotMultipleOrCeph, azureLocation, getAzureContainerName, genUniqID } = require('../utils'); const keyName = `somekey-${genUniqID()}`; @@ -21,23 +20,29 @@ describeSkipIfNotMultipleOrCeph('Initiate MPU to AZURE', () => { afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(azureContainerName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(azureContainerName); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(azureContainerName) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(azureContainerName); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); describe('Basic test: ', () => { beforeEach(done => - s3.createBucket({ Bucket: azureContainerName, - CreateBucketConfiguration: { - LocationConstraint: azureLocation, - }, - }, done)); + s3.createBucket( + { + Bucket: azureContainerName, + CreateBucketConfiguration: { + LocationConstraint: azureLocation, + }, + }, + done + ) + ); afterEach(function afterEachF(done) { const params = { Bucket: azureContainerName, @@ -46,32 +51,33 @@ describeSkipIfNotMultipleOrCeph('Initiate MPU to AZURE', () => { }; s3.abortMultipartUpload(params, done); }); - it('should create MPU and list in-progress multipart uploads', - function ifF(done) { + it('should create MPU and list in-progress multipart uploads', function ifF(done) { const params = { Bucket: azureContainerName, Key: keyName, Metadata: { 'scal-location-constraint': azureLocation }, }; - async.waterfall([ - next => s3.createMultipartUpload(params, (err, res) => { - this.test.uploadId = res.UploadId; - assert(this.test.uploadId); - assert.strictEqual(res.Bucket, azureContainerName); - assert.strictEqual(res.Key, keyName); - next(err); - }), - next => s3.listMultipartUploads( - { Bucket: azureContainerName }, (err, res) => { - assert.strictEqual(res.NextKeyMarker, keyName); - assert.strictEqual(res.NextUploadIdMarker, - this.test.uploadId); - 
assert.strictEqual(res.Uploads[0].Key, keyName); - assert.strictEqual(res.Uploads[0].UploadId, - this.test.uploadId); - next(err); - }), - ], done); + async.waterfall( + [ + next => + s3.createMultipartUpload(params, (err, res) => { + this.test.uploadId = res.UploadId; + assert(this.test.uploadId); + assert.strictEqual(res.Bucket, azureContainerName); + assert.strictEqual(res.Key, keyName); + next(err); + }), + next => + s3.listMultipartUploads({ Bucket: azureContainerName }, (err, res) => { + assert.strictEqual(res.NextKeyMarker, keyName); + assert.strictEqual(res.NextUploadIdMarker, this.test.uploadId); + assert.strictEqual(res.Uploads[0].Key, keyName); + assert.strictEqual(res.Uploads[0].UploadId, this.test.uploadId); + next(err); + }), + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/initMPU/initMPUGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/initMPU/initMPUGcp.js index b4b65661e4..8f4f0834ac 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/initMPU/initMPUGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/initMPU/initMPUGcp.js @@ -4,8 +4,7 @@ const arsenal = require('arsenal'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, gcpClient, gcpBucketMPU, gcpLocation, - genUniqID } = require('../utils'); +const { describeSkipIfNotMultipleOrCeph, gcpClient, gcpBucketMPU, gcpLocation, genUniqID } = require('../utils'); const { createMpuKey } = arsenal.storage.data.external.GcpUtils; const bucket = `initmpugcp${genUniqID()}`; @@ -23,23 +22,29 @@ describeSkipIfNotMultipleOrCeph('Initiate MPU to GCP', () => { afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); describe('Basic test: ', () => { beforeEach(done => - s3.createBucket({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: gcpLocation, - }, - }, done)); + s3.createBucket( + { + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: gcpLocation, + }, + }, + done + ) + ); afterEach(function afterEachF(done) { const params = { Bucket: bucket, @@ -48,45 +53,44 @@ describeSkipIfNotMultipleOrCeph('Initiate MPU to GCP', () => { }; s3.abortMultipartUpload(params, done); }); - it('should create MPU and list in-progress multipart uploads', - function ifF(done) { + it('should create MPU and list in-progress multipart uploads', function ifF(done) { const params = { Bucket: bucket, Key: keyName, Metadata: { 'scal-location-constraint': gcpLocation }, }; - async.waterfall([ - next => s3.createMultipartUpload(params, (err, res) => { - this.test.uploadId = res.UploadId; - assert(this.test.uploadId); - assert.strictEqual(res.Bucket, bucket); - assert.strictEqual(res.Key, keyName); - next(err); - }), - next => s3.listMultipartUploads( - { Bucket: bucket }, (err, res) => { - assert.strictEqual(res.NextKeyMarker, keyName); - assert.strictEqual(res.NextUploadIdMarker, - this.test.uploadId); - assert.strictEqual(res.Uploads[0].Key, keyName); - 
assert.strictEqual(res.Uploads[0].UploadId, - this.test.uploadId); - next(err); - }), - next => { - const mpuKey = - createMpuKey(keyName, this.test.uploadId, 'init'); - const params = { - Bucket: gcpBucketMPU, - Key: mpuKey, - }; - gcpClient.getObject(params, err => { - assert.ifError(err, - `Expected success, but got err ${err}`); - next(); - }); - }, - ], done); + async.waterfall( + [ + next => + s3.createMultipartUpload(params, (err, res) => { + this.test.uploadId = res.UploadId; + assert(this.test.uploadId); + assert.strictEqual(res.Bucket, bucket); + assert.strictEqual(res.Key, keyName); + next(err); + }), + next => + s3.listMultipartUploads({ Bucket: bucket }, (err, res) => { + assert.strictEqual(res.NextKeyMarker, keyName); + assert.strictEqual(res.NextUploadIdMarker, this.test.uploadId); + assert.strictEqual(res.Uploads[0].Key, keyName); + assert.strictEqual(res.Uploads[0].UploadId, this.test.uploadId); + next(err); + }), + next => { + const mpuKey = createMpuKey(keyName, this.test.uploadId, 'init'); + const params = { + Bucket: gcpBucketMPU, + Key: mpuKey, + }; + gcpClient.getObject(params, err => { + assert.ifError(err, `Expected success, but got err ${err}`); + next(); + }); + }, + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/listParts/azureListParts.js b/tests/functional/aws-node-sdk/test/multipleBackend/listParts/azureListParts.js index a4d4596ee3..822e2c9931 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/listParts/azureListParts.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/listParts/azureListParts.js @@ -2,8 +2,7 @@ const assert = require('assert'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, azureLocation, getAzureContainerName, - genUniqID } = require('../utils'); +const { describeSkipIfNotMultipleOrCeph, azureLocation, getAzureContainerName, genUniqID } = require('../utils'); const azureContainerName = getAzureContainerName(azureLocation); const firstPartSize = 10; @@ -14,86 +13,116 @@ const bodySecondPart = Buffer.alloc(secondPartSize); let bucketUtil; let s3; -describeSkipIfNotMultipleOrCeph('List parts of MPU on Azure data backend', -() => { +describeSkipIfNotMultipleOrCeph('List parts of MPU on Azure data backend', () => { withV4(sigCfg => { beforeEach(function beforeEachFn() { this.currentTest.key = `somekey-${genUniqID()}`; bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: azureContainerName }).promise() - .then(() => s3.createMultipartUpload({ - Bucket: azureContainerName, Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': azureLocation }, - }).promise()) - .then(res => { - this.currentTest.uploadId = res.UploadId; - return s3.uploadPart({ Bucket: azureContainerName, - Key: this.currentTest.key, PartNumber: 1, - UploadId: this.currentTest.uploadId, Body: bodyFirstPart, - }).promise(); - }).then(res => { - this.currentTest.firstEtag = res.ETag; - }).then(() => s3.uploadPart({ Bucket: azureContainerName, - Key: this.currentTest.key, PartNumber: 2, - UploadId: this.currentTest.uploadId, Body: bodySecondPart, - }).promise()).then(res => { - this.currentTest.secondEtag = res.ETag; - }) - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: azureContainerName }) + .promise() + .then(() => + s3 + .createMultipartUpload({ + Bucket: 
azureContainerName, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': azureLocation }, + }) + .promise() + ) + .then(res => { + this.currentTest.uploadId = res.UploadId; + return s3 + .uploadPart({ + Bucket: azureContainerName, + Key: this.currentTest.key, + PartNumber: 1, + UploadId: this.currentTest.uploadId, + Body: bodyFirstPart, + }) + .promise(); + }) + .then(res => { + this.currentTest.firstEtag = res.ETag; + }) + .then(() => + s3 + .uploadPart({ + Bucket: azureContainerName, + Key: this.currentTest.key, + PartNumber: 2, + UploadId: this.currentTest.uploadId, + Body: bodySecondPart, + }) + .promise() + ) + .then(res => { + this.currentTest.secondEtag = res.ETag; + }) + .catch(err => { + process.stdout.write(`Error in beforeEach: ${err}\n`); + throw err; + }); }); afterEach(function afterEachFn() { process.stdout.write('Emptying bucket'); - return s3.abortMultipartUpload({ - Bucket: azureContainerName, Key: this.currentTest.key, - UploadId: this.currentTest.uploadId, - }).promise() - .then(() => bucketUtil.empty(azureContainerName)) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(azureContainerName); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return s3 + .abortMultipartUpload({ + Bucket: azureContainerName, + Key: this.currentTest.key, + UploadId: this.currentTest.uploadId, + }) + .promise() + .then(() => bucketUtil.empty(azureContainerName)) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteOne(azureContainerName); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); it('should list both parts', function itFn(done) { - s3.listParts({ - Bucket: azureContainerName, - Key: this.test.key, - UploadId: this.test.uploadId }, - (err, data) => { - assert.equal(err, null, `Err listing parts: ${err}`); - assert.strictEqual(data.Parts.length, 2); - assert.strictEqual(data.Parts[0].PartNumber, 1); - assert.strictEqual(data.Parts[0].Size, firstPartSize); - assert.strictEqual(data.Parts[0].ETag, this.test.firstEtag); - assert.strictEqual(data.Parts[1].PartNumber, 2); - assert.strictEqual(data.Parts[1].Size, secondPartSize); - assert.strictEqual(data.Parts[1].ETag, this.test.secondEtag); - done(); - }); + s3.listParts( + { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: this.test.uploadId, + }, + (err, data) => { + assert.equal(err, null, `Err listing parts: ${err}`); + assert.strictEqual(data.Parts.length, 2); + assert.strictEqual(data.Parts[0].PartNumber, 1); + assert.strictEqual(data.Parts[0].Size, firstPartSize); + assert.strictEqual(data.Parts[0].ETag, this.test.firstEtag); + assert.strictEqual(data.Parts[1].PartNumber, 2); + assert.strictEqual(data.Parts[1].Size, secondPartSize); + assert.strictEqual(data.Parts[1].ETag, this.test.secondEtag); + done(); + } + ); }); it('should only list the second part', function itFn(done) { - s3.listParts({ - Bucket: azureContainerName, - Key: this.test.key, - PartNumberMarker: 1, - UploadId: this.test.uploadId }, - (err, data) => { - assert.equal(err, null, `Err listing parts: ${err}`); - assert.strictEqual(data.Parts[0].PartNumber, 2); - assert.strictEqual(data.Parts[0].Size, secondPartSize); - assert.strictEqual(data.Parts[0].ETag, this.test.secondEtag); - done(); - }); + s3.listParts( + { + Bucket: azureContainerName, + Key: this.test.key, + PartNumberMarker: 1, + UploadId: this.test.uploadId, + }, + (err, data) => { + assert.equal(err, null, `Err listing 
parts: ${err}`); + assert.strictEqual(data.Parts[0].PartNumber, 2); + assert.strictEqual(data.Parts[0].Size, secondPartSize); + assert.strictEqual(data.Parts[0].ETag, this.test.secondEtag); + done(); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/listParts/listPartsGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/listParts/listPartsGcp.js index 46edeee0d3..0cdb976039 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/listParts/listPartsGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/listParts/listPartsGcp.js @@ -2,8 +2,7 @@ const assert = require('assert'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, gcpLocation, genUniqID } - = require('../utils'); +const { describeSkipIfNotMultipleOrCeph, gcpLocation, genUniqID } = require('../utils'); const bucket = `listpartsgcp${genUniqID()}`; const firstPartSize = 10; @@ -20,80 +19,110 @@ describeSkipIfNotMultipleOrCeph('List parts of MPU on GCP data backend', () => { this.currentTest.key = `somekey-${genUniqID()}`; bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => s3.createMultipartUpload({ - Bucket: bucket, Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': gcpLocation }, - }).promise()) - .then(res => { - this.currentTest.uploadId = res.UploadId; - return s3.uploadPart({ Bucket: bucket, - Key: this.currentTest.key, PartNumber: 1, - UploadId: this.currentTest.uploadId, Body: bodyFirstPart, - }).promise(); - }).then(res => { - this.currentTest.firstEtag = res.ETag; - }).then(() => s3.uploadPart({ Bucket: bucket, - Key: this.currentTest.key, PartNumber: 2, - UploadId: this.currentTest.uploadId, Body: bodySecondPart, - }).promise()) - .then(res => { - this.currentTest.secondEtag = res.ETag; - }) - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .then(() => + s3 + .createMultipartUpload({ + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': gcpLocation }, + }) + .promise() + ) + .then(res => { + this.currentTest.uploadId = res.UploadId; + return s3 + .uploadPart({ + Bucket: bucket, + Key: this.currentTest.key, + PartNumber: 1, + UploadId: this.currentTest.uploadId, + Body: bodyFirstPart, + }) + .promise(); + }) + .then(res => { + this.currentTest.firstEtag = res.ETag; + }) + .then(() => + s3 + .uploadPart({ + Bucket: bucket, + Key: this.currentTest.key, + PartNumber: 2, + UploadId: this.currentTest.uploadId, + Body: bodySecondPart, + }) + .promise() + ) + .then(res => { + this.currentTest.secondEtag = res.ETag; + }) + .catch(err => { + process.stdout.write(`Error in beforeEach: ${err}\n`); + throw err; + }); }); afterEach(function afterEachFn() { process.stdout.write('Emptying bucket'); - return s3.abortMultipartUpload({ - Bucket: bucket, Key: this.currentTest.key, - UploadId: this.currentTest.uploadId, - }).promise() - .then(() => bucketUtil.empty(bucket)) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return s3 + .abortMultipartUpload({ + Bucket: bucket, + Key: this.currentTest.key, + UploadId: this.currentTest.uploadId, + }) + .promise() + .then(() => bucketUtil.empty(bucket)) 
+ .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); it('should list both parts', function itFn(done) { - s3.listParts({ - Bucket: bucket, - Key: this.test.key, - UploadId: this.test.uploadId }, - (err, data) => { - assert.equal(err, null, `Err listing parts: ${err}`); - assert.strictEqual(data.Parts.length, 2); - assert.strictEqual(data.Parts[0].PartNumber, 1); - assert.strictEqual(data.Parts[0].Size, firstPartSize); - assert.strictEqual(data.Parts[0].ETag, this.test.firstEtag); - assert.strictEqual(data.Parts[1].PartNumber, 2); - assert.strictEqual(data.Parts[1].Size, secondPartSize); - assert.strictEqual(data.Parts[1].ETag, this.test.secondEtag); - done(); - }); + s3.listParts( + { + Bucket: bucket, + Key: this.test.key, + UploadId: this.test.uploadId, + }, + (err, data) => { + assert.equal(err, null, `Err listing parts: ${err}`); + assert.strictEqual(data.Parts.length, 2); + assert.strictEqual(data.Parts[0].PartNumber, 1); + assert.strictEqual(data.Parts[0].Size, firstPartSize); + assert.strictEqual(data.Parts[0].ETag, this.test.firstEtag); + assert.strictEqual(data.Parts[1].PartNumber, 2); + assert.strictEqual(data.Parts[1].Size, secondPartSize); + assert.strictEqual(data.Parts[1].ETag, this.test.secondEtag); + done(); + } + ); }); it('should only list the second part', function itFn(done) { - s3.listParts({ - Bucket: bucket, - Key: this.test.key, - PartNumberMarker: 1, - UploadId: this.test.uploadId }, - (err, data) => { - assert.equal(err, null, `Err listing parts: ${err}`); - assert.strictEqual(data.Parts[0].PartNumber, 2); - assert.strictEqual(data.Parts[0].Size, secondPartSize); - assert.strictEqual(data.Parts[0].ETag, this.test.secondEtag); - done(); - }); + s3.listParts( + { + Bucket: bucket, + Key: this.test.key, + PartNumberMarker: 1, + UploadId: this.test.uploadId, + }, + (err, data) => { + assert.equal(err, null, `Err listing parts: ${err}`); + assert.strictEqual(data.Parts[0].PartNumber, 2); + assert.strictEqual(data.Parts[0].Size, secondPartSize); + assert.strictEqual(data.Parts[0].ETag, this.test.secondEtag); + done(); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/abortMPUGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/abortMPUGcp.js index e03b86f774..e49cd09a9a 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/abortMPUGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/abortMPUGcp.js @@ -3,8 +3,15 @@ const async = require('async'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, gcpClient, gcpBucket, gcpBucketMPU, - gcpLocation, uniqName, genUniqID } = require('../utils'); +const { + describeSkipIfNotMultipleOrCeph, + gcpClient, + gcpBucket, + gcpBucketMPU, + gcpLocation, + uniqName, + genUniqID, +} = require('../utils'); const keyObject = 'abortgcp'; const bucket = `abortmpugcp${genUniqID()}`; @@ -22,16 +29,13 @@ function checkMPUList(bucket, key, uploadId, cb) { UploadId: uploadId, }; gcpClient.listParts(params, (err, res) => { - assert.ifError(err, - `Expected success, but got err ${err}`); - assert.deepStrictEqual(res.Contents, [], - 'Expected 0 parts, listed some'); + assert.ifError(err, `Expected success, but got err ${err}`); + assert.deepStrictEqual(res.Contents, [], 'Expected 0 parts, listed some'); cb(); }); 
} -describeSkipIfNotMultipleOrCeph('Abort MPU on GCP data backend', function -descrbeFn() { +describeSkipIfNotMultipleOrCeph('Abort MPU on GCP data backend', function descrbeFn() { this.timeout(180000); withV4(sigCfg => { beforeEach(function beforeFn() { @@ -42,25 +46,30 @@ descrbeFn() { describe('with bucket location header', () => { beforeEach(function beforeEachFn(done) { - async.waterfall([ - next => s3.createBucket({ Bucket: bucket }, - err => next(err)), - next => s3.createMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': gcpLocation }, - }, (err, res) => { - if (err) { - return next(err); - } - this.currentTest.uploadId = res.UploadId; - return next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucket }, err => next(err)), + next => + s3.createMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': gcpLocation }, + }, + (err, res) => { + if (err) { + return next(err); + } + this.currentTest.uploadId = res.UploadId; + return next(); + } + ), + ], + done + ); }); - afterEach(done => s3.deleteBucket({ Bucket: bucket }, - done)); + afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); it('should abort a MPU with 0 parts', function itFn(done) { const params = { @@ -68,12 +77,17 @@ descrbeFn() { Key: this.test.key, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.abortMultipartUpload(params, () => next()), - next => setTimeout(() => checkMPUList( - gcpBucketMPU, this.test.key, this.test.uploadId, next), - gcpTimeout), - ], done); + async.waterfall( + [ + next => s3.abortMultipartUpload(params, () => next()), + next => + setTimeout( + () => checkMPUList(gcpBucketMPU, this.test.key, this.test.uploadId, next), + gcpTimeout + ), + ], + done + ); }); it('should abort a MPU with uploaded parts', function itFn(done) { @@ -82,111 +96,127 @@ descrbeFn() { Key: this.test.key, UploadId: this.test.uploadId, }; - async.waterfall([ - next => { - async.times(2, (n, cb) => { - const params = { - Bucket: bucket, - Key: this.test.key, - UploadId: this.test.uploadId, - Body: body, - PartNumber: n + 1, - }; - s3.uploadPart(params, (err, res) => { - assert.ifError(err, - `Expected success, but got err ${err}`); - assert.strictEqual( - res.ETag, `"${correctMD5}"`); - cb(); - }); - }, () => next()); - }, - next => s3.abortMultipartUpload(params, () => next()), - next => setTimeout(() => checkMPUList( - gcpBucketMPU, this.test.key, this.test.uploadId, next), - gcpTimeout), - ], done); + async.waterfall( + [ + next => { + async.times( + 2, + (n, cb) => { + const params = { + Bucket: bucket, + Key: this.test.key, + UploadId: this.test.uploadId, + Body: body, + PartNumber: n + 1, + }; + s3.uploadPart(params, (err, res) => { + assert.ifError(err, `Expected success, but got err ${err}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + cb(); + }); + }, + () => next() + ); + }, + next => s3.abortMultipartUpload(params, () => next()), + next => + setTimeout( + () => checkMPUList(gcpBucketMPU, this.test.key, this.test.uploadId, next), + gcpTimeout + ), + ], + done + ); }); }); describe('with previously existing object with same key', () => { beforeEach(function beforeEachFn(done) { - async.waterfall([ - next => s3.createBucket({ Bucket: bucket }, - err => next(err)), - next => { - s3.putObject({ - Bucket: bucket, - Key: this.currentTest.key, - Metadata: { - 'scal-location-constraint': gcpLocation }, - Body: body, - }, err => { - assert.ifError(err, 
- `Expected success, got error: ${err}`); - return next(); - }); - }, - next => s3.createMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': gcpLocation }, - }, (err, res) => { - if (err) { - return next(err); - } - this.currentTest.uploadId = res.UploadId; - return next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucket }, err => next(err)), + next => { + s3.putObject( + { + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { + 'scal-location-constraint': gcpLocation, + }, + Body: body, + }, + err => { + assert.ifError(err, `Expected success, got error: ${err}`); + return next(); + } + ); + }, + next => + s3.createMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': gcpLocation }, + }, + (err, res) => { + if (err) { + return next(err); + } + this.currentTest.uploadId = res.UploadId; + return next(); + } + ), + ], + done + ); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting bucket: ' + - `${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw err; + }); }); - it('should abort MPU without deleting existing object', - function itFn(done) { + it('should abort MPU without deleting existing object', function itFn(done) { const params = { Bucket: bucket, Key: this.test.key, UploadId: this.test.uploadId, }; - async.waterfall([ - next => { - const body = Buffer.alloc(10); - const partParams = Object.assign( - { PartNumber: 1, Body: body }, params); - s3.uploadPart(partParams, err => { - assert.ifError(err, - `Expected success, got error: ${err}`); - return next(); - }); - }, - next => s3.abortMultipartUpload(params, () => next()), - next => setTimeout(() => { - const params = { - Bucket: gcpBucket, - Key: this.test.key, - }; - gcpClient.getObject(params, (err, res) => { - assert.ifError(err, - `Expected success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - next(); - }); - }, gcpTimeout), - ], done); + async.waterfall( + [ + next => { + const body = Buffer.alloc(10); + const partParams = Object.assign({ PartNumber: 1, Body: body }, params); + s3.uploadPart(partParams, err => { + assert.ifError(err, `Expected success, got error: ${err}`); + return next(); + }); + }, + next => s3.abortMultipartUpload(params, () => next()), + next => + setTimeout(() => { + const params = { + Bucket: gcpBucket, + Key: this.test.key, + }; + gcpClient.getObject(params, (err, res) => { + assert.ifError(err, `Expected success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + next(); + }); + }, gcpTimeout), + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js index 92c5e4c1d5..2f830f5ef7 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js @@ -4,8 +4,14 @@ const async = require('async'); const { s3middleware } = 
require('arsenal'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, uniqName, getAzureClient, - getAzureContainerName, convertMD5, azureLocation } = require('../utils'); +const { + describeSkipIfNotMultipleOrCeph, + uniqName, + getAzureClient, + getAzureContainerName, + convertMD5, + azureLocation, +} = require('../utils'); const azureMpuUtils = s3middleware.azureHelper.mpuUtils; const maxSubPartSize = azureMpuUtils.maxSubPartSize; @@ -18,22 +24,26 @@ let bucketUtil; let s3; function azureCheck(container, key, expected, cb) { - azureClient.getContainerClient(container).getProperties(key).then(res => { - assert.ok(!expected.error); - const convertedMD5 = convertMD5(res.contentSettings.contentMD5); - assert.strictEqual(convertedMD5, expectedMD5); - return cb(); - }, - err => { - assert.ok(expected.error); - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NotFound'); - return cb(); - }); + azureClient + .getContainerClient(container) + .getProperties(key) + .then( + res => { + assert.ok(!expected.error); + const convertedMD5 = convertMD5(res.contentSettings.contentMD5); + assert.strictEqual(convertedMD5, expectedMD5); + return cb(); + }, + err => { + assert.ok(expected.error); + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NotFound'); + return cb(); + } + ); } -describeSkipIfNotMultipleOrCeph('Abort MPU on Azure data backend', function -describeF() { +describeSkipIfNotMultipleOrCeph('Abort MPU on Azure data backend', function describeF() { this.timeout(50000); withV4(sigCfg => { beforeEach(function beforeFn() { @@ -43,25 +53,30 @@ describeF() { }); describe('with bucket location header', () => { beforeEach(function beforeEachFn(done) { - async.waterfall([ - next => s3.createBucket({ Bucket: azureContainerName }, - err => next(err)), - next => s3.createMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': azureLocation }, - }, (err, res) => { - if (err) { - return next(err); - } - this.currentTest.uploadId = res.UploadId; - return next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: azureContainerName }, err => next(err)), + next => + s3.createMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': azureLocation }, + }, + (err, res) => { + if (err) { + return next(err); + } + this.currentTest.uploadId = res.UploadId; + return next(); + } + ), + ], + done + ); }); - afterEach(done => s3.deleteBucket({ Bucket: azureContainerName }, - done)); + afterEach(done => s3.deleteBucket({ Bucket: azureContainerName }, done)); it('should abort an MPU with one empty part ', function itFn(done) { const expected = { error: true }; @@ -70,118 +85,123 @@ describeF() { Key: this.test.key, UploadId: this.test.uploadId, }; - async.waterfall([ - next => { - const partParams = Object.assign({ PartNumber: 1 }, - params); - s3.uploadPart(partParams, err => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error: ${err}`); - return next(); - }); - }, - next => s3.abortMultipartUpload(params, err => next(err)), - next => azureCheck(azureContainerName, this.test.key, - expected, next), - ], done); + async.waterfall( + [ + next => { + const partParams = Object.assign({ PartNumber: 1 }, params); + s3.uploadPart(partParams, err => { + assert.strictEqual(err, null, 'Expected success, ' + `got 
error: ${err}`); + return next(); + }); + }, + next => s3.abortMultipartUpload(params, err => next(err)), + next => azureCheck(azureContainerName, this.test.key, expected, next), + ], + done + ); }); - it('should abort MPU with one part bigger than max subpart', - function itFn(done) { + it('should abort MPU with one part bigger than max subpart', function itFn(done) { const expected = { error: true }; const params = { Bucket: azureContainerName, Key: this.test.key, UploadId: this.test.uploadId, }; - async.waterfall([ - next => { - const body = Buffer.alloc(maxSubPartSize + 10); - const partParams = Object.assign( - { PartNumber: 1, Body: body }, params); - s3.uploadPart(partParams, err => { - assert.strictEqual(err, null, 'Expected ' + - `success, got error: ${err}`); - return next(); - }); - }, - next => s3.abortMultipartUpload(params, err => next(err)), - next => azureCheck(azureContainerName, this.test.key, - expected, next), - ], done); + async.waterfall( + [ + next => { + const body = Buffer.alloc(maxSubPartSize + 10); + const partParams = Object.assign({ PartNumber: 1, Body: body }, params); + s3.uploadPart(partParams, err => { + assert.strictEqual(err, null, 'Expected ' + `success, got error: ${err}`); + return next(); + }); + }, + next => s3.abortMultipartUpload(params, err => next(err)), + next => azureCheck(azureContainerName, this.test.key, expected, next), + ], + done + ); }); }); describe('with previously existing object with same key', () => { beforeEach(function beforeEachFn(done) { - async.waterfall([ - next => s3.createBucket({ Bucket: azureContainerName }, - err => next(err)), - next => { - const body = Buffer.alloc(10); - s3.putObject({ - Bucket: azureContainerName, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': - azureLocation }, - Body: body, - }, err => { - assert.equal(err, null, 'Err putting object to ' + - `azure: ${err}`); - return next(); - }); - }, - next => s3.createMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': azureLocation }, - }, (err, res) => { - if (err) { - return next(err); - } - this.currentTest.uploadId = res.UploadId; - return next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: azureContainerName }, err => next(err)), + next => { + const body = Buffer.alloc(10); + s3.putObject( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': azureLocation }, + Body: body, + }, + err => { + assert.equal(err, null, 'Err putting object to ' + `azure: ${err}`); + return next(); + } + ); + }, + next => + s3.createMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': azureLocation }, + }, + (err, res) => { + if (err) { + return next(err); + } + this.currentTest.uploadId = res.UploadId; + return next(); + } + ), + ], + done + ); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(azureContainerName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(azureContainerName); - }) - .catch(err => { - process.stdout.write('Error emptying/deleting bucket: ' + - `${err}\n`); - throw err; - }); + return bucketUtil + .empty(azureContainerName) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(azureContainerName); + }) + .catch(err => { + process.stdout.write('Error emptying/deleting bucket: ' + `${err}\n`); + throw 
err; + }); }); - it('should abort MPU without deleting existing object', - function itFn(done) { + it('should abort MPU without deleting existing object', function itFn(done) { const expected = { error: false }; const params = { Bucket: azureContainerName, Key: this.test.key, UploadId: this.test.uploadId, }; - async.waterfall([ - next => { - const body = Buffer.alloc(10); - const partParams = Object.assign( - { PartNumber: 1, Body: body }, params); - s3.uploadPart(partParams, err => { - assert.strictEqual(err, null, 'Expected ' + - `success, got error: ${err}`); - return next(); - }); - }, - next => s3.abortMultipartUpload(params, err => next(err)), - next => azureCheck(azureContainerName, this.test.key, - expected, next), - ], done); + async.waterfall( + [ + next => { + const body = Buffer.alloc(10); + const partParams = Object.assign({ PartNumber: 1, Body: body }, params); + s3.uploadPart(partParams, err => { + assert.strictEqual(err, null, 'Expected ' + `success, got error: ${err}`); + return next(); + }); + }, + next => s3.abortMultipartUpload(params, err => next(err)), + next => azureCheck(azureContainerName, this.test.key, expected, next), + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/azureCompleteMPU.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/azureCompleteMPU.js index 8bc3dd0979..d42c75fa19 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/azureCompleteMPU.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/azureCompleteMPU.js @@ -41,79 +41,85 @@ function getCheck(key, bucketMatch, cb) { if (!bucketMatch) { azureKey = `${azureContainerName}/${key}`; } - azureClient.getContainerClient(azureContainerName).getProperties(azureKey).then( - azureRes => { - assert.strictEqual(expectedContentLength, azureRes.contentLength); - cb(); - }, - err => { - assert.equal(err, null, `Err getting object from Azure: ${err}`); - cb(); - }); + azureClient + .getContainerClient(azureContainerName) + .getProperties(azureKey) + .then( + azureRes => { + assert.strictEqual(expectedContentLength, azureRes.contentLength); + cb(); + }, + err => { + assert.equal(err, null, `Err getting object from Azure: ${err}`); + cb(); + } + ); }); } function mpuSetup(key, location, cb) { const partArray = []; - async.waterfall([ - next => { - const params = { - Bucket: azureContainerName, - Key: key, - Metadata: { 'scal-location-constraint': location }, - }; - s3.createMultipartUpload(params, (err, res) => { - if (err) { - return next(err); - } - const uploadId = res.UploadId; - assert(uploadId); - assert.strictEqual(res.Bucket, azureContainerName); - assert.strictEqual(res.Key, key); - return next(null, uploadId); - }); - }, - (uploadId, next) => { - const partParams = { - Bucket: azureContainerName, - Key: key, - PartNumber: 1, - UploadId: uploadId, - Body: smallBody, - }; - s3.uploadPart(partParams, (err, res) => { - if (err) { - return next(err); - } - partArray.push({ ETag: res.ETag, PartNumber: 1 }); - return next(null, uploadId); - }); - }, - (uploadId, next) => { - const partParams = { - Bucket: azureContainerName, - Key: key, - PartNumber: 2, - UploadId: uploadId, - Body: bigBody, - }; - s3.uploadPart(partParams, (err, res) => { - if (err) { - return next(err); - } - partArray.push({ ETag: res.ETag, PartNumber: 2 }); - return next(null, uploadId); - }); - }, - ], (err, uploadId) => { - process.stdout.write('Created MPU and put two parts\n'); - assert.equal(err, null, `Err setting up MPU: 
${err}`); - cb(uploadId, partArray); - }); + async.waterfall( + [ + next => { + const params = { + Bucket: azureContainerName, + Key: key, + Metadata: { 'scal-location-constraint': location }, + }; + s3.createMultipartUpload(params, (err, res) => { + if (err) { + return next(err); + } + const uploadId = res.UploadId; + assert(uploadId); + assert.strictEqual(res.Bucket, azureContainerName); + assert.strictEqual(res.Key, key); + return next(null, uploadId); + }); + }, + (uploadId, next) => { + const partParams = { + Bucket: azureContainerName, + Key: key, + PartNumber: 1, + UploadId: uploadId, + Body: smallBody, + }; + s3.uploadPart(partParams, (err, res) => { + if (err) { + return next(err); + } + partArray.push({ ETag: res.ETag, PartNumber: 1 }); + return next(null, uploadId); + }); + }, + (uploadId, next) => { + const partParams = { + Bucket: azureContainerName, + Key: key, + PartNumber: 2, + UploadId: uploadId, + Body: bigBody, + }; + s3.uploadPart(partParams, (err, res) => { + if (err) { + return next(err); + } + partArray.push({ ETag: res.ETag, PartNumber: 2 }); + return next(null, uploadId); + }); + }, + ], + (err, uploadId) => { + process.stdout.write('Created MPU and put two parts\n'); + assert.equal(err, null, `Err setting up MPU: ${err}`); + cb(uploadId, partArray); + } + ); } -describeSkipIfNotMultipleOrCeph('Complete MPU API for Azure data backend', -function testSuite() { +describeSkipIfNotMultipleOrCeph('Complete MPU API for Azure data backend', function testSuite() { this.timeout(150000); withV4(sigCfg => { beforeEach(function beFn() { @@ -121,24 +127,27 @@ function testSuite() { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; this.currentTest.awsClient = awsS3; - return s3.createBucket({ Bucket: azureContainerName }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: azureContainerName }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(azureContainerName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(azureContainerName); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(azureContainerName) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(azureContainerName); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); it('should complete an MPU on Azure', function itFn(done) { @@ -151,16 +160,13 @@ function testSuite() { }; s3.completeMultipartUpload(params, err => { assert.equal(err, null, `Err completing MPU: ${err}`); - setTimeout(() => getCheck(this.test.key, true, done), - azureTimeout); + setTimeout(() => getCheck(this.test.key, true, done), azureTimeout); }); }); }); - it('should complete an MPU on Azure with bucketMatch=false', - function itFn(done) { - mpuSetup(this.test.key, azureLocationMismatch, - (uploadId, partArray) => { + it('should complete an MPU on Azure with bucketMatch=false', function itFn(done) { + mpuSetup(this.test.key, azureLocationMismatch, (uploadId, partArray) => { const params = { Bucket: azureContainerName, Key: this.test.key, @@ -169,97 +175,96 @@ function testSuite() { }; s3.completeMultipartUpload(params, err => { assert.equal(err, null, `Err completing MPU: ${err}`); - 
setTimeout(() => getCheck(this.test.key, false, done), - azureTimeout); + setTimeout(() => getCheck(this.test.key, false, done), azureTimeout); }); }); }); - it('should complete an MPU on Azure with same key as object put ' + - 'to file', function itFn(done) { + it('should complete an MPU on Azure with same key as object put ' + 'to file', function itFn(done) { const body = Buffer.from('I am a body', 'utf8'); - s3.putObject({ - Bucket: azureContainerName, - Key: this.test.key, - Body: body, - Metadata: { 'scal-location-constraint': fileLocation } }, - err => { - assert.equal(err, null, `Err putting object to file: ${err}`); - mpuSetup(this.test.key, azureLocation, - (uploadId, partArray) => { - const params = { - Bucket: azureContainerName, - Key: this.test.key, - UploadId: uploadId, - MultipartUpload: { Parts: partArray }, - }; - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, `Err completing MPU: ${err}`); - setTimeout(() => getCheck(this.test.key, true, done), - azureTimeout); + s3.putObject( + { + Bucket: azureContainerName, + Key: this.test.key, + Body: body, + Metadata: { 'scal-location-constraint': fileLocation }, + }, + err => { + assert.equal(err, null, `Err putting object to file: ${err}`); + mpuSetup(this.test.key, azureLocation, (uploadId, partArray) => { + const params = { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: uploadId, + MultipartUpload: { Parts: partArray }, + }; + s3.completeMultipartUpload(params, err => { + assert.equal(err, null, `Err completing MPU: ${err}`); + setTimeout(() => getCheck(this.test.key, true, done), azureTimeout); + }); }); - }); - }); + } + ); }); - it('should complete an MPU on Azure with same key as object put ' + - 'to Azure', function itFn(done) { + it('should complete an MPU on Azure with same key as object put ' + 'to Azure', function itFn(done) { const body = Buffer.from('I am a body', 'utf8'); - s3.putObject({ - Bucket: azureContainerName, - Key: this.test.key, - Body: body, - Metadata: { 'scal-location-constraint': azureLocation } }, - err => { - assert.equal(err, null, `Err putting object to Azure: ${err}`); - mpuSetup(this.test.key, azureLocation, - (uploadId, partArray) => { - const params = { - Bucket: azureContainerName, - Key: this.test.key, - UploadId: uploadId, - MultipartUpload: { Parts: partArray }, - }; - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, `Err completing MPU: ${err}`); - setTimeout(() => getCheck(this.test.key, true, done), - azureTimeout); + s3.putObject( + { + Bucket: azureContainerName, + Key: this.test.key, + Body: body, + Metadata: { 'scal-location-constraint': azureLocation }, + }, + err => { + assert.equal(err, null, `Err putting object to Azure: ${err}`); + mpuSetup(this.test.key, azureLocation, (uploadId, partArray) => { + const params = { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: uploadId, + MultipartUpload: { Parts: partArray }, + }; + s3.completeMultipartUpload(params, err => { + assert.equal(err, null, `Err completing MPU: ${err}`); + setTimeout(() => getCheck(this.test.key, true, done), azureTimeout); + }); }); - }); - }); + } + ); }); - it('should complete an MPU on Azure with same key as object put ' + - 'to AWS', function itFn(done) { + it('should complete an MPU on Azure with same key as object put ' + 'to AWS', function itFn(done) { const body = Buffer.from('I am a body', 'utf8'); - s3.putObject({ - Bucket: azureContainerName, - Key: this.test.key, - Body: body, - Metadata: { 'scal-location-constraint': 
awsLocation } }, - err => { - assert.equal(err, null, `Err putting object to AWS: ${err}`); - mpuSetup(this.test.key, azureLocation, - (uploadId, partArray) => { - const params = { - Bucket: azureContainerName, - Key: this.test.key, - UploadId: uploadId, - MultipartUpload: { Parts: partArray }, - }; - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, `Err completing MPU: ${err}`); - // make sure object is gone from AWS - setTimeout(() => { - this.test.awsClient.getObject({ Bucket: awsBucket, - Key: this.test.key }, err => { - assert.strictEqual(err.code, 'NoSuchKey'); - getCheck(this.test.key, true, done); - }); - }, azureTimeout); + s3.putObject( + { + Bucket: azureContainerName, + Key: this.test.key, + Body: body, + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => { + assert.equal(err, null, `Err putting object to AWS: ${err}`); + mpuSetup(this.test.key, azureLocation, (uploadId, partArray) => { + const params = { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: uploadId, + MultipartUpload: { Parts: partArray }, + }; + s3.completeMultipartUpload(params, err => { + assert.equal(err, null, `Err completing MPU: ${err}`); + // make sure object is gone from AWS + setTimeout(() => { + this.test.awsClient.getObject({ Bucket: awsBucket, Key: this.test.key }, err => { + assert.strictEqual(err.code, 'NoSuchKey'); + getCheck(this.test.key, true, done); + }); + }, azureTimeout); + }); }); - }); - }); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/completeMPUGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/completeMPUGcp.js index 45a0cad28d..1beb669646 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/completeMPUGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/completeMPUGcp.js @@ -3,9 +3,18 @@ const async = require('async'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, fileLocation, awsS3, awsLocation, - awsBucket, gcpClient, gcpBucket, gcpLocation, gcpLocationMismatch, - genUniqID } = require('../utils'); +const { + describeSkipIfNotMultipleOrCeph, + fileLocation, + awsS3, + awsLocation, + awsBucket, + gcpClient, + gcpBucket, + gcpLocation, + gcpLocationMismatch, + genUniqID, +} = require('../utils'); const bucket = `completempugcp${genUniqID()}`; const smallBody = Buffer.from('I am a body', 'utf8'); @@ -19,8 +28,7 @@ let bucketUtil; function getCheck(key, bucketMatch, cb) { let gcpKey = key; - s3.getObject({ Bucket: bucket, Key: gcpKey }, - (err, s3Res) => { + s3.getObject({ Bucket: bucket, Key: gcpKey }, (err, s3Res) => { assert.equal(err, null, `Err getting object from S3: ${err}`); assert.strictEqual(s3Res.ETag, `"${s3MD5}"`); @@ -38,56 +46,58 @@ function getCheck(key, bucketMatch, cb) { function mpuSetup(key, location, cb) { const partArray = []; - async.waterfall([ - next => { - const params = { - Bucket: bucket, - Key: key, - Metadata: { 'scal-location-constraint': location }, - }; - s3.createMultipartUpload(params, (err, res) => { - const uploadId = res.UploadId; - assert(uploadId); - assert.strictEqual(res.Bucket, bucket); - assert.strictEqual(res.Key, key); - next(err, uploadId); - }); - }, - (uploadId, next) => { - const partParams = { - Bucket: bucket, - Key: key, - PartNumber: 1, - UploadId: uploadId, - Body: smallBody, - }; - s3.uploadPart(partParams, (err, res) => { - partArray.push({ ETag: 
res.ETag, PartNumber: 1 }); - next(err, uploadId); - }); - }, - (uploadId, next) => { - const partParams = { - Bucket: bucket, - Key: key, - PartNumber: 2, - UploadId: uploadId, - Body: bigBody, - }; - s3.uploadPart(partParams, (err, res) => { - partArray.push({ ETag: res.ETag, PartNumber: 2 }); - next(err, uploadId); - }); - }, - ], (err, uploadId) => { - process.stdout.write('Created MPU and put two parts\n'); - assert.equal(err, null, `Err setting up MPU: ${err}`); - cb(uploadId, partArray); - }); + async.waterfall( + [ + next => { + const params = { + Bucket: bucket, + Key: key, + Metadata: { 'scal-location-constraint': location }, + }; + s3.createMultipartUpload(params, (err, res) => { + const uploadId = res.UploadId; + assert(uploadId); + assert.strictEqual(res.Bucket, bucket); + assert.strictEqual(res.Key, key); + next(err, uploadId); + }); + }, + (uploadId, next) => { + const partParams = { + Bucket: bucket, + Key: key, + PartNumber: 1, + UploadId: uploadId, + Body: smallBody, + }; + s3.uploadPart(partParams, (err, res) => { + partArray.push({ ETag: res.ETag, PartNumber: 1 }); + next(err, uploadId); + }); + }, + (uploadId, next) => { + const partParams = { + Bucket: bucket, + Key: key, + PartNumber: 2, + UploadId: uploadId, + Body: bigBody, + }; + s3.uploadPart(partParams, (err, res) => { + partArray.push({ ETag: res.ETag, PartNumber: 2 }); + next(err, uploadId); + }); + }, + ], + (err, uploadId) => { + process.stdout.write('Created MPU and put two parts\n'); + assert.equal(err, null, `Err setting up MPU: ${err}`); + cb(uploadId, partArray); + } + ); } -describeSkipIfNotMultipleOrCeph('Complete MPU API for GCP data backend', -function testSuite() { +describeSkipIfNotMultipleOrCeph('Complete MPU API for GCP data backend', function testSuite() { this.timeout(150000); withV4(sigCfg => { beforeEach(function beFn() { @@ -95,24 +105,27 @@ function testSuite() { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; this.currentTest.awsClient = awsS3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); it('should complete an MPU on GCP', function itFn(done) { @@ -125,18 +138,15 @@ function testSuite() { }; setTimeout(() => { s3.completeMultipartUpload(params, err => { - assert.equal(err, null, - `Err completing MPU: ${err}`); + assert.equal(err, null, `Err completing MPU: ${err}`); getCheck(this.test.key, true, done); }); }, gcpTimeout); }); }); - it('should complete an MPU on GCP with bucketMatch=false', - function itFn(done) { - mpuSetup(this.test.key, gcpLocationMismatch, - (uploadId, partArray) => { + it('should complete an MPU on GCP with bucketMatch=false', function itFn(done) { + mpuSetup(this.test.key, gcpLocationMismatch, (uploadId, partArray) => { const params = { Bucket: bucket, Key: 
this.test.key, @@ -145,103 +155,102 @@ function testSuite() { }; setTimeout(() => { s3.completeMultipartUpload(params, err => { - assert.equal(err, null, - `Err completing MPU: ${err}`); + assert.equal(err, null, `Err completing MPU: ${err}`); getCheck(this.test.key, false, done); }); }, gcpTimeout); }); }); - it('should complete an MPU on GCP with same key as object put ' + - 'to file', function itFn(done) { + it('should complete an MPU on GCP with same key as object put ' + 'to file', function itFn(done) { const body = Buffer.from('I am a body', 'utf8'); - s3.putObject({ - Bucket: bucket, - Key: this.test.key, - Body: body, - Metadata: { 'scal-location-constraint': fileLocation } }, - err => { - assert.equal(err, null, `Err putting object to file: ${err}`); - mpuSetup(this.test.key, gcpLocation, - (uploadId, partArray) => { - const params = { - Bucket: bucket, - Key: this.test.key, - UploadId: uploadId, - MultipartUpload: { Parts: partArray }, - }; - setTimeout(() => { - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, - `Err completing MPU: ${err}`); - getCheck(this.test.key, true, done); - }); - }, gcpTimeout); - }); - }); - }); - - it('should complete an MPU on GCP with same key as object put ' + - 'to GCP', function itFn(done) { - const body = Buffer.from('I am a body', 'utf8'); - s3.putObject({ - Bucket: bucket, - Key: this.test.key, - Body: body, - Metadata: { 'scal-location-constraint': gcpLocation } }, - err => { - assert.equal(err, null, `Err putting object to GCP: ${err}`); - mpuSetup(this.test.key, gcpLocation, - (uploadId, partArray) => { - const params = { - Bucket: bucket, - Key: this.test.key, - UploadId: uploadId, - MultipartUpload: { Parts: partArray }, - }; - setTimeout(() => { - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, - `Err completing MPU: ${err}`); - getCheck(this.test.key, true, done); - }); - }, gcpTimeout); - }); - }); + s3.putObject( + { + Bucket: bucket, + Key: this.test.key, + Body: body, + Metadata: { 'scal-location-constraint': fileLocation }, + }, + err => { + assert.equal(err, null, `Err putting object to file: ${err}`); + mpuSetup(this.test.key, gcpLocation, (uploadId, partArray) => { + const params = { + Bucket: bucket, + Key: this.test.key, + UploadId: uploadId, + MultipartUpload: { Parts: partArray }, + }; + setTimeout(() => { + s3.completeMultipartUpload(params, err => { + assert.equal(err, null, `Err completing MPU: ${err}`); + getCheck(this.test.key, true, done); + }); + }, gcpTimeout); + }); + } + ); }); - it('should complete an MPU on GCP with same key as object put ' + - 'to AWS', function itFn(done) { + it('should complete an MPU on GCP with same key as object put ' + 'to GCP', function itFn(done) { const body = Buffer.from('I am a body', 'utf8'); - s3.putObject({ - Bucket: bucket, - Key: this.test.key, - Body: body, - Metadata: { 'scal-location-constraint': awsLocation } }, - err => { - assert.equal(err, null, `Err putting object to AWS: ${err}`); - mpuSetup(this.test.key, gcpLocation, - (uploadId, partArray) => { - const params = { - Bucket: bucket, - Key: this.test.key, - UploadId: uploadId, - MultipartUpload: { Parts: partArray }, - }; - s3.completeMultipartUpload(params, err => { - assert.equal(err, null, `Err completing MPU: ${err}`); - // make sure object is gone from AWS + s3.putObject( + { + Bucket: bucket, + Key: this.test.key, + Body: body, + Metadata: { 'scal-location-constraint': gcpLocation }, + }, + err => { + assert.equal(err, null, `Err putting object to GCP: ${err}`); + 
mpuSetup(this.test.key, gcpLocation, (uploadId, partArray) => { + const params = { + Bucket: bucket, + Key: this.test.key, + UploadId: uploadId, + MultipartUpload: { Parts: partArray }, + }; setTimeout(() => { - this.test.awsClient.getObject({ Bucket: awsBucket, - Key: this.test.key }, err => { - assert.strictEqual(err.code, 'NoSuchKey'); + s3.completeMultipartUpload(params, err => { + assert.equal(err, null, `Err completing MPU: ${err}`); getCheck(this.test.key, true, done); }); }, gcpTimeout); }); - }); - }); + } + ); + }); + + it('should complete an MPU on GCP with same key as object put ' + 'to AWS', function itFn(done) { + const body = Buffer.from('I am a body', 'utf8'); + s3.putObject( + { + Bucket: bucket, + Key: this.test.key, + Body: body, + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => { + assert.equal(err, null, `Err putting object to AWS: ${err}`); + mpuSetup(this.test.key, gcpLocation, (uploadId, partArray) => { + const params = { + Bucket: bucket, + Key: this.test.key, + UploadId: uploadId, + MultipartUpload: { Parts: partArray }, + }; + s3.completeMultipartUpload(params, err => { + assert.equal(err, null, `Err completing MPU: ${err}`); + // make sure object is gone from AWS + setTimeout(() => { + this.test.awsClient.getObject({ Bucket: awsBucket, Key: this.test.key }, err => { + assert.strictEqual(err.code, 'NoSuchKey'); + getCheck(this.test.key, true, done); + }); + }, gcpTimeout); + }); + }); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js index ff7eea8adf..3fd5c20ad5 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/mpuAwsVersioning.js @@ -22,89 +22,97 @@ const bucket = `mpuawsversioning${genUniqID()}`; function mpuSetup(s3, key, location, cb) { const partArray = []; - async.waterfall([ - next => { - const params = { - Bucket: bucket, - Key: key, - Metadata: { 'scal-location-constraint': location }, - }; - s3.createMultipartUpload(params, (err, res) => { - assert.strictEqual(err, null, `err creating mpu: ${err}`); - const uploadId = res.UploadId; - assert(uploadId); - assert.strictEqual(res.Bucket, bucket); - assert.strictEqual(res.Key, key); - next(err, uploadId); - }); - }, - (uploadId, next) => { - const partParams = { - Bucket: bucket, - Key: key, - PartNumber: 1, - UploadId: uploadId, - Body: data[0], - }; - s3.uploadPart(partParams, (err, res) => { - assert.strictEqual(err, null, `err uploading part 1: ${err}`); - partArray.push({ ETag: res.ETag, PartNumber: 1 }); - next(err, uploadId); - }); - }, - (uploadId, next) => { - const partParams = { - Bucket: bucket, - Key: key, - PartNumber: 2, - UploadId: uploadId, - Body: data[1], - }; - s3.uploadPart(partParams, (err, res) => { - assert.strictEqual(err, null, `err uploading part 2: ${err}`); - partArray.push({ ETag: res.ETag, PartNumber: 2 }); - next(err, uploadId); - }); - }, - ], (err, uploadId) => { - process.stdout.write('Created MPU and put two parts\n'); - cb(err, uploadId, partArray); - }); + async.waterfall( + [ + next => { + const params = { + Bucket: bucket, + Key: key, + Metadata: { 'scal-location-constraint': location }, + }; + s3.createMultipartUpload(params, (err, res) => { + assert.strictEqual(err, null, `err creating mpu: ${err}`); + const uploadId = res.UploadId; + assert(uploadId); + assert.strictEqual(res.Bucket, 
bucket); + assert.strictEqual(res.Key, key); + next(err, uploadId); + }); + }, + (uploadId, next) => { + const partParams = { + Bucket: bucket, + Key: key, + PartNumber: 1, + UploadId: uploadId, + Body: data[0], + }; + s3.uploadPart(partParams, (err, res) => { + assert.strictEqual(err, null, `err uploading part 1: ${err}`); + partArray.push({ ETag: res.ETag, PartNumber: 1 }); + next(err, uploadId); + }); + }, + (uploadId, next) => { + const partParams = { + Bucket: bucket, + Key: key, + PartNumber: 2, + UploadId: uploadId, + Body: data[1], + }; + s3.uploadPart(partParams, (err, res) => { + assert.strictEqual(err, null, `err uploading part 2: ${err}`); + partArray.push({ ETag: res.ETag, PartNumber: 2 }); + next(err, uploadId); + }); + }, + ], + (err, uploadId) => { + process.stdout.write('Created MPU and put two parts\n'); + cb(err, uploadId, partArray); + } + ); } function completeAndAssertMpu(s3, params, cb) { - const { bucket, key, uploadId, partArray, expectVersionId, - expectedGetVersionId } = params; - s3.completeMultipartUpload({ - Bucket: bucket, - Key: key, - UploadId: uploadId, - MultipartUpload: { Parts: partArray }, - }, (err, data) => { - assert.strictEqual(err, null, `Err completing MPU: ${err}`); - if (expectVersionId) { - assert.notEqual(data.VersionId, undefined); - } else { - assert.strictEqual(data.VersionId, undefined); + const { bucket, key, uploadId, partArray, expectVersionId, expectedGetVersionId } = params; + s3.completeMultipartUpload( + { + Bucket: bucket, + Key: key, + UploadId: uploadId, + MultipartUpload: { Parts: partArray }, + }, + (err, data) => { + assert.strictEqual(err, null, `Err completing MPU: ${err}`); + if (expectVersionId) { + assert.notEqual(data.VersionId, undefined); + } else { + assert.strictEqual(data.VersionId, undefined); + } + const expectedVersionId = expectedGetVersionId || data.VersionId; + getAndAssertResult(s3, { bucket, key, body: concattedData, expectedVersionId }, cb); } - const expectedVersionId = expectedGetVersionId || data.VersionId; - getAndAssertResult(s3, { bucket, key, body: concattedData, - expectedVersionId }, cb); - }); + ); } -describeSkipIfNotMultiple('AWS backend complete mpu with versioning', -function testSuite() { +describeSkipIfNotMultiple('AWS backend complete mpu with versioning', function testSuite() { this.timeout(120000); withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); const s3 = bucketUtil.s3; - beforeEach(done => s3.createBucket({ - Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: awsLocation, - }, - }, done)); + beforeEach(done => + s3.createBucket( + { + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: awsLocation, + }, + }, + done + ) + ); afterEach(done => { removeAllVersions({ Bucket: bucket }, err => { if (err) { @@ -114,64 +122,75 @@ function testSuite() { }); }); - it('versioning not configured: should not return version id ' + - 'completing mpu', done => { + it('versioning not configured: should not return version id ' + 'completing mpu', done => { const key = `somekey-${genUniqID()}`; mpuSetup(s3, key, awsLocation, (err, uploadId, partArray) => { - completeAndAssertMpu(s3, { bucket, key, uploadId, partArray, - expectVersionId: false }, done); + completeAndAssertMpu(s3, { bucket, key, uploadId, partArray, expectVersionId: false }, done); }); }); - it('versioning not configured: if complete mpu on already-existing ' + - 'object, metadata should be overwritten but data of previous version' + - 'in AWS should not be deleted', function 
itF(done) { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putToAwsBackend(s3, bucket, key, '', err => next(err)), - next => awsGetLatestVerId(key, '', next), - (awsVerId, next) => { - this.test.awsVerId = awsVerId; - next(); - }, - next => mpuSetup(s3, key, awsLocation, next), - (uploadId, partArray, next) => completeAndAssertMpu(s3, - { bucket, key, uploadId, partArray, expectVersionId: - false }, next), - next => s3.deleteObject({ Bucket: bucket, Key: key, VersionId: - 'null' }, next), - (delData, next) => getAndAssertResult(s3, { bucket, key, - expectedError: 'NoSuchKey' }, next), - next => awsGetLatestVerId(key, '', next), - (awsVerId, next) => { - assert.strictEqual(awsVerId, this.test.awsVerId); - next(); - }, - ], done); - }); + it( + 'versioning not configured: if complete mpu on already-existing ' + + 'object, metadata should be overwritten but data of previous version' + + 'in AWS should not be deleted', + function itF(done) { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putToAwsBackend(s3, bucket, key, '', err => next(err)), + next => awsGetLatestVerId(key, '', next), + (awsVerId, next) => { + this.test.awsVerId = awsVerId; + next(); + }, + next => mpuSetup(s3, key, awsLocation, next), + (uploadId, partArray, next) => + completeAndAssertMpu( + s3, + { bucket, key, uploadId, partArray, expectVersionId: false }, + next + ), + next => s3.deleteObject({ Bucket: bucket, Key: key, VersionId: 'null' }, next), + (delData, next) => getAndAssertResult(s3, { bucket, key, expectedError: 'NoSuchKey' }, next), + next => awsGetLatestVerId(key, '', next), + (awsVerId, next) => { + assert.strictEqual(awsVerId, this.test.awsVerId); + next(); + }, + ], + done + ); + } + ); - it('versioning suspended: should not return version id completing mpu', - done => { + it('versioning suspended: should not return version id completing mpu', done => { const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => suspendVersioning(s3, bucket, next), - next => mpuSetup(s3, key, awsLocation, next), - (uploadId, partArray, next) => completeAndAssertMpu(s3, - { bucket, key, uploadId, partArray, expectVersionId: false, - expectedGetVersionId: 'null' }, next), - ], done); + async.waterfall( + [ + next => suspendVersioning(s3, bucket, next), + next => mpuSetup(s3, key, awsLocation, next), + (uploadId, partArray, next) => + completeAndAssertMpu( + s3, + { bucket, key, uploadId, partArray, expectVersionId: false, expectedGetVersionId: 'null' }, + next + ), + ], + done + ); }); - it('versioning enabled: should return version id completing mpu', - done => { + it('versioning enabled: should return version id completing mpu', done => { const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => mpuSetup(s3, key, awsLocation, next), - (uploadId, partArray, next) => completeAndAssertMpu(s3, - { bucket, key, uploadId, partArray, expectVersionId: true }, - next), - ], done); + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => mpuSetup(s3, key, awsLocation, next), + (uploadId, partArray, next) => + completeAndAssertMpu(s3, { bucket, key, uploadId, partArray, expectVersionId: true }, next), + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/azurePutPart.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/azurePutPart.js index c658e948f2..2015c961cd 100644 --- 
a/tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/azurePutPart.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/azurePutPart.js @@ -4,9 +4,16 @@ const async = require('async'); const { s3middleware } = require('arsenal'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, expectedETag, uniqName, getAzureClient, - getAzureContainerName, convertMD5, azureLocation, azureLocationMismatch } - = require('../utils'); +const { + describeSkipIfNotMultipleOrCeph, + expectedETag, + uniqName, + getAzureClient, + getAzureContainerName, + convertMD5, + azureLocation, + azureLocationMismatch, +} = require('../utils'); const azureMpuUtils = s3middleware.azureHelper.mpuUtils; const maxSubPartSize = azureMpuUtils.maxSubPartSize; const getBlockId = azureMpuUtils.getBlockId; @@ -20,15 +27,19 @@ let bucketUtil; let s3; function checkSubPart(key, uploadId, expectedParts, cb) { - azureClient.getContainerClient(azureContainerName) + azureClient + .getContainerClient(azureContainerName) .getBlockBlobClient(key) - .getBlockList('all').then(list => { + .getBlockList('all') + .then(list => { const uncommittedBlocks = list.uncommittedBlocks; const committedBlocks = list.committedBlocks; assert.strictEqual(committedBlocks, undefined); uncommittedBlocks.forEach((l, index) => { - assert.strictEqual(l.name, getBlockId(uploadId, - expectedParts[index].partnbr, expectedParts[index].subpartnbr)); + assert.strictEqual( + l.name, + getBlockId(uploadId, expectedParts[index].partnbr, expectedParts[index].subpartnbr) + ); assert.strictEqual(l.size, expectedParts[index].size.toString()); }); cb(); @@ -39,16 +50,18 @@ function azureCheck(key, cb) { s3.getObject({ Bucket: azureContainerName, Key: key }, (err, res) => { assert.equal(err, null); assert.strictEqual(res.ETag, `"${expectedMD5}"`); - azureClient.getContainerClient(azureContainerName).getProperties(key).then(res => { - const convertedMD5 = convertMD5(res.contentSettings.contentMD5); - assert.strictEqual(convertedMD5, expectedMD5); - return cb(); - }, assert.ifError); + azureClient + .getContainerClient(azureContainerName) + .getProperties(key) + .then(res => { + const convertedMD5 = convertMD5(res.contentSettings.contentMD5); + assert.strictEqual(convertedMD5, expectedMD5); + return cb(); + }, assert.ifError); }); } -describeSkipIfNotMultipleOrCeph('MultipleBackend put part to AZURE', function -describeF() { +describeSkipIfNotMultipleOrCeph('MultipleBackend put part to AZURE', function describeF() { this.timeout(80000); withV4(sigCfg => { beforeEach(function beforeFn() { @@ -58,36 +71,48 @@ describeF() { }); describe('with bucket location header', () => { beforeEach(function beforeEachFn(done) { - async.waterfall([ - next => s3.createBucket({ Bucket: azureContainerName, - }, err => next(err)), - next => s3.createMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': azureLocation }, - }, (err, res) => { - if (err) { - return next(err); - } - this.currentTest.uploadId = res.UploadId; - return next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: azureContainerName }, err => next(err)), + next => + s3.createMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': azureLocation }, + }, + (err, res) => { + if (err) { + return next(err); + } + this.currentTest.uploadId = res.UploadId; 
+ return next(); + } + ), + ], + done + ); }); afterEach(function afterEachFn(done) { - async.waterfall([ - next => s3.abortMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.key, - UploadId: this.currentTest.uploadId, - }, err => next(err)), - next => s3.deleteBucket({ Bucket: azureContainerName }, - err => next(err)), - ], err => { - assert.equal(err, null, `Error aborting MPU: ${err}`); - done(); - }); + async.waterfall( + [ + next => + s3.abortMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + UploadId: this.currentTest.uploadId, + }, + err => next(err) + ), + next => s3.deleteBucket({ Bucket: azureContainerName }, err => next(err)), + ], + err => { + assert.equal(err, null, `Error aborting MPU: ${err}`); + done(); + } + ); }); it('should put 0-byte block to Azure', function itFn(done) { @@ -97,27 +122,37 @@ describeF() { UploadId: this.test.uploadId, PartNumber: 1, }; - async.waterfall([ - next => s3.uploadPart(params, (err, res) => { - const eTagExpected = `"${azureMpuUtils.zeroByteETag}"`; - assert.strictEqual(res.ETag, eTagExpected); - return next(err); - }), - next => azureClient.getContainerClient(azureContainerName) - .getBlockBlobClient(this.test.key) - .getBlockList('all').then( - () => assert.fail('Expected failure but got success'), err => { - assert.strictEqual(err.code, 'BlobNotFound'); - next(); + async.waterfall( + [ + next => + s3.uploadPart(params, (err, res) => { + const eTagExpected = `"${azureMpuUtils.zeroByteETag}"`; + assert.strictEqual(res.ETag, eTagExpected); + return next(err); }), - ], done); + next => + azureClient + .getContainerClient(azureContainerName) + .getBlockBlobClient(this.test.key) + .getBlockList('all') + .then( + () => assert.fail('Expected failure but got success'), + err => { + assert.strictEqual(err.code, 'BlobNotFound'); + next(); + } + ), + ], + done + ); }); it('should put 2 blocks to Azure', function itFn(done) { const body = Buffer.alloc(maxSubPartSize + 10); - const parts = [{ partnbr: 1, subpartnbr: 0, - size: maxSubPartSize }, - { partnbr: 1, subpartnbr: 1, size: 10 }]; + const parts = [ + { partnbr: 1, subpartnbr: 0, size: maxSubPartSize }, + { partnbr: 1, subpartnbr: 1, size: 10 }, + ]; const params = { Bucket: azureContainerName, Key: this.test.key, @@ -125,260 +160,307 @@ describeF() { PartNumber: 1, Body: body, }; - async.waterfall([ - next => s3.uploadPart(params, (err, res) => { - const eTagExpected = expectedETag(body); - assert.strictEqual(res.ETag, eTagExpected); - return next(err); - }), - next => checkSubPart(this.test.key, this.test.uploadId, - parts, next), - ], done); + async.waterfall( + [ + next => + s3.uploadPart(params, (err, res) => { + const eTagExpected = expectedETag(body); + assert.strictEqual(res.ETag, eTagExpected); + return next(err); + }), + next => checkSubPart(this.test.key, this.test.uploadId, parts, next), + ], + done + ); }); - it('should put 5 parts bigger than maxSubPartSize to Azure', - function it(done) { + it('should put 5 parts bigger than maxSubPartSize to Azure', function it(done) { const body = Buffer.alloc(maxSubPartSize + 10); let parts = []; for (let i = 1; i < 6; i++) { parts = parts.concat([ - { partnbr: i, subpartnbr: 0, size: maxSubPartSize }, - { partnbr: i, subpartnbr: 1, size: 10 }, + { partnbr: i, subpartnbr: 0, size: maxSubPartSize }, + { partnbr: i, subpartnbr: 1, size: 10 }, ]); } - async.times(5, (n, next) => { - const partNumber = n + 1; - const params = { - Bucket: azureContainerName, - Key: this.test.key, - UploadId: 
this.test.uploadId, - PartNumber: partNumber, - Body: body, - }; - s3.uploadPart(params, (err, res) => { - const eTagExpected = expectedETag(body); - assert.strictEqual(res.ETag, eTagExpected); - return next(err); - }); - }, err => { - assert.equal(err, null, 'Expected success, ' + - `got error: ${err}`); - checkSubPart(this.test.key, this.test.uploadId, - parts, done); - }); + async.times( + 5, + (n, next) => { + const partNumber = n + 1; + const params = { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: this.test.uploadId, + PartNumber: partNumber, + Body: body, + }; + s3.uploadPart(params, (err, res) => { + const eTagExpected = expectedETag(body); + assert.strictEqual(res.ETag, eTagExpected); + return next(err); + }); + }, + err => { + assert.equal(err, null, 'Expected success, ' + `got error: ${err}`); + checkSubPart(this.test.key, this.test.uploadId, parts, done); + } + ); }); - it('should put 5 parts smaller than maxSubPartSize to Azure', - function it(done) { + it('should put 5 parts smaller than maxSubPartSize to Azure', function it(done) { const body = Buffer.alloc(10); let parts = []; for (let i = 1; i < 6; i++) { - parts = parts.concat([ - { partnbr: i, subpartnbr: 0, size: 10 }, - ]); + parts = parts.concat([{ partnbr: i, subpartnbr: 0, size: 10 }]); } - async.times(5, (n, next) => { - const partNumber = n + 1; - const params = { - Bucket: azureContainerName, - Key: this.test.key, - UploadId: this.test.uploadId, - PartNumber: partNumber, - Body: body, - }; - s3.uploadPart(params, (err, res) => { - const eTagExpected = expectedETag(body); - assert.strictEqual(res.ETag, eTagExpected); - return next(err); - }); - }, err => { - assert.equal(err, null, 'Expected success, ' + - `got error: ${err}`); - checkSubPart(this.test.key, this.test.uploadId, - parts, done); - }); + async.times( + 5, + (n, next) => { + const partNumber = n + 1; + const params = { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: this.test.uploadId, + PartNumber: partNumber, + Body: body, + }; + s3.uploadPart(params, (err, res) => { + const eTagExpected = expectedETag(body); + assert.strictEqual(res.ETag, eTagExpected); + return next(err); + }); + }, + err => { + assert.equal(err, null, 'Expected success, ' + `got error: ${err}`); + checkSubPart(this.test.key, this.test.uploadId, parts, done); + } + ); }); it('should put the same part twice', function itFn(done) { const body1 = Buffer.alloc(maxSubPartSize + 10); const body2 = Buffer.alloc(20); - const parts2 = [{ partnbr: 1, subpartnbr: 0, size: 20 }, - { partnbr: 1, subpartnbr: 1, size: 10 }]; - async.waterfall([ - next => s3.uploadPart({ - Bucket: azureContainerName, - Key: this.test.key, - UploadId: this.test.uploadId, - PartNumber: 1, - Body: body1, - }, err => next(err)), - next => s3.uploadPart({ - Bucket: azureContainerName, - Key: this.test.key, - UploadId: this.test.uploadId, - PartNumber: 1, - Body: body2, - }, (err, res) => { - const eTagExpected = expectedETag(body2); - assert.strictEqual(res.ETag, eTagExpected); - return next(err); - }), - next => checkSubPart(this.test.key, this.test.uploadId, - parts2, next), - ], done); + const parts2 = [ + { partnbr: 1, subpartnbr: 0, size: 20 }, + { partnbr: 1, subpartnbr: 1, size: 10 }, + ]; + async.waterfall( + [ + next => + s3.uploadPart( + { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: this.test.uploadId, + PartNumber: 1, + Body: body1, + }, + err => next(err) + ), + next => + s3.uploadPart( + { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: 
this.test.uploadId, + PartNumber: 1, + Body: body2, + }, + (err, res) => { + const eTagExpected = expectedETag(body2); + assert.strictEqual(res.ETag, eTagExpected); + return next(err); + } + ), + next => checkSubPart(this.test.key, this.test.uploadId, parts2, next), + ], + done + ); }); }); describe('with same key as preexisting part', () => { beforeEach(function beforeEachFn(done) { - async.waterfall([ - next => s3.createBucket({ Bucket: azureContainerName }, - err => next(err)), - next => { - const body = Buffer.alloc(10); - s3.putObject({ - Bucket: azureContainerName, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': - azureLocation }, - Body: body, - }, err => { - assert.equal(err, null, 'Err putting object to ' + - `azure: ${err}`); - return next(); - }); - }, - next => s3.createMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': azureLocation }, - }, (err, res) => { - if (err) { - return next(err); - } - this.currentTest.uploadId = res.UploadId; - return next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: azureContainerName }, err => next(err)), + next => { + const body = Buffer.alloc(10); + s3.putObject( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': azureLocation }, + Body: body, + }, + err => { + assert.equal(err, null, 'Err putting object to ' + `azure: ${err}`); + return next(); + } + ); + }, + next => + s3.createMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': azureLocation }, + }, + (err, res) => { + if (err) { + return next(err); + } + this.currentTest.uploadId = res.UploadId; + return next(); + } + ), + ], + done + ); }); afterEach(function afterEachFn(done) { - async.waterfall([ - next => { - process.stdout.write('Aborting multipart upload\n'); - s3.abortMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.key, - UploadId: this.currentTest.uploadId }, - err => next(err)); - }, - next => { - process.stdout.write('Deleting object\n'); - s3.deleteObject({ - Bucket: azureContainerName, - Key: this.currentTest.key }, - err => next(err)); - }, - next => { - process.stdout.write('Deleting bucket\n'); - s3.deleteBucket({ - Bucket: azureContainerName }, - err => next(err)); - }, - ], err => { - assert.equal(err, null, `Err in afterEach: ${err}`); - done(); - }); + async.waterfall( + [ + next => { + process.stdout.write('Aborting multipart upload\n'); + s3.abortMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + UploadId: this.currentTest.uploadId, + }, + err => next(err) + ); + }, + next => { + process.stdout.write('Deleting object\n'); + s3.deleteObject( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + }, + err => next(err) + ); + }, + next => { + process.stdout.write('Deleting bucket\n'); + s3.deleteBucket( + { + Bucket: azureContainerName, + }, + err => next(err) + ); + }, + ], + err => { + assert.equal(err, null, `Err in afterEach: ${err}`); + done(); + } + ); }); - it('should put a part without overwriting existing object', - function itFn(done) { + it('should put a part without overwriting existing object', function itFn(done) { const body = Buffer.alloc(20); - s3.uploadPart({ - Bucket: azureContainerName, - Key: this.test.key, - UploadId: this.test.uploadId, - PartNumber: 1, - Body: body, - }, err => { - assert.strictEqual(err, null, 'Err putting part to ' + - 
`Azure: ${err}`); - azureCheck(this.test.key, done); - }); + s3.uploadPart( + { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: this.test.uploadId, + PartNumber: 1, + Body: body, + }, + err => { + assert.strictEqual(err, null, 'Err putting part to ' + `Azure: ${err}`); + azureCheck(this.test.key, done); + } + ); }); }); }); }); -describeSkipIfNotMultipleOrCeph('MultipleBackend put part to AZURE ' + -'location with bucketMatch sets to false', function -describeF() { - this.timeout(80000); - withV4(sigCfg => { - beforeEach(function beforeFn() { - this.currentTest.key = uniqName(keyObject); - bucketUtil = new BucketUtility('default', sigCfg); - s3 = bucketUtil.s3; - }); - describe('with bucket location header', () => { - beforeEach(function beforeEachFn(done) { - async.waterfall([ - next => s3.createBucket({ Bucket: azureContainerName, - }, err => next(err)), - next => s3.createMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': - azureLocationMismatch }, - }, (err, res) => { - if (err) { - return next(err); - } - this.currentTest.uploadId = res.UploadId; - return next(); - }), - ], done); +describeSkipIfNotMultipleOrCeph( + 'MultipleBackend put part to AZURE ' + 'location with bucketMatch sets to false', + function describeF() { + this.timeout(80000); + withV4(sigCfg => { + beforeEach(function beforeFn() { + this.currentTest.key = uniqName(keyObject); + bucketUtil = new BucketUtility('default', sigCfg); + s3 = bucketUtil.s3; }); + describe('with bucket location header', () => { + beforeEach(function beforeEachFn(done) { + async.waterfall( + [ + next => s3.createBucket({ Bucket: azureContainerName }, err => next(err)), + next => + s3.createMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': azureLocationMismatch }, + }, + (err, res) => { + if (err) { + return next(err); + } + this.currentTest.uploadId = res.UploadId; + return next(); + } + ), + ], + done + ); + }); - afterEach(function afterEachFn(done) { - async.waterfall([ - next => s3.abortMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.key, - UploadId: this.currentTest.uploadId, - }, err => next(err)), - next => s3.deleteBucket({ Bucket: azureContainerName }, - err => next(err)), - ], err => { - assert.equal(err, null, `Error aborting MPU: ${err}`); - done(); + afterEach(function afterEachFn(done) { + async.waterfall( + [ + next => + s3.abortMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.key, + UploadId: this.currentTest.uploadId, + }, + err => next(err) + ), + next => s3.deleteBucket({ Bucket: azureContainerName }, err => next(err)), + ], + err => { + assert.equal(err, null, `Error aborting MPU: ${err}`); + done(); + } + ); }); - }); - it('should put block to AZURE location with bucketMatch' + - ' sets to false', function itFn(done) { - const body20 = Buffer.alloc(20); - const params = { - Bucket: azureContainerName, - Key: this.test.key, - UploadId: this.test.uploadId, - PartNumber: 1, - Body: body20, - }; - const parts = [{ partnbr: 1, subpartnbr: 0, - size: 20 }]; - async.waterfall([ - next => s3.uploadPart(params, (err, res) => { - const eTagExpected = - '"441018525208457705bf09a8ee3c1093"'; - assert.strictEqual(res.ETag, eTagExpected); - return next(err); - }), - next => checkSubPart( - `${azureContainerName}/${this.test.key}`, - this.test.uploadId, parts, next), - ], done); + it('should put block to AZURE location with 
bucketMatch' + ' sets to false', function itFn(done) { + const body20 = Buffer.alloc(20); + const params = { + Bucket: azureContainerName, + Key: this.test.key, + UploadId: this.test.uploadId, + PartNumber: 1, + Body: body20, + }; + const parts = [{ partnbr: 1, subpartnbr: 0, size: 20 }]; + async.waterfall( + [ + next => + s3.uploadPart(params, (err, res) => { + const eTagExpected = '"441018525208457705bf09a8ee3c1093"'; + assert.strictEqual(res.ETag, eTagExpected); + return next(err); + }), + next => + checkSubPart(`${azureContainerName}/${this.test.key}`, this.test.uploadId, parts, next), + ], + done + ); + }); }); }); - }); -}); + } +); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/putPartGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/putPartGcp.js index 2aeb8a3137..a137e5e1a4 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/putPartGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/putPartGcp.js @@ -4,9 +4,16 @@ const arsenal = require('arsenal'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, gcpClient, gcpBucket, gcpBucketMPU, - gcpLocation, gcpLocationMismatch, uniqName, genUniqID } - = require('../utils'); +const { + describeSkipIfNotMultipleOrCeph, + gcpClient, + gcpBucket, + gcpBucketMPU, + gcpLocation, + gcpLocationMismatch, + uniqName, + genUniqID, +} = require('../utils'); const { createMpuKey } = arsenal.storage.data.external.GcpUtils; const keyObject = 'putgcp'; @@ -25,20 +32,16 @@ function checkMPUResult(bucket, key, uploadId, objCount, expected, cb) { UploadId: uploadId, }; gcpClient.listParts(params, (err, res) => { - assert.ifError(err, - `Expected success, but got err ${err}`); - assert((res && res.Contents && - res.Contents.length === objCount)); + assert.ifError(err, `Expected success, but got err ${err}`); + assert(res && res.Contents && res.Contents.length === objCount); res.Contents.forEach(part => { - assert.strictEqual( - part.ETag, `"${expected}"`); + assert.strictEqual(part.ETag, `"${expected}"`); }); cb(); }); } -describeSkipIfNotMultipleOrCeph('MultipleBacked put part to GCP', function -describeFn() { +describeSkipIfNotMultipleOrCeph('MultipleBacked put part to GCP', function describeFn() { this.timeout(180000); withV4(sigCfg => { beforeEach(function beforeFn() { @@ -49,36 +52,48 @@ describeFn() { describe('with bucket location header', () => { beforeEach(function beforeEachFn(done) { - async.waterfall([ - next => s3.createBucket({ Bucket: bucket, - }, err => next(err)), - next => s3.createMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': gcpLocation }, - }, (err, res) => { - if (err) { - return next(err); - } - this.currentTest.uploadId = res.UploadId; - return next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucket }, err => next(err)), + next => + s3.createMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': gcpLocation }, + }, + (err, res) => { + if (err) { + return next(err); + } + this.currentTest.uploadId = res.UploadId; + return next(); + } + ), + ], + done + ); }); afterEach(function afterEachFn(done) { - async.waterfall([ - next => s3.abortMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - UploadId: this.currentTest.uploadId, - }, err => next(err)), - next => s3.deleteBucket({ Bucket: 
bucket }, - err => next(err)), - ], err => { - assert.equal(err, null, `Error aborting MPU: ${err}`); - done(); - }); + async.waterfall( + [ + next => + s3.abortMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + UploadId: this.currentTest.uploadId, + }, + err => next(err) + ), + next => s3.deleteBucket({ Bucket: bucket }, err => next(err)), + ], + err => { + assert.equal(err, null, `Error aborting MPU: ${err}`); + done(); + } + ); }); it('should put 0-byte part to GCP', function itFn(done) { @@ -88,252 +103,295 @@ describeFn() { UploadId: this.test.uploadId, PartNumber: 1, }; - async.waterfall([ - next => s3.uploadPart(params, (err, res) => { - assert.ifError(err, - `Expected success, but got err ${err}`); - assert.strictEqual(res.ETag, `"${emptyMD5}"`); - next(); - }), - next => { - const mpuKey = - createMpuKey(this.test.key, this.test.uploadId, 1); - const getParams = { - Bucket: gcpBucketMPU, - Key: mpuKey, - }; - gcpClient.getObject(getParams, (err, res) => { - assert.ifError(err, - `Expected success, but got err ${err}`); - assert.strictEqual(res.ETag, `"${emptyMD5}"`); - next(); - }); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPart(params, (err, res) => { + assert.ifError(err, `Expected success, but got err ${err}`); + assert.strictEqual(res.ETag, `"${emptyMD5}"`); + next(); + }), + next => { + const mpuKey = createMpuKey(this.test.key, this.test.uploadId, 1); + const getParams = { + Bucket: gcpBucketMPU, + Key: mpuKey, + }; + gcpClient.getObject(getParams, (err, res) => { + assert.ifError(err, `Expected success, but got err ${err}`); + assert.strictEqual(res.ETag, `"${emptyMD5}"`); + next(); + }); + }, + ], + done + ); }); it('should put 2 parts to GCP', function ifFn(done) { - async.waterfall([ - next => { - async.times(2, (n, cb) => { - const params = { - Bucket: bucket, - Key: this.test.key, - UploadId: this.test.uploadId, - Body: body, - PartNumber: n + 1, - }; - s3.uploadPart(params, (err, res) => { - assert.ifError(err, - `Expected success, but got err ${err}`); - assert.strictEqual( - res.ETag, `"${correctMD5}"`); - cb(); - }); - }, () => next()); - }, - next => checkMPUResult( - gcpBucketMPU, this.test.key, this.test.uploadId, - 2, correctMD5, next), - ], done); + async.waterfall( + [ + next => { + async.times( + 2, + (n, cb) => { + const params = { + Bucket: bucket, + Key: this.test.key, + UploadId: this.test.uploadId, + Body: body, + PartNumber: n + 1, + }; + s3.uploadPart(params, (err, res) => { + assert.ifError(err, `Expected success, but got err ${err}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + cb(); + }); + }, + () => next() + ); + }, + next => checkMPUResult(gcpBucketMPU, this.test.key, this.test.uploadId, 2, correctMD5, next), + ], + done + ); }); it('should put the same part twice', function ifFn(done) { - async.waterfall([ - next => { - const partBody = ['', body]; - const partMD5 = [emptyMD5, correctMD5]; - async.timesSeries(2, (n, cb) => { - const params = { - Bucket: bucket, - Key: this.test.key, - UploadId: this.test.uploadId, - Body: partBody[n], - PartNumber: 1, - }; - s3.uploadPart(params, (err, res) => { - assert.ifError(err, - `Expected success, but got err ${err}`); - assert.strictEqual( - res.ETag, `"${partMD5[n]}"`); - cb(); - }); - }, () => next()); - }, - next => checkMPUResult( - gcpBucketMPU, this.test.key, this.test.uploadId, - 1, correctMD5, next), - ], done); + async.waterfall( + [ + next => { + const partBody = ['', body]; + const partMD5 = [emptyMD5, correctMD5]; + async.timesSeries( + 2, + (n, 
cb) => { + const params = { + Bucket: bucket, + Key: this.test.key, + UploadId: this.test.uploadId, + Body: partBody[n], + PartNumber: 1, + }; + s3.uploadPart(params, (err, res) => { + assert.ifError(err, `Expected success, but got err ${err}`); + assert.strictEqual(res.ETag, `"${partMD5[n]}"`); + cb(); + }); + }, + () => next() + ); + }, + next => checkMPUResult(gcpBucketMPU, this.test.key, this.test.uploadId, 1, correctMD5, next), + ], + done + ); }); }); describe('with same key as preexisting part', () => { beforeEach(function beforeEachFn(done) { - async.waterfall([ - next => s3.createBucket({ Bucket: bucket }, - err => next(err)), - next => { - s3.putObject({ - Bucket: bucket, - Key: this.currentTest.key, - Metadata: { - 'scal-location-constraint': gcpLocation }, - Body: body, - }, err => { - assert.equal(err, null, 'Err putting object to ' + - `GCP: ${err}`); - return next(); - }); - }, - next => s3.createMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': gcpLocation }, - }, (err, res) => { - if (err) { - return next(err); - } - this.currentTest.uploadId = res.UploadId; - return next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucket }, err => next(err)), + next => { + s3.putObject( + { + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { + 'scal-location-constraint': gcpLocation, + }, + Body: body, + }, + err => { + assert.equal(err, null, 'Err putting object to ' + `GCP: ${err}`); + return next(); + } + ); + }, + next => + s3.createMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': gcpLocation }, + }, + (err, res) => { + if (err) { + return next(err); + } + this.currentTest.uploadId = res.UploadId; + return next(); + } + ), + ], + done + ); }); afterEach(function afterEachFn(done) { - async.waterfall([ - next => { - process.stdout.write('Aborting multipart upload\n'); - s3.abortMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - UploadId: this.currentTest.uploadId }, - err => next(err)); - }, - next => { - process.stdout.write('Deleting object\n'); - s3.deleteObject({ - Bucket: bucket, - Key: this.currentTest.key }, - err => next(err)); - }, - next => { - process.stdout.write('Deleting bucket\n'); - s3.deleteBucket({ - Bucket: bucket }, - err => next(err)); - }, - ], err => { - assert.equal(err, null, `Err in afterEach: ${err}`); - done(); - }); + async.waterfall( + [ + next => { + process.stdout.write('Aborting multipart upload\n'); + s3.abortMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + UploadId: this.currentTest.uploadId, + }, + err => next(err) + ); + }, + next => { + process.stdout.write('Deleting object\n'); + s3.deleteObject( + { + Bucket: bucket, + Key: this.currentTest.key, + }, + err => next(err) + ); + }, + next => { + process.stdout.write('Deleting bucket\n'); + s3.deleteBucket( + { + Bucket: bucket, + }, + err => next(err) + ); + }, + ], + err => { + assert.equal(err, null, `Err in afterEach: ${err}`); + done(); + } + ); }); - it('should put a part without overwriting existing object', - function itFn(done) { + it('should put a part without overwriting existing object', function itFn(done) { const body = Buffer.alloc(20); - s3.uploadPart({ - Bucket: bucket, - Key: this.test.key, - UploadId: this.test.uploadId, - PartNumber: 1, - Body: body, - }, err => { - assert.strictEqual(err, null, 'Err putting part to ' + - `GCP: ${err}`); - gcpClient.getObject({ - Bucket: gcpBucket, + 
s3.uploadPart( + { + Bucket: bucket, Key: this.test.key, - }, (err, res) => { - assert.ifError(err, - `Expected success, but got err ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); - }); - }); + UploadId: this.test.uploadId, + PartNumber: 1, + Body: body, + }, + err => { + assert.strictEqual(err, null, 'Err putting part to ' + `GCP: ${err}`); + gcpClient.getObject( + { + Bucket: gcpBucket, + Key: this.test.key, + }, + (err, res) => { + assert.ifError(err, `Expected success, but got err ${err}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + done(); + } + ); + } + ); }); }); }); }); -describeSkipIfNotMultipleOrCeph('MultipleBackend put part to GCP location ' + -'with bucketMatch sets to false', function -describeF() { - this.timeout(80000); - withV4(sigCfg => { - beforeEach(function beforeFn() { - this.currentTest.key = uniqName(keyObject); - bucketUtil = new BucketUtility('default', sigCfg); - s3 = bucketUtil.s3; - }); - describe('with bucket location header', () => { - beforeEach(function beforeEachFn(done) { - async.waterfall([ - next => s3.createBucket({ Bucket: bucket, - }, err => next(err)), - next => s3.createMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - Metadata: { 'scal-location-constraint': - gcpLocationMismatch }, - }, (err, res) => { - if (err) { - return next(err); - } - this.currentTest.uploadId = res.UploadId; - return next(); - }), - ], done); +describeSkipIfNotMultipleOrCeph( + 'MultipleBackend put part to GCP location ' + 'with bucketMatch sets to false', + function describeF() { + this.timeout(80000); + withV4(sigCfg => { + beforeEach(function beforeFn() { + this.currentTest.key = uniqName(keyObject); + bucketUtil = new BucketUtility('default', sigCfg); + s3 = bucketUtil.s3; }); + describe('with bucket location header', () => { + beforeEach(function beforeEachFn(done) { + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucket }, err => next(err)), + next => + s3.createMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + Metadata: { 'scal-location-constraint': gcpLocationMismatch }, + }, + (err, res) => { + if (err) { + return next(err); + } + this.currentTest.uploadId = res.UploadId; + return next(); + } + ), + ], + done + ); + }); - afterEach(function afterEachFn(done) { - async.waterfall([ - next => s3.abortMultipartUpload({ - Bucket: bucket, - Key: this.currentTest.key, - UploadId: this.currentTest.uploadId, - }, err => next(err)), - next => s3.deleteBucket({ Bucket: bucket }, - err => next(err)), - ], err => { - assert.equal(err, null, `Error aborting MPU: ${err}`); - done(); + afterEach(function afterEachFn(done) { + async.waterfall( + [ + next => + s3.abortMultipartUpload( + { + Bucket: bucket, + Key: this.currentTest.key, + UploadId: this.currentTest.uploadId, + }, + err => next(err) + ), + next => s3.deleteBucket({ Bucket: bucket }, err => next(err)), + ], + err => { + assert.equal(err, null, `Error aborting MPU: ${err}`); + done(); + } + ); }); - }); - it('should put part to GCP location with bucketMatch' + - ' sets to false', function itFn(done) { - const body20 = Buffer.alloc(20); - const params = { - Bucket: bucket, - Key: this.test.key, - UploadId: this.test.uploadId, - PartNumber: 1, - Body: body20, - }; - const eTagExpected = - '"441018525208457705bf09a8ee3c1093"'; - async.waterfall([ - next => s3.uploadPart(params, (err, res) => { - assert.strictEqual(res.ETag, eTagExpected); - next(err); - }), - next => { - const key = - createMpuKey(this.test.key, this.test.uploadId, 1); - const 
mpuKey = `${bucket}/${key}`; - const getParams = { - Bucket: gcpBucketMPU, - Key: mpuKey, - }; - gcpClient.getObject(getParams, (err, res) => { - assert.ifError(err, - `Expected success, but got err ${err}`); - assert.strictEqual(res.ETag, eTagExpected); - next(); - }); - }, - ], done); + it('should put part to GCP location with bucketMatch' + ' sets to false', function itFn(done) { + const body20 = Buffer.alloc(20); + const params = { + Bucket: bucket, + Key: this.test.key, + UploadId: this.test.uploadId, + PartNumber: 1, + Body: body20, + }; + const eTagExpected = '"441018525208457705bf09a8ee3c1093"'; + async.waterfall( + [ + next => + s3.uploadPart(params, (err, res) => { + assert.strictEqual(res.ETag, eTagExpected); + next(err); + }), + next => { + const key = createMpuKey(this.test.key, this.test.uploadId, 1); + const mpuKey = `${bucket}/${key}`; + const getParams = { + Bucket: gcpBucketMPU, + Key: mpuKey, + }; + gcpClient.getObject(getParams, (err, res) => { + assert.ifError(err, `Expected success, but got err ${err}`); + assert.strictEqual(res.ETag, eTagExpected); + next(); + }); + }, + ], + done + ); + }); }); }); - }); -}); + } +); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/azureObjectCopy.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/azureObjectCopy.js index eb1be40768..81a655e86b 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/azureObjectCopy.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/azureObjectCopy.js @@ -17,8 +17,7 @@ const { azureLocationMismatch, genUniqID, } = require('../utils'); -const { createEncryptedBucketPromise } = - require('../../../lib/utility/createEncryptedBucket'); +const { createEncryptedBucketPromise } = require('../../../lib/utility/createEncryptedBucket'); const azureClient = getAzureClient(); const azureContainerName = getAzureContainerName(azureLocation); @@ -38,7 +37,9 @@ let bucketUtil; let s3; function putSourceObj(key, location, objSize, bucket, cb) { - const sourceParams = { Bucket: bucket, Key: key, + const sourceParams = { + Bucket: bucket, + Key: key, Metadata: { 'test-header': 'copyme', }, @@ -64,58 +65,74 @@ function putSourceObj(key, location, objSize, bucket, cb) { }); } -function assertGetObjects(sourceKey, sourceBucket, sourceLoc, destKey, -destBucket, destLoc, azureKey, mdDirective, objSize, callback) { +function assertGetObjects( + sourceKey, + sourceBucket, + sourceLoc, + destKey, + destBucket, + destLoc, + azureKey, + mdDirective, + objSize, + callback +) { const sourceGetParams = { Bucket: sourceBucket, Key: sourceKey }; const destGetParams = { Bucket: destBucket, Key: destKey }; - async.series([ - cb => s3.getObject(sourceGetParams, cb), - cb => s3.getObject(destGetParams, cb), - cb => azureClient.getContainerClient(azureContainerName).getProperties(azureKey) - .then(res => cb(null, res), err => cb(err)), - ], (err, results) => { - assert.equal(err, null, `Error in assertGetObjects: ${err}`); - const [sourceRes, destRes, azureRes] = results; - const convertedMD5 = convertMD5(azureRes[0].contentSettings.contentMD5); - if (objSize && objSize.empty) { - assert.strictEqual(sourceRes.ETag, `"${emptyMD5}"`); - assert.strictEqual(destRes.ETag, `"${emptyMD5}"`); - assert.strictEqual(convertedMD5, `${emptyMD5}`); - assert.strictEqual('0', azureRes[0].contentLength); - } else if (objSize && objSize.big) { - assert.strictEqual(sourceRes.ETag, `"${bigMD5}"`); - assert.strictEqual(destRes.ETag, `"${bigMD5}"`); - if 
(process.env.ENABLE_KMS_ENCRYPTION === 'true') { - assert.strictEqual(sourceRes.ServerSideEncryption, 'AES256'); - assert.strictEqual(destRes.ServerSideEncryption, 'AES256'); + async.series( + [ + cb => s3.getObject(sourceGetParams, cb), + cb => s3.getObject(destGetParams, cb), + cb => + azureClient + .getContainerClient(azureContainerName) + .getProperties(azureKey) + .then( + res => cb(null, res), + err => cb(err) + ), + ], + (err, results) => { + assert.equal(err, null, `Error in assertGetObjects: ${err}`); + const [sourceRes, destRes, azureRes] = results; + const convertedMD5 = convertMD5(azureRes[0].contentSettings.contentMD5); + if (objSize && objSize.empty) { + assert.strictEqual(sourceRes.ETag, `"${emptyMD5}"`); + assert.strictEqual(destRes.ETag, `"${emptyMD5}"`); + assert.strictEqual(convertedMD5, `${emptyMD5}`); + assert.strictEqual('0', azureRes[0].contentLength); + } else if (objSize && objSize.big) { + assert.strictEqual(sourceRes.ETag, `"${bigMD5}"`); + assert.strictEqual(destRes.ETag, `"${bigMD5}"`); + if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { + assert.strictEqual(sourceRes.ServerSideEncryption, 'AES256'); + assert.strictEqual(destRes.ServerSideEncryption, 'AES256'); + } else { + assert.strictEqual(convertedMD5, `${bigMD5}`); + } } else { - assert.strictEqual(convertedMD5, `${bigMD5}`); + if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { + assert.strictEqual(sourceRes.ServerSideEncryption, 'AES256'); + assert.strictEqual(destRes.ServerSideEncryption, 'AES256'); + } else { + assert.strictEqual(sourceRes.ETag, `"${normalMD5}"`); + assert.strictEqual(destRes.ETag, `"${normalMD5}"`); + assert.strictEqual(convertedMD5, `${normalMD5}`); + } } - } else { - if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { - assert.strictEqual(sourceRes.ServerSideEncryption, 'AES256'); - assert.strictEqual(destRes.ServerSideEncryption, 'AES256'); - } else { - assert.strictEqual(sourceRes.ETag, `"${normalMD5}"`); - assert.strictEqual(destRes.ETag, `"${normalMD5}"`); - assert.strictEqual(convertedMD5, `${normalMD5}`); + if (mdDirective === 'COPY') { + assert.strictEqual(sourceRes.Metadata['test-header'], destRes.Metadata['test-header']); + assert.strictEqual(azureRes[0].metadata.test_header, destRes.Metadata['test-header']); } + assert.strictEqual(sourceRes.ContentLength, destRes.ContentLength); + assert.strictEqual(sourceRes.Metadata[locMetaHeader], sourceLoc); + assert.strictEqual(destRes.Metadata[locMetaHeader], destLoc); + callback(); } - if (mdDirective === 'COPY') { - assert.strictEqual(sourceRes.Metadata['test-header'], - destRes.Metadata['test-header']); - assert.strictEqual(azureRes[0].metadata.test_header, - destRes.Metadata['test-header']); - } - assert.strictEqual(sourceRes.ContentLength, destRes.ContentLength); - assert.strictEqual(sourceRes.Metadata[locMetaHeader], sourceLoc); - assert.strictEqual(destRes.Metadata[locMetaHeader], destLoc); - callback(); - }); + ); } -describeSkipIfNotMultipleOrCeph('MultipleBackend object copy: Azure', -function testSuite() { +describeSkipIfNotMultipleOrCeph('MultipleBackend object copy: Azure', function testSuite() { this.timeout(250000); withV4(sigCfg => { beforeEach(function beFn() { @@ -128,38 +145,44 @@ function testSuite() { if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { s3.createBucketPromise = createEncryptedBucketPromise; } - return s3.createBucketPromise({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: memLocation, - }, - }) - .then(() => s3.createBucketPromise({ Bucket: bucketAzure, - 
CreateBucketConfiguration: { - LocationConstraint: azureLocation, - }, - })) - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucketPromise({ + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: memLocation, + }, + }) + .then(() => + s3.createBucketPromise({ + Bucket: bucketAzure, + CreateBucketConfiguration: { + LocationConstraint: azureLocation, + }, + }) + ) + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => bucketUtil.empty(bucketAzure)) - .then(() => { - process.stdout.write(`Deleting bucket: ${bucket}\n`); - return bucketUtil.deleteOne(bucket); - }) - .then(() => { - process.stdout.write(`Deleting bucket: ${bucketAzure}\n`); - return bucketUtil.deleteOne(bucketAzure); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => bucketUtil.empty(bucketAzure)) + .then(() => { + process.stdout.write(`Deleting bucket: ${bucket}\n`); + return bucketUtil.deleteOne(bucket); + }) + .then(() => { + process.stdout.write(`Deleting bucket: ${bucketAzure}\n`); + return bucketUtil.deleteOne(bucketAzure); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); it('should copy an object from mem to Azure', function itFn(done) { @@ -172,19 +195,25 @@ function testSuite() { Metadata: { 'scal-location-constraint': azureLocation }, }; s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, memLocation, - this.test.copyKey, bucket, azureLocation, - this.test.copyKey, 'REPLACE', null, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + memLocation, + this.test.copyKey, + bucket, + azureLocation, + this.test.copyKey, + 'REPLACE', + null, + done + ); }); }); }); - it('should copy an object with no location contraint from mem to Azure', - function itFn(done) { + it('should copy an object with no location contraint from mem to Azure', function itFn(done) { putSourceObj(this.test.key, null, null, bucket, () => { const copyParams = { Bucket: bucketAzure, @@ -193,13 +222,20 @@ function testSuite() { MetadataDirective: 'COPY', }; s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, undefined, - this.test.copyKey, bucketAzure, undefined, - this.test.copyKey, 'COPY', null, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + undefined, + this.test.copyKey, + bucketAzure, + undefined, + this.test.copyKey, + 'COPY', + null, + done + ); }); }); }); @@ -214,13 +250,20 @@ function testSuite() { Metadata: { 'scal-location-constraint': memLocation }, }; s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - 
assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, azureLocation, - this.test.copyKey, bucket, memLocation, this.test.key, - 'REPLACE', null, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + azureLocation, + this.test.copyKey, + bucket, + memLocation, + this.test.key, + 'REPLACE', + null, + done + ); }); }); }); @@ -235,13 +278,20 @@ function testSuite() { Metadata: { 'scal-location-constraint': azureLocation }, }; s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, awsLocation, - this.test.copyKey, bucket, azureLocation, - this.test.copyKey, 'REPLACE', null, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + awsLocation, + this.test.copyKey, + bucket, + azureLocation, + this.test.copyKey, + 'REPLACE', + null, + done + ); }); }); }); @@ -256,256 +306,337 @@ function testSuite() { Metadata: { 'scal-location-constraint': awsLocation }, }; s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, azureLocation, - this.test.copyKey, bucket, awsLocation, this.test.key, - 'REPLACE', null, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + azureLocation, + this.test.copyKey, + bucket, + awsLocation, + this.test.key, + 'REPLACE', + null, + done + ); }); }); }); - it('should copy an object from Azure to mem with "REPLACE" directive ' + - 'and no location constraint md', function itFn(done) { - putSourceObj(this.test.key, azureLocation, null, bucket, () => { - const copyParams = { - Bucket: bucket, - Key: this.test.copyKey, - CopySource: `/${bucket}/${this.test.key}`, - MetadataDirective: 'REPLACE', - }; - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, azureLocation, - this.test.copyKey, bucket, undefined, this.test.key, - 'REPLACE', null, done); + it( + 'should copy an object from Azure to mem with "REPLACE" directive ' + 'and no location constraint md', + function itFn(done) { + putSourceObj(this.test.key, azureLocation, null, bucket, () => { + const copyParams = { + Bucket: bucket, + Key: this.test.copyKey, + CopySource: `/${bucket}/${this.test.key}`, + MetadataDirective: 'REPLACE', + }; + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + azureLocation, + this.test.copyKey, + bucket, + undefined, + this.test.key, + 'REPLACE', + null, + done + ); + }); }); - }); - }); + } + ); - it('should copy an object from mem to Azure with "REPLACE" directive ' + - 'and no location 
constraint md', function itFn(done) { - putSourceObj(this.test.key, null, null, bucket, () => { - const copyParams = { - Bucket: bucketAzure, - Key: this.test.copyKey, - CopySource: `/${bucket}/${this.test.key}`, - MetadataDirective: 'REPLACE', - }; - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, undefined, - this.test.copyKey, bucketAzure, undefined, - this.test.copyKey, 'REPLACE', null, done); + it( + 'should copy an object from mem to Azure with "REPLACE" directive ' + 'and no location constraint md', + function itFn(done) { + putSourceObj(this.test.key, null, null, bucket, () => { + const copyParams = { + Bucket: bucketAzure, + Key: this.test.copyKey, + CopySource: `/${bucket}/${this.test.key}`, + MetadataDirective: 'REPLACE', + }; + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + undefined, + this.test.copyKey, + bucketAzure, + undefined, + this.test.copyKey, + 'REPLACE', + null, + done + ); + }); }); - }); - }); + } + ); - it('should copy an object from Azure to Azure showing sending ' + - 'metadata location constraint this doesn\'t matter with COPY directive', - function itFn(done) { - putSourceObj(this.test.key, azureLocation, null, bucketAzure, - () => { - const copyParams = { - Bucket: bucketAzure, - Key: this.test.copyKey, - CopySource: `/${bucketAzure}/${this.test.key}`, - MetadataDirective: 'COPY', - Metadata: { 'scal-location-constraint': memLocation }, - }; - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucketAzure, azureLocation, - this.test.copyKey, bucketAzure, azureLocation, - this.test.copyKey, 'COPY', null, done); + it( + 'should copy an object from Azure to Azure showing sending ' + + "metadata location constraint this doesn't matter with COPY directive", + function itFn(done) { + putSourceObj(this.test.key, azureLocation, null, bucketAzure, () => { + const copyParams = { + Bucket: bucketAzure, + Key: this.test.copyKey, + CopySource: `/${bucketAzure}/${this.test.key}`, + MetadataDirective: 'COPY', + Metadata: { 'scal-location-constraint': memLocation }, + }; + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucketAzure, + azureLocation, + this.test.copyKey, + bucketAzure, + azureLocation, + this.test.copyKey, + 'COPY', + null, + done + ); + }); }); - }); - }); + } + ); - it('should copy an object with no location constraint from Azure to ' + - 'Azure relying on the bucket location constraint', - function itFn(done) { - putSourceObj(this.test.key, null, null, bucketAzure, - () => { - const copyParams = { - Bucket: bucketAzure, - Key: this.test.copyKey, - CopySource: `/${bucketAzure}/${this.test.key}`, - MetadataDirective: 'COPY', - }; - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - 
assertGetObjects(this.test.key, bucketAzure, undefined, - this.test.copyKey, bucketAzure, undefined, - this.test.copyKey, 'COPY', null, done); + it( + 'should copy an object with no location constraint from Azure to ' + + 'Azure relying on the bucket location constraint', + function itFn(done) { + putSourceObj(this.test.key, null, null, bucketAzure, () => { + const copyParams = { + Bucket: bucketAzure, + Key: this.test.copyKey, + CopySource: `/${bucketAzure}/${this.test.key}`, + MetadataDirective: 'COPY', + }; + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucketAzure, + undefined, + this.test.copyKey, + bucketAzure, + undefined, + this.test.copyKey, + 'COPY', + null, + done + ); + }); }); - }); - }); + } + ); - it('should copy an object from Azure to mem because bucket ' + - 'destination location is mem', function itFn(done) { - putSourceObj(this.test.key, azureLocation, null, bucket, () => { - const copyParams = { - Bucket: bucket, - Key: this.test.copyKey, - CopySource: `/${bucket}/${this.test.key}`, - MetadataDirective: 'COPY', - }; - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, azureLocation, - this.test.copyKey, bucket, memLocation, - this.test.key, 'COPY', null, done); + it( + 'should copy an object from Azure to mem because bucket ' + 'destination location is mem', + function itFn(done) { + putSourceObj(this.test.key, azureLocation, null, bucket, () => { + const copyParams = { + Bucket: bucket, + Key: this.test.copyKey, + CopySource: `/${bucket}/${this.test.key}`, + MetadataDirective: 'COPY', + }; + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + azureLocation, + this.test.copyKey, + bucket, + memLocation, + this.test.key, + 'COPY', + null, + done + ); + }); }); - }); - }); + } + ); - it('should copy an object on Azure to a different Azure ' + - 'account without source object READ access', - function itFn(done) { - putSourceObj(this.test.key, azureLocation2, null, bucket, () => { - const copyParams = { - Bucket: bucket, - Key: this.test.copyKey, - CopySource: `/${bucket}/${this.test.key}`, - MetadataDirective: 'REPLACE', - Metadata: { 'scal-location-constraint': azureLocation }, - }; - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, azureLocation2, - this.test.copyKey, bucket, azureLocation, - this.test.copyKey, 'REPLACE', null, done); + it( + 'should copy an object on Azure to a different Azure ' + 'account without source object READ access', + function itFn(done) { + putSourceObj(this.test.key, azureLocation2, null, bucket, () => { + const copyParams = { + Bucket: bucket, + Key: this.test.copyKey, + CopySource: `/${bucket}/${this.test.key}`, + MetadataDirective: 'REPLACE', + Metadata: { 'scal-location-constraint': azureLocation }, + }; + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: 
${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + azureLocation2, + this.test.copyKey, + bucket, + azureLocation, + this.test.copyKey, + 'REPLACE', + null, + done + ); + }); }); - }); - }); + } + ); - it('should copy a 5MB object on Azure to a different Azure ' + - 'account without source object READ access', - function itFn(done) { - putSourceObj(this.test.key, azureLocation2, { big: true }, bucket, - () => { - const copyParams = { - Bucket: bucket, - Key: this.test.copyKey, - CopySource: `/${bucket}/${this.test.key}`, - MetadataDirective: 'REPLACE', - Metadata: { 'scal-location-constraint': azureLocation }, - }; - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${bigMD5}"`); - assertGetObjects(this.test.key, bucket, azureLocation2, - this.test.copyKey, bucket, azureLocation, - this.test.copyKey, 'REPLACE', { big: true }, done); + it( + 'should copy a 5MB object on Azure to a different Azure ' + 'account without source object READ access', + function itFn(done) { + putSourceObj(this.test.key, azureLocation2, { big: true }, bucket, () => { + const copyParams = { + Bucket: bucket, + Key: this.test.copyKey, + CopySource: `/${bucket}/${this.test.key}`, + MetadataDirective: 'REPLACE', + Metadata: { 'scal-location-constraint': azureLocation }, + }; + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${bigMD5}"`); + assertGetObjects( + this.test.key, + bucket, + azureLocation2, + this.test.copyKey, + bucket, + azureLocation, + this.test.copyKey, + 'REPLACE', + { big: true }, + done + ); + }); }); - }); - }); + } + ); - it('should copy an object from bucketmatch=false ' + - 'Azure location to MPU with a bucketmatch=false Azure location', - function itFn(done) { - putSourceObj(this.test.key, azureLocationMismatch, null, bucket, - () => { - const copyParams = { - Bucket: bucket, - Key: this.test.copyKey, - CopySource: `/${bucket}/${this.test.key}`, - MetadataDirective: 'REPLACE', - Metadata: { 'scal-location-constraint': - azureLocationMismatch }, - }; - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, - azureLocationMismatch, - this.test.copyKey, bucket, azureLocationMismatch, - `${bucket}/${this.test.copyKey}`, 'REPLACE', null, - done); + it( + 'should copy an object from bucketmatch=false ' + + 'Azure location to MPU with a bucketmatch=false Azure location', + function itFn(done) { + putSourceObj(this.test.key, azureLocationMismatch, null, bucket, () => { + const copyParams = { + Bucket: bucket, + Key: this.test.copyKey, + CopySource: `/${bucket}/${this.test.key}`, + MetadataDirective: 'REPLACE', + Metadata: { 'scal-location-constraint': azureLocationMismatch }, + }; + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + azureLocationMismatch, + this.test.copyKey, + bucket, + azureLocationMismatch, + `${bucket}/${this.test.copyKey}`, + 'REPLACE', + null, + done + ); + }); }); - }); - }); + } + ); - it('should 
copy an object from bucketmatch=false ' + - 'Azure location to MPU with a bucketmatch=true Azure location', - function itFn(done) { - putSourceObj(this.test.key, azureLocationMismatch, null, bucket, - () => { - const copyParams = { - Bucket: bucket, - Key: this.test.copyKey, - CopySource: `/${bucket}/${this.test.key}`, - MetadataDirective: 'REPLACE', - Metadata: { 'scal-location-constraint': azureLocation }, - }; - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, - azureLocationMismatch, - this.test.copyKey, bucket, azureLocation, - this.test.copyKey, 'REPLACE', null, done); + it( + 'should copy an object from bucketmatch=false ' + + 'Azure location to MPU with a bucketmatch=true Azure location', + function itFn(done) { + putSourceObj(this.test.key, azureLocationMismatch, null, bucket, () => { + const copyParams = { + Bucket: bucket, + Key: this.test.copyKey, + CopySource: `/${bucket}/${this.test.key}`, + MetadataDirective: 'REPLACE', + Metadata: { 'scal-location-constraint': azureLocation }, + }; + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + azureLocationMismatch, + this.test.copyKey, + bucket, + azureLocation, + this.test.copyKey, + 'REPLACE', + null, + done + ); + }); }); - }); - }); + } + ); - it('should copy an object from bucketmatch=true ' + - 'Azure location to MPU with a bucketmatch=false Azure location', - function itFn(done) { - putSourceObj(this.test.key, azureLocation, null, bucket, () => { - const copyParams = { - Bucket: bucket, - Key: this.test.copyKey, - CopySource: `/${bucket}/${this.test.key}`, - MetadataDirective: 'REPLACE', - Metadata: { 'scal-location-constraint': - azureLocationMismatch }, - }; - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${normalMD5}"`); - assertGetObjects(this.test.key, bucket, - azureLocation, - this.test.copyKey, bucket, azureLocationMismatch, - `${bucket}/${this.test.copyKey}`, - 'REPLACE', null, done); + it( + 'should copy an object from bucketmatch=true ' + + 'Azure location to MPU with a bucketmatch=false Azure location', + function itFn(done) { + putSourceObj(this.test.key, azureLocation, null, bucket, () => { + const copyParams = { + Bucket: bucket, + Key: this.test.copyKey, + CopySource: `/${bucket}/${this.test.key}`, + MetadataDirective: 'REPLACE', + Metadata: { 'scal-location-constraint': azureLocationMismatch }, + }; + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${normalMD5}"`); + assertGetObjects( + this.test.key, + bucket, + azureLocation, + this.test.copyKey, + bucket, + azureLocationMismatch, + `${bucket}/${this.test.copyKey}`, + 'REPLACE', + null, + done + ); + }); }); - }); - }); + } + ); - it('should copy a 0-byte object from mem to Azure', - function itFn(done) { - putSourceObj(this.test.key, memLocation, { empty: true }, bucket, - () => { + it('should copy a 0-byte object from mem to Azure', function itFn(done) { + putSourceObj(this.test.key, memLocation, { empty: true }, bucket, () => { const 
copyParams = { Bucket: bucket, Key: this.test.copyKey, @@ -514,20 +645,26 @@ function testSuite() { Metadata: { 'scal-location-constraint': azureLocation }, }; s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${emptyMD5}"`); - assertGetObjects(this.test.key, bucket, memLocation, - this.test.copyKey, bucket, azureLocation, - this.test.copyKey, 'REPLACE', { empty: true }, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${emptyMD5}"`); + assertGetObjects( + this.test.key, + bucket, + memLocation, + this.test.copyKey, + bucket, + azureLocation, + this.test.copyKey, + 'REPLACE', + { empty: true }, + done + ); }); }); }); it('should copy a 0-byte object on Azure', function itFn(done) { - putSourceObj(this.test.key, azureLocation, { empty: true }, bucket, - () => { + putSourceObj(this.test.key, azureLocation, { empty: true }, bucket, () => { const copyParams = { Bucket: bucket, Key: this.test.copyKey, @@ -536,20 +673,26 @@ function testSuite() { Metadata: { 'scal-location-constraint': azureLocation }, }; s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${emptyMD5}"`); - assertGetObjects(this.test.key, bucket, azureLocation, - this.test.copyKey, bucket, azureLocation, - this.test.copyKey, 'REPLACE', { empty: true }, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${emptyMD5}"`); + assertGetObjects( + this.test.key, + bucket, + azureLocation, + this.test.copyKey, + bucket, + azureLocation, + this.test.copyKey, + 'REPLACE', + { empty: true }, + done + ); }); }); }); it('should copy a 5MB object from mem to Azure', function itFn(done) { - putSourceObj(this.test.key, memLocation, { big: true }, bucket, - () => { + putSourceObj(this.test.key, memLocation, { big: true }, bucket, () => { const copyParams = { Bucket: bucket, Key: this.test.copyKey, @@ -559,20 +702,27 @@ function testSuite() { }; s3.copyObject(copyParams, (err, result) => { assert.equal(err, null, `Err copying object: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${bigMD5}"`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${bigMD5}"`); setTimeout(() => { - assertGetObjects(this.test.key, bucket, memLocation, - this.test.copyKey, bucket, azureLocation, - this.test.copyKey, 'REPLACE', { big: true }, done); + assertGetObjects( + this.test.key, + bucket, + memLocation, + this.test.copyKey, + bucket, + azureLocation, + this.test.copyKey, + 'REPLACE', + { big: true }, + done + ); }, azureTimeout); }); }); }); it('should copy a 5MB object on Azure', function itFn(done) { - putSourceObj(this.test.key, azureLocation, { big: true }, bucket, - () => { + putSourceObj(this.test.key, azureLocation, { big: true }, bucket, () => { const copyParams = { Bucket: bucket, Key: this.test.copyKey, @@ -582,25 +732,29 @@ function testSuite() { }; s3.copyObject(copyParams, (err, result) => { assert.equal(err, null, `Err copying object: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${bigMD5}"`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${bigMD5}"`); setTimeout(() => { - assertGetObjects(this.test.key, bucket, azureLocation, - this.test.copyKey, bucket, azureLocation, - this.test.copyKey, 
'REPLACE', { big: true }, done); + assertGetObjects( + this.test.key, + bucket, + azureLocation, + this.test.copyKey, + bucket, + azureLocation, + this.test.copyKey, + 'REPLACE', + { big: true }, + done + ); }, azureTimeout); }); }); }); - it('should return error if Azure source object has ' + - 'been deleted', function itFn(done) { - putSourceObj(this.test.key, azureLocation, null, bucket, - () => { - azureClient.deleteBlob(azureContainerName, this.test.key, - err => { - assert.equal(err, null, 'Error deleting object from ' + - `Azure: ${err}`); + it('should return error if Azure source object has ' + 'been deleted', function itFn(done) { + putSourceObj(this.test.key, azureLocation, null, bucket, () => { + azureClient.deleteBlob(azureContainerName, this.test.key, err => { + assert.equal(err, null, 'Error deleting object from ' + `Azure: ${err}`); const copyParams = { Bucket: bucket, Key: this.test.copyKey, diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopy.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopy.js index c13ae83c7e..9f7eeaaf26 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopy.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopy.js @@ -7,11 +7,19 @@ const BucketUtility = require('../../../lib/utility/bucket-util'); const constants = require('../../../../../../constants'); const { config } = require('../../../../../../lib/Config'); const { getRealAwsConfig } = require('../../support/awsConfig'); -const { createEncryptedBucketPromise } = - require('../../../lib/utility/createEncryptedBucket'); -const { describeSkipIfNotMultiple, itSkipCeph, awsS3, memLocation, awsLocation, - azureLocation, awsLocation2, awsLocationMismatch, awsLocationEncryption, - genUniqID } = require('../utils'); +const { createEncryptedBucketPromise } = require('../../../lib/utility/createEncryptedBucket'); +const { + describeSkipIfNotMultiple, + itSkipCeph, + awsS3, + memLocation, + awsLocation, + azureLocation, + awsLocation2, + awsLocationMismatch, + awsLocationEncryption, + genUniqID, +} = require('../utils'); const bucket = `objectcopybucket${genUniqID()}`; const bucketAws = `objectcopyaws${genUniqID()}`; @@ -26,7 +34,9 @@ let s3; function putSourceObj(location, isEmptyObj, bucket, cb) { const key = `somekey-${genUniqID()}`; - const sourceParams = { Bucket: bucket, Key: key, + const sourceParams = { + Bucket: bucket, + Key: key, Metadata: { 'test-header': 'copyme', }, @@ -49,66 +59,74 @@ function putSourceObj(location, isEmptyObj, bucket, cb) { }); } -function assertGetObjects(sourceKey, sourceBucket, sourceLoc, destKey, -destBucket, destLoc, awsKey, mdDirective, isEmptyObj, awsS3, awsLocation, -callback) { - const awsBucket = - config.locationConstraints[awsLocation].details.bucketName; +function assertGetObjects( + sourceKey, + sourceBucket, + sourceLoc, + destKey, + destBucket, + destLoc, + awsKey, + mdDirective, + isEmptyObj, + awsS3, + awsLocation, + callback +) { + const awsBucket = config.locationConstraints[awsLocation].details.bucketName; const sourceGetParams = { Bucket: sourceBucket, Key: sourceKey }; const destGetParams = { Bucket: destBucket, Key: destKey }; const awsParams = { Bucket: awsBucket, Key: awsKey }; - async.series([ - cb => s3.getObject(sourceGetParams, cb), - cb => s3.getObject(destGetParams, cb), - cb => awsS3.getObject(awsParams, cb), - ], (err, results) => { - assert.equal(err, null, `Error in assertGetObjects: ${err}`); - const [sourceRes, destRes, 
awsRes] = results; - if (isEmptyObj) { - assert.strictEqual(sourceRes.ETag, `"${emptyMD5}"`); - assert.strictEqual(destRes.ETag, `"${emptyMD5}"`); - assert.strictEqual(awsRes.ETag, `"${emptyMD5}"`); - } else if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { - assert.strictEqual(sourceRes.ServerSideEncryption, 'AES256'); - assert.strictEqual(destRes.ServerSideEncryption, 'AES256'); - } else { - assert.strictEqual(sourceRes.ETag, `"${correctMD5}"`); - assert.strictEqual(destRes.ETag, `"${correctMD5}"`); - assert.deepStrictEqual(sourceRes.Body, destRes.Body); - assert.strictEqual(awsRes.ETag, `"${correctMD5}"`); - assert.deepStrictEqual(sourceRes.Body, awsRes.Body); - } - if (destLoc === awsLocationEncryption) { - assert.strictEqual(awsRes.ServerSideEncryption, 'AES256'); - } else { - assert.strictEqual(awsRes.ServerSideEncryption, undefined); - } - if (mdDirective === 'COPY') { - assert.deepStrictEqual(sourceRes.Metadata['test-header'], - destRes.Metadata['test-header']); - } else if (mdDirective === 'REPLACE') { - assert.strictEqual(destRes.Metadata['test-header'], - undefined); - } - if (destLoc === awsLocation) { - assert.strictEqual(awsRes.Metadata[locMetaHeader], destLoc); + async.series( + [ + cb => s3.getObject(sourceGetParams, cb), + cb => s3.getObject(destGetParams, cb), + cb => awsS3.getObject(awsParams, cb), + ], + (err, results) => { + assert.equal(err, null, `Error in assertGetObjects: ${err}`); + const [sourceRes, destRes, awsRes] = results; + if (isEmptyObj) { + assert.strictEqual(sourceRes.ETag, `"${emptyMD5}"`); + assert.strictEqual(destRes.ETag, `"${emptyMD5}"`); + assert.strictEqual(awsRes.ETag, `"${emptyMD5}"`); + } else if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { + assert.strictEqual(sourceRes.ServerSideEncryption, 'AES256'); + assert.strictEqual(destRes.ServerSideEncryption, 'AES256'); + } else { + assert.strictEqual(sourceRes.ETag, `"${correctMD5}"`); + assert.strictEqual(destRes.ETag, `"${correctMD5}"`); + assert.deepStrictEqual(sourceRes.Body, destRes.Body); + assert.strictEqual(awsRes.ETag, `"${correctMD5}"`); + assert.deepStrictEqual(sourceRes.Body, awsRes.Body); + } + if (destLoc === awsLocationEncryption) { + assert.strictEqual(awsRes.ServerSideEncryption, 'AES256'); + } else { + assert.strictEqual(awsRes.ServerSideEncryption, undefined); + } if (mdDirective === 'COPY') { - assert.deepStrictEqual(sourceRes.Metadata['test-header'], - awsRes.Metadata['test-header']); + assert.deepStrictEqual(sourceRes.Metadata['test-header'], destRes.Metadata['test-header']); } else if (mdDirective === 'REPLACE') { - assert.strictEqual(awsRes.Metadata['test-header'], - undefined); + assert.strictEqual(destRes.Metadata['test-header'], undefined); } + if (destLoc === awsLocation) { + assert.strictEqual(awsRes.Metadata[locMetaHeader], destLoc); + if (mdDirective === 'COPY') { + assert.deepStrictEqual(sourceRes.Metadata['test-header'], awsRes.Metadata['test-header']); + } else if (mdDirective === 'REPLACE') { + assert.strictEqual(awsRes.Metadata['test-header'], undefined); + } + } + assert.strictEqual(sourceRes.ContentLength, destRes.ContentLength); + assert.strictEqual(sourceRes.Metadata[locMetaHeader], sourceLoc); + assert.strictEqual(destRes.Metadata[locMetaHeader], destLoc); + callback(); } - assert.strictEqual(sourceRes.ContentLength, destRes.ContentLength); - assert.strictEqual(sourceRes.Metadata[locMetaHeader], sourceLoc); - assert.strictEqual(destRes.Metadata[locMetaHeader], destLoc); - callback(); - }); + ); } -describeSkipIfNotMultiple('MultipleBackend object copy: 
AWS', -function testSuite() { +describeSkipIfNotMultiple('MultipleBackend object copy: AWS', function testSuite() { this.timeout(250000); withV4(sigCfg => { beforeEach(() => { @@ -119,55 +137,60 @@ function testSuite() { if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { s3.createBucketPromise = createEncryptedBucketPromise; } - return s3.createBucketPromise({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: memLocation, - }, - }) - .then(() => s3.createBucketPromise({ - Bucket: awsServerSideEncryptionbucket, - CreateBucketConfiguration: { - LocationConstraint: awsLocationEncryption, - }, - })) - .then(() => s3.createBucketPromise({ Bucket: bucketAws, - CreateBucketConfiguration: { - LocationConstraint: awsLocation, - }, - })) - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucketPromise({ + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: memLocation, + }, + }) + .then(() => + s3.createBucketPromise({ + Bucket: awsServerSideEncryptionbucket, + CreateBucketConfiguration: { + LocationConstraint: awsLocationEncryption, + }, + }) + ) + .then(() => + s3.createBucketPromise({ + Bucket: bucketAws, + CreateBucketConfiguration: { + LocationConstraint: awsLocation, + }, + }) + ) + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => bucketUtil.empty(bucketAws)) - .then(() => bucketUtil.empty(awsServerSideEncryptionbucket)) - .then(() => { - process.stdout.write(`Deleting bucket ${bucket}\n`); - return bucketUtil.deleteOne(bucket); - }) - .then(() => { - process.stdout.write('Deleting bucket ' + - `${awsServerSideEncryptionbucket}\n`); - return bucketUtil.deleteOne(awsServerSideEncryptionbucket); - }) - .then(() => { - process.stdout.write(`Deleting bucket ${bucketAws}\n`); - return bucketUtil.deleteOne(bucketAws); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => bucketUtil.empty(bucketAws)) + .then(() => bucketUtil.empty(awsServerSideEncryptionbucket)) + .then(() => { + process.stdout.write(`Deleting bucket ${bucket}\n`); + return bucketUtil.deleteOne(bucket); + }) + .then(() => { + process.stdout.write('Deleting bucket ' + `${awsServerSideEncryptionbucket}\n`); + return bucketUtil.deleteOne(awsServerSideEncryptionbucket); + }) + .then(() => { + process.stdout.write(`Deleting bucket ${bucketAws}\n`); + return bucketUtil.deleteOne(bucketAws); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); - it('should copy an object from mem to AWS relying on ' + - 'destination bucket location', - done => { + it('should copy an object from mem to AWS relying on ' + 'destination bucket location', done => { putSourceObj(memLocation, false, bucket, key => { const copyKey = `copyKey-${genUniqID()}`; const copyParams = { @@ -178,20 +201,27 @@ function testSuite() { }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, memLocation, copyKey, - bucketAws, awsLocation, copyKey, 'COPY', false, awsS3, - awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + 
assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + memLocation, + copyKey, + bucketAws, + awsLocation, + copyKey, + 'COPY', + false, + awsS3, + awsLocation, + done + ); }); }); }); - it('should copy an object from Azure to AWS relying on ' + - 'destination bucket location', - done => { + it('should copy an object from Azure to AWS relying on ' + 'destination bucket location', done => { putSourceObj(azureLocation, false, bucket, key => { const copyKey = `copyKey-${genUniqID()}`; const copyParams = { @@ -202,44 +232,62 @@ function testSuite() { }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, azureLocation, copyKey, - bucketAws, awsLocation, copyKey, 'COPY', false, awsS3, - awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + azureLocation, + copyKey, + bucketAws, + awsLocation, + copyKey, + 'COPY', + false, + awsS3, + awsLocation, + done + ); }); }); }); - it('should copy an object without location contraint from mem ' + - 'to AWS relying on destination bucket location', - done => { - putSourceObj(null, false, bucket, key => { - const copyKey = `copyKey-${genUniqID()}`; - const copyParams = { - Bucket: bucketAws, - Key: copyKey, - CopySource: `/${bucket}/${key}`, - MetadataDirective: 'COPY', - }; - process.stdout.write('Copying object\n'); - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, undefined, copyKey, - bucketAws, undefined, copyKey, 'COPY', false, awsS3, - awsLocation, done); + it( + 'should copy an object without location contraint from mem ' + + 'to AWS relying on destination bucket location', + done => { + putSourceObj(null, false, bucket, key => { + const copyKey = `copyKey-${genUniqID()}`; + const copyParams = { + Bucket: bucketAws, + Key: copyKey, + CopySource: `/${bucket}/${key}`, + MetadataDirective: 'COPY', + }; + process.stdout.write('Copying object\n'); + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + undefined, + copyKey, + bucketAws, + undefined, + copyKey, + 'COPY', + false, + awsS3, + awsLocation, + done + ); + }); }); - }); - }); + } + ); - it('should copy an object from AWS to mem relying on destination ' + - 'bucket location', - done => { + it('should copy an object from AWS to mem relying on destination ' + 'bucket location', done => { putSourceObj(awsLocation, false, bucketAws, key => { const copyKey = `copyKey-${genUniqID()}`; const copyParams = { @@ -250,13 +298,22 @@ function testSuite() { }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucketAws, awsLocation, copyKey, - bucket, memLocation, key, 'COPY', false, awsS3, - awsLocation, done); + assert.equal(err, null, 'Expected 
success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucketAws, + awsLocation, + copyKey, + bucket, + memLocation, + key, + 'COPY', + false, + awsS3, + awsLocation, + done + ); }); }); }); @@ -270,23 +327,32 @@ function testSuite() { CopySource: `/${bucket}/${key}`, MetadataDirective: 'REPLACE', Metadata: { - 'scal-location-constraint': awsLocation }, + 'scal-location-constraint': awsLocation, + }, }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, memLocation, copyKey, bucket, - awsLocation, copyKey, 'REPLACE', false, awsS3, - awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + memLocation, + copyKey, + bucket, + awsLocation, + copyKey, + 'REPLACE', + false, + awsS3, + awsLocation, + done + ); }); }); }); - itSkipCeph('should copy an object from mem to AWS with aws server ' + - 'side encryption', done => { + itSkipCeph('should copy an object from mem to AWS with aws server ' + 'side encryption', done => { putSourceObj(memLocation, false, bucket, key => { const copyKey = `copyKey-${genUniqID()}`; const copyParams = { @@ -295,47 +361,67 @@ function testSuite() { CopySource: `/${bucket}/${key}`, MetadataDirective: 'REPLACE', Metadata: { - 'scal-location-constraint': awsLocationEncryption }, + 'scal-location-constraint': awsLocationEncryption, + }, }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, memLocation, copyKey, bucket, - awsLocationEncryption, copyKey, 'REPLACE', false, - awsS3, awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + memLocation, + copyKey, + bucket, + awsLocationEncryption, + copyKey, + 'REPLACE', + false, + awsS3, + awsLocation, + done + ); }); }); }); - it('should copy an object from AWS to mem with encryption with ' + - 'REPLACE directive but no location constraint', done => { - putSourceObj(awsLocation, false, bucket, key => { - const copyKey = `copyKey-${genUniqID()}`; - const copyParams = { - Bucket: bucket, - Key: copyKey, - CopySource: `/${bucket}/${key}`, - MetadataDirective: 'REPLACE', - }; - process.stdout.write('Copying object\n'); - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, awsLocation, copyKey, bucket, - undefined, key, 'REPLACE', false, - awsS3, awsLocation, done); + it( + 'should copy an object from AWS to mem with encryption with ' + + 'REPLACE directive but no location constraint', + done => { + putSourceObj(awsLocation, false, bucket, key => { + const copyKey = `copyKey-${genUniqID()}`; + const copyParams = { + Bucket: bucket, + Key: copyKey, + CopySource: `/${bucket}/${key}`, + MetadataDirective: 'REPLACE', + }; + process.stdout.write('Copying object\n'); + 
s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + awsLocation, + copyKey, + bucket, + undefined, + key, + 'REPLACE', + false, + awsS3, + awsLocation, + done + ); + }); }); - }); - }); + } + ); - itSkipCeph('should copy an object on AWS with aws server side ' + - 'encryption', - done => { + itSkipCeph('should copy an object on AWS with aws server side ' + 'encryption', done => { putSourceObj(awsLocation, false, bucket, key => { const copyKey = `copyKey-${genUniqID()}`; const copyParams = { @@ -344,25 +430,33 @@ function testSuite() { CopySource: `/${bucket}/${key}`, MetadataDirective: 'REPLACE', Metadata: { - 'scal-location-constraint': awsLocationEncryption }, + 'scal-location-constraint': awsLocationEncryption, + }, }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, awsLocation, copyKey, bucket, - awsLocationEncryption, copyKey, 'REPLACE', false, awsS3, - awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + awsLocation, + copyKey, + bucket, + awsLocationEncryption, + copyKey, + 'REPLACE', + false, + awsS3, + awsLocation, + done + ); }); }); }); - itSkipCeph('should copy an object on AWS with aws server side ' + - 'encrypted bucket', done => { - putSourceObj(awsLocation, false, awsServerSideEncryptionbucket, - key => { + itSkipCeph('should copy an object on AWS with aws server side ' + 'encrypted bucket', done => { + putSourceObj(awsLocation, false, awsServerSideEncryptionbucket, key => { const copyKey = `copyKey-${genUniqID()}`; const copyParams = { Bucket: awsServerSideEncryptionbucket, @@ -372,44 +466,62 @@ function testSuite() { }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, awsServerSideEncryptionbucket, - awsLocation, copyKey, awsServerSideEncryptionbucket, - awsLocationEncryption, copyKey, 'COPY', - false, awsS3, awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + awsServerSideEncryptionbucket, + awsLocation, + copyKey, + awsServerSideEncryptionbucket, + awsLocationEncryption, + copyKey, + 'COPY', + false, + awsS3, + awsLocation, + done + ); }); }); }); - it('should copy an object from mem to AWS with encryption with ' + - 'REPLACE directive but no location constraint', done => { - putSourceObj(null, false, bucket, key => { - const copyKey = `copyKey-${genUniqID()}`; - const copyParams = { - Bucket: bucketAws, - Key: copyKey, - CopySource: `/${bucket}/${key}`, - MetadataDirective: 'REPLACE', - }; - process.stdout.write('Copying object\n'); - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, undefined, copyKey, - 
bucketAws, undefined, copyKey, 'REPLACE', false, - awsS3, awsLocation, done); + it( + 'should copy an object from mem to AWS with encryption with ' + + 'REPLACE directive but no location constraint', + done => { + putSourceObj(null, false, bucket, key => { + const copyKey = `copyKey-${genUniqID()}`; + const copyParams = { + Bucket: bucketAws, + Key: copyKey, + CopySource: `/${bucket}/${key}`, + MetadataDirective: 'REPLACE', + }; + process.stdout.write('Copying object\n'); + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + undefined, + copyKey, + bucketAws, + undefined, + copyKey, + 'REPLACE', + false, + awsS3, + awsLocation, + done + ); + }); }); - }); - }); + } + ); - it('should copy an object from AWS to mem with "COPY" ' + - 'directive and aws location metadata', - done => { + it('should copy an object from AWS to mem with "COPY" ' + 'directive and aws location metadata', done => { putSourceObj(awsLocation, false, bucket, key => { const copyKey = `copyKey-${genUniqID()}`; const copyParams = { @@ -418,17 +530,27 @@ function testSuite() { CopySource: `/${bucket}/${key}`, MetadataDirective: 'COPY', Metadata: { - 'scal-location-constraint': awsLocation }, + 'scal-location-constraint': awsLocation, + }, }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, awsLocation, copyKey, bucket, - memLocation, key, 'COPY', false, awsS3, - awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + awsLocation, + copyKey, + bucket, + memLocation, + key, + 'COPY', + false, + awsS3, + awsLocation, + done + ); }); }); }); @@ -445,103 +567,135 @@ function testSuite() { }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, awsLocation, copyKey, bucket, - awsLocation, copyKey, 'REPLACE', false, awsS3, - awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + awsLocation, + copyKey, + bucket, + awsLocation, + copyKey, + 'REPLACE', + false, + awsS3, + awsLocation, + done + ); }); }); }); - it('should copy an object on AWS location with bucketMatch equals ' + - 'false to a different AWS location with bucketMatch equals true', - done => { - putSourceObj(awsLocationMismatch, false, bucket, key => { - const copyKey = `copyKey-${genUniqID()}`; - const copyParams = { - Bucket: bucket, - Key: copyKey, - CopySource: `/${bucket}/${key}`, - MetadataDirective: 'REPLACE', - Metadata: { - 'scal-location-constraint': awsLocation }, - }; - process.stdout.write('Copying object\n'); - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, awsLocationMismatch, copyKey, - 
bucket, awsLocation, copyKey, 'REPLACE', false, awsS3, - awsLocation, done); + it( + 'should copy an object on AWS location with bucketMatch equals ' + + 'false to a different AWS location with bucketMatch equals true', + done => { + putSourceObj(awsLocationMismatch, false, bucket, key => { + const copyKey = `copyKey-${genUniqID()}`; + const copyParams = { + Bucket: bucket, + Key: copyKey, + CopySource: `/${bucket}/${key}`, + MetadataDirective: 'REPLACE', + Metadata: { + 'scal-location-constraint': awsLocation, + }, + }; + process.stdout.write('Copying object\n'); + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + awsLocationMismatch, + copyKey, + bucket, + awsLocation, + copyKey, + 'REPLACE', + false, + awsS3, + awsLocation, + done + ); + }); }); - }); - }); + } + ); - it('should copy an object on AWS to a different AWS location ' + - 'with source object READ access', - done => { + it('should copy an object on AWS to a different AWS location ' + 'with source object READ access', done => { const awsConfig2 = getRealAwsConfig(awsLocation2); const awsS3Two = new AWS.S3(awsConfig2); const copyKey = `copyKey-${genUniqID()}`; - const awsBucket = - config.locationConstraints[awsLocation].details.bucketName; - async.waterfall([ - // giving access to the object on the AWS side - next => putSourceObj(awsLocation, false, bucket, key => - next(null, key)), - (key, next) => awsS3.putObjectAcl( - { Bucket: awsBucket, Key: key, - ACL: 'public-read' }, err => next(err, key)), - (key, next) => { + const awsBucket = config.locationConstraints[awsLocation].details.bucketName; + async.waterfall( + [ + // giving access to the object on the AWS side + next => putSourceObj(awsLocation, false, bucket, key => next(null, key)), + (key, next) => + awsS3.putObjectAcl({ Bucket: awsBucket, Key: key, ACL: 'public-read' }, err => next(err, key)), + (key, next) => { + const copyParams = { + Bucket: bucket, + Key: copyKey, + CopySource: `/${bucket}/${key}`, + MetadataDirective: 'REPLACE', + Metadata: { + 'scal-location-constraint': awsLocation2, + }, + }; + process.stdout.write('Copying object\n'); + s3.copyObject(copyParams, (err, result) => { + assert.equal(err, null, 'Expected success ' + `but got error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + next(err, key); + }); + }, + (key, next) => + assertGetObjects( + key, + bucket, + awsLocation, + copyKey, + bucket, + awsLocation2, + copyKey, + 'REPLACE', + false, + awsS3Two, + awsLocation2, + next + ), + ], + done + ); + }); + + itSkipCeph( + 'should return error AccessDenied copying an object on ' + + 'AWS to a different AWS account without source object READ access', + done => { + putSourceObj(awsLocation, false, bucket, key => { + const copyKey = `copyKey-${genUniqID()}`; const copyParams = { Bucket: bucket, Key: copyKey, CopySource: `/${bucket}/${key}`, MetadataDirective: 'REPLACE', Metadata: { - 'scal-location-constraint': awsLocation2 }, + 'scal-location-constraint': awsLocation2, + }, }; process.stdout.write('Copying object\n'); - s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success ' + - `but got error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - next(err, key); + s3.copyObject(copyParams, err => { + assert.strictEqual(err.code, 'AccessDenied'); + done(); }); - }, - (key, 
next) => - assertGetObjects(key, bucket, awsLocation, copyKey, - bucket, awsLocation2, copyKey, 'REPLACE', false, - awsS3Two, awsLocation2, next), - ], done); - }); - - itSkipCeph('should return error AccessDenied copying an object on ' + - 'AWS to a different AWS account without source object READ access', - done => { - putSourceObj(awsLocation, false, bucket, key => { - const copyKey = `copyKey-${genUniqID()}`; - const copyParams = { - Bucket: bucket, - Key: copyKey, - CopySource: `/${bucket}/${key}`, - MetadataDirective: 'REPLACE', - Metadata: { - 'scal-location-constraint': awsLocation2 }, - }; - process.stdout.write('Copying object\n'); - s3.copyObject(copyParams, err => { - assert.strictEqual(err.code, 'AccessDenied'); - done(); }); - }); - }); + } + ); it('should copy an object on AWS with REPLACE', done => { putSourceObj(awsLocation, false, bucket, key => { @@ -552,17 +706,27 @@ function testSuite() { CopySource: `/${bucket}/${key}`, MetadataDirective: 'REPLACE', Metadata: { - 'scal-location-constraint': awsLocation }, + 'scal-location-constraint': awsLocation, + }, }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${correctMD5}"`); - assertGetObjects(key, bucket, awsLocation, copyKey, bucket, - awsLocation, copyKey, 'REPLACE', false, awsS3, - awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${correctMD5}"`); + assertGetObjects( + key, + bucket, + awsLocation, + copyKey, + bucket, + awsLocation, + copyKey, + 'REPLACE', + false, + awsS3, + awsLocation, + done + ); }); }); }); @@ -576,17 +740,27 @@ function testSuite() { CopySource: `/${bucket}/${key}`, MetadataDirective: 'REPLACE', Metadata: { - 'scal-location-constraint': awsLocation }, + 'scal-location-constraint': awsLocation, + }, }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${emptyMD5}"`); - assertGetObjects(key, bucket, memLocation, copyKey, bucket, - awsLocation, copyKey, 'REPLACE', true, awsS3, - awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${emptyMD5}"`); + assertGetObjects( + key, + bucket, + memLocation, + copyKey, + bucket, + awsLocation, + copyKey, + 'REPLACE', + true, + awsS3, + awsLocation, + done + ); }); }); }); @@ -603,27 +777,35 @@ function testSuite() { }; process.stdout.write('Copying object\n'); s3.copyObject(copyParams, (err, result) => { - assert.equal(err, null, 'Expected success but got ' + - `error: ${err}`); - assert.strictEqual(result.CopyObjectResult.ETag, - `"${emptyMD5}"`); - assertGetObjects(key, bucket, awsLocation, copyKey, bucket, - awsLocation, copyKey, 'REPLACE', true, awsS3, - awsLocation, done); + assert.equal(err, null, 'Expected success but got ' + `error: ${err}`); + assert.strictEqual(result.CopyObjectResult.ETag, `"${emptyMD5}"`); + assertGetObjects( + key, + bucket, + awsLocation, + copyKey, + bucket, + awsLocation, + copyKey, + 'REPLACE', + true, + awsS3, + awsLocation, + done + ); }); }); }); - it('should return error if AWS source object has ' + - 'been deleted', done => { + it('should return error if AWS source object has ' + 'been deleted', done 
=> { putSourceObj(awsLocation, false, bucket, key => { - const awsBucket = - config.locationConstraints[awsLocation].details.bucketName; + const awsBucket = config.locationConstraints[awsLocation].details.bucketName; awsS3.deleteObject({ Bucket: awsBucket, Key: key }, err => { - assert.equal(err, null, 'Error deleting object from AWS: ' + - `${err}`); + assert.equal(err, null, 'Error deleting object from AWS: ' + `${err}`); const copyKey = `copyKey-${genUniqID()}`; - const copyParams = { Bucket: bucket, Key: copyKey, + const copyParams = { + Bucket: bucket, + Key: copyKey, CopySource: `/${bucket}/${key}`, MetadataDirective: 'REPLACE', Metadata: { 'scal-location-constraint': awsLocation }, diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopyAwsVersioning.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopyAwsVersioning.js index 1999459857..a1bc32d339 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopyAwsVersioning.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectCopy/objectCopyAwsVersioning.js @@ -40,16 +40,17 @@ function _getCreateBucketParams(bucket, location) { } function createBuckets(testParams, cb) { - const { sourceBucket, sourceLocation, destBucket, destLocation } - = testParams; + const { sourceBucket, sourceLocation, destBucket, destLocation } = testParams; const sourceParams = _getCreateBucketParams(sourceBucket, sourceLocation); const destParams = _getCreateBucketParams(destBucket, destLocation); if (sourceBucket === destBucket) { return s3.createBucket(sourceParams, err => cb(err)); } - return async.map([sourceParams, destParams], + return async.map( + [sourceParams, destParams], (createParams, next) => s3.createBucket(createParams, next), - err => cb(err)); + err => cb(err) + ); } function putSourceObj(testParams, cb) { @@ -64,8 +65,7 @@ function putSourceObj(testParams, cb) { sourceParams.Body = someBody; } s3.putObject(sourceParams, (err, result) => { - assert.strictEqual(err, null, - `Error putting source object: ${err}`); + assert.strictEqual(err, null, `Error putting source object: ${err}`); if (isEmptyObj) { assert.strictEqual(result.ETag, `"${emptyMD5}"`); } else { @@ -80,9 +80,16 @@ function putSourceObj(testParams, cb) { } function copyObject(testParams, cb) { - const { sourceBucket, sourceKey, sourceVersionId, sourceVersioningState, - destBucket, directive, destVersioningState, isEmptyObj } - = testParams; + const { + sourceBucket, + sourceKey, + sourceVersionId, + sourceVersioningState, + destBucket, + directive, + destVersioningState, + isEmptyObj, + } = testParams; const destKey = `destkey-${genUniqID()}`; const copyParams = { Bucket: destBucket, @@ -91,15 +98,12 @@ function copyObject(testParams, cb) { MetadataDirective: directive, }; if (sourceVersionId) { - copyParams.CopySource = - `${copyParams.CopySource}?versionId=${sourceVersionId}`; + copyParams.CopySource = `${copyParams.CopySource}?versionId=${sourceVersionId}`; } else if (sourceVersioningState === 'Suspended') { - copyParams.CopySource = - `${copyParams.CopySource}?versionId=null`; + copyParams.CopySource = `${copyParams.CopySource}?versionId=null`; } s3.copyObject(copyParams, (err, data) => { - assert.strictEqual(err, null, - `Error copying object to destination: ${err}`); + assert.strictEqual(err, null, `Error copying object to destination: ${err}`); if (destVersioningState === 'Enabled') { assert.notEqual(data.VersionId, undefined); } else { @@ -133,84 +137,86 @@ function 
assertGetObjects(testParams, cb) { isEmptyObj, directive, } = testParams; - const sourceGetParams = { Bucket: sourceBucket, Key: sourceKey, - VersionId: sourceVersionId }; - const destGetParams = { Bucket: destBucket, Key: destKey, - VersionId: destVersionId }; - const awsParams = { Bucket: awsBucket, Key: destKey, - VersionId: awsVersionId }; + const sourceGetParams = { Bucket: sourceBucket, Key: sourceKey, VersionId: sourceVersionId }; + const destGetParams = { Bucket: destBucket, Key: destKey, VersionId: destVersionId }; + const awsParams = { Bucket: awsBucket, Key: destKey, VersionId: awsVersionId }; - async.series([ - cb => s3.getObject(sourceGetParams, cb), - cb => s3.getObject(destGetParams, cb), - cb => awsS3.getObject(awsParams, cb), - ], (err, results) => { - assert.strictEqual(err, null, `Error in assertGetObjects: ${err}`); - const [sourceRes, destRes, awsRes] = results; - if (isEmptyObj) { - assert.strictEqual(sourceRes.ETag, `"${emptyMD5}"`); - assert.strictEqual(destRes.ETag, `"${emptyMD5}"`); - assert.strictEqual(awsRes.ETag, `"${emptyMD5}"`); - } else { - assert.strictEqual(sourceRes.ETag, `"${correctMD5}"`); - assert.strictEqual(destRes.ETag, `"${correctMD5}"`); - assert.deepStrictEqual(sourceRes.Body, destRes.Body); - assert.strictEqual(awsRes.ETag, `"${correctMD5}"`); - assert.deepStrictEqual(sourceRes.Body, awsRes.Body); - } - if (directive === 'COPY') { - assert.deepStrictEqual(sourceRes.Metadata, testMetadata); - assert.deepStrictEqual(sourceRes.Metadata, destRes.Metadata); - assert.deepStrictEqual(sourceRes.Metadata, awsRes.Metadata); - } else if (directive === 'REPLACE') { - assert.deepStrictEqual(destRes.Metadata, {}); - assert.deepStrictEqual(awsRes.Metadata, {}); + async.series( + [ + cb => s3.getObject(sourceGetParams, cb), + cb => s3.getObject(destGetParams, cb), + cb => awsS3.getObject(awsParams, cb), + ], + (err, results) => { + assert.strictEqual(err, null, `Error in assertGetObjects: ${err}`); + const [sourceRes, destRes, awsRes] = results; + if (isEmptyObj) { + assert.strictEqual(sourceRes.ETag, `"${emptyMD5}"`); + assert.strictEqual(destRes.ETag, `"${emptyMD5}"`); + assert.strictEqual(awsRes.ETag, `"${emptyMD5}"`); + } else { + assert.strictEqual(sourceRes.ETag, `"${correctMD5}"`); + assert.strictEqual(destRes.ETag, `"${correctMD5}"`); + assert.deepStrictEqual(sourceRes.Body, destRes.Body); + assert.strictEqual(awsRes.ETag, `"${correctMD5}"`); + assert.deepStrictEqual(sourceRes.Body, awsRes.Body); + } + if (directive === 'COPY') { + assert.deepStrictEqual(sourceRes.Metadata, testMetadata); + assert.deepStrictEqual(sourceRes.Metadata, destRes.Metadata); + assert.deepStrictEqual(sourceRes.Metadata, awsRes.Metadata); + } else if (directive === 'REPLACE') { + assert.deepStrictEqual(destRes.Metadata, {}); + assert.deepStrictEqual(awsRes.Metadata, {}); + } + assert.strictEqual(sourceRes.ContentLength, destRes.ContentLength); + cb(); } - assert.strictEqual(sourceRes.ContentLength, destRes.ContentLength); - cb(); - }); + ); } -describeSkipIfNotMultiple('AWS backend object copy with versioning', -function testSuite() { +describeSkipIfNotMultiple('AWS backend object copy with versioning', function testSuite() { this.timeout(250000); withV4(sigCfg => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - afterEach(() => bucketUtil.empty(sourceBucketName) - .then(() => bucketUtil.deleteOne(sourceBucketName)) - .catch(err => { - process.stdout.write('Error deleting source bucket ' + - `in afterEach: ${err}\n`); - throw err; - }) - .then(() => 
bucketUtil.empty(destBucketName)) - .then(() => bucketUtil.deleteOne(destBucketName)) - .catch(err => { - if (err.code === 'NoSuchBucket') { - process.stdout.write('Warning: did not find dest bucket ' + - 'for deletion'); - // we do not throw err since dest bucket may not exist - // if we are using source as dest - } else { - process.stdout.write('Error deleting dest bucket ' + - `in afterEach: ${err}\n`); + afterEach(() => + bucketUtil + .empty(sourceBucketName) + .then(() => bucketUtil.deleteOne(sourceBucketName)) + .catch(err => { + process.stdout.write('Error deleting source bucket ' + `in afterEach: ${err}\n`); throw err; - } - }) + }) + .then(() => bucketUtil.empty(destBucketName)) + .then(() => bucketUtil.deleteOne(destBucketName)) + .catch(err => { + if (err.code === 'NoSuchBucket') { + process.stdout.write('Warning: did not find dest bucket ' + 'for deletion'); + // we do not throw err since dest bucket may not exist + // if we are using source as dest + } else { + process.stdout.write('Error deleting dest bucket ' + `in afterEach: ${err}\n`); + throw err; + } + }) ); - [{ - directive: 'REPLACE', - isEmptyObj: true, - }, { - directive: 'REPLACE', - isEmptyObj: false, - }, { - directive: 'COPY', - isEmptyObj: false, - }].forEach(testParams => { + [ + { + directive: 'REPLACE', + isEmptyObj: true, + }, + { + directive: 'REPLACE', + isEmptyObj: false, + }, + { + directive: 'COPY', + isEmptyObj: false, + }, + ].forEach(testParams => { Object.assign(testParams, { sourceBucket: sourceBucketName, sourceLocation: awsLocation, @@ -218,168 +224,207 @@ function testSuite() { destLocation: awsLocation, }); const { isEmptyObj, directive } = testParams; - itSkipCeph(`should copy ${isEmptyObj ? 'an empty' : ''} ` + - 'object from AWS backend non-versioned bucket' + - 'to AWS backend versioned bucket ' + - `with ${directive} directive`, done => { - Object.assign(testParams, { - sourceVersioningState: undefined, - destVersioningState: 'Enabled', - }); - async.waterfall([ - next => createBuckets(testParams, next), - next => putSourceObj(testParams, next), - next => enableVersioning(s3, testParams.destBucket, next), - next => copyObject(testParams, next), - // put another version to test and make sure version id from - // copy was stored to get the right version - next => putToAwsBackend(s3, destBucketName, - testParams.destKey, wrongVersionBody, () => next()), - next => assertGetObjects(testParams, next), - ], done); - }); + itSkipCeph( + `should copy ${isEmptyObj ? 'an empty' : ''} ` + + 'object from AWS backend non-versioned bucket' + + 'to AWS backend versioned bucket ' + + `with ${directive} directive`, + done => { + Object.assign(testParams, { + sourceVersioningState: undefined, + destVersioningState: 'Enabled', + }); + async.waterfall( + [ + next => createBuckets(testParams, next), + next => putSourceObj(testParams, next), + next => enableVersioning(s3, testParams.destBucket, next), + next => copyObject(testParams, next), + // put another version to test and make sure version id from + // copy was stored to get the right version + next => + putToAwsBackend(s3, destBucketName, testParams.destKey, wrongVersionBody, () => next()), + next => assertGetObjects(testParams, next), + ], + done + ); + } + ); - itSkipCeph(`should copy ${isEmptyObj ? 
'an empty ' : ''}version ` + - 'from one AWS backend versioned bucket' + - `to another on ${directive} directive`, - done => { - Object.assign(testParams, { - sourceVersioningState: 'Enabled', - destVersioningState: 'Enabled', - }); - async.waterfall([ - next => createBuckets(testParams, next), - next => enableVersioning(s3, testParams.sourceBucket, next), - next => putSourceObj(testParams, next), - next => enableVersioning(s3, testParams.destBucket, next), - next => copyObject(testParams, next), - // put another version to test and make sure version id from - // copy was stored to get the right version - next => putToAwsBackend(s3, destBucketName, - testParams.destKey, wrongVersionBody, () => next()), - next => assertGetObjects(testParams, next), - ], done); - }); + itSkipCeph( + `should copy ${isEmptyObj ? 'an empty ' : ''}version ` + + 'from one AWS backend versioned bucket' + + `to another on ${directive} directive`, + done => { + Object.assign(testParams, { + sourceVersioningState: 'Enabled', + destVersioningState: 'Enabled', + }); + async.waterfall( + [ + next => createBuckets(testParams, next), + next => enableVersioning(s3, testParams.sourceBucket, next), + next => putSourceObj(testParams, next), + next => enableVersioning(s3, testParams.destBucket, next), + next => copyObject(testParams, next), + // put another version to test and make sure version id from + // copy was stored to get the right version + next => + putToAwsBackend(s3, destBucketName, testParams.destKey, wrongVersionBody, () => next()), + next => assertGetObjects(testParams, next), + ], + done + ); + } + ); - itSkipCeph(`should copy ${isEmptyObj ? 'an empty ' : ''}null ` + - 'version from one AWS backend versioning suspended bucket to ' + - ` another versioning suspended bucket with ${directive} directive`, - done => { - Object.assign(testParams, { - sourceVersioningState: 'Suspended', - destVersioningState: 'Suspended', - }); - async.waterfall([ - next => createBuckets(testParams, next), - next => suspendVersioning(s3, testParams.sourceBucket, - next), - next => putSourceObj(testParams, next), - next => suspendVersioning(s3, testParams.destBucket, next), - next => copyObject(testParams, next), - next => enableVersioning(s3, testParams.destBucket, next), - // put another version to test and make sure version id from - // copy was stored to get the right version - next => putToAwsBackend(s3, destBucketName, - testParams.destKey, wrongVersionBody, () => next()), - next => assertGetObjects(testParams, next), - ], done); - }); + itSkipCeph( + `should copy ${isEmptyObj ? 'an empty ' : ''}null ` + + 'version from one AWS backend versioning suspended bucket to ' + + ` another versioning suspended bucket with ${directive} directive`, + done => { + Object.assign(testParams, { + sourceVersioningState: 'Suspended', + destVersioningState: 'Suspended', + }); + async.waterfall( + [ + next => createBuckets(testParams, next), + next => suspendVersioning(s3, testParams.sourceBucket, next), + next => putSourceObj(testParams, next), + next => suspendVersioning(s3, testParams.destBucket, next), + next => copyObject(testParams, next), + next => enableVersioning(s3, testParams.destBucket, next), + // put another version to test and make sure version id from + // copy was stored to get the right version + next => + putToAwsBackend(s3, destBucketName, testParams.destKey, wrongVersionBody, () => next()), + next => assertGetObjects(testParams, next), + ], + done + ); + } + ); - itSkipCeph(`should copy ${isEmptyObj ? 
'an empty ' : ''}version ` + - 'from a AWS backend versioned bucket to a versioned-suspended' + - `one with ${directive} directive`, done => { - Object.assign(testParams, { - sourceVersioningState: 'Enabled', - destVersioningState: 'Suspended', - }); - async.waterfall([ - next => createBuckets(testParams, next), - next => enableVersioning(s3, testParams.sourceBucket, next), - next => putSourceObj(testParams, next), - next => suspendVersioning(s3, testParams.destBucket, next), - next => copyObject(testParams, next), - // put another version to test and make sure version id from - // copy was stored to get the right version - next => enableVersioning(s3, testParams.destBucket, next), - next => putToAwsBackend(s3, destBucketName, - testParams.destKey, wrongVersionBody, () => next()), - next => assertGetObjects(testParams, next), - ], done); - }); + itSkipCeph( + `should copy ${isEmptyObj ? 'an empty ' : ''}version ` + + 'from a AWS backend versioned bucket to a versioned-suspended' + + `one with ${directive} directive`, + done => { + Object.assign(testParams, { + sourceVersioningState: 'Enabled', + destVersioningState: 'Suspended', + }); + async.waterfall( + [ + next => createBuckets(testParams, next), + next => enableVersioning(s3, testParams.sourceBucket, next), + next => putSourceObj(testParams, next), + next => suspendVersioning(s3, testParams.destBucket, next), + next => copyObject(testParams, next), + // put another version to test and make sure version id from + // copy was stored to get the right version + next => enableVersioning(s3, testParams.destBucket, next), + next => + putToAwsBackend(s3, destBucketName, testParams.destKey, wrongVersionBody, () => next()), + next => assertGetObjects(testParams, next), + ], + done + ); + } + ); }); - itSkipCeph('versioning not configured: if copy object to a ' + - 'pre-existing object on AWS backend, metadata should be overwritten ' + - 'but data of previous version in AWS should not be deleted', - function itF(done) { - const destKey = `destkey-${genUniqID()}`; - const testParams = { - sourceBucket: sourceBucketName, - sourceLocation: awsLocation, - sourceVersioningState: undefined, - destBucket: sourceBucketName, - destLocation: awsLocation, - destVersioningState: undefined, + itSkipCeph( + 'versioning not configured: if copy object to a ' + + 'pre-existing object on AWS backend, metadata should be overwritten ' + + 'but data of previous version in AWS should not be deleted', + function itF(done) { + const destKey = `destkey-${genUniqID()}`; + const testParams = { + sourceBucket: sourceBucketName, + sourceLocation: awsLocation, + sourceVersioningState: undefined, + destBucket: sourceBucketName, + destLocation: awsLocation, + destVersioningState: undefined, + isEmptyObj: true, + directive: 'REPLACE', + }; + async.waterfall( + [ + next => createBuckets(testParams, next), + next => putToAwsBackend(s3, testParams.destBucket, destKey, someBody, err => next(err)), + next => awsGetLatestVerId(destKey, someBody, next), + (awsVerId, next) => { + this.test.awsVerId = awsVerId; + next(); + }, + next => putSourceObj(testParams, next), + next => + s3.copyObject( + { + Bucket: testParams.destBucket, + Key: destKey, + CopySource: `/${testParams.sourceBucket}` + `/${testParams.sourceKey}`, + MetadataDirective: testParams.directive, + Metadata: { + 'scal-location-constraint': testParams.destLocation, + }, + }, + next + ), + (copyResult, next) => + awsGetLatestVerId(destKey, '', (err, awsVersionId) => { + testParams.destKey = destKey; + testParams.destVersionId 
= copyResult.VersionId; + testParams.awsVersionId = awsVersionId; + next(); + }), + next => + s3.deleteObject( + { Bucket: testParams.destBucket, Key: testParams.destKey, VersionId: 'null' }, + next + ), + (delData, next) => + getAndAssertResult( + s3, + { bucket: testParams.destBucket, key: testParams.destKey, expectedError: 'NoSuchKey' }, + next + ), + next => awsGetLatestVerId(testParams.destKey, someBody, next), + (awsVerId, next) => { + assert.strictEqual(awsVerId, this.test.awsVerId); + next(); + }, + ], + done + ); + } + ); + + [ + { + sourceLocation: memLocation, + directive: 'REPLACE', isEmptyObj: true, + }, + { + sourceLocation: fileLocation, directive: 'REPLACE', - }; - async.waterfall([ - next => createBuckets(testParams, next), - next => putToAwsBackend(s3, testParams.destBucket, destKey, - someBody, err => next(err)), - next => awsGetLatestVerId(destKey, someBody, next), - (awsVerId, next) => { - this.test.awsVerId = awsVerId; - next(); - }, - next => putSourceObj(testParams, next), - next => s3.copyObject({ - Bucket: testParams.destBucket, - Key: destKey, - CopySource: `/${testParams.sourceBucket}` + - `/${testParams.sourceKey}`, - MetadataDirective: testParams.directive, - Metadata: { - 'scal-location-constraint': testParams.destLocation, - }, - }, next), - (copyResult, next) => awsGetLatestVerId(destKey, '', - (err, awsVersionId) => { - testParams.destKey = destKey; - testParams.destVersionId = copyResult.VersionId; - testParams.awsVersionId = awsVersionId; - next(); - }), - next => s3.deleteObject({ Bucket: testParams.destBucket, - Key: testParams.destKey, VersionId: 'null' }, next), - (delData, next) => getAndAssertResult(s3, { bucket: - testParams.destBucket, key: testParams.destKey, - expectedError: 'NoSuchKey' }, next), - next => awsGetLatestVerId(testParams.destKey, someBody, next), - (awsVerId, next) => { - assert.strictEqual(awsVerId, this.test.awsVerId); - next(); - }, - ], done); - }); - - [{ - sourceLocation: memLocation, - directive: 'REPLACE', - isEmptyObj: true, - }, { - sourceLocation: fileLocation, - directive: 'REPLACE', - isEmptyObj: true, - }, { - sourceLocation: memLocation, - directive: 'COPY', - isEmptyObj: false, - }, { - sourceLocation: fileLocation, - directive: 'COPY', - isEmptyObj: false, - }].forEach(testParams => { + isEmptyObj: true, + }, + { + sourceLocation: memLocation, + directive: 'COPY', + isEmptyObj: false, + }, + { + sourceLocation: fileLocation, + directive: 'COPY', + isEmptyObj: false, + }, + ].forEach(testParams => { Object.assign(testParams, { sourceBucket: sourceBucketName, sourceVersioningState: 'Enabled', @@ -389,36 +434,48 @@ function testSuite() { }); const { sourceLocation, directive, isEmptyObj } = testParams; - it(`should copy ${isEmptyObj ? 'empty ' : ''}object from ` + - `${sourceLocation} to bucket on AWS backend with ` + - `versioning with ${directive}`, done => { - async.waterfall([ - next => createBuckets(testParams, next), - next => putSourceObj(testParams, next), - next => enableVersioning(s3, testParams.destBucket, next), - next => copyObject(testParams, next), - next => assertGetObjects(testParams, next), - ], done); - }); + it( + `should copy ${isEmptyObj ? 
'empty ' : ''}object from ` + + `${sourceLocation} to bucket on AWS backend with ` + + `versioning with ${directive}`, + done => { + async.waterfall( + [ + next => createBuckets(testParams, next), + next => putSourceObj(testParams, next), + next => enableVersioning(s3, testParams.destBucket, next), + next => copyObject(testParams, next), + next => assertGetObjects(testParams, next), + ], + done + ); + } + ); - it(`should copy ${isEmptyObj ? 'an empty ' : ''}version from ` + - `${sourceLocation} to bucket on AWS backend with ` + - `versioning with ${directive} directive`, done => { - async.waterfall([ - next => createBuckets(testParams, next), - next => enableVersioning(s3, testParams.sourceBucket, next), - // returns a version id which is added to testParams - // to be used in object copy - next => putSourceObj(testParams, next), - next => enableVersioning(s3, testParams.destBucket, next), - next => copyObject(testParams, next), - // put another version to test and make sure version id - // from copy was stored to get the right version - next => putToAwsBackend(s3, destBucketName, - testParams.destKey, wrongVersionBody, () => next()), - next => assertGetObjects(testParams, next), - ], done); - }); + it( + `should copy ${isEmptyObj ? 'an empty ' : ''}version from ` + + `${sourceLocation} to bucket on AWS backend with ` + + `versioning with ${directive} directive`, + done => { + async.waterfall( + [ + next => createBuckets(testParams, next), + next => enableVersioning(s3, testParams.sourceBucket, next), + // returns a version id which is added to testParams + // to be used in object copy + next => putSourceObj(testParams, next), + next => enableVersioning(s3, testParams.destBucket, next), + next => copyObject(testParams, next), + // put another version to test and make sure version id + // from copy was stored to get the right version + next => + putToAwsBackend(s3, destBucketName, testParams.destKey, wrongVersionBody, () => next()), + next => assertGetObjects(testParams, next), + ], + done + ); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectPutCopyPart/objectPutCopyPartAzure.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectPutCopyPart/objectPutCopyPartAzure.js index 275e3ffba8..7620bd4bea 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectPutCopyPart/objectPutCopyPartAzure.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectPutCopyPart/objectPutCopyPartAzure.js @@ -6,20 +6,28 @@ const azureMpuUtils = s3middleware.azureHelper.mpuUtils; const { config } = require('../../../../../../lib/Config'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { uniqName, getAzureClient, azureLocation, azureLocationMismatch, - memLocation, awsLocation, awsS3, getOwnerInfo, genUniqID } - = require('../utils'); - -const describeSkipIfNotMultipleOrCeph = config.backends.data !== 'multiple' - ? describe.skip : describe.skip; +const { + uniqName, + getAzureClient, + azureLocation, + azureLocationMismatch, + memLocation, + awsLocation, + awsS3, + getOwnerInfo, + genUniqID, +} = require('../utils'); + +const describeSkipIfNotMultipleOrCeph = config.backends.data !== 'multiple' ? 
describe.skip : describe.skip; let azureContainerName; -if (config.locationConstraints[azureLocation] && -config.locationConstraints[azureLocation].details && -config.locationConstraints[azureLocation].details.azureContainerName) { - azureContainerName = - config.locationConstraints[azureLocation].details.azureContainerName; +if ( + config.locationConstraints[azureLocation] && + config.locationConstraints[azureLocation].details && + config.locationConstraints[azureLocation].details.azureContainerName +) { + azureContainerName = config.locationConstraints[azureLocation].details.azureContainerName; } const memBucketName = `memputcopypartazure${genUniqID()}`; @@ -57,12 +65,8 @@ const result = { MaxParts: 1000, IsTruncated: false, Parts: [], - Initiator: - { ID: ownerID, - DisplayName: ownerDisplayName }, - Owner: - { DisplayName: ownerDisplayName, - ID: ownerID }, + Initiator: { ID: ownerID, DisplayName: ownerDisplayName }, + Owner: { DisplayName: ownerDisplayName, ID: ownerID }, StorageClass: 'STANDARD', }; @@ -70,8 +74,7 @@ let s3; let bucketUtil; function assertCopyPart(infos, cb) { - const { azureContainerName, mpuKeyNameAzure, uploadId, md5, - subPartSize } = infos; + const { azureContainerName, mpuKeyNameAzure, uploadId, md5, subPartSize } = infos; const resultCopy = JSON.parse(JSON.stringify(result)); resultCopy.Bucket = azureContainerName; resultCopy.Key = mpuKeyNameAzure; @@ -80,37 +83,51 @@ function assertCopyPart(infos, cb) { for (let i = 0; i < subPartSize.length; i++) { totalSize = totalSize + subPartSize[i]; } - async.waterfall([ - next => s3.listParts({ - Bucket: azureContainerName, - Key: mpuKeyNameAzure, - UploadId: uploadId, - }, (err, res) => { - assert.equal(err, null, 'listParts: Expected success,' + - ` got error: ${err}`); - resultCopy.Parts = - [{ PartNumber: 1, - LastModified: res.Parts[0].LastModified, - ETag: `"${md5}"`, - Size: totalSize }]; - assert.deepStrictEqual(res, resultCopy); - next(); - }), - next => azureClient.getContainerClient(azureContainerName) - .getBlockBlobClient(mpuKeyNameAzure) - .getBlockList('all').then(res => { - subPartSize.forEach((size, index) => { - const partName = azureMpuUtils.getBlockId(uploadId, 1, index); - assert.strictEqual(res.uncommittedBlocks[index].name, partName); - assert.equal(res.uncommittedBlocks[index].size, size); - }); - next(); - }, err => { - assert.equal(err, null, 'listBlocks: Expected ' + - `success, got error: ${err}`); - next(); - }), - ], cb); + async.waterfall( + [ + next => + s3.listParts( + { + Bucket: azureContainerName, + Key: mpuKeyNameAzure, + UploadId: uploadId, + }, + (err, res) => { + assert.equal(err, null, 'listParts: Expected success,' + ` got error: ${err}`); + resultCopy.Parts = [ + { + PartNumber: 1, + LastModified: res.Parts[0].LastModified, + ETag: `"${md5}"`, + Size: totalSize, + }, + ]; + assert.deepStrictEqual(res, resultCopy); + next(); + } + ), + next => + azureClient + .getContainerClient(azureContainerName) + .getBlockBlobClient(mpuKeyNameAzure) + .getBlockList('all') + .then( + res => { + subPartSize.forEach((size, index) => { + const partName = azureMpuUtils.getBlockId(uploadId, 1, index); + assert.strictEqual(res.uncommittedBlocks[index].name, partName); + assert.equal(res.uncommittedBlocks[index].size, size); + }); + next(); + }, + err => { + assert.equal(err, null, 'listBlocks: Expected ' + `success, got error: ${err}`); + next(); + } + ), + ], + cb + ); } describeSkipIfNotMultipleOrCeph('Put Copy Part to AZURE', function describeF() { @@ -123,39 +140,33 @@ 
describeSkipIfNotMultipleOrCeph('Put Copy Part to AZURE', function describeF() { afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(azureContainerName) - .then(() => bucketUtil.empty(memBucketName)) - .then(() => { - process.stdout.write(`Deleting bucket ${azureContainerName}\n`); - return bucketUtil.deleteOne(azureContainerName); - }) - .then(() => { - process.stdout.write(`Deleting bucket ${memBucketName}\n`); - return bucketUtil.deleteOne(memBucketName); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(azureContainerName) + .then(() => bucketUtil.empty(memBucketName)) + .then(() => { + process.stdout.write(`Deleting bucket ${azureContainerName}\n`); + return bucketUtil.deleteOne(azureContainerName); + }) + .then(() => { + process.stdout.write(`Deleting bucket ${memBucketName}\n`); + return bucketUtil.deleteOne(memBucketName); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); describe('Basic test: ', () => { beforeEach(function beF(done) { - this.currentTest.keyNameNormalAzure = - `normalazure${uniqName(keyObjectAzure)}`; - this.currentTest.keyNameNormalAzureMismatch = - `normalazuremismatch${uniqName(keyObjectAzure)}`; - - this.currentTest.keyNameFiveMbAzure = - `fivembazure${uniqName(keyObjectAzure)}`; - this.currentTest.keyNameFiveMbMem = - `fivembmem${uniqName(keyObjectMemory)}`; - - this.currentTest.mpuKeyNameAzure = - `mpukeyname${uniqName(keyObjectAzure)}`; - this.currentTest.mpuKeyNameMem = - `mpukeyname${uniqName(keyObjectMemory)}`; - this.currentTest.mpuKeyNameAWS = - `mpukeyname${uniqName(keyObjectAWS)}`; + this.currentTest.keyNameNormalAzure = `normalazure${uniqName(keyObjectAzure)}`; + this.currentTest.keyNameNormalAzureMismatch = `normalazuremismatch${uniqName(keyObjectAzure)}`; + + this.currentTest.keyNameFiveMbAzure = `fivembazure${uniqName(keyObjectAzure)}`; + this.currentTest.keyNameFiveMbMem = `fivembmem${uniqName(keyObjectMemory)}`; + + this.currentTest.mpuKeyNameAzure = `mpukeyname${uniqName(keyObjectAzure)}`; + this.currentTest.mpuKeyNameMem = `mpukeyname${uniqName(keyObjectMemory)}`; + this.currentTest.mpuKeyNameAWS = `mpukeyname${uniqName(keyObjectAWS)}`; const paramsAzure = { Bucket: azureContainerName, Key: this.currentTest.mpuKeyNameAzure, @@ -171,58 +182,83 @@ describeSkipIfNotMultipleOrCeph('Put Copy Part to AZURE', function describeF() { Key: this.currentTest.mpuKeyNameAWS, Metadata: { 'scal-location-constraint': awsLocation }, }; - async.waterfall([ - next => s3.createBucket({ Bucket: azureContainerName }, - err => next(err)), - next => s3.createBucket({ Bucket: memBucketName }, - err => next(err)), - next => s3.putObject({ - Bucket: azureContainerName, - Key: this.currentTest.keyNameNormalAzure, - Body: normalBody, - Metadata: { 'scal-location-constraint': azureLocation }, - }, err => next(err)), - next => s3.putObject({ - Bucket: azureContainerName, - Key: this.currentTest.keyNameNormalAzureMismatch, - Body: normalBody, - Metadata: { 'scal-location-constraint': - azureLocationMismatch }, - }, err => next(err)), - next => s3.putObject({ - Bucket: azureContainerName, - Key: this.currentTest.keyNameFiveMbAzure, - Body: fiveMbBody, - Metadata: { 'scal-location-constraint': azureLocation }, - }, err => next(err)), - next => s3.putObject({ - Bucket: azureContainerName, - Key: this.currentTest.keyNameFiveMbMem, - Body: fiveMbBody, - Metadata: { 'scal-location-constraint': memLocation }, - }, err => 
next(err)), - next => s3.createMultipartUpload(paramsAzure, - (err, res) => { - assert.equal(err, null, 'createMultipartUpload ' + - `on Azure: Expected success, got error: ${err}`); - this.currentTest.uploadId = res.UploadId; - next(); - }), - next => s3.createMultipartUpload(paramsMem, - (err, res) => { - assert.equal(err, null, 'createMultipartUpload ' + - `in memory: Expected success, got error: ${err}`); - this.currentTest.uploadIdMem = res.UploadId; - next(); - }), - next => s3.createMultipartUpload(paramsAWS, - (err, res) => { - assert.equal(err, null, 'createMultipartUpload ' + - `on AWS: Expected success, got error: ${err}`); - this.currentTest.uploadIdAWS = res.UploadId; - next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: azureContainerName }, err => next(err)), + next => s3.createBucket({ Bucket: memBucketName }, err => next(err)), + next => + s3.putObject( + { + Bucket: azureContainerName, + Key: this.currentTest.keyNameNormalAzure, + Body: normalBody, + Metadata: { 'scal-location-constraint': azureLocation }, + }, + err => next(err) + ), + next => + s3.putObject( + { + Bucket: azureContainerName, + Key: this.currentTest.keyNameNormalAzureMismatch, + Body: normalBody, + Metadata: { 'scal-location-constraint': azureLocationMismatch }, + }, + err => next(err) + ), + next => + s3.putObject( + { + Bucket: azureContainerName, + Key: this.currentTest.keyNameFiveMbAzure, + Body: fiveMbBody, + Metadata: { 'scal-location-constraint': azureLocation }, + }, + err => next(err) + ), + next => + s3.putObject( + { + Bucket: azureContainerName, + Key: this.currentTest.keyNameFiveMbMem, + Body: fiveMbBody, + Metadata: { 'scal-location-constraint': memLocation }, + }, + err => next(err) + ), + next => + s3.createMultipartUpload(paramsAzure, (err, res) => { + assert.equal( + err, + null, + 'createMultipartUpload ' + `on Azure: Expected success, got error: ${err}` + ); + this.currentTest.uploadId = res.UploadId; + next(); + }), + next => + s3.createMultipartUpload(paramsMem, (err, res) => { + assert.equal( + err, + null, + 'createMultipartUpload ' + `in memory: Expected success, got error: ${err}` + ); + this.currentTest.uploadIdMem = res.UploadId; + next(); + }), + next => + s3.createMultipartUpload(paramsAWS, (err, res) => { + assert.equal( + err, + null, + 'createMultipartUpload ' + `on AWS: Expected success, got error: ${err}` + ); + this.currentTest.uploadIdAWS = res.UploadId; + next(); + }), + ], + done + ); }); afterEach(function afterEachF(done) { const paramsAzure = { @@ -240,271 +276,272 @@ describeSkipIfNotMultipleOrCeph('Put Copy Part to AZURE', function describeF() { Key: this.currentTest.mpuKeyNameAWS, UploadId: this.currentTest.uploadIdAWS, }; - async.waterfall([ - next => s3.abortMultipartUpload(paramsAzure, - err => next(err)), - next => s3.abortMultipartUpload(paramsMem, - err => next(err)), - next => s3.abortMultipartUpload(paramsAWS, - err => next(err)), - ], done); + async.waterfall( + [ + next => s3.abortMultipartUpload(paramsAzure, err => next(err)), + next => s3.abortMultipartUpload(paramsMem, err => next(err)), + next => s3.abortMultipartUpload(paramsAWS, err => next(err)), + ], + done + ); }); - it('should copy small part from Azure to MPU with Azure location', - function ifF(done) { + it('should copy small part from Azure to MPU with Azure location', function ifF(done) { const params = { Bucket: azureContainerName, - CopySource: - `${azureContainerName}/${this.test.keyNameNormalAzure}`, + CopySource: 
`${azureContainerName}/${this.test.keyNameNormalAzure}`, Key: this.test.mpuKeyNameAzure, PartNumber: 1, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${normalMD5}"`); - next(err); - }), - next => { - const infos = { - azureContainerName, - mpuKeyNameAzure: this.test.mpuKeyNameAzure, - uploadId: this.test.uploadId, - md5: normalMD5, - subPartSize: [normalBodySize], - }; - assertCopyPart(infos, next); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${normalMD5}"`); + next(err); + }), + next => { + const infos = { + azureContainerName, + mpuKeyNameAzure: this.test.mpuKeyNameAzure, + uploadId: this.test.uploadId, + md5: normalMD5, + subPartSize: [normalBodySize], + }; + assertCopyPart(infos, next); + }, + ], + done + ); }); - it('should copy small part from Azure location with ' + - 'bucketMatch=false to MPU with Azure location', - function ifF(done) { - const params = { - Bucket: azureContainerName, - CopySource: - `${azureContainerName}/` + - `${this.test.keyNameNormalAzureMismatch}`, - Key: this.test.mpuKeyNameAzure, - PartNumber: 1, - UploadId: this.test.uploadId, - }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${normalMD5}"`); - next(err); - }), - next => { - const infos = { - azureContainerName, - mpuKeyNameAzure: this.test.mpuKeyNameAzure, - uploadId: this.test.uploadId, - md5: normalMD5, - subPartSize: [normalBodySize], - }; - assertCopyPart(infos, next); - }, - ], done); - }); + it( + 'should copy small part from Azure location with ' + 'bucketMatch=false to MPU with Azure location', + function ifF(done) { + const params = { + Bucket: azureContainerName, + CopySource: `${azureContainerName}/` + `${this.test.keyNameNormalAzureMismatch}`, + Key: this.test.mpuKeyNameAzure, + PartNumber: 1, + UploadId: this.test.uploadId, + }; + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${normalMD5}"`); + next(err); + }), + next => { + const infos = { + azureContainerName, + mpuKeyNameAzure: this.test.mpuKeyNameAzure, + uploadId: this.test.uploadId, + md5: normalMD5, + subPartSize: [normalBodySize], + }; + assertCopyPart(infos, next); + }, + ], + done + ); + } + ); - it('should copy 5 Mb part from Azure to MPU with Azure location', - function ifF(done) { + it('should copy 5 Mb part from Azure to MPU with Azure location', function ifF(done) { const params = { Bucket: azureContainerName, - CopySource: - `${azureContainerName}/${this.test.keyNameFiveMbAzure}`, + CopySource: `${azureContainerName}/${this.test.keyNameFiveMbAzure}`, Key: this.test.mpuKeyNameAzure, PartNumber: 1, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); - next(err); - }), - next => { - const infos = { - azureContainerName, - mpuKeyNameAzure: this.test.mpuKeyNameAzure, - uploadId: this.test.uploadId, - md5: 
fiveMbMD5, - subPartSize: [fiveMB], - }; - assertCopyPart(infos, next); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); + next(err); + }), + next => { + const infos = { + azureContainerName, + mpuKeyNameAzure: this.test.mpuKeyNameAzure, + uploadId: this.test.uploadId, + md5: fiveMbMD5, + subPartSize: [fiveMB], + }; + assertCopyPart(infos, next); + }, + ], + done + ); }); - it('should copy part from Azure to MPU with memory location', - function ifF(done) { + it('should copy part from Azure to MPU with memory location', function ifF(done) { const params = { Bucket: memBucketName, - CopySource: - `${azureContainerName}/${this.test.keyNameNormalAzure}`, + CopySource: `${azureContainerName}/${this.test.keyNameNormalAzure}`, Key: this.test.mpuKeyNameMem, PartNumber: 1, UploadId: this.test.uploadIdMem, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${normalMD5}"`); - next(err); - }), - next => { - s3.listParts({ - Bucket: memBucketName, - Key: this.test.mpuKeyNameMem, - UploadId: this.test.uploadIdMem, - }, (err, res) => { - assert.equal(err, null, - 'listParts: Expected success,' + - ` got error: ${err}`); - const resultCopy = - JSON.parse(JSON.stringify(result)); - resultCopy.Bucket = memBucketName; - resultCopy.Key = this.test.mpuKeyNameMem; - resultCopy.UploadId = this.test.uploadIdMem; - resultCopy.Parts = - [{ PartNumber: 1, - LastModified: res.Parts[0].LastModified, - ETag: `"${normalMD5}"`, - Size: normalBodySize }]; - assert.deepStrictEqual(res, resultCopy); - next(); - }); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${normalMD5}"`); + next(err); + }), + next => { + s3.listParts( + { + Bucket: memBucketName, + Key: this.test.mpuKeyNameMem, + UploadId: this.test.uploadIdMem, + }, + (err, res) => { + assert.equal(err, null, 'listParts: Expected success,' + ` got error: ${err}`); + const resultCopy = JSON.parse(JSON.stringify(result)); + resultCopy.Bucket = memBucketName; + resultCopy.Key = this.test.mpuKeyNameMem; + resultCopy.UploadId = this.test.uploadIdMem; + resultCopy.Parts = [ + { + PartNumber: 1, + LastModified: res.Parts[0].LastModified, + ETag: `"${normalMD5}"`, + Size: normalBodySize, + }, + ]; + assert.deepStrictEqual(res, resultCopy); + next(); + } + ); + }, + ], + done + ); }); - it('should copy part from Azure to MPU with AWS location', - function ifF(done) { + it('should copy part from Azure to MPU with AWS location', function ifF(done) { const params = { Bucket: memBucketName, - CopySource: - `${azureContainerName}/${this.test.keyNameNormalAzure}`, + CopySource: `${azureContainerName}/${this.test.keyNameNormalAzure}`, Key: this.test.mpuKeyNameAWS, PartNumber: 1, UploadId: this.test.uploadIdAWS, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${normalMD5}"`); - next(err); - }), - next => { - const awsBucket = - config.locationConstraints[awsLocation] - .details.bucketName; - awsS3.listParts({ - Bucket: awsBucket, - Key: 
this.test.mpuKeyNameAWS, - UploadId: this.test.uploadIdAWS, - }, (err, res) => { - assert.equal(err, null, - 'listParts: Expected success,' + - ` got error: ${err}`); - assert.strictEqual(res.Bucket, awsBucket); - assert.strictEqual(res.Key, - this.test.mpuKeyNameAWS); - assert.strictEqual(res.UploadId, - this.test.uploadIdAWS); - assert.strictEqual(res.Parts.length, 1); - assert.strictEqual(res.Parts[0].PartNumber, 1); - assert.strictEqual(res.Parts[0].ETag, - `"${normalMD5}"`); - assert.strictEqual(res.Parts[0].Size, - normalBodySize); - next(); - }); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${normalMD5}"`); + next(err); + }), + next => { + const awsBucket = config.locationConstraints[awsLocation].details.bucketName; + awsS3.listParts( + { + Bucket: awsBucket, + Key: this.test.mpuKeyNameAWS, + UploadId: this.test.uploadIdAWS, + }, + (err, res) => { + assert.equal(err, null, 'listParts: Expected success,' + ` got error: ${err}`); + assert.strictEqual(res.Bucket, awsBucket); + assert.strictEqual(res.Key, this.test.mpuKeyNameAWS); + assert.strictEqual(res.UploadId, this.test.uploadIdAWS); + assert.strictEqual(res.Parts.length, 1); + assert.strictEqual(res.Parts[0].PartNumber, 1); + assert.strictEqual(res.Parts[0].ETag, `"${normalMD5}"`); + assert.strictEqual(res.Parts[0].Size, normalBodySize); + next(); + } + ); + }, + ], + done + ); }); - it('should copy part from Azure object with range to MPU ' + - 'with AWS location', function ifF(done) { + it('should copy part from Azure object with range to MPU ' + 'with AWS location', function ifF(done) { const params = { Bucket: memBucketName, - CopySource: - `${azureContainerName}/${this.test.keyNameNormalAzure}`, + CopySource: `${azureContainerName}/${this.test.keyNameNormalAzure}`, Key: this.test.mpuKeyNameAWS, CopySourceRange: 'bytes=0-5', PartNumber: 1, UploadId: this.test.uploadIdAWS, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${sixBytesMD5}"`); - next(err); - }), - next => { - const awsBucket = - config.locationConstraints[awsLocation] - .details.bucketName; - awsS3.listParts({ - Bucket: awsBucket, - Key: this.test.mpuKeyNameAWS, - UploadId: this.test.uploadIdAWS, - }, (err, res) => { - assert.equal(err, null, - 'listParts: Expected success,' + - ` got error: ${err}`); - assert.strictEqual(res.Bucket, awsBucket); - assert.strictEqual(res.Key, - this.test.mpuKeyNameAWS); - assert.strictEqual(res.UploadId, - this.test.uploadIdAWS); - assert.strictEqual(res.Parts.length, 1); - assert.strictEqual(res.Parts[0].PartNumber, 1); - assert.strictEqual(res.Parts[0].ETag, - `"${sixBytesMD5}"`); - assert.strictEqual(res.Parts[0].Size, 6); - next(); - }); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${sixBytesMD5}"`); + next(err); + }), + next => { + const awsBucket = config.locationConstraints[awsLocation].details.bucketName; + awsS3.listParts( + { + Bucket: awsBucket, + Key: this.test.mpuKeyNameAWS, + UploadId: this.test.uploadIdAWS, + }, + (err, res) => { + assert.equal(err, null, 'listParts: Expected success,' + ` got error: ${err}`); + 
assert.strictEqual(res.Bucket, awsBucket); + assert.strictEqual(res.Key, this.test.mpuKeyNameAWS); + assert.strictEqual(res.UploadId, this.test.uploadIdAWS); + assert.strictEqual(res.Parts.length, 1); + assert.strictEqual(res.Parts[0].PartNumber, 1); + assert.strictEqual(res.Parts[0].ETag, `"${sixBytesMD5}"`); + assert.strictEqual(res.Parts[0].Size, 6); + next(); + } + ); + }, + ], + done + ); }); - it('should copy 5 Mb part from a memory location to MPU with ' + - 'Azure location', - function ifF(done) { + it('should copy 5 Mb part from a memory location to MPU with ' + 'Azure location', function ifF(done) { const params = { Bucket: azureContainerName, - CopySource: - `${azureContainerName}/${this.test.keyNameFiveMbMem}`, + CopySource: `${azureContainerName}/${this.test.keyNameFiveMbMem}`, Key: this.test.mpuKeyNameAzure, PartNumber: 1, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); - next(err); - }), - next => { - const infos = { - azureContainerName, - mpuKeyNameAzure: this.test.mpuKeyNameAzure, - uploadId: this.test.uploadId, - md5: fiveMbMD5, - subPartSize: [fiveMB], - }; - assertCopyPart(infos, next); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); + next(err); + }), + next => { + const infos = { + azureContainerName, + mpuKeyNameAzure: this.test.mpuKeyNameAzure, + uploadId: this.test.uploadId, + md5: fiveMbMD5, + subPartSize: [fiveMB], + }; + assertCopyPart(infos, next); + }, + ], + done + ); }); describe('with existing part', () => { @@ -518,74 +555,87 @@ describeSkipIfNotMultipleOrCeph('Put Copy Part to AZURE', function describeF() { }; s3.uploadPart(params, done); }); - it('should copy part from Azure to Azure with existing ' + - 'parts', function ifF(done) { + it('should copy part from Azure to Azure with existing ' + 'parts', function ifF(done) { const resultCopy = JSON.parse(JSON.stringify(result)); const params = { Bucket: azureContainerName, - CopySource: - `${azureContainerName}/${this.test.keyNameNormalAzure}`, + CopySource: `${azureContainerName}/${this.test.keyNameNormalAzure}`, Key: this.test.mpuKeyNameAzure, PartNumber: 2, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.equal(err, null, - 'uploadPartCopy: Expected success, got ' + - `error: ${err}`); - assert.strictEqual(res.ETag, `"${normalMD5}"`); - next(err); - }), - next => s3.listParts({ - Bucket: azureContainerName, - Key: this.test.mpuKeyNameAzure, - UploadId: this.test.uploadId, - }, (err, res) => { - assert.equal(err, null, 'listParts: Expected ' + - `success, got error: ${err}`); - resultCopy.Bucket = azureContainerName; - resultCopy.Key = this.test.mpuKeyNameAzure; - resultCopy.UploadId = this.test.uploadId; - resultCopy.Parts = - [{ PartNumber: 1, - LastModified: res.Parts[0].LastModified, - ETag: `"${oneKbMD5}"`, - Size: oneKb }, - { PartNumber: 2, - LastModified: res.Parts[1].LastModified, - ETag: `"${normalMD5}"`, - Size: 11 }, - ]; - assert.deepStrictEqual(res, resultCopy); - next(); - }), - next => azureClient.getContainerClient(azureContainerName) - .getBlockBlobClient(this.test.mpuKeyNameAzure) - .getBlockList('all').then(res => { - const partName = 
azureMpuUtils.getBlockId( - this.test.uploadId, 1, 0); - const partName2 = azureMpuUtils.getBlockId( - this.test.uploadId, 2, 0); - assert.strictEqual(res.uncommittedBlocks[0].name, partName); - assert.equal(res.uncommittedBlocks[0].size, oneKb); - assert.strictEqual(res.uncommittedBlocks[1].name, partName2); - assert.equal(res.uncommittedBlocks[1].size, 11); - next(); - }, err => { - assert.equal(err, null, 'listBlocks: Expected ' + - `success, got error: ${err}`); - next(); - }), - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected success, got ' + `error: ${err}`); + assert.strictEqual(res.ETag, `"${normalMD5}"`); + next(err); + }), + next => + s3.listParts( + { + Bucket: azureContainerName, + Key: this.test.mpuKeyNameAzure, + UploadId: this.test.uploadId, + }, + (err, res) => { + assert.equal(err, null, 'listParts: Expected ' + `success, got error: ${err}`); + resultCopy.Bucket = azureContainerName; + resultCopy.Key = this.test.mpuKeyNameAzure; + resultCopy.UploadId = this.test.uploadId; + resultCopy.Parts = [ + { + PartNumber: 1, + LastModified: res.Parts[0].LastModified, + ETag: `"${oneKbMD5}"`, + Size: oneKb, + }, + { + PartNumber: 2, + LastModified: res.Parts[1].LastModified, + ETag: `"${normalMD5}"`, + Size: 11, + }, + ]; + assert.deepStrictEqual(res, resultCopy); + next(); + } + ), + next => + azureClient + .getContainerClient(azureContainerName) + .getBlockBlobClient(this.test.mpuKeyNameAzure) + .getBlockList('all') + .then( + res => { + const partName = azureMpuUtils.getBlockId(this.test.uploadId, 1, 0); + const partName2 = azureMpuUtils.getBlockId(this.test.uploadId, 2, 0); + assert.strictEqual(res.uncommittedBlocks[0].name, partName); + assert.equal(res.uncommittedBlocks[0].size, oneKb); + assert.strictEqual(res.uncommittedBlocks[1].name, partName2); + assert.equal(res.uncommittedBlocks[1].size, 11); + next(); + }, + err => { + assert.equal( + err, + null, + 'listBlocks: Expected ' + `success, got error: ${err}` + ); + next(); + } + ), + ], + done + ); }); }); }); }); }); -describeSkipIfNotMultipleOrCeph('Put Copy Part to AZURE with large object', -function describeF() { +describeSkipIfNotMultipleOrCeph('Put Copy Part to AZURE with large object', function describeF() { this.timeout(800000); withV4(sigCfg => { beforeEach(() => { @@ -595,44 +645,53 @@ function describeF() { afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(azureContainerName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(azureContainerName); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(azureContainerName) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(azureContainerName); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); describe('Basic test with large object: ', () => { beforeEach(function beF(done) { - this.currentTest.keyNameOneHundredAndFiveMbAzure = - `onehundredandfivembazure${uniqName(keyObjectAzure)}`; - this.currentTest.mpuKeyNameAzure = - `mpukeyname${uniqName(keyObjectAzure)}`; + this.currentTest.keyNameOneHundredAndFiveMbAzure = `onehundredandfivembazure${uniqName(keyObjectAzure)}`; + this.currentTest.mpuKeyNameAzure = `mpukeyname${uniqName(keyObjectAzure)}`; const params = { Bucket: azureContainerName, Key: this.currentTest.mpuKeyNameAzure, Metadata: { 
'scal-location-constraint': azureLocation }, }; - async.waterfall([ - next => s3.createBucket({ Bucket: azureContainerName }, - err => next(err)), - next => s3.putObject({ - Bucket: azureContainerName, - Key: this.currentTest.keyNameOneHundredAndFiveMbAzure, - Body: oneHundredAndFiveMbBody, - Metadata: { 'scal-location-constraint': azureLocation }, - }, err => next(err)), - next => s3.createMultipartUpload(params, (err, res) => { - assert.equal(err, null, 'createMultipartUpload: ' + - `Expected success, got error: ${err}`); - this.currentTest.uploadId = res.UploadId; - next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: azureContainerName }, err => next(err)), + next => + s3.putObject( + { + Bucket: azureContainerName, + Key: this.currentTest.keyNameOneHundredAndFiveMbAzure, + Body: oneHundredAndFiveMbBody, + Metadata: { 'scal-location-constraint': azureLocation }, + }, + err => next(err) + ), + next => + s3.createMultipartUpload(params, (err, res) => { + assert.equal( + err, + null, + 'createMultipartUpload: ' + `Expected success, got error: ${err}` + ); + this.currentTest.uploadId = res.UploadId; + next(); + }), + ], + done + ); }); afterEach(function afterEachF(done) { const params = { @@ -643,44 +702,41 @@ function describeF() { s3.abortMultipartUpload(params, done); }); - it('should copy 105 MB part from Azure to MPU with Azure ' + - 'location', function ifF(done) { + it('should copy 105 MB part from Azure to MPU with Azure ' + 'location', function ifF(done) { const params = { Bucket: azureContainerName, - CopySource: - `${azureContainerName}/` + - `${this.test.keyNameOneHundredAndFiveMbAzure}`, + CopySource: `${azureContainerName}/` + `${this.test.keyNameOneHundredAndFiveMbAzure}`, Key: this.test.mpuKeyNameAzure, PartNumber: 1, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, - `"${oneHundredAndFiveMbMD5}"`); - next(err); - }), - next => { - const infos = { - azureContainerName, - mpuKeyNameAzure: - this.test.mpuKeyNameAzure, - uploadId: this.test.uploadId, - md5: oneHundredAndFiveMbMD5, - subPartSize: [100 * 1024 * 1024, 5 * 1024 * 1024], - }; - assertCopyPart(infos, next); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${oneHundredAndFiveMbMD5}"`); + next(err); + }), + next => { + const infos = { + azureContainerName, + mpuKeyNameAzure: this.test.mpuKeyNameAzure, + uploadId: this.test.uploadId, + md5: oneHundredAndFiveMbMD5, + subPartSize: [100 * 1024 * 1024, 5 * 1024 * 1024], + }; + assertCopyPart(infos, next); + }, + ], + done + ); }); }); }); }); -describeSkipIfNotMultipleOrCeph('Put Copy Part to AZURE with complete MPU', -function describeF() { +describeSkipIfNotMultipleOrCeph('Put Copy Part to AZURE with complete MPU', function describeF() { this.timeout(800000); withV4(sigCfg => { beforeEach(() => { @@ -690,120 +746,125 @@ function describeF() { afterEach(() => { process.stdout.write('Emptying bucket azureContainerName\n'); - return bucketUtil.empty(azureContainerName) - .then(() => { - process.stdout.write('Deleting bucket azureContainerName\n'); - return bucketUtil.deleteOne(azureContainerName); - }) - .then(() => { - process.stdout.write('Emptying bucket awsBucketName\n'); - return 
bucketUtil.empty(awsBucketName); - }) - .then(() => { - process.stdout.write('Deleting bucket awsBucketName\n'); - return bucketUtil.deleteOne(awsBucketName); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(azureContainerName) + .then(() => { + process.stdout.write('Deleting bucket azureContainerName\n'); + return bucketUtil.deleteOne(azureContainerName); + }) + .then(() => { + process.stdout.write('Emptying bucket awsBucketName\n'); + return bucketUtil.empty(awsBucketName); + }) + .then(() => { + process.stdout.write('Deleting bucket awsBucketName\n'); + return bucketUtil.deleteOne(awsBucketName); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); - describe('Basic test with complete MPU from AWS to Azure location: ', - () => { + describe('Basic test with complete MPU from AWS to Azure location: ', () => { beforeEach(function beF(done) { - this.currentTest.keyNameAws = - `onehundredandfivembazure${uniqName(keyObjectAWS)}`; - this.currentTest.mpuKeyNameAzure = - `mpukeyname${uniqName(keyObjectAzure)}`; + this.currentTest.keyNameAws = `onehundredandfivembazure${uniqName(keyObjectAWS)}`; + this.currentTest.mpuKeyNameAzure = `mpukeyname${uniqName(keyObjectAzure)}`; const createMpuParams = { Bucket: azureContainerName, Key: this.currentTest.mpuKeyNameAzure, Metadata: { 'scal-location-constraint': azureLocation }, }; - async.waterfall([ - next => s3.createBucket({ Bucket: awsBucketName }, - err => next(err)), - next => s3.createBucket({ Bucket: azureContainerName }, - err => next(err)), - next => s3.putObject({ - Bucket: awsBucketName, - Key: this.currentTest.keyNameAws, - Body: fiveMbBody, - Metadata: { 'scal-location-constraint': awsLocation }, - }, err => next(err)), - next => s3.createMultipartUpload(createMpuParams, - (err, res) => { - assert.equal(err, null, 'createMultipartUpload: ' + - `Expected success, got error: ${err}`); - this.currentTest.uploadId = res.UploadId; - next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: awsBucketName }, err => next(err)), + next => s3.createBucket({ Bucket: azureContainerName }, err => next(err)), + next => + s3.putObject( + { + Bucket: awsBucketName, + Key: this.currentTest.keyNameAws, + Body: fiveMbBody, + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => next(err) + ), + next => + s3.createMultipartUpload(createMpuParams, (err, res) => { + assert.equal( + err, + null, + 'createMultipartUpload: ' + `Expected success, got error: ${err}` + ); + this.currentTest.uploadId = res.UploadId; + next(); + }), + ], + done + ); }); - it('should copy two 5 MB part from Azure to MPU with Azure ' + - 'location', function ifF(done) { + it('should copy two 5 MB part from Azure to MPU with Azure ' + 'location', function ifF(done) { const uploadParams = { Bucket: azureContainerName, - CopySource: - `${awsBucketName}/` + - `${this.test.keyNameAws}`, + CopySource: `${awsBucketName}/` + `${this.test.keyNameAws}`, Key: this.test.mpuKeyNameAzure, PartNumber: 1, UploadId: this.test.uploadId, }; const uploadParams2 = { Bucket: azureContainerName, - CopySource: - `${awsBucketName}/` + - `${this.test.keyNameAws}`, + CopySource: `${awsBucketName}/` + `${this.test.keyNameAws}`, Key: this.test.mpuKeyNameAzure, PartNumber: 2, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(uploadParams, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, 
got error: ${err}`); - assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); - next(err); - }), - next => s3.uploadPartCopy(uploadParams2, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); - next(err); - }), - next => { - const completeMpuParams = { - Bucket: azureContainerName, - Key: this.test.mpuKeyNameAzure, - MultipartUpload: { - Parts: [ - { - ETag: `"${fiveMbMD5}"`, - PartNumber: 1, - }, - { - ETag: `"${fiveMbMD5}"`, - PartNumber: 2, - }, - ], - }, - UploadId: this.test.uploadId, - }; - s3.completeMultipartUpload(completeMpuParams, - (err, res) => { - assert.equal(err, null, 'completeMultipartUpload:' + - ` Expected success, got error: ${err}`); - assert.strictEqual(res.Bucket, azureContainerName); - assert.strictEqual(res.Key, - this.test.mpuKeyNameAzure); - next(); - }); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(uploadParams, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); + next(err); + }), + next => + s3.uploadPartCopy(uploadParams2, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); + next(err); + }), + next => { + const completeMpuParams = { + Bucket: azureContainerName, + Key: this.test.mpuKeyNameAzure, + MultipartUpload: { + Parts: [ + { + ETag: `"${fiveMbMD5}"`, + PartNumber: 1, + }, + { + ETag: `"${fiveMbMD5}"`, + PartNumber: 2, + }, + ], + }, + UploadId: this.test.uploadId, + }; + s3.completeMultipartUpload(completeMpuParams, (err, res) => { + assert.equal( + err, + null, + 'completeMultipartUpload:' + ` Expected success, got error: ${err}` + ); + assert.strictEqual(res.Bucket, azureContainerName); + assert.strictEqual(res.Key, this.test.mpuKeyNameAzure); + next(); + }); + }, + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectPutCopyPart/objectPutCopyPartGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectPutCopyPart/objectPutCopyPartGcp.js index 1171e26543..615c22edad 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectPutCopyPart/objectPutCopyPartGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectPutCopyPart/objectPutCopyPartGcp.js @@ -4,9 +4,19 @@ const assert = require('assert'); const { config } = require('../../../../../../lib/Config'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, uniqName, gcpBucketMPU, - gcpClient, gcpLocation, gcpLocationMismatch, memLocation, - awsLocation, awsS3, getOwnerInfo, genUniqID } = require('../utils'); +const { + describeSkipIfNotMultipleOrCeph, + uniqName, + gcpBucketMPU, + gcpClient, + gcpLocation, + gcpLocationMismatch, + memLocation, + awsLocation, + awsS3, + getOwnerInfo, + genUniqID, +} = require('../utils'); const bucket = `partcopygcp${genUniqID()}`; @@ -40,12 +50,8 @@ const result = { MaxParts: 1000, IsTruncated: false, Parts: [], - Initiator: - { ID: ownerID, - DisplayName: ownerDisplayName }, - Owner: - { DisplayName: ownerDisplayName, - ID: ownerID }, + Initiator: { ID: ownerID, DisplayName: ownerDisplayName }, + Owner: { DisplayName: ownerDisplayName, ID: ownerID }, StorageClass: 'STANDARD', }; @@ -58,33 +64,45 @@ function assertCopyPart(infos, cb) { resultCopy.Bucket = bucketName; 
resultCopy.Key = keyName; resultCopy.UploadId = uploadId; - async.waterfall([ - next => s3.listParts({ - Bucket: bucketName, - Key: keyName, - UploadId: uploadId, - }, (err, res) => { - assert.ifError(err, 'listParts: Expected success,' + - ` got error: ${err}`); - resultCopy.Parts = - [{ PartNumber: 1, - LastModified: res.Parts[0].LastModified, - ETag: `"${md5}"`, - Size: totalSize }]; - assert.deepStrictEqual(res, resultCopy); - next(); - }), - next => gcpClient.listParts({ - Bucket: gcpBucketMPU, - Key: keyName, - UploadId: uploadId, - }, (err, res) => { - assert.ifError(err, 'GCP listParts: Expected success,' + - `got error: ${err}`); - assert.strictEqual(res.Contents[0].ETag, `"${md5}"`); - next(); - }), - ], cb); + async.waterfall( + [ + next => + s3.listParts( + { + Bucket: bucketName, + Key: keyName, + UploadId: uploadId, + }, + (err, res) => { + assert.ifError(err, 'listParts: Expected success,' + ` got error: ${err}`); + resultCopy.Parts = [ + { + PartNumber: 1, + LastModified: res.Parts[0].LastModified, + ETag: `"${md5}"`, + Size: totalSize, + }, + ]; + assert.deepStrictEqual(res, resultCopy); + next(); + } + ), + next => + gcpClient.listParts( + { + Bucket: gcpBucketMPU, + Key: keyName, + UploadId: uploadId, + }, + (err, res) => { + assert.ifError(err, 'GCP listParts: Expected success,' + `got error: ${err}`); + assert.strictEqual(res.Contents[0].ETag, `"${md5}"`); + next(); + } + ), + ], + cb + ); } describeSkipIfNotMultipleOrCeph('Put Copy Part to GCP', function describeFn() { @@ -93,49 +111,47 @@ describeSkipIfNotMultipleOrCeph('Put Copy Part to GCP', function describeFn() { beforeEach(done => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - s3.createBucket({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: gcpLocation, + s3.createBucket( + { + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: gcpLocation, + }, }, - }, done); + done + ); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => bucketUtil.empty(memBucketName)) - .then(() => { - process.stdout.write(`Deleting bucket ${bucket}\n`); - return bucketUtil.deleteOne(bucket); - }) - .then(() => { - process.stdout.write(`Deleting bucket ${memBucketName}\n`); - return bucketUtil.deleteOne(memBucketName); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => bucketUtil.empty(memBucketName)) + .then(() => { + process.stdout.write(`Deleting bucket ${bucket}\n`); + return bucketUtil.deleteOne(bucket); + }) + .then(() => { + process.stdout.write(`Deleting bucket ${memBucketName}\n`); + return bucketUtil.deleteOne(memBucketName); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); describe('Basic test: ', () => { beforeEach(function beforeFn(done) { - this.currentTest.keyNameNormalGcp = - `normalgcp${uniqName(keyObjectGcp)}`; - this.currentTest.keyNameNormalGcpMismatch = - `normalgcpmismatch${uniqName(keyObjectGcp)}`; + this.currentTest.keyNameNormalGcp = `normalgcp${uniqName(keyObjectGcp)}`; + this.currentTest.keyNameNormalGcpMismatch = `normalgcpmismatch${uniqName(keyObjectGcp)}`; - this.currentTest.keyNameFiveMbGcp = - `fivembgcp${uniqName(keyObjectGcp)}`; - this.currentTest.keyNameFiveMbMem = - `fivembmem${uniqName(keyObjectMemory)}`; + this.currentTest.keyNameFiveMbGcp = `fivembgcp${uniqName(keyObjectGcp)}`; + this.currentTest.keyNameFiveMbMem = 
`fivembmem${uniqName(keyObjectMemory)}`; - this.currentTest.mpuKeyNameGcp = - `mpukeyname${uniqName(keyObjectGcp)}`; - this.currentTest.mpuKeyNameMem = - `mpukeyname${uniqName(keyObjectMemory)}`; - this.currentTest.mpuKeyNameAWS = - `mpukeyname${uniqName(keyObjectAWS)}`; + this.currentTest.mpuKeyNameGcp = `mpukeyname${uniqName(keyObjectGcp)}`; + this.currentTest.mpuKeyNameMem = `mpukeyname${uniqName(keyObjectMemory)}`; + this.currentTest.mpuKeyNameAWS = `mpukeyname${uniqName(keyObjectAWS)}`; const paramsGcp = { Bucket: bucket, Key: this.currentTest.mpuKeyNameGcp, @@ -151,58 +167,80 @@ describeSkipIfNotMultipleOrCeph('Put Copy Part to GCP', function describeFn() { Key: this.currentTest.mpuKeyNameAWS, Metadata: { 'scal-location-constraint': awsLocation }, }; - async.waterfall([ - next => s3.createBucket({ Bucket: bucket }, - err => next(err)), - next => s3.createBucket({ Bucket: memBucketName }, - err => next(err)), - next => s3.putObject({ - Bucket: bucket, - Key: this.currentTest.keyNameNormalGcp, - Body: normalBody, - Metadata: { 'scal-location-constraint': gcpLocation }, - }, err => next(err)), - next => s3.putObject({ - Bucket: bucket, - Key: this.currentTest.keyNameNormalGcpMismatch, - Body: normalBody, - Metadata: { 'scal-location-constraint': - gcpLocationMismatch }, - }, err => next(err)), - next => s3.putObject({ - Bucket: bucket, - Key: this.currentTest.keyNameFiveMbGcp, - Body: fiveMbBody, - Metadata: { 'scal-location-constraint': gcpLocation }, - }, err => next(err)), - next => s3.putObject({ - Bucket: bucket, - Key: this.currentTest.keyNameFiveMbMem, - Body: fiveMbBody, - Metadata: { 'scal-location-constraint': memLocation }, - }, err => next(err)), - next => s3.createMultipartUpload(paramsGcp, - (err, res) => { - assert.ifError(err, 'createMultipartUpload ' + - `on gcp: Expected success, got error: ${err}`); - this.currentTest.uploadId = res.UploadId; - next(); - }), - next => s3.createMultipartUpload(paramsMem, - (err, res) => { - assert.ifError(err, 'createMultipartUpload ' + - `in memory: Expected success, got error: ${err}`); - this.currentTest.uploadIdMem = res.UploadId; - next(); - }), - next => s3.createMultipartUpload(paramsAWS, - (err, res) => { - assert.ifError(err, 'createMultipartUpload ' + - `on AWS: Expected success, got error: ${err}`); - this.currentTest.uploadIdAWS = res.UploadId; - next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucket }, err => next(err)), + next => s3.createBucket({ Bucket: memBucketName }, err => next(err)), + next => + s3.putObject( + { + Bucket: bucket, + Key: this.currentTest.keyNameNormalGcp, + Body: normalBody, + Metadata: { 'scal-location-constraint': gcpLocation }, + }, + err => next(err) + ), + next => + s3.putObject( + { + Bucket: bucket, + Key: this.currentTest.keyNameNormalGcpMismatch, + Body: normalBody, + Metadata: { 'scal-location-constraint': gcpLocationMismatch }, + }, + err => next(err) + ), + next => + s3.putObject( + { + Bucket: bucket, + Key: this.currentTest.keyNameFiveMbGcp, + Body: fiveMbBody, + Metadata: { 'scal-location-constraint': gcpLocation }, + }, + err => next(err) + ), + next => + s3.putObject( + { + Bucket: bucket, + Key: this.currentTest.keyNameFiveMbMem, + Body: fiveMbBody, + Metadata: { 'scal-location-constraint': memLocation }, + }, + err => next(err) + ), + next => + s3.createMultipartUpload(paramsGcp, (err, res) => { + assert.ifError( + err, + 'createMultipartUpload ' + `on gcp: Expected success, got error: ${err}` + ); + this.currentTest.uploadId = res.UploadId; + 
next(); + }), + next => + s3.createMultipartUpload(paramsMem, (err, res) => { + assert.ifError( + err, + 'createMultipartUpload ' + `in memory: Expected success, got error: ${err}` + ); + this.currentTest.uploadIdMem = res.UploadId; + next(); + }), + next => + s3.createMultipartUpload(paramsAWS, (err, res) => { + assert.ifError( + err, + 'createMultipartUpload ' + `on AWS: Expected success, got error: ${err}` + ); + this.currentTest.uploadIdAWS = res.UploadId; + next(); + }), + ], + done + ); }); afterEach(function afterFn(done) { @@ -221,271 +259,273 @@ describeSkipIfNotMultipleOrCeph('Put Copy Part to GCP', function describeFn() { Key: this.currentTest.mpuKeyNameAWS, UploadId: this.currentTest.uploadIdAWS, }; - async.waterfall([ - next => s3.abortMultipartUpload(paramsGcp, - err => next(err)), - next => s3.abortMultipartUpload(paramsMem, - err => next(err)), - next => s3.abortMultipartUpload(paramsAWS, - err => next(err)), - ], done); + async.waterfall( + [ + next => s3.abortMultipartUpload(paramsGcp, err => next(err)), + next => s3.abortMultipartUpload(paramsMem, err => next(err)), + next => s3.abortMultipartUpload(paramsAWS, err => next(err)), + ], + done + ); }); - it('should copy small part from GCP to MPU with GCP location', - function itFn(done) { + it('should copy small part from GCP to MPU with GCP location', function itFn(done) { const params = { Bucket: bucket, - CopySource: - `${bucket}/${this.test.keyNameNormalGcp}`, + CopySource: `${bucket}/${this.test.keyNameNormalGcp}`, Key: this.test.mpuKeyNameGcp, PartNumber: 1, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.ifError(err, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${normalMD5}"`); - next(err); - }), - next => { - const infos = { - bucketName: bucket, - keyName: this.test.mpuKeyNameGcp, - uploadId: this.test.uploadId, - md5: normalMD5, - totalSize: normalBodySize, - }; - assertCopyPart(infos, next); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.ifError(err, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${normalMD5}"`); + next(err); + }), + next => { + const infos = { + bucketName: bucket, + keyName: this.test.mpuKeyNameGcp, + uploadId: this.test.uploadId, + md5: normalMD5, + totalSize: normalBodySize, + }; + assertCopyPart(infos, next); + }, + ], + done + ); }); - it('should copy small part from GCP with bucketMatch=false to ' + - 'MPU with GCP location', - function itFn(done) { - const params = { - Bucket: bucket, - CopySource: - `${bucket}/${this.test.keyNameNormalGcpMismatch}`, - Key: this.test.mpuKeyNameGcp, - PartNumber: 1, - UploadId: this.test.uploadId, - }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.ifError(err, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${normalMD5}"`); - next(err); - }), - next => { - const infos = { - bucketName: bucket, - keyName: this.test.mpuKeyNameGcp, - uploadId: this.test.uploadId, - md5: normalMD5, - totalSize: normalBodySize, - }; - assertCopyPart(infos, next); - }, - ], done); - }); + it( + 'should copy small part from GCP with bucketMatch=false to ' + 'MPU with GCP location', + function itFn(done) { + const params = { + Bucket: bucket, + CopySource: `${bucket}/${this.test.keyNameNormalGcpMismatch}`, + Key: this.test.mpuKeyNameGcp, + PartNumber: 1, + UploadId: 
this.test.uploadId, + }; + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.ifError(err, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${normalMD5}"`); + next(err); + }), + next => { + const infos = { + bucketName: bucket, + keyName: this.test.mpuKeyNameGcp, + uploadId: this.test.uploadId, + md5: normalMD5, + totalSize: normalBodySize, + }; + assertCopyPart(infos, next); + }, + ], + done + ); + } + ); - it('should copy 5 Mb part from GCP to MPU with GCP location', - function ifF(done) { + it('should copy 5 Mb part from GCP to MPU with GCP location', function ifF(done) { const params = { Bucket: bucket, - CopySource: - `${bucket}/${this.test.keyNameFiveMbGcp}`, + CopySource: `${bucket}/${this.test.keyNameFiveMbGcp}`, Key: this.test.mpuKeyNameGcp, PartNumber: 1, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.ifError(err, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); - next(err); - }), - next => { - const infos = { - bucketName: bucket, - keyName: this.test.mpuKeyNameGcp, - uploadId: this.test.uploadId, - md5: fiveMbMD5, - totalSize: fiveMB, - }; - assertCopyPart(infos, next); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.ifError(err, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); + next(err); + }), + next => { + const infos = { + bucketName: bucket, + keyName: this.test.mpuKeyNameGcp, + uploadId: this.test.uploadId, + md5: fiveMbMD5, + totalSize: fiveMB, + }; + assertCopyPart(infos, next); + }, + ], + done + ); }); - it('should copy part from GCP to MPU with memory location', - function ifF(done) { + it('should copy part from GCP to MPU with memory location', function ifF(done) { const params = { Bucket: memBucketName, - CopySource: - `${bucket}/${this.test.keyNameNormalGcp}`, + CopySource: `${bucket}/${this.test.keyNameNormalGcp}`, Key: this.test.mpuKeyNameMem, PartNumber: 1, UploadId: this.test.uploadIdMem, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.ifError(err, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${normalMD5}"`); - next(err); - }), - next => { - s3.listParts({ - Bucket: memBucketName, - Key: this.test.mpuKeyNameMem, - UploadId: this.test.uploadIdMem, - }, (err, res) => { - assert.ifError(err, - 'listParts: Expected success,' + - ` got error: ${err}`); - const resultCopy = - JSON.parse(JSON.stringify(result)); - resultCopy.Bucket = memBucketName; - resultCopy.Key = this.test.mpuKeyNameMem; - resultCopy.UploadId = this.test.uploadIdMem; - resultCopy.Parts = - [{ PartNumber: 1, - LastModified: res.Parts[0].LastModified, - ETag: `"${normalMD5}"`, - Size: normalBodySize }]; - assert.deepStrictEqual(res, resultCopy); - next(); - }); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.ifError(err, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${normalMD5}"`); + next(err); + }), + next => { + s3.listParts( + { + Bucket: memBucketName, + Key: this.test.mpuKeyNameMem, + UploadId: this.test.uploadIdMem, + }, + (err, res) => { + assert.ifError(err, 'listParts: Expected success,' + ` got error: ${err}`); + const resultCopy = JSON.parse(JSON.stringify(result)); + 
resultCopy.Bucket = memBucketName; + resultCopy.Key = this.test.mpuKeyNameMem; + resultCopy.UploadId = this.test.uploadIdMem; + resultCopy.Parts = [ + { + PartNumber: 1, + LastModified: res.Parts[0].LastModified, + ETag: `"${normalMD5}"`, + Size: normalBodySize, + }, + ]; + assert.deepStrictEqual(res, resultCopy); + next(); + } + ); + }, + ], + done + ); }); - it('should copy part from GCP to MPU with AWS location', - function ifF(done) { + it('should copy part from GCP to MPU with AWS location', function ifF(done) { const params = { Bucket: memBucketName, - CopySource: - `${bucket}/${this.test.keyNameNormalGcp}`, + CopySource: `${bucket}/${this.test.keyNameNormalGcp}`, Key: this.test.mpuKeyNameAWS, PartNumber: 1, UploadId: this.test.uploadIdAWS, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.ifError(err, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${normalMD5}"`); - next(err); - }), - next => { - const awsBucket = - config.locationConstraints[awsLocation] - .details.bucketName; - awsS3.listParts({ - Bucket: awsBucket, - Key: this.test.mpuKeyNameAWS, - UploadId: this.test.uploadIdAWS, - }, (err, res) => { - assert.ifError(err, - 'listParts: Expected success,' + - ` got error: ${err}`); - assert.strictEqual(res.Bucket, awsBucket); - assert.strictEqual(res.Key, - this.test.mpuKeyNameAWS); - assert.strictEqual(res.UploadId, - this.test.uploadIdAWS); - assert.strictEqual(res.Parts.length, 1); - assert.strictEqual(res.Parts[0].PartNumber, 1); - assert.strictEqual(res.Parts[0].ETag, - `"${normalMD5}"`); - assert.strictEqual(res.Parts[0].Size, - normalBodySize); - next(); - }); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.ifError(err, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${normalMD5}"`); + next(err); + }), + next => { + const awsBucket = config.locationConstraints[awsLocation].details.bucketName; + awsS3.listParts( + { + Bucket: awsBucket, + Key: this.test.mpuKeyNameAWS, + UploadId: this.test.uploadIdAWS, + }, + (err, res) => { + assert.ifError(err, 'listParts: Expected success,' + ` got error: ${err}`); + assert.strictEqual(res.Bucket, awsBucket); + assert.strictEqual(res.Key, this.test.mpuKeyNameAWS); + assert.strictEqual(res.UploadId, this.test.uploadIdAWS); + assert.strictEqual(res.Parts.length, 1); + assert.strictEqual(res.Parts[0].PartNumber, 1); + assert.strictEqual(res.Parts[0].ETag, `"${normalMD5}"`); + assert.strictEqual(res.Parts[0].Size, normalBodySize); + next(); + } + ); + }, + ], + done + ); }); - it('should copy part from GCP object with range to MPU ' + - 'with AWS location', function ifF(done) { + it('should copy part from GCP object with range to MPU ' + 'with AWS location', function ifF(done) { const params = { Bucket: memBucketName, - CopySource: - `${bucket}/${this.test.keyNameNormalGcp}`, + CopySource: `${bucket}/${this.test.keyNameNormalGcp}`, Key: this.test.mpuKeyNameAWS, CopySourceRange: 'bytes=0-5', PartNumber: 1, UploadId: this.test.uploadIdAWS, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.ifError(err, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${sixBytesMD5}"`); - next(err); - }), - next => { - const awsBucket = - config.locationConstraints[awsLocation] - .details.bucketName; - awsS3.listParts({ - Bucket: awsBucket, - Key: this.test.mpuKeyNameAWS, - UploadId: 
this.test.uploadIdAWS, - }, (err, res) => { - assert.ifError(err, - 'listParts: Expected success,' + - ` got error: ${err}`); - assert.strictEqual(res.Bucket, awsBucket); - assert.strictEqual(res.Key, - this.test.mpuKeyNameAWS); - assert.strictEqual(res.UploadId, - this.test.uploadIdAWS); - assert.strictEqual(res.Parts.length, 1); - assert.strictEqual(res.Parts[0].PartNumber, 1); - assert.strictEqual(res.Parts[0].ETag, - `"${sixBytesMD5}"`); - assert.strictEqual(res.Parts[0].Size, 6); - next(); - }); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.ifError(err, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${sixBytesMD5}"`); + next(err); + }), + next => { + const awsBucket = config.locationConstraints[awsLocation].details.bucketName; + awsS3.listParts( + { + Bucket: awsBucket, + Key: this.test.mpuKeyNameAWS, + UploadId: this.test.uploadIdAWS, + }, + (err, res) => { + assert.ifError(err, 'listParts: Expected success,' + ` got error: ${err}`); + assert.strictEqual(res.Bucket, awsBucket); + assert.strictEqual(res.Key, this.test.mpuKeyNameAWS); + assert.strictEqual(res.UploadId, this.test.uploadIdAWS); + assert.strictEqual(res.Parts.length, 1); + assert.strictEqual(res.Parts[0].PartNumber, 1); + assert.strictEqual(res.Parts[0].ETag, `"${sixBytesMD5}"`); + assert.strictEqual(res.Parts[0].Size, 6); + next(); + } + ); + }, + ], + done + ); }); - it('should copy 5 Mb part from a memory location to MPU with ' + - 'GCP location', - function ifF(done) { + it('should copy 5 Mb part from a memory location to MPU with ' + 'GCP location', function ifF(done) { const params = { Bucket: bucket, - CopySource: - `${bucket}/${this.test.keyNameFiveMbMem}`, + CopySource: `${bucket}/${this.test.keyNameFiveMbMem}`, Key: this.test.mpuKeyNameGcp, PartNumber: 1, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.ifError(err, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); - next(err); - }), - next => { - const infos = { - bucketName: bucket, - keyName: this.test.mpuKeyNameGcp, - uploadId: this.test.uploadId, - md5: fiveMbMD5, - totalSize: fiveMB, - }; - assertCopyPart(infos, next); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.ifError(err, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); + next(err); + }), + next => { + const infos = { + bucketName: bucket, + keyName: this.test.mpuKeyNameGcp, + uploadId: this.test.uploadId, + md5: fiveMbMD5, + totalSize: fiveMB, + }; + assertCopyPart(infos, next); + }, + ], + done + ); }); describe('with existing part', () => { @@ -499,70 +539,77 @@ describeSkipIfNotMultipleOrCeph('Put Copy Part to GCP', function describeFn() { }; s3.uploadPart(params, done); }); - it('should copy part from GCP to GCP with existing ' + - 'parts', function ifF(done) { + it('should copy part from GCP to GCP with existing ' + 'parts', function ifF(done) { const resultCopy = JSON.parse(JSON.stringify(result)); const params = { Bucket: bucket, - CopySource: - `${bucket}/${this.test.keyNameNormalGcp}`, + CopySource: `${bucket}/${this.test.keyNameNormalGcp}`, Key: this.test.mpuKeyNameGcp, PartNumber: 2, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(params, (err, res) => { - assert.ifError(err, - 'uploadPartCopy: Expected success, 
got ' + - `error: ${err}`); - assert.strictEqual(res.ETag, `"${normalMD5}"`); - next(err); - }), - next => s3.listParts({ - Bucket: bucket, - Key: this.test.mpuKeyNameGcp, - UploadId: this.test.uploadId, - }, (err, res) => { - assert.ifError(err, 'listParts: Expected ' + - `success, got error: ${err}`); - resultCopy.Bucket = bucket; - resultCopy.Key = this.test.mpuKeyNameGcp; - resultCopy.UploadId = this.test.uploadId; - resultCopy.Parts = - [{ PartNumber: 1, - LastModified: res.Parts[0].LastModified, - ETag: `"${oneKbMD5}"`, - Size: oneKb }, - { PartNumber: 2, - LastModified: res.Parts[1].LastModified, - ETag: `"${normalMD5}"`, - Size: 11 }, - ]; - assert.deepStrictEqual(res, resultCopy); - next(); - }), - next => gcpClient.listParts({ - Bucket: gcpBucketMPU, - Key: this.test.mpuKeyNameGcp, - UploadId: this.test.uploadId, - }, (err, res) => { - assert.ifError(err, 'GCP listParts: Expected ' + - `success, got error: ${err}`); - assert.strictEqual( - res.Contents[0].ETag, `"${oneKbMD5}"`); - assert.strictEqual( - res.Contents[1].ETag, `"${normalMD5}"`); - next(); - }), - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(params, (err, res) => { + assert.ifError(err, 'uploadPartCopy: Expected success, got ' + `error: ${err}`); + assert.strictEqual(res.ETag, `"${normalMD5}"`); + next(err); + }), + next => + s3.listParts( + { + Bucket: bucket, + Key: this.test.mpuKeyNameGcp, + UploadId: this.test.uploadId, + }, + (err, res) => { + assert.ifError(err, 'listParts: Expected ' + `success, got error: ${err}`); + resultCopy.Bucket = bucket; + resultCopy.Key = this.test.mpuKeyNameGcp; + resultCopy.UploadId = this.test.uploadId; + resultCopy.Parts = [ + { + PartNumber: 1, + LastModified: res.Parts[0].LastModified, + ETag: `"${oneKbMD5}"`, + Size: oneKb, + }, + { + PartNumber: 2, + LastModified: res.Parts[1].LastModified, + ETag: `"${normalMD5}"`, + Size: 11, + }, + ]; + assert.deepStrictEqual(res, resultCopy); + next(); + } + ), + next => + gcpClient.listParts( + { + Bucket: gcpBucketMPU, + Key: this.test.mpuKeyNameGcp, + UploadId: this.test.uploadId, + }, + (err, res) => { + assert.ifError(err, 'GCP listParts: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.Contents[0].ETag, `"${oneKbMD5}"`); + assert.strictEqual(res.Contents[1].ETag, `"${normalMD5}"`); + next(); + } + ), + ], + done + ); }); }); }); }); }); -describeSkipIfNotMultipleOrCeph('Put Copy Part to GCP with complete MPU', -function describeF() { +describeSkipIfNotMultipleOrCeph('Put Copy Part to GCP with complete MPU', function describeF() { this.timeout(800000); withV4(sigCfg => { beforeEach(() => { @@ -572,120 +619,125 @@ function describeF() { afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .then(() => { - process.stdout.write('Emptying bucket awsBucketName\n'); - return bucketUtil.empty(awsBucketName); - }) - .then(() => { - process.stdout.write('Deleting bucket awsBucketName\n'); - return bucketUtil.deleteOne(awsBucketName); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .then(() => { + process.stdout.write('Emptying bucket awsBucketName\n'); + return bucketUtil.empty(awsBucketName); + }) + .then(() => { + process.stdout.write('Deleting bucket 
awsBucketName\n'); + return bucketUtil.deleteOne(awsBucketName); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); - describe('Basic test with complete MPU from AWS to GCP location: ', - () => { + describe('Basic test with complete MPU from AWS to GCP location: ', () => { beforeEach(function beF(done) { - this.currentTest.keyNameAws = - `onehundredandfivembgcp${uniqName(keyObjectAWS)}`; - this.currentTest.mpuKeyNameGcp = - `mpukeyname${uniqName(keyObjectGcp)}`; + this.currentTest.keyNameAws = `onehundredandfivembgcp${uniqName(keyObjectAWS)}`; + this.currentTest.mpuKeyNameGcp = `mpukeyname${uniqName(keyObjectGcp)}`; const createMpuParams = { Bucket: bucket, Key: this.currentTest.mpuKeyNameGcp, Metadata: { 'scal-location-constraint': gcpLocation }, }; - async.waterfall([ - next => s3.createBucket({ Bucket: awsBucketName }, - err => next(err)), - next => s3.createBucket({ Bucket: bucket }, - err => next(err)), - next => s3.putObject({ - Bucket: awsBucketName, - Key: this.currentTest.keyNameAws, - Body: fiveMbBody, - Metadata: { 'scal-location-constraint': awsLocation }, - }, err => next(err)), - next => s3.createMultipartUpload(createMpuParams, - (err, res) => { - assert.equal(err, null, 'createMultipartUpload: ' + - `Expected success, got error: ${err}`); - this.currentTest.uploadId = res.UploadId; - next(); - }), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: awsBucketName }, err => next(err)), + next => s3.createBucket({ Bucket: bucket }, err => next(err)), + next => + s3.putObject( + { + Bucket: awsBucketName, + Key: this.currentTest.keyNameAws, + Body: fiveMbBody, + Metadata: { 'scal-location-constraint': awsLocation }, + }, + err => next(err) + ), + next => + s3.createMultipartUpload(createMpuParams, (err, res) => { + assert.equal( + err, + null, + 'createMultipartUpload: ' + `Expected success, got error: ${err}` + ); + this.currentTest.uploadId = res.UploadId; + next(); + }), + ], + done + ); }); - it('should copy two 5 MB part from GCP to MPU with GCP' + - 'location', function ifF(done) { + it('should copy two 5 MB part from GCP to MPU with GCP' + 'location', function ifF(done) { const uploadParams = { Bucket: bucket, - CopySource: - `${awsBucketName}/` + - `${this.test.keyNameAws}`, + CopySource: `${awsBucketName}/` + `${this.test.keyNameAws}`, Key: this.test.mpuKeyNameGcp, PartNumber: 1, UploadId: this.test.uploadId, }; const uploadParams2 = { Bucket: bucket, - CopySource: - `${awsBucketName}/` + - `${this.test.keyNameAws}`, + CopySource: `${awsBucketName}/` + `${this.test.keyNameAws}`, Key: this.test.mpuKeyNameGcp, PartNumber: 2, UploadId: this.test.uploadId, }; - async.waterfall([ - next => s3.uploadPartCopy(uploadParams, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); - next(err); - }), - next => s3.uploadPartCopy(uploadParams2, (err, res) => { - assert.equal(err, null, 'uploadPartCopy: Expected ' + - `success, got error: ${err}`); - assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); - next(err); - }), - next => { - const completeMpuParams = { - Bucket: bucket, - Key: this.test.mpuKeyNameGcp, - MultipartUpload: { - Parts: [ - { - ETag: `"${fiveMbMD5}"`, - PartNumber: 1, - }, - { - ETag: `"${fiveMbMD5}"`, - PartNumber: 2, - }, - ], - }, - UploadId: this.test.uploadId, - }; - s3.completeMultipartUpload(completeMpuParams, - (err, res) => { - assert.equal(err, null, 'completeMultipartUpload:' + - ` Expected 
success, got error: ${err}`); - assert.strictEqual(res.Bucket, bucket); - assert.strictEqual(res.Key, - this.test.mpuKeyNameGcp); - next(); - }); - }, - ], done); + async.waterfall( + [ + next => + s3.uploadPartCopy(uploadParams, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); + next(err); + }), + next => + s3.uploadPartCopy(uploadParams2, (err, res) => { + assert.equal(err, null, 'uploadPartCopy: Expected ' + `success, got error: ${err}`); + assert.strictEqual(res.ETag, `"${fiveMbMD5}"`); + next(err); + }), + next => { + const completeMpuParams = { + Bucket: bucket, + Key: this.test.mpuKeyNameGcp, + MultipartUpload: { + Parts: [ + { + ETag: `"${fiveMbMD5}"`, + PartNumber: 1, + }, + { + ETag: `"${fiveMbMD5}"`, + PartNumber: 2, + }, + ], + }, + UploadId: this.test.uploadId, + }; + s3.completeMultipartUpload(completeMpuParams, (err, res) => { + assert.equal( + err, + null, + 'completeMultipartUpload:' + ` Expected success, got error: ${err}` + ); + assert.strictEqual(res.Bucket, bucket); + assert.strictEqual(res.Key, this.test.mpuKeyNameGcp); + next(); + }); + }, + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/objectTagging.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/objectTagging.js index 4529c2abba..b80183a065 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/objectTagging.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/objectTagging.js @@ -2,10 +2,21 @@ const assert = require('assert'); const async = require('async'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultiple, awsS3, awsBucket, getAwsRetry, - getAzureClient, getAzureContainerName, convertMD5, memLocation, - fileLocation, awsLocation, azureLocation, genUniqID, - isCEPH } = require('../utils'); +const { + describeSkipIfNotMultiple, + awsS3, + awsBucket, + getAwsRetry, + getAzureClient, + getAzureContainerName, + convertMD5, + memLocation, + fileLocation, + awsLocation, + azureLocation, + genUniqID, + isCEPH, +} = require('../utils'); const azureClient = getAzureClient(); const azureContainerName = getAzureContainerName(azureLocation); @@ -45,19 +56,14 @@ const tagObj = { key1: 'value1', key2: 'value2' }; function getAndAssertObjectTags(tagParams, callback) { return s3.getObjectTagging(tagParams, (err, res) => { assert.strictEqual(res.TagSet.length, 2); - assert.strictEqual(res.TagSet[0].Key, - putTags.TagSet[0].Key); - assert.strictEqual(res.TagSet[0].Value, - putTags.TagSet[0].Value); - assert.strictEqual(res.TagSet[1].Key, - putTags.TagSet[1].Key); - assert.strictEqual(res.TagSet[1].Value, - putTags.TagSet[1].Value); + assert.strictEqual(res.TagSet[0].Key, putTags.TagSet[0].Key); + assert.strictEqual(res.TagSet[0].Value, putTags.TagSet[0].Value); + assert.strictEqual(res.TagSet[1].Key, putTags.TagSet[1].Key); + assert.strictEqual(res.TagSet[1].Value, putTags.TagSet[1].Value); return callback(); }); } - function awsGet(key, tagCheck, isEmpty, isMpu, callback) { process.stdout.write('Getting object from AWS\n'); getAwsRetry({ key }, 0, (err, res) => { @@ -80,24 +86,29 @@ function awsGet(key, tagCheck, isEmpty, isMpu, callback) { function azureGet(key, tagCheck, isEmpty, callback) { process.stdout.write('Getting object from Azure\n'); - 
azureClient.getContainerClient(azureContainerName).getProperties(key).then(res => { - const resMD5 = convertMD5(res.contentSettings.contentMD5); - if (isEmpty) { - assert.strictEqual(resMD5, `${emptyMD5}`); - } else { - assert.strictEqual(resMD5, `${correctMD5}`); - } - if (tagCheck) { - assert.strictEqual(res.metadata.tags, - JSON.stringify(tagObj)); - } else { - assert.strictEqual(res.metadata.tags, undefined); - } - return callback(); - }, err => { - assert.equal(err, null); - return callback(); - }); + azureClient + .getContainerClient(azureContainerName) + .getProperties(key) + .then( + res => { + const resMD5 = convertMD5(res.contentSettings.contentMD5); + if (isEmpty) { + assert.strictEqual(resMD5, `${emptyMD5}`); + } else { + assert.strictEqual(resMD5, `${correctMD5}`); + } + if (tagCheck) { + assert.strictEqual(res.metadata.tags, JSON.stringify(tagObj)); + } else { + assert.strictEqual(res.metadata.tags, undefined); + } + return callback(); + }, + err => { + assert.equal(err, null); + return callback(); + } + ); } function getObject(key, backend, tagCheck, isEmpty, isMpu, callback) { @@ -112,8 +123,7 @@ function getObject(key, backend, tagCheck, isEmpty, isMpu, callback) { } else { assert.strictEqual(res.ETag, `"${correctMD5}"`); } - assert.strictEqual(res.Metadata['scal-location-constraint'], - backend); + assert.strictEqual(res.Metadata['scal-location-constraint'], backend); if (tagCheck) { assert.strictEqual(res.TagCount, 2); } else { @@ -134,38 +144,43 @@ function getObject(key, backend, tagCheck, isEmpty, isMpu, callback) { } function mpuWaterfall(params, cb) { - async.waterfall([ - next => s3.createMultipartUpload(params, (err, data) => { + async.waterfall( + [ + next => + s3.createMultipartUpload(params, (err, data) => { + assert.equal(err, null); + next(null, data.UploadId); + }), + (uploadId, next) => { + const partParams = { Bucket: bucket, Key: params.Key, PartNumber: 1, UploadId: uploadId, Body: body }; + s3.uploadPart(partParams, (err, result) => { + assert.equal(err, null); + next(null, uploadId, result.ETag); + }); + }, + (uploadId, eTag, next) => { + const compParams = { + Bucket: bucket, + Key: params.Key, + MultipartUpload: { + Parts: [{ ETag: eTag, PartNumber: 1 }], + }, + UploadId: uploadId, + }; + s3.completeMultipartUpload(compParams, err => { + assert.equal(err, null); + next(); + }); + }, + ], + err => { assert.equal(err, null); - next(null, data.UploadId); - }), - (uploadId, next) => { - const partParams = { Bucket: bucket, Key: params.Key, PartNumber: 1, - UploadId: uploadId, Body: body }; - s3.uploadPart(partParams, (err, result) => { - assert.equal(err, null); - next(null, uploadId, result.ETag); - }); - }, - (uploadId, eTag, next) => { - const compParams = { Bucket: bucket, Key: params.Key, - MultipartUpload: { - Parts: [{ ETag: eTag, PartNumber: 1 }], - }, - UploadId: uploadId }; - s3.completeMultipartUpload(compParams, err => { - assert.equal(err, null); - next(); - }); - }, - ], err => { - assert.equal(err, null); - cb(); - }); + cb(); + } + ); } -describeSkipIfNotMultiple('Object tagging with multiple backends', -function testSuite() { +describeSkipIfNotMultiple('Object tagging with multiple backends', function testSuite() { if (!process.env.S3_END_TO_END) { this.retries(2); } @@ -174,36 +189,38 @@ function testSuite() { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw 
err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); describe('putObject with tags and putObjectTagging', () => { testBackends.forEach(backend => { - const itSkipIfAzureOrCeph = backend === 'azurebackend' || - isCEPH ? it.skip : it; - it(`should put an object with tags to ${backend} backend`, - done => { + const itSkipIfAzureOrCeph = backend === 'azurebackend' || isCEPH ? it.skip : it; + it(`should put an object with tags to ${backend} backend`, done => { const key = `somekey-${genUniqID()}`; - const params = Object.assign({ Key: key, Tagging: tagString, - Metadata: { 'scal-location-constraint': backend } }, - putParams); + const params = Object.assign( + { Key: key, Tagging: tagString, Metadata: { 'scal-location-constraint': backend } }, + putParams + ); process.stdout.write('Putting object\n'); s3.putObject(params, err => { assert.equal(err, null); @@ -211,8 +228,7 @@ function testSuite() { }); }); - it(`should put a 0 byte object with tags to ${backend} backend`, - done => { + it(`should put a 0 byte object with tags to ${backend} backend`, done => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, @@ -227,16 +243,16 @@ function testSuite() { }); }); - it(`should put tags to preexisting object in ${backend} ` + - 'backend', done => { + it(`should put tags to preexisting object in ${backend} ` + 'backend', done => { const key = `somekey-${genUniqID()}`; - const params = Object.assign({ Key: key, Metadata: - { 'scal-location-constraint': backend } }, putParams); + const params = Object.assign( + { Key: key, Metadata: { 'scal-location-constraint': backend } }, + putParams + ); process.stdout.write('Putting object\n'); s3.putObject(params, err => { assert.equal(err, null); - const putTagParams = { Bucket: bucket, Key: key, - Tagging: putTags }; + const putTagParams = { Bucket: bucket, Key: key, Tagging: putTags }; process.stdout.write('Putting object tags\n'); s3.putObjectTagging(putTagParams, err => { assert.equal(err, null); @@ -245,8 +261,7 @@ function testSuite() { }); }); - it('should put tags to preexisting 0 byte object in ' + - `${backend} backend`, done => { + it('should put tags to preexisting 0 byte object in ' + `${backend} backend`, done => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, @@ -256,8 +271,7 @@ function testSuite() { process.stdout.write('Putting object\n'); s3.putObject(params, err => { assert.equal(err, null); - const putTagParams = { Bucket: bucket, Key: key, - Tagging: putTags }; + const putTagParams = { Bucket: bucket, Key: key, Tagging: putTags }; process.stdout.write('Putting object tags\n'); s3.putObjectTagging(putTagParams, err => { assert.equal(err, null); @@ -266,8 +280,7 @@ function testSuite() { }); }); - itSkipIfAzureOrCeph('should put tags to completed MPU ' + - `object in ${backend}`, done => { + itSkipIfAzureOrCeph('should put tags to completed MPU ' + `object in 
${backend}`, done => { const key = `somekey-${genUniqID()}`; const params = { Bucket: bucket, @@ -275,8 +288,7 @@ function testSuite() { Metadata: { 'scal-location-constraint': backend }, }; mpuWaterfall(params, () => { - const putTagParams = { Bucket: bucket, Key: key, - Tagging: putTags }; + const putTagParams = { Bucket: bucket, Key: key, Tagging: putTags }; process.stdout.write('Putting object\n'); s3.putObjectTagging(putTagParams, err => { assert.equal(err, null); @@ -286,39 +298,42 @@ function testSuite() { }); }); - it('should not return error putting tags to correct object ' + - 'version in AWS, even if a delete marker was created directly ' + - 'on AWS before tags are put', - done => { - const key = `somekey-${genUniqID()}`; - const params = Object.assign({ Key: key, Metadata: - { 'scal-location-constraint': awsLocation } }, putParams); - process.stdout.write('Putting object\n'); - s3.putObject(params, err => { - assert.equal(err, null); - process.stdout.write('Deleting object from AWS\n'); - awsS3.deleteObject({ Bucket: awsBucket, Key: key }, err => { + it( + 'should not return error putting tags to correct object ' + + 'version in AWS, even if a delete marker was created directly ' + + 'on AWS before tags are put', + done => { + const key = `somekey-${genUniqID()}`; + const params = Object.assign( + { Key: key, Metadata: { 'scal-location-constraint': awsLocation } }, + putParams + ); + process.stdout.write('Putting object\n'); + s3.putObject(params, err => { assert.equal(err, null); - const putTagParams = { Bucket: bucket, Key: key, - Tagging: putTags }; - process.stdout.write('Putting object tags\n'); - s3.putObjectTagging(putTagParams, err => { - assert.strictEqual(err, null); - done(); + process.stdout.write('Deleting object from AWS\n'); + awsS3.deleteObject({ Bucket: awsBucket, Key: key }, err => { + assert.equal(err, null); + const putTagParams = { Bucket: bucket, Key: key, Tagging: putTags }; + process.stdout.write('Putting object tags\n'); + s3.putObjectTagging(putTagParams, err => { + assert.strictEqual(err, null); + done(); + }); }); }); - }); - }); + } + ); }); describe('getObjectTagging', () => { testBackends.forEach(backend => { - it(`should get tags from object on ${backend} backend`, - done => { + it(`should get tags from object on ${backend} backend`, done => { const key = `somekey-${genUniqID()}`; - const params = Object.assign({ Key: key, Tagging: tagString, - Metadata: { 'scal-location-constraint': backend } }, - putParams); + const params = Object.assign( + { Key: key, Tagging: tagString, Metadata: { 'scal-location-constraint': backend } }, + putParams + ); process.stdout.write('Putting object\n'); s3.putObject(params, err => { assert.equal(err, null); @@ -328,33 +343,37 @@ function testSuite() { }); }); - it('should not return error on getting tags from object that has ' + - 'had a delete marker put directly on AWS', done => { - const key = `somekey-${genUniqID()}`; - const params = Object.assign({ Key: key, Tagging: tagString, - Metadata: { 'scal-location-constraint': awsLocation } }, - putParams); - process.stdout.write('Putting object\n'); - s3.putObject(params, err => { - assert.equal(err, null); - process.stdout.write('Deleting object from AWS\n'); - awsS3.deleteObject({ Bucket: awsBucket, Key: key }, err => { + it( + 'should not return error on getting tags from object that has ' + + 'had a delete marker put directly on AWS', + done => { + const key = `somekey-${genUniqID()}`; + const params = Object.assign( + { Key: key, Tagging: tagString, Metadata: 
{ 'scal-location-constraint': awsLocation } }, + putParams + ); + process.stdout.write('Putting object\n'); + s3.putObject(params, err => { assert.equal(err, null); - const tagParams = { Bucket: bucket, Key: key }; - getAndAssertObjectTags(tagParams, done); + process.stdout.write('Deleting object from AWS\n'); + awsS3.deleteObject({ Bucket: awsBucket, Key: key }, err => { + assert.equal(err, null); + const tagParams = { Bucket: bucket, Key: key }; + getAndAssertObjectTags(tagParams, done); + }); }); - }); - }); + } + ); }); describe('deleteObjectTagging', () => { testBackends.forEach(backend => { - it(`should delete tags from object on ${backend} backend`, - done => { + it(`should delete tags from object on ${backend} backend`, done => { const key = `somekey-${genUniqID()}`; - const params = Object.assign({ Key: key, Tagging: tagString, - Metadata: { 'scal-location-constraint': backend } }, - putParams); + const params = Object.assign( + { Key: key, Tagging: tagString, Metadata: { 'scal-location-constraint': backend } }, + putParams + ); process.stdout.write('Putting object\n'); s3.putObject(params, err => { assert.equal(err, null); @@ -367,26 +386,30 @@ function testSuite() { }); }); - it('should not return error on deleting tags from object that ' + - 'has had delete markers put directly on AWS', done => { - const key = `somekey-${genUniqID()}`; - const params = Object.assign({ Key: key, Tagging: tagString, - Metadata: { 'scal-location-constraint': awsLocation } }, - putParams); - process.stdout.write('Putting object\n'); - s3.putObject(params, err => { - assert.equal(err, null); - process.stdout.write('Deleting object from AWS\n'); - awsS3.deleteObject({ Bucket: awsBucket, Key: key }, err => { + it( + 'should not return error on deleting tags from object that ' + + 'has had delete markers put directly on AWS', + done => { + const key = `somekey-${genUniqID()}`; + const params = Object.assign( + { Key: key, Tagging: tagString, Metadata: { 'scal-location-constraint': awsLocation } }, + putParams + ); + process.stdout.write('Putting object\n'); + s3.putObject(params, err => { assert.equal(err, null); - const tagParams = { Bucket: bucket, Key: key }; - s3.deleteObjectTagging(tagParams, err => { - assert.strictEqual(err, null); - done(); + process.stdout.write('Deleting object from AWS\n'); + awsS3.deleteObject({ Bucket: awsBucket, Key: key }, err => { + assert.equal(err, null); + const tagParams = { Bucket: bucket, Key: key }; + s3.deleteObjectTagging(tagParams, err => { + assert.strictEqual(err, null); + done(); + }); }); }); - }); - }); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-delete.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-delete.js index bd3456bb86..b5a2028f33 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-delete.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-delete.js @@ -20,20 +20,24 @@ const { putTaggingAndAssert, delTaggingAndAssert, awsGetAssertTags } = tagging; const bucket = `awsversioningtagdel${genUniqID()}`; const someBody = 'teststring'; -describeSkipIfNotMultiple('AWS backend object delete tagging with versioning ', -function testSuite() { +describeSkipIfNotMultiple('AWS backend object delete tagging with versioning ', function testSuite() { this.timeout(120000); const tags = { key1: 'value1', key2: 'value2' }; withV4(sigCfg => { const bucketUtil = 
new BucketUtility('default', sigCfg); const s3 = bucketUtil.s3; - beforeEach(done => s3.createBucket({ - Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: awsLocation, - }, - }, done)); + beforeEach(done => + s3.createBucket( + { + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: awsLocation, + }, + }, + done + ) + ); afterEach(done => { removeAllVersions({ Bucket: bucket }, err => { if (err) { @@ -43,182 +47,250 @@ function testSuite() { }); }); - it('versioning not configured: should delete a tag set on the ' + - 'latest version if no version is specified', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, - expectedVersionId: false }, next), - (versionId, next) => delTaggingAndAssert(s3, { bucket, key, - expectedVersionId: false }, next), - next => awsGetAssertTags({ key, expectedTags: {} }, next), - ], done); - }); + it( + 'versioning not configured: should delete a tag set on the ' + 'latest version if no version is specified', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: false }, next), + (versionId, next) => delTaggingAndAssert(s3, { bucket, key, expectedVersionId: false }, next), + next => awsGetAssertTags({ key, expectedTags: {} }, next), + ], + done + ); + } + ); - it('versioning not configured: should delete a tag set on the ' + - 'version if specified (null)', done => { + it('versioning not configured: should delete a tag set on the ' + 'version if specified (null)', done => { const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, - versionId: 'null', expectedVersionId: false }, next), - (versionId, next) => delTaggingAndAssert(s3, { bucket, key, - versionId: 'null', expectedVersionId: false }, next), - next => awsGetAssertTags({ key, expectedTags: {} }, next), - ], done); + async.waterfall( + [ + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: 'null', expectedVersionId: false }, + next + ), + (versionId, next) => + delTaggingAndAssert(s3, { bucket, key, versionId: 'null', expectedVersionId: false }, next), + next => awsGetAssertTags({ key, expectedTags: {} }, next), + ], + done + ); }); - it('versioning suspended: should delete a tag set on the latest ' + - 'version if no version is specified', done => { - const data = [undefined, 'test1', 'test2']; - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, data, next), - (versionIds, next) => putTaggingAndAssert(s3, { bucket, key, - tags, expectedVersionId: 'null' }, next), - (versionId, next) => delTaggingAndAssert(s3, { bucket, key, - expectedVersionId: 'null' }, next), - next => awsGetAssertTags({ key, expectedTags: {} }, next), - ], done); - }); + it( + 'versioning suspended: should delete a tag set on the latest ' + 'version if no version is specified', + done => { + const data = [undefined, 'test1', 'test2']; + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, data, next), + (versionIds, next) => + putTaggingAndAssert(s3, { bucket, key, 
tags, expectedVersionId: 'null' }, next), + (versionId, next) => delTaggingAndAssert(s3, { bucket, key, expectedVersionId: 'null' }, next), + next => awsGetAssertTags({ key, expectedTags: {} }, next), + ], + done + ); + } + ); - it('versioning suspended: should delete a tag set on a specific ' + - 'version (null)', done => { + it('versioning suspended: should delete a tag set on a specific ' + 'version (null)', done => { const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, [undefined], - next), - (versionIds, next) => putTaggingAndAssert(s3, { bucket, key, - tags, versionId: 'null', expectedVersionId: 'null' }, next), - (versionId, next) => delTaggingAndAssert(s3, { bucket, key, - versionId: 'null', expectedTags: tags, - expectedVersionId: 'null' }, next), - next => awsGetAssertTags({ key, expectedTags: {} }, next), - ], done); + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, [undefined], next), + (versionIds, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: 'null', expectedVersionId: 'null' }, + next + ), + (versionId, next) => + delTaggingAndAssert( + s3, + { bucket, key, versionId: 'null', expectedTags: tags, expectedVersionId: 'null' }, + next + ), + next => awsGetAssertTags({ key, expectedTags: {} }, next), + ], + done + ); }); - it('versioning enabled then suspended: should delete a tag set on ' + - 'a specific (non-null) version if specified', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', - (err, awsVid) => next(err, putData.VersionId, awsVid)), - (s3Vid, awsVid, next) => putNullVersionsToAws(s3, bucket, key, - [someBody], () => next(null, s3Vid, awsVid)), - (s3Vid, awsVid, next) => putTaggingAndAssert(s3, { bucket, key, - tags, versionId: s3Vid, expectedVersionId: s3Vid }, () => - next(null, s3Vid, awsVid)), - (s3Vid, awsVid, next) => delTaggingAndAssert(s3, { bucket, key, - versionId: s3Vid, expectedVersionId: s3Vid }, - () => next(null, awsVid)), - (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, - expectedTags: {} }, next), - ], done); - }); + it( + 'versioning enabled then suspended: should delete a tag set on ' + + 'a specific (non-null) version if specified', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), + (s3Vid, awsVid, next) => + putNullVersionsToAws(s3, bucket, key, [someBody], () => next(null, s3Vid, awsVid)), + (s3Vid, awsVid, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: s3Vid, expectedVersionId: s3Vid }, + () => next(null, s3Vid, awsVid) + ), + (s3Vid, awsVid, next) => + delTaggingAndAssert(s3, { bucket, key, versionId: s3Vid, expectedVersionId: s3Vid }, () => + next(null, awsVid) + ), + (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, expectedTags: {} }, next), + ], + done + ); + } + ); - it('versioning enabled: should delete a tag set on the latest ' + - 'version if no version is specified', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => putTaggingAndAssert(s3, { 
bucket, key, tags, - expectedVersionId: putData.VersionId }, next), - (versionId, next) => delTaggingAndAssert(s3, { bucket, key, - expectedVersionId: versionId }, next), - next => awsGetAssertTags({ key, expectedTags: {} }, next), - ], done); - }); + it( + 'versioning enabled: should delete a tag set on the latest ' + 'version if no version is specified', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: putData.VersionId }, next), + (versionId, next) => + delTaggingAndAssert(s3, { bucket, key, expectedVersionId: versionId }, next), + next => awsGetAssertTags({ key, expectedTags: {} }, next), + ], + done + ); + } + ); - it('versioning enabled: should delete a tag set on a specific version', - done => { + it('versioning enabled: should delete a tag set on a specific version', done => { const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, - versionId: putData.VersionId, - expectedVersionId: putData.VersionId }, next), - (versionId, next) => delTaggingAndAssert(s3, { bucket, key, - versionId, expectedVersionId: versionId }, next), - next => awsGetAssertTags({ key, expectedTags: {} }, next), - ], done); + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: putData.VersionId, expectedVersionId: putData.VersionId }, + next + ), + (versionId, next) => + delTaggingAndAssert(s3, { bucket, key, versionId, expectedVersionId: versionId }, next), + next => awsGetAssertTags({ key, expectedTags: {} }, next), + ], + done + ); }); - it('versioning enabled: should delete a tag set on a specific ' + - 'version that is not the latest version', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', - (err, awsVid) => next(err, putData.VersionId, awsVid)), - // put another version - (s3Vid, awsVid, next) => s3.putObject({ Bucket: bucket, - Key: key, Body: someBody }, - err => next(err, s3Vid, awsVid)), - (s3Vid, awsVid, next) => putTaggingAndAssert(s3, { bucket, key, - tags, versionId: s3Vid, expectedVersionId: s3Vid }, err => - next(err, s3Vid, awsVid)), - (s3Vid, awsVid, next) => delTaggingAndAssert(s3, { bucket, key, - versionId: s3Vid, expectedVersionId: s3Vid }, - () => next(null, awsVid)), - (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, - expectedTags: {} }, next), - ], done); - }); + it( + 'versioning enabled: should delete a tag set on a specific ' + 'version that is not the latest version', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), + // put another version + (s3Vid, awsVid, next) => + s3.putObject({ Bucket: bucket, Key: key, Body: someBody }, err => next(err, s3Vid, awsVid)), + (s3Vid, awsVid, next) => + putTaggingAndAssert( + s3, + { 
bucket, key, tags, versionId: s3Vid, expectedVersionId: s3Vid }, + err => next(err, s3Vid, awsVid) + ), + (s3Vid, awsVid, next) => + delTaggingAndAssert(s3, { bucket, key, versionId: s3Vid, expectedVersionId: s3Vid }, () => + next(null, awsVid) + ), + (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, expectedTags: {} }, next), + ], + done + ); + } + ); - it('versioning suspended then enabled: should delete a tag set on ' + - 'a specific version (null) if specified', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, [undefined], - () => next()), - next => awsGetLatestVerId(key, '', next), - (awsVid, next) => putVersionsToAws(s3, bucket, key, [someBody], - () => next(null, awsVid)), - (awsVid, next) => putTaggingAndAssert(s3, { bucket, key, tags, - versionId: 'null', expectedVersionId: 'null' }, - () => next(null, awsVid)), - (awsVid, next) => delTaggingAndAssert(s3, { bucket, key, - versionId: 'null', expectedVersionId: 'null' }, - () => next(null, awsVid)), - (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, - expectedTags: {} }, next), - ], done); - }); + it( + 'versioning suspended then enabled: should delete a tag set on ' + 'a specific version (null) if specified', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, [undefined], () => next()), + next => awsGetLatestVerId(key, '', next), + (awsVid, next) => putVersionsToAws(s3, bucket, key, [someBody], () => next(null, awsVid)), + (awsVid, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: 'null', expectedVersionId: 'null' }, + () => next(null, awsVid) + ), + (awsVid, next) => + delTaggingAndAssert(s3, { bucket, key, versionId: 'null', expectedVersionId: 'null' }, () => + next(null, awsVid) + ), + (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, expectedTags: {} }, next), + ], + done + ); + } + ); - it('should return an ServiceUnavailable if trying to delete ' + - 'tags from object that was deleted from AWS directly', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', next), - (awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, next), - (delData, next) => delTaggingAndAssert(s3, { bucket, key, - expectedError: 'ServiceUnavailable' }, next), - ], done); - }); + it( + 'should return an ServiceUnavailable if trying to delete ' + + 'tags from object that was deleted from AWS directly', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => awsGetLatestVerId(key, '', next), + (awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, Key: key, VersionId: awsVid }, next), + (delData, next) => + delTaggingAndAssert(s3, { bucket, key, expectedError: 'ServiceUnavailable' }, next), + ], + done + ); + } + ); - it('should return an ServiceUnavailable if trying to delete ' + - 'tags from object that was deleted from AWS directly', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', - (err, awsVid) => next(err, putData.VersionId, awsVid)), - (s3Vid, awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, err => next(err, s3Vid)), - 
(s3Vid, next) => delTaggingAndAssert(s3, { bucket, key, - versionId: s3Vid, expectedError: 'ServiceUnavailable' }, - next), - ], done); - }); + it( + 'should return an ServiceUnavailable if trying to delete ' + + 'tags from object that was deleted from AWS directly', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), + (s3Vid, awsVid, next) => + awsS3.deleteObject({ Bucket: awsBucket, Key: key, VersionId: awsVid }, err => + next(err, s3Vid) + ), + (s3Vid, next) => + delTaggingAndAssert( + s3, + { bucket, key, versionId: s3Vid, expectedError: 'ServiceUnavailable' }, + next + ), + ], + done + ); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js index 9dd74a291f..d33f5133a0 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/objectTagging/taggingAwsVersioning-putget.js @@ -17,25 +17,28 @@ const { genUniqID, } = require('../utils'); -const { putTaggingAndAssert, getTaggingAndAssert, delTaggingAndAssert, - awsGetAssertTags } = tagging; +const { putTaggingAndAssert, getTaggingAndAssert, delTaggingAndAssert, awsGetAssertTags } = tagging; const bucket = `awsversioningtag${genUniqID()}`; const someBody = 'teststring'; -describeSkipIfNotMultiple('AWS backend object put/get tagging with versioning', -function testSuite() { +describeSkipIfNotMultiple('AWS backend object put/get tagging with versioning', function testSuite() { this.timeout(120000); const tags = { key1: 'value1', key2: 'value2' }; withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); const s3 = bucketUtil.s3; - beforeEach(done => s3.createBucket({ - Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: awsLocation, - }, - }, done)); + beforeEach(done => + s3.createBucket( + { + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: awsLocation, + }, + }, + done + ) + ); afterEach(done => { removeAllVersions({ Bucket: bucket }, err => { if (err) { @@ -45,243 +48,366 @@ function testSuite() { }); }); - it('versioning not configured: should put/get a tag set on the ' + - 'latest version if no version is specified', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, - expectedVersionId: false }, next), - (versionId, next) => getTaggingAndAssert(s3, { bucket, key, - expectedTags: tags, expectedVersionId: false }, next), - (versionId, next) => awsGetAssertTags({ key, - expectedTags: tags }, next), - ], done); - }); + it( + 'versioning not configured: should put/get a tag set on the ' + 'latest version if no version is specified', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: false }, next), + (versionId, next) => + getTaggingAndAssert( + s3, + { bucket, key, expectedTags: tags, expectedVersionId: false }, + next + ), + (versionId, next) => awsGetAssertTags({ key, expectedTags: tags }, next), + ], + done + ); 
+ } + ); - it('versioning not configured: should put/get a tag set on a ' + - 'specific version if specified (null)', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, - versionId: 'null', expectedVersionId: false }, next), - (versionId, next) => getTaggingAndAssert(s3, { bucket, key, - versionId: 'null', expectedTags: tags, - expectedVersionId: false }, next), - (versionId, next) => awsGetAssertTags({ key, - expectedTags: tags }, next), - ], done); - }); + it( + 'versioning not configured: should put/get a tag set on a ' + 'specific version if specified (null)', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: 'null', expectedVersionId: false }, + next + ), + (versionId, next) => + getTaggingAndAssert( + s3, + { bucket, key, versionId: 'null', expectedTags: tags, expectedVersionId: false }, + next + ), + (versionId, next) => awsGetAssertTags({ key, expectedTags: tags }, next), + ], + done + ); + } + ); - it('versioning suspended: should put/get a tag set on the latest ' + - 'version if no version is specified', done => { - const data = [undefined, 'test1', 'test2']; - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, data, next), - (versionIds, next) => putTaggingAndAssert(s3, { bucket, key, - tags, expectedVersionId: 'null' }, next), - (versionId, next) => getTaggingAndAssert(s3, { bucket, key, - expectedTags: tags, expectedVersionId: 'null' }, next), - (versionId, next) => awsGetAssertTags({ key, - expectedTags: tags }, next), - ], done); - }); + it( + 'versioning suspended: should put/get a tag set on the latest ' + 'version if no version is specified', + done => { + const data = [undefined, 'test1', 'test2']; + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, data, next), + (versionIds, next) => + putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: 'null' }, next), + (versionId, next) => + getTaggingAndAssert( + s3, + { bucket, key, expectedTags: tags, expectedVersionId: 'null' }, + next + ), + (versionId, next) => awsGetAssertTags({ key, expectedTags: tags }, next), + ], + done + ); + } + ); - it('versioning suspended: should put/get a tag set on a specific ' + - 'version (null)', done => { + it('versioning suspended: should put/get a tag set on a specific ' + 'version (null)', done => { const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, [undefined], - next), - (versionIds, next) => putTaggingAndAssert(s3, { bucket, key, - tags, versionId: 'null', expectedVersionId: 'null' }, next), - (versionId, next) => getTaggingAndAssert(s3, { bucket, key, - versionId: 'null', expectedTags: tags, - expectedVersionId: 'null' }, next), - (versionId, next) => awsGetAssertTags({ key, - expectedTags: tags }, next), - ], done); + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, [undefined], next), + (versionIds, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: 'null', expectedVersionId: 'null' }, + next + ), + (versionId, next) => + getTaggingAndAssert( + s3, + { bucket, key, versionId: 'null', expectedTags: tags, expectedVersionId: 'null' }, + next + ), + (versionId, 
next) => awsGetAssertTags({ key, expectedTags: tags }, next), + ], + done + ); }); - it('versioning enabled then suspended: should put/get a tag set on ' + - 'a specific (non-null) version if specified', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', - (err, awsVid) => next(err, putData.VersionId, awsVid)), - (s3Vid, awsVid, next) => putNullVersionsToAws(s3, bucket, key, - [someBody], () => next(null, s3Vid, awsVid)), - (s3Vid, awsVid, next) => putTaggingAndAssert(s3, { bucket, key, - tags, versionId: s3Vid, expectedVersionId: s3Vid }, () => - next(null, s3Vid, awsVid)), - (s3Vid, awsVid, next) => getTaggingAndAssert(s3, { bucket, key, - versionId: s3Vid, expectedTags: tags, - expectedVersionId: s3Vid }, () => next(null, awsVid)), - (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, - expectedTags: tags }, next), - ], done); - }); + it( + 'versioning enabled then suspended: should put/get a tag set on ' + + 'a specific (non-null) version if specified', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), + (s3Vid, awsVid, next) => + putNullVersionsToAws(s3, bucket, key, [someBody], () => next(null, s3Vid, awsVid)), + (s3Vid, awsVid, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: s3Vid, expectedVersionId: s3Vid }, + () => next(null, s3Vid, awsVid) + ), + (s3Vid, awsVid, next) => + getTaggingAndAssert( + s3, + { bucket, key, versionId: s3Vid, expectedTags: tags, expectedVersionId: s3Vid }, + () => next(null, awsVid) + ), + (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, expectedTags: tags }, next), + ], + done + ); + } + ); - it('versioning enabled: should put/get a tag set on the latest ' + - 'version if no version is specified', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, - expectedVersionId: putData.VersionId }, next), - (versionId, next) => getTaggingAndAssert(s3, { bucket, key, - expectedTags: tags, expectedVersionId: versionId }, next), - (versionId, next) => awsGetAssertTags({ key, - expectedTags: tags }, next), - ], done); - }); + it( + 'versioning enabled: should put/get a tag set on the latest ' + 'version if no version is specified', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: putData.VersionId }, next), + (versionId, next) => + getTaggingAndAssert( + s3, + { bucket, key, expectedTags: tags, expectedVersionId: versionId }, + next + ), + (versionId, next) => awsGetAssertTags({ key, expectedTags: tags }, next), + ], + done + ); + } + ); - it('versioning enabled: should put/get a tag set on a specific version', - done => { + it('versioning enabled: should put/get a tag set on a specific version', done => { const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => 
s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, - versionId: putData.VersionId, - expectedVersionId: putData.VersionId }, next), - (versionId, next) => getTaggingAndAssert(s3, { bucket, key, - versionId, expectedTags: tags, - expectedVersionId: versionId }, next), - (versionId, next) => awsGetAssertTags({ key, - expectedTags: tags }, next), - ], done); + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: putData.VersionId, expectedVersionId: putData.VersionId }, + next + ), + (versionId, next) => + getTaggingAndAssert( + s3, + { bucket, key, versionId, expectedTags: tags, expectedVersionId: versionId }, + next + ), + (versionId, next) => awsGetAssertTags({ key, expectedTags: tags }, next), + ], + done + ); }); - it('versioning enabled: should put/get a tag set on a specific version', - done => { + it('versioning enabled: should put/get a tag set on a specific version', done => { const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => putTaggingAndAssert(s3, { bucket, key, tags, - versionId: putData.VersionId, - expectedVersionId: putData.VersionId }, next), - (versionId, next) => delTaggingAndAssert(s3, { bucket, key, - versionId, expectedVersionId: versionId }, next), - next => awsGetAssertTags({ key, expectedTags: {} }, next), - ], done); + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: putData.VersionId, expectedVersionId: putData.VersionId }, + next + ), + (versionId, next) => + delTaggingAndAssert(s3, { bucket, key, versionId, expectedVersionId: versionId }, next), + next => awsGetAssertTags({ key, expectedTags: {} }, next), + ], + done + ); }); - it('versioning enabled: should put/get a tag set on a specific ' + - 'version that is not the latest version', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', - (err, awsVid) => next(err, putData.VersionId, awsVid)), - // put another version - (s3Vid, awsVid, next) => s3.putObject({ Bucket: bucket, - Key: key, Body: someBody }, - err => next(err, s3Vid, awsVid)), - (s3Vid, awsVid, next) => putTaggingAndAssert(s3, { bucket, key, - tags, versionId: s3Vid, expectedVersionId: s3Vid }, err => - next(err, s3Vid, awsVid)), - (s3Vid, awsVid, next) => getTaggingAndAssert(s3, { bucket, key, - versionId: s3Vid, expectedTags: tags, - expectedVersionId: s3Vid }, () => next(null, awsVid)), - (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, - expectedTags: tags }, next), - ], done); - }); + it( + 'versioning enabled: should put/get a tag set on a specific ' + 'version that is not the latest version', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), + // put another version + (s3Vid, awsVid, next) => + s3.putObject({ Bucket: 
bucket, Key: key, Body: someBody }, err => next(err, s3Vid, awsVid)), + (s3Vid, awsVid, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: s3Vid, expectedVersionId: s3Vid }, + err => next(err, s3Vid, awsVid) + ), + (s3Vid, awsVid, next) => + getTaggingAndAssert( + s3, + { bucket, key, versionId: s3Vid, expectedTags: tags, expectedVersionId: s3Vid }, + () => next(null, awsVid) + ), + (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, expectedTags: tags }, next), + ], + done + ); + } + ); + it( + 'versioning suspended then enabled: should put/get a tag set on ' + + 'a specific version (null) if specified', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => putNullVersionsToAws(s3, bucket, key, [undefined], () => next()), + next => awsGetLatestVerId(key, '', next), + (awsVid, next) => putVersionsToAws(s3, bucket, key, [someBody], () => next(null, awsVid)), + (awsVid, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: 'null', expectedVersionId: 'null' }, + () => next(null, awsVid) + ), + (awsVid, next) => + getTaggingAndAssert( + s3, + { bucket, key, versionId: 'null', expectedTags: tags, expectedVersionId: 'null' }, + () => next(null, awsVid) + ), + (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, expectedTags: tags }, next), + ], + done + ); + } + ); - it('versioning suspended then enabled: should put/get a tag set on ' + - 'a specific version (null) if specified', done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => putNullVersionsToAws(s3, bucket, key, [undefined], - () => next()), - next => awsGetLatestVerId(key, '', next), - (awsVid, next) => putVersionsToAws(s3, bucket, key, [someBody], - () => next(null, awsVid)), - (awsVid, next) => putTaggingAndAssert(s3, { bucket, key, tags, - versionId: 'null', expectedVersionId: 'null' }, - () => next(null, awsVid)), - (awsVid, next) => getTaggingAndAssert(s3, { bucket, key, - versionId: 'null', expectedTags: tags, - expectedVersionId: 'null' }, () => next(null, awsVid)), - (awsVid, next) => awsGetAssertTags({ key, versionId: awsVid, - expectedTags: tags }, next), - ], done); - }); + it( + 'should get tags for an object even if it was deleted from ' + 'AWS directly (we rely on s3 metadata)', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => awsGetLatestVerId(key, '', next), + (awsVid, next) => + putTaggingAndAssert(s3, { bucket, key, tags, expectedVersionId: false }, () => + next(null, awsVid) + ), + (awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, Key: key, VersionId: awsVid }, next), + (delData, next) => + getTaggingAndAssert( + s3, + { bucket, key, expectedTags: tags, expectedVersionId: false, getObject: false }, + next + ), + ], + done + ); + } + ); - it('should get tags for an object even if it was deleted from ' + - 'AWS directly (we rely on s3 metadata)', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', next), - (awsVid, next) => putTaggingAndAssert(s3, { bucket, key, tags, - expectedVersionId: false }, () => next(null, awsVid)), - (awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, next), - (delData, next) => getTaggingAndAssert(s3, { bucket, key, - expectedTags: tags, expectedVersionId: false, - getObject: false }, next), - ], done); - 
}); + it( + 'should return an ServiceUnavailable if trying to put ' + + 'tags from object that was deleted from AWS directly', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => awsGetLatestVerId(key, '', next), + (awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, Key: key, VersionId: awsVid }, next), + (delData, next) => + putTaggingAndAssert(s3, { bucket, key, tags, expectedError: 'ServiceUnavailable' }, next), + ], + done + ); + } + ); - it('should return an ServiceUnavailable if trying to put ' + - 'tags from object that was deleted from AWS directly', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', next), - (awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, next), - (delData, next) => putTaggingAndAssert(s3, { bucket, key, tags, - expectedError: 'ServiceUnavailable' }, next), - ], done); - }); + it( + 'should get tags for an version even if it was deleted from ' + 'AWS directly (we rely on s3 metadata)', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => enableVersioning(s3, bucket, next), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), + (s3Vid, awsVid, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: s3Vid, expectedVersionId: s3Vid }, + () => next(null, s3Vid, awsVid) + ), + (s3Vid, awsVid, next) => + awsS3.deleteObject({ Bucket: awsBucket, Key: key, VersionId: awsVid }, err => + next(err, s3Vid) + ), + (s3Vid, next) => + getTaggingAndAssert( + s3, + { + bucket, + key, + versionId: s3Vid, + expectedTags: tags, + expectedVersionId: s3Vid, + getObject: false, + }, + next + ), + ], + done + ); + } + ); - it('should get tags for an version even if it was deleted from ' + - 'AWS directly (we rely on s3 metadata)', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => enableVersioning(s3, bucket, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', - (err, awsVid) => next(err, putData.VersionId, awsVid)), - (s3Vid, awsVid, next) => putTaggingAndAssert(s3, { bucket, key, - tags, versionId: s3Vid, expectedVersionId: s3Vid }, - () => next(null, s3Vid, awsVid)), - (s3Vid, awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, err => next(err, s3Vid)), - (s3Vid, next) => getTaggingAndAssert(s3, { bucket, key, - versionId: s3Vid, expectedTags: tags, - expectedVersionId: s3Vid, getObject: false }, next), - ], done); - }); - - it('should return an ServiceUnavailable if trying to put ' + - 'tags on version that was deleted from AWS directly', - done => { - const key = `somekey-${genUniqID()}`; - async.waterfall([ - next => s3.putObject({ Bucket: bucket, Key: key }, next), - (putData, next) => awsGetLatestVerId(key, '', - (err, awsVid) => next(err, putData.VersionId, awsVid)), - (s3Vid, awsVid, next) => awsS3.deleteObject({ Bucket: awsBucket, - Key: key, VersionId: awsVid }, err => next(err, s3Vid)), - (s3Vid, next) => putTaggingAndAssert(s3, { bucket, key, tags, - versionId: s3Vid, expectedError: - 'ServiceUnavailable' }, next), - ], done); - }); + it( + 'should return an ServiceUnavailable if trying to put ' + + 'tags on 
version that was deleted from AWS directly', + done => { + const key = `somekey-${genUniqID()}`; + async.waterfall( + [ + next => s3.putObject({ Bucket: bucket, Key: key }, next), + (putData, next) => + awsGetLatestVerId(key, '', (err, awsVid) => next(err, putData.VersionId, awsVid)), + (s3Vid, awsVid, next) => + awsS3.deleteObject({ Bucket: awsBucket, Key: key, VersionId: awsVid }, err => + next(err, s3Vid) + ), + (s3Vid, next) => + putTaggingAndAssert( + s3, + { bucket, key, tags, versionId: s3Vid, expectedError: 'ServiceUnavailable' }, + next + ), + ], + done + ); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/put/put.js b/tests/functional/aws-node-sdk/test/multipleBackend/put/put.js index 7def7262f1..dabea21edf 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/put/put.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/put/put.js @@ -5,13 +5,19 @@ const async = require('async'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); const { config } = require('../../../../../../lib/Config'); -const { createEncryptedBucketPromise } = - require('../../../lib/utility/createEncryptedBucket'); +const { createEncryptedBucketPromise } = require('../../../lib/utility/createEncryptedBucket'); const { versioningEnabled } = require('../../../lib/utility/versioning-util'); -const { describeSkipIfNotMultiple, getAwsRetry, awsLocation, - awsLocationEncryption, memLocation, fileLocation, genUniqID, isCEPH } - = require('../utils'); +const { + describeSkipIfNotMultiple, + getAwsRetry, + awsLocation, + awsLocationEncryption, + memLocation, + fileLocation, + genUniqID, + isCEPH, +} = require('../utils'); const bucket = `putaws${genUniqID()}`; const body = Buffer.from('I am a body', 'utf8'); const bigBody = Buffer.alloc(10485760); @@ -28,8 +34,7 @@ const retryTimeout = 10000; function getAwsSuccess(key, awsMD5, location, cb) { return getAwsRetry({ key }, 0, (err, res) => { - assert.strictEqual(err, null, 'Expected success, got error ' + - `on direct AWS call: ${err}`); + assert.strictEqual(err, null, 'Expected success, got error ' + `on direct AWS call: ${err}`); if (location === awsLocationEncryption) { // doesn't check ETag because it's different // with every PUT with encryption @@ -38,37 +43,31 @@ function getAwsSuccess(key, awsMD5, location, cb) { if (process.env.ENABLE_KMS_ENCRYPTION !== 'true') { assert.strictEqual(res.ETag, `"${awsMD5}"`); } - assert.strictEqual(res.Metadata['scal-location-constraint'], - location); + assert.strictEqual(res.Metadata['scal-location-constraint'], location); return cb(res); }); } function getAwsError(key, expectedError, cb) { return getAwsRetry({ key }, 0, err => { - assert.notStrictEqual(err, undefined, - 'Expected error but did not find one'); - assert.strictEqual(err.code, expectedError, - `Expected error code ${expectedError} but got ${err.code}`); + assert.notStrictEqual(err, undefined, 'Expected error but did not find one'); + assert.strictEqual(err.code, expectedError, `Expected error code ${expectedError} but got ${err.code}`); cb(); }); } function awsGetCheck(objectKey, s3MD5, awsMD5, location, cb) { process.stdout.write('Getting object\n'); - s3.getObject({ Bucket: bucket, Key: objectKey }, - function s3GetCallback(err, res) { + s3.getObject({ Bucket: bucket, Key: objectKey }, function s3GetCallback(err, res) { if (err && err.code === 'NetworkingError') { return setTimeout(() => { process.stdout.write('Getting object retry\n'); s3.getObject({ 
Bucket: bucket, Key: objectKey }, s3GetCallback); }, retryTimeout); } - assert.strictEqual(err, null, 'Expected success, got error ' + - `on call to AWS through S3: ${err}`); + assert.strictEqual(err, null, 'Expected success, got error ' + `on call to AWS through S3: ${err}`); assert.strictEqual(res.ETag, `"${s3MD5}"`); - assert.strictEqual(res.Metadata['scal-location-constraint'], - location); + assert.strictEqual(res.Metadata['scal-location-constraint'], location); process.stdout.write('Getting object from AWS\n'); return getAwsSuccess(objectKey, awsMD5, location, cb); }); @@ -85,8 +84,7 @@ describe('MultipleBackend put object', function testSuite() { if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { s3.createBucketPromise = createEncryptedBucketPromise; } - return s3.createBucketPromise({ Bucket: bucket }) - .catch(err => { + return s3.createBucketPromise({ Bucket: bucket }).catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; }); @@ -94,335 +92,342 @@ describe('MultipleBackend put object', function testSuite() { afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); // aws-sdk now (v2.363.0) returns 'UriParameterError' error - it.skip('should return an error to put request without a valid ' + - 'bucket name', - done => { - const key = `somekey-${genUniqID()}`; - s3.putObject({ Bucket: '', Key: key }, err => { - assert.notEqual(err, null, - 'Expected failure but got success'); - assert.strictEqual(err.code, 'MethodNotAllowed'); - done(); - }); + it.skip('should return an error to put request without a valid ' + 'bucket name', done => { + const key = `somekey-${genUniqID()}`; + s3.putObject({ Bucket: '', Key: key }, err => { + assert.notEqual(err, null, 'Expected failure but got success'); + assert.strictEqual(err.code, 'MethodNotAllowed'); + done(); }); + }); - describeSkipIfNotMultiple('with set location from "x-amz-meta-scal-' + - 'location-constraint" header', function describe() { - if (!process.env.S3_END_TO_END) { - this.retries(2); - } - - it('should return an error to put request without a valid ' + - 'location constraint', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': 'fail-region' } }; - s3.putObject(params, err => { - assert.notEqual(err, null, 'Expected failure but got ' + - 'success'); - assert.strictEqual(err.code, 'InvalidArgument'); - done(); - }); - }); + describeSkipIfNotMultiple( + 'with set location from "x-amz-meta-scal-' + 'location-constraint" header', + function describe() { + if (!process.env.S3_END_TO_END) { + this.retries(2); + } - it('should put an object to mem', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': memLocation }, - }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got 
error ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); + it('should return an error to put request without a valid ' + 'location constraint', done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': 'fail-region' }, + }; + s3.putObject(params, err => { + assert.notEqual(err, null, 'Expected failure but got ' + 'success'); + assert.strictEqual(err.code, 'InvalidArgument'); done(); }); }); - }); - it('should put a 0-byte object to mem', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Metadata: { 'scal-location-constraint': memLocation }, - }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - s3.getObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual(res.ETag, `"${emptyMD5}"`); - done(); + it('should put an object to mem', done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': memLocation }, + }; + s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.strictEqual(err, null, 'Expected success, ' + `got error ${err}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + done(); + }); }); }); - }); - it('should put only metadata to mem with mdonly header', done => { - const key = `mdonly-${genUniqID()}`; - const b64 = Buffer.from(correctMD5, 'hex').toString('base64'); - const params = { Bucket: bucket, Key: key, - Metadata: { 'scal-location-constraint': awsLocation, - 'mdonly': 'true', - 'md5chksum': b64, - 'size': body.length.toString(), - } }; - s3.putObject(params, err => { - assert.strictEqual(err, null, `Unexpected err: ${err}`); - s3.headObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - getAwsError(key, 'NoSuchKey', () => done()); + it('should put a 0-byte object to mem', done => { + const key = `somekey-${genUniqID()}`; + const params = { Bucket: bucket, Key: key, Metadata: { 'scal-location-constraint': memLocation } }; + s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.strictEqual(err, null, 'Expected success, ' + `got error ${err}`); + assert.strictEqual(res.ETag, `"${emptyMD5}"`); + done(); + }); }); }); - }); - it('should put actual object with body and mdonly header', done => { - const key = `mdonly-${genUniqID()}`; - const b64 = Buffer.from(correctMD5, 'hex').toString('base64'); - const params = { Bucket: bucket, Key: key, Body: body, - Metadata: { 'scal-location-constraint': awsLocation, - 'mdonly': 'true', - 'md5chksum': b64, - 'size': body.length.toString(), - } }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - awsGetCheck(key, correctMD5, correctMD5, awsLocation, - () => done()); + it('should put only metadata to mem with mdonly header', done => { + const key = 
`mdonly-${genUniqID()}`; + const b64 = Buffer.from(correctMD5, 'hex').toString('base64'); + const params = { + Bucket: bucket, + Key: key, + Metadata: { + 'scal-location-constraint': awsLocation, + mdonly: 'true', + md5chksum: b64, + size: body.length.toString(), + }, + }; + s3.putObject(params, err => { + assert.strictEqual(err, null, `Unexpected err: ${err}`); + s3.headObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + getAwsError(key, 'NoSuchKey', () => done()); + }); }); }); - }); - - it('should put 0-byte normally with mdonly header', done => { - const key = `mdonly-${genUniqID()}`; - const b64 = Buffer.from(emptyMD5, 'hex').toString('base64'); - const params = { Bucket: bucket, Key: key, - Metadata: { 'scal-location-constraint': awsLocation, - 'mdonly': 'true', - 'md5chksum': b64, - 'size': '0', - } }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - awsGetCheck(key, emptyMD5, emptyMD5, awsLocation, - () => done()); - }); - }); - it('should put a 0-byte object to AWS', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Metadata: { 'scal-location-constraint': awsLocation }, - }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return awsGetCheck(key, emptyMD5, emptyMD5, awsLocation, - () => done()); + it('should put actual object with body and mdonly header', done => { + const key = `mdonly-${genUniqID()}`; + const b64 = Buffer.from(correctMD5, 'hex').toString('base64'); + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { + 'scal-location-constraint': awsLocation, + mdonly: 'true', + md5chksum: b64, + size: body.length.toString(), + }, + }; + s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.strictEqual(err, null, 'Expected success, ' + `got error ${err}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + awsGetCheck(key, correctMD5, correctMD5, awsLocation, () => done()); + }); + }); }); - }); - it('should put an object to file', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': fileLocation }, - }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); + it('should put 0-byte normally with mdonly header', done => { + const key = `mdonly-${genUniqID()}`; + const b64 = Buffer.from(emptyMD5, 'hex').toString('base64'); + const params = { + Bucket: bucket, + Key: key, + Metadata: { + 'scal-location-constraint': awsLocation, + mdonly: 'true', + md5chksum: b64, + size: '0', + }, + }; + s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + awsGetCheck(key, emptyMD5, emptyMD5, awsLocation, () => done()); }); }); - }); - it('should put an object to AWS', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params, err => { - 
assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return awsGetCheck(key, correctMD5, correctMD5, awsLocation, - () => done()); + it('should put a 0-byte object to AWS', done => { + const key = `somekey-${genUniqID()}`; + const params = { Bucket: bucket, Key: key, Metadata: { 'scal-location-constraint': awsLocation } }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return awsGetCheck(key, emptyMD5, emptyMD5, awsLocation, () => done()); + }); }); - }); - it('should encrypt body only if bucket encrypted putting ' + - 'object to AWS', - done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return getAwsSuccess(key, correctMD5, awsLocation, - () => done()); + it('should put an object to file', done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': fileLocation }, + }; + s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.strictEqual(err, null, 'Expected success, ' + `got error ${err}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + done(); + }); + }); }); - }); - - it('should put an object to AWS with encryption', done => { - // Test refuses to skip using itSkipCeph so just mark it passed - if (isCEPH) { - return done(); - } - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': - awsLocationEncryption } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return awsGetCheck(key, correctMD5, correctMD5, - awsLocationEncryption, () => done()); + it('should put an object to AWS', done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': awsLocation }, + }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return awsGetCheck(key, correctMD5, correctMD5, awsLocation, () => done()); + }); }); - }); - it('should return a version id putting object to ' + - 'to AWS with versioning enabled', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, Body: body, - Metadata: { 'scal-location-constraint': awsLocation } }; - async.waterfall([ - next => s3.putBucketVersioning({ + it('should encrypt body only if bucket encrypted putting ' + 'object to AWS', done => { + const key = `somekey-${genUniqID()}`; + const params = { Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, err => next(err)), - next => s3.putObject(params, (err, res) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); - assert(res.VersionId); - next(null, res.ETag); - }), - (eTag, next) => getAwsSuccess(key, correctMD5, awsLocation, - () => next()), - ], done); - }); - - it('should put a large object to AWS', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: bigBody, - Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params, err => { 
- assert.equal(err, null, 'Expected sucess, ' + - `got error ${err}`); - return awsGetCheck(key, bigS3MD5, bigAWSMD5, awsLocation, - () => done()); + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': awsLocation }, + }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return getAwsSuccess(key, correctMD5, awsLocation, () => done()); + }); }); - }); - it('should put objects with same key to AWS ' + - 'then file, and object should only be present in file', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': awsLocation } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - params.Metadata = - { 'scal-location-constraint': fileLocation }; + it('should put an object to AWS with encryption', done => { + // Test refuses to skip using itSkipCeph so just mark it passed + if (isCEPH) { + return done(); + } + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': awsLocationEncryption }, + }; return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return s3.getObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual( - res.Metadata['scal-location-constraint'], - fileLocation); - return getAwsError(key, 'NoSuchKey', done); - }); + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return awsGetCheck(key, correctMD5, correctMD5, awsLocationEncryption, () => done()); }); }); - }); - it('should put objects with same key to file ' + - 'then AWS, and object should only be present on AWS', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': fileLocation } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - params.Metadata = { - 'scal-location-constraint': awsLocation }; + it('should return a version id putting object to ' + 'to AWS with versioning enabled', done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': awsLocation }, + }; + async.waterfall( + [ + next => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + err => next(err) + ), + next => + s3.putObject(params, (err, res) => { + assert.strictEqual( + err, + null, + 'Expected success ' + `putting object, got error ${err}` + ); + assert(res.VersionId); + next(null, res.ETag); + }), + (eTag, next) => getAwsSuccess(key, correctMD5, awsLocation, () => next()), + ], + done + ); + }); + + it('should put a large object to AWS', done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: bigBody, + Metadata: { 'scal-location-constraint': awsLocation }, + }; return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return awsGetCheck(key, correctMD5, correctMD5, - awsLocation, () => done()); + assert.equal(err, null, 'Expected sucess, ' + `got error ${err}`); + return awsGetCheck(key, bigS3MD5, bigAWSMD5, awsLocation, () => done()); }); }); - }); - it('should put two 
objects to AWS with same ' + - 'key, and newest object should be returned', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': awsLocation, - 'unique-header': 'first object' } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - params.Metadata = { 'scal-location-constraint': awsLocation, - 'unique-header': 'second object' }; + it( + 'should put objects with same key to AWS ' + 'then file, and object should only be present in file', + done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': awsLocation }, + }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + params.Metadata = { 'scal-location-constraint': fileLocation }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + assert.strictEqual(res.Metadata['scal-location-constraint'], fileLocation); + return getAwsError(key, 'NoSuchKey', done); + }); + }); + }); + } + ); + + it( + 'should put objects with same key to file ' + 'then AWS, and object should only be present on AWS', + done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': fileLocation }, + }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + params.Metadata = { + 'scal-location-constraint': awsLocation, + }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return awsGetCheck(key, correctMD5, correctMD5, awsLocation, () => done()); + }); + }); + } + ); + + it('should put two objects to AWS with same ' + 'key, and newest object should be returned', done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': awsLocation, 'unique-header': 'first object' }, + }; return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return awsGetCheck(key, correctMD5, correctMD5, - awsLocation, result => { - assert.strictEqual(result.Metadata - ['unique-header'], 'second object'); - done(); + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + params.Metadata = { 'scal-location-constraint': awsLocation, 'unique-header': 'second object' }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return awsGetCheck(key, correctMD5, correctMD5, awsLocation, result => { + assert.strictEqual(result.Metadata['unique-header'], 'second object'); + done(); + }); }); }); }); - }); - }); + } + ); }); }); -describeSkipIfNotMultiple('MultipleBackend put object based on bucket location', -() => { +describeSkipIfNotMultiple('MultipleBackend put object based on bucket location', () => { withV4(sigCfg => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); @@ -431,84 +436,90 @@ describeSkipIfNotMultiple('MultipleBackend put object based on bucket location', afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - 
.then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); - it('should put an object to mem with no location header', - done => { + it('should put an object to mem with no location header', done => { process.stdout.write('Creating bucket\n'); - return s3.createBucket({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: memLocation, + return s3.createBucket( + { + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: memLocation, + }, }, - }, err => { - assert.equal(err, null, `Error creating bucket: ${err}`); - process.stdout.write('Putting object\n'); - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, Body: body }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); + err => { + assert.equal(err, null, `Error creating bucket: ${err}`); + process.stdout.write('Putting object\n'); + const key = `somekey-${genUniqID()}`; + const params = { Bucket: bucket, Key: key, Body: body }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.strictEqual(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + done(); + }); }); - }); - }); + } + ); }); it('should put an object to file with no location header', done => { process.stdout.write('Creating bucket\n'); - return s3.createBucket({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: fileLocation, + return s3.createBucket( + { + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: fileLocation, + }, }, - }, err => { - assert.equal(err, null, `Error creating bucket: ${err}`); - process.stdout.write('Putting object\n'); - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, Body: body }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.strictEqual(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - assert.strictEqual(res.ETag, `"${correctMD5}"`); - done(); + err => { + assert.equal(err, null, `Error creating bucket: ${err}`); + process.stdout.write('Putting object\n'); + const key = `somekey-${genUniqID()}`; + const params = { Bucket: bucket, Key: key, Body: body }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.strictEqual(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + done(); + }); }); - }); - }); + } + ); }); it('should put an object to AWS with no location header', done 
=> { process.stdout.write('Creating bucket\n'); - return s3.createBucket({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: awsLocation, + return s3.createBucket( + { + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: awsLocation, + }, }, - }, err => { - assert.equal(err, null, `Error creating bucket: ${err}`); - process.stdout.write('Putting object\n'); - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, Body: body }; - return s3.putObject(params, err => { - assert.equal(err, null, - `Expected success, got error ${err}`); - return awsGetCheck(key, correctMD5, correctMD5, undefined, - () => done()); - }); - }); + err => { + assert.equal(err, null, `Error creating bucket: ${err}`); + process.stdout.write('Putting object\n'); + const key = `somekey-${genUniqID()}`; + const params = { Bucket: bucket, Key: key, Body: body }; + return s3.putObject(params, err => { + assert.equal(err, null, `Expected success, got error ${err}`); + return awsGetCheck(key, correctMD5, correctMD5, undefined, () => done()); + }); + } + ); }); }); }); @@ -521,15 +532,16 @@ describe('MultipleBackend put based on request endpoint', () => { }); after(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in after: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write(`Error in after: ${err}\n`); + throw err; + }); }); it('should create bucket in corresponding backend', done => { @@ -542,8 +554,7 @@ describe('MultipleBackend put based on request endpoint', () => { assert.strictEqual(err, null, `Error creating bucket: ${err}`); const key = `somekey-${genUniqID()}`; s3.putObject({ Bucket: bucket, Key: key, Body: body }, err => { - assert.strictEqual(err, null, 'Expected succes, ' + - `got error ${JSON.stringify(err)}`); + assert.strictEqual(err, null, 'Expected succes, ' + `got error ${JSON.stringify(err)}`); const host = request.service.endpoint.hostname; let endpoint = config.restEndpoints[host]; // s3 returns '' for us-east-1 @@ -551,13 +562,10 @@ describe('MultipleBackend put based on request endpoint', () => { endpoint = ''; } s3.getBucketLocation({ Bucket: bucket }, (err, data) => { - assert.strictEqual(err, null, 'Expected succes, ' + - `got error ${JSON.stringify(err)}`); + assert.strictEqual(err, null, 'Expected succes, ' + `got error ${JSON.stringify(err)}`); assert.strictEqual(data.LocationConstraint, endpoint); - s3.getObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.strictEqual(err, null, 'Expected succes, ' + - `got error ${JSON.stringify(err)}`); + s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.strictEqual(err, null, 'Expected succes, ' + `got error ${JSON.stringify(err)}`); assert.strictEqual(res.ETag, `"${correctMD5}"`); done(); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/put/putAzure.js b/tests/functional/aws-node-sdk/test/multipleBackend/put/putAzure.js index 4f8e590e5c..e4c115f32f 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/put/putAzure.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/put/putAzure.js @@ -35,20 +35,24 @@ let bucketUtil; let s3; function azureGetCheck(objectKey, azureMD5, azureMetadata, cb) 
{ - azureClient.getContainerClient(azureContainerName).getProperties(objectKey).then(res => { - const resMD5 = convertMD5(res.contentSettings.contentMD5); - assert.strictEqual(resMD5, azureMD5); - assert.deepStrictEqual(res.metadata, azureMetadata); - return cb(); - }, err => { - assert.strictEqual(err, null, 'Expected success, got error ' + - `on call to Azure: ${err}`); - return cb(); - }); + azureClient + .getContainerClient(azureContainerName) + .getProperties(objectKey) + .then( + res => { + const resMD5 = convertMD5(res.contentSettings.contentMD5); + assert.strictEqual(resMD5, azureMD5); + assert.deepStrictEqual(res.metadata, azureMetadata); + return cb(); + }, + err => { + assert.strictEqual(err, null, 'Expected success, got error ' + `on call to Azure: ${err}`); + return cb(); + } + ); } -describeSkipIfNotMultipleOrCeph('MultipleBackend put object to AZURE', function -describeF() { +describeSkipIfNotMultipleOrCeph('MultipleBackend put object to AZURE', function describeF() { this.timeout(250000); withV4(sigCfg => { beforeEach(function beforeEachF() { @@ -59,65 +63,78 @@ describeF() { afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(azureContainerName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(azureContainerName); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(azureContainerName) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(azureContainerName); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); describe('with bucket location header', () => { beforeEach(done => - s3.createBucket({ Bucket: azureContainerName, - CreateBucketConfiguration: { - LocationConstraint: azureLocation, + s3.createBucket( + { + Bucket: azureContainerName, + CreateBucketConfiguration: { + LocationConstraint: azureLocation, + }, }, - }, done)); + done + ) + ); - it('should return a NotImplemented error if try to put ' + - 'versioning to bucket with Azure location', done => { - const params = { - Bucket: azureContainerName, - VersioningConfiguration: { - Status: 'Enabled', - }, - }; - s3.putBucketVersioning(params, err => { - assert.strictEqual(err.code, 'NotImplemented'); - done(); - }); - }); + it( + 'should return a NotImplemented error if try to put ' + 'versioning to bucket with Azure location', + done => { + const params = { + Bucket: azureContainerName, + VersioningConfiguration: { + Status: 'Enabled', + }, + }; + s3.putBucketVersioning(params, err => { + assert.strictEqual(err.code, 'NotImplemented'); + done(); + }); + } + ); - it('should put an object to Azure, with no object location ' + - 'header, based on bucket location', function it(done) { - const params = { - Bucket: azureContainerName, - Key: this.test.keyName, - Body: normalBody, - }; - async.waterfall([ - next => s3.putObject(params, err => setTimeout(() => - next(err), azureTimeout)), - next => azureGetCheck(this.test.keyName, normalMD5, {}, - next), - ], done); - }); + it( + 'should put an object to Azure, with no object location ' + 'header, based on bucket location', + function it(done) { + const params = { + Bucket: azureContainerName, + Key: this.test.keyName, + Body: normalBody, + }; + async.waterfall( + [ + next => s3.putObject(params, err => setTimeout(() => next(err), azureTimeout)), + next => azureGetCheck(this.test.keyName, normalMD5, {}, next), + ], + done + ); + } + ); }); 
describe('with no bucket location header', () => { beforeEach(() => - s3.createBucket({ Bucket: azureContainerName }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - })); + s3 + .createBucket({ Bucket: azureContainerName }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }) + ); keys.forEach(key => { - it(`should put a ${key.describe} object to Azure`, - function itF(done) { + it(`should put a ${key.describe} object to Azure`, function itF(done) { const params = { Bucket: azureContainerName, Key: this.test.keyName, @@ -125,23 +142,20 @@ describeF() { Body: key.body, }; s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - setTimeout(() => - azureGetCheck(this.test.keyName, - key.MD5, azureMetadata, - () => done()), azureTimeout); + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + setTimeout( + () => azureGetCheck(this.test.keyName, key.MD5, azureMetadata, () => done()), + azureTimeout + ); }); }); }); - it('should put a object to Azure location with bucketMatch=false', - function itF(done) { + it('should put a object to Azure location with bucketMatch=false', function itF(done) { const params = { Bucket: azureContainerName, Key: this.test.keyName, - Metadata: { 'scal-location-constraint': - azureLocationMismatch }, + Metadata: { 'scal-location-constraint': azureLocationMismatch }, Body: normalBody, }; const azureMetadataMismatch = { @@ -150,18 +164,21 @@ describeF() { /* eslint-enable camelcase */ }; s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - setTimeout(() => - azureGetCheck( - `${azureContainerName}/${this.test.keyName}`, - normalMD5, azureMetadataMismatch, - () => done()), azureTimeout); + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + setTimeout( + () => + azureGetCheck( + `${azureContainerName}/${this.test.keyName}`, + normalMD5, + azureMetadataMismatch, + () => done() + ), + azureTimeout + ); }); }); - it('should return error ServiceUnavailable putting an invalid ' + - 'key name to Azure', done => { + it('should return error ServiceUnavailable putting an invalid ' + 'key name to Azure', done => { const params = { Bucket: azureContainerName, Key: '.', @@ -174,141 +191,169 @@ describeF() { }); }); - it('should return error NotImplemented putting a ' + - 'version to Azure', function itF(done) { - s3.putBucketVersioning({ - Bucket: azureContainerName, - VersioningConfiguration: versioningEnabled, - }, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - const params = { Bucket: azureContainerName, - Key: this.test.keyName, - Body: normalBody, - Metadata: { 'scal-location-constraint': - azureLocation } }; - s3.putObject(params, err => { - assert.strictEqual(err.code, 'NotImplemented'); - done(); - }); - }); - }); - - it('should put two objects to Azure with same ' + - 'key, and newest object should be returned', function itF(done) { - const params = { - Bucket: azureContainerName, - Key: this.test.keyName, - Metadata: { 'scal-location-constraint': azureLocation }, - }; - async.waterfall([ - next => s3.putObject(params, err => next(err)), - next => { - params.Body = normalBody; - s3.putObject(params, err => setTimeout(() => - next(err), azureTimeout)); - }, - next => { - setTimeout(() => { - azureGetCheck(this.test.keyName, normalMD5, - azureMetadata, next); - }, azureTimeout); + it('should return 
error NotImplemented putting a ' + 'version to Azure', function itF(done) { + s3.putBucketVersioning( + { + Bucket: azureContainerName, + VersioningConfiguration: versioningEnabled, }, - ], done); + err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + const params = { + Bucket: azureContainerName, + Key: this.test.keyName, + Body: normalBody, + Metadata: { 'scal-location-constraint': azureLocation }, + }; + s3.putObject(params, err => { + assert.strictEqual(err.code, 'NotImplemented'); + done(); + }); + } + ); }); - it('should put objects with same key to Azure ' + - 'then file, and object should only be present in file', function - itF(done) { - const params = { - Bucket: azureContainerName, - Key: this.test.keyName, - Body: normalBody, - Metadata: { 'scal-location-constraint': azureLocation } }; - async.waterfall([ - next => s3.putObject(params, err => next(err)), - next => { - params.Metadata = { 'scal-location-constraint': - fileLocation }; - s3.putObject(params, err => setTimeout(() => - next(err), azureTimeout)); - }, - next => s3.getObject({ + it( + 'should put two objects to Azure with same ' + 'key, and newest object should be returned', + function itF(done) { + const params = { Bucket: azureContainerName, Key: this.test.keyName, - }, (err, res) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual( - res.Metadata['scal-location-constraint'], - fileLocation); - next(); - }), - next => azureClient.getContainerClient(azureContainerName) - .getProperties(this.test.keyName).then(() => { - assert.fail('unexpected success'); - next(); - }, err => { - assert.strictEqual(err.code, 'NotFound'); - next(); - }), - ], done); - }); + Metadata: { 'scal-location-constraint': azureLocation }, + }; + async.waterfall( + [ + next => s3.putObject(params, err => next(err)), + next => { + params.Body = normalBody; + s3.putObject(params, err => setTimeout(() => next(err), azureTimeout)); + }, + next => { + setTimeout(() => { + azureGetCheck(this.test.keyName, normalMD5, azureMetadata, next); + }, azureTimeout); + }, + ], + done + ); + } + ); - it('should put objects with same key to file ' + - 'then Azure, and object should only be present on Azure', - function itF(done) { - const params = { Bucket: azureContainerName, Key: - this.test.keyName, - Body: normalBody, - Metadata: { 'scal-location-constraint': fileLocation } }; - async.waterfall([ - next => s3.putObject(params, err => next(err)), - next => { - params.Metadata = { - 'scal-location-constraint': azureLocation, - }; - s3.putObject(params, err => setTimeout(() => - next(err), azureTimeout)); - }, - next => azureGetCheck(this.test.keyName, normalMD5, - azureMetadata, next), - ], done); - }); + it( + 'should put objects with same key to Azure ' + 'then file, and object should only be present in file', + function itF(done) { + const params = { + Bucket: azureContainerName, + Key: this.test.keyName, + Body: normalBody, + Metadata: { 'scal-location-constraint': azureLocation }, + }; + async.waterfall( + [ + next => s3.putObject(params, err => next(err)), + next => { + params.Metadata = { 'scal-location-constraint': fileLocation }; + s3.putObject(params, err => setTimeout(() => next(err), azureTimeout)); + }, + next => + s3.getObject( + { + Bucket: azureContainerName, + Key: this.test.keyName, + }, + (err, res) => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + assert.strictEqual(res.Metadata['scal-location-constraint'], fileLocation); + next(); + } + ), + 
next => + azureClient + .getContainerClient(azureContainerName) + .getProperties(this.test.keyName) + .then( + () => { + assert.fail('unexpected success'); + next(); + }, + err => { + assert.strictEqual(err.code, 'NotFound'); + next(); + } + ), + ], + done + ); + } + ); + + it( + 'should put objects with same key to file ' + 'then Azure, and object should only be present on Azure', + function itF(done) { + const params = { + Bucket: azureContainerName, + Key: this.test.keyName, + Body: normalBody, + Metadata: { 'scal-location-constraint': fileLocation }, + }; + async.waterfall( + [ + next => s3.putObject(params, err => next(err)), + next => { + params.Metadata = { + 'scal-location-constraint': azureLocation, + }; + s3.putObject(params, err => setTimeout(() => next(err), azureTimeout)); + }, + next => azureGetCheck(this.test.keyName, normalMD5, azureMetadata, next), + ], + done + ); + } + ); describe('with ongoing MPU with same key name', () => { beforeEach(function beFn(done) { - s3.createMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.keyName, - Metadata: { 'scal-location-constraint': azureLocation }, - }, (err, res) => { - assert.equal(err, null, `Err creating MPU: ${err}`); - this.currentTest.uploadId = res.UploadId; - done(); - }); + s3.createMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.keyName, + Metadata: { 'scal-location-constraint': azureLocation }, + }, + (err, res) => { + assert.equal(err, null, `Err creating MPU: ${err}`); + this.currentTest.uploadId = res.UploadId; + done(); + } + ); }); afterEach(function afFn(done) { - s3.abortMultipartUpload({ - Bucket: azureContainerName, - Key: this.currentTest.keyName, - UploadId: this.currentTest.uploadId, - }, err => { - assert.equal(err, null, `Err aborting MPU: ${err}`); - done(); - }); + s3.abortMultipartUpload( + { + Bucket: azureContainerName, + Key: this.currentTest.keyName, + UploadId: this.currentTest.uploadId, + }, + err => { + assert.equal(err, null, `Err aborting MPU: ${err}`); + done(); + } + ); }); it('should return ServiceUnavailable', function itFn(done) { - s3.putObject({ - Bucket: azureContainerName, - Key: this.test.keyName, - Metadata: { 'scal-location-constraint': azureLocation }, - }, err => { - assert.strictEqual(err.code, 'ServiceUnavailable'); - done(); - }); + s3.putObject( + { + Bucket: azureContainerName, + Key: this.test.keyName, + Metadata: { 'scal-location-constraint': azureLocation }, + }, + err => { + assert.strictEqual(err.code, 'ServiceUnavailable'); + done(); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/put/putGcp.js b/tests/functional/aws-node-sdk/test/multipleBackend/put/putGcp.js index c12e30adf5..776e033bda 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/put/putGcp.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/put/putGcp.js @@ -2,8 +2,14 @@ const assert = require('assert'); const withV4 = require('../../support/withV4'); const BucketUtility = require('../../../lib/utility/bucket-util'); -const { describeSkipIfNotMultipleOrCeph, gcpClient, gcpBucket, - gcpLocation, fileLocation, genUniqID } = require('../utils'); +const { + describeSkipIfNotMultipleOrCeph, + gcpClient, + gcpBucket, + gcpLocation, + fileLocation, + genUniqID, +} = require('../utils'); const bucket = `putgcp${genUniqID()}`; const body = Buffer.from('I am a body', 'utf8'); @@ -19,87 +25,87 @@ let s3; const retryTimeout = 10000; function checkGcp(key, gcpMD5, location, callback) { - gcpClient.getObject({ - Bucket: 
gcpBucket, - Key: key, - }, (err, res) => { - assert.equal(err, null, `Expected success, got error ${err}`); - if (res.Metadata && res.Metadata['scal-etag']) { - assert.strictEqual(res.Metadata['scal-etag'], gcpMD5); - } else { - assert.strictEqual( - res.ETag.substring(1, res.ETag.length - 1), gcpMD5); + gcpClient.getObject( + { + Bucket: gcpBucket, + Key: key, + }, + (err, res) => { + assert.equal(err, null, `Expected success, got error ${err}`); + if (res.Metadata && res.Metadata['scal-etag']) { + assert.strictEqual(res.Metadata['scal-etag'], gcpMD5); + } else { + assert.strictEqual(res.ETag.substring(1, res.ETag.length - 1), gcpMD5); + } + assert.strictEqual(res.Metadata['scal-location-constraint'], location); + callback(res); } - assert.strictEqual(res.Metadata['scal-location-constraint'], - location); - callback(res); - }); + ); } function checkGcpError(key, expectedError, callback) { setTimeout(() => { - gcpClient.getObject({ - Bucket: gcpBucket, - Key: key, - }, err => { - assert.notStrictEqual(err, undefined, - 'Expected error but did not find one'); - assert.strictEqual(err.code, expectedError, - `Expected error code ${expectedError} but got ${err.code}`); - callback(); - }); + gcpClient.getObject( + { + Bucket: gcpBucket, + Key: key, + }, + err => { + assert.notStrictEqual(err, undefined, 'Expected error but did not find one'); + assert.strictEqual(err.code, expectedError, `Expected error code ${expectedError} but got ${err.code}`); + callback(); + } + ); }, 1000); } function gcpGetCheck(objectKey, s3MD5, gcpMD5, location, callback) { process.stdout.write('Getting object\n'); - s3.getObject({ Bucket: bucket, Key: objectKey }, - function s3GetCallback(err, res) { + s3.getObject({ Bucket: bucket, Key: objectKey }, function s3GetCallback(err, res) { if (err && err.code === 'NetworkingError') { return setTimeout(() => { process.stdout.write('Getting object retry\n'); s3.getObject({ Bucket: bucket, Key: objectKey }, s3GetCallback); }, retryTimeout); } - assert.strictEqual(err, null, 'Expected success, got error ' + - `on call to GCP through S3: ${err}`); + assert.strictEqual(err, null, 'Expected success, got error ' + `on call to GCP through S3: ${err}`); assert.strictEqual(res.ETag, `"${s3MD5}"`); - assert.strictEqual(res.Metadata['scal-location-constraint'], - location); + assert.strictEqual(res.Metadata['scal-location-constraint'], location); process.stdout.write('Getting object from GCP\n'); return checkGcp(objectKey, gcpMD5, location, callback); }); } -describeSkipIfNotMultipleOrCeph('MultipleBackend put object to GCP', function -describeFn() { +describeSkipIfNotMultipleOrCeph('MultipleBackend put object to GCP', function describeFn() { this.timeout(250000); withV4(sigCfg => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return 
bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); - describe('with set location from "x-amz-meta-scal-' + - 'location-constraint" header', function describe() { + describe('with set location from "x-amz-meta-scal-' + 'location-constraint" header', function describe() { if (!process.env.S3_END_TO_END) { this.retries(2); } @@ -126,14 +132,15 @@ describeFn() { const { s3MD5, gcpMD5 } = test.output; it(test.msg, done => { const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, Body, + const params = { + Bucket: bucket, + Key: key, + Body, Metadata: { 'scal-location-constraint': location }, }; return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return gcpGetCheck(key, s3MD5, gcpMD5, location, - () => done()); + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return gcpGetCheck(key, s3MD5, gcpMD5, location, () => done()); }); }); }); @@ -144,75 +151,71 @@ describeFn() { this.retries(2); } - it('should put objects with same key to GCP ' + - 'then file, and object should only be present in file', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': gcpLocation } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - params.Metadata = - { 'scal-location-constraint': fileLocation }; + it( + 'should put objects with same key to GCP ' + 'then file, and object should only be present in file', + done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': gcpLocation }, + }; return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return s3.getObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - assert.strictEqual( - res.Metadata['scal-location-constraint'], - fileLocation); - return checkGcpError(key, 'NoSuchKey', - () => done()); + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + params.Metadata = { 'scal-location-constraint': fileLocation }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + assert.strictEqual(res.Metadata['scal-location-constraint'], fileLocation); + return checkGcpError(key, 'NoSuchKey', () => done()); + }); }); }); - }); - }); + } + ); - it('should put objects with same key to file ' + - 'then GCP, and object should only be present on GCP', done => { - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, - Body: body, - Metadata: { 'scal-location-constraint': fileLocation } }; - return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - params.Metadata = { - 'scal-location-constraint': gcpLocation }; + it( + 'should put objects with same key to file ' + 'then GCP, and object should only be present on GCP', + done => { + const key = `somekey-${genUniqID()}`; + const params = { + Bucket: bucket, + Key: key, + Body: body, + Metadata: { 'scal-location-constraint': fileLocation }, + }; return s3.putObject(params, err => { - 
assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return gcpGetCheck(key, correctMD5, correctMD5, - gcpLocation, () => done()); + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + params.Metadata = { + 'scal-location-constraint': gcpLocation, + }; + return s3.putObject(params, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return gcpGetCheck(key, correctMD5, correctMD5, gcpLocation, () => done()); + }); }); - }); - }); + } + ); - it('should put two objects to GCP with same ' + - 'key, and newest object should be returned', done => { + it('should put two objects to GCP with same ' + 'key, and newest object should be returned', done => { const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, + const params = { + Bucket: bucket, + Key: key, Body: body, - Metadata: { 'scal-location-constraint': gcpLocation, - 'unique-header': 'first object' } }; + Metadata: { 'scal-location-constraint': gcpLocation, 'unique-header': 'first object' }, + }; return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - params.Metadata = { 'scal-location-constraint': gcpLocation, - 'unique-header': 'second object' }; + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + params.Metadata = { 'scal-location-constraint': gcpLocation, 'unique-header': 'second object' }; return s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${err}`); - return gcpGetCheck(key, correctMD5, correctMD5, - gcpLocation, result => { - assert.strictEqual(result.Metadata - ['unique-header'], 'second object'); - done(); - }); + assert.equal(err, null, 'Expected success, ' + `got error ${err}`); + return gcpGetCheck(key, correctMD5, correctMD5, gcpLocation, result => { + assert.strictEqual(result.Metadata['unique-header'], 'second object'); + done(); + }); }); }); }); @@ -220,8 +223,7 @@ describeFn() { }); }); -describeSkipIfNotMultipleOrCeph('MultipleBackend put object' + - 'based on bucket location', () => { +describeSkipIfNotMultipleOrCeph('MultipleBackend put object' + 'based on bucket location', () => { withV4(sigCfg => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); @@ -230,35 +232,38 @@ describeSkipIfNotMultipleOrCeph('MultipleBackend put object' + afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); it('should put an object to GCP with no location header', done => { process.stdout.write('Creating bucket\n'); - return s3.createBucket({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: gcpLocation, + return s3.createBucket( + { + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: gcpLocation, + }, }, - }, err => { - assert.equal(err, null, `Error creating bucket: ${err}`); - process.stdout.write('Putting object\n'); - const key = `somekey-${genUniqID()}`; - const params = { Bucket: bucket, Key: key, Body: body }; - return s3.putObject(params, err => { - assert.equal(err, null, - `Expected success, 
got error ${err}`); - return gcpGetCheck(key, correctMD5, correctMD5, undefined, - () => done()); - }); - }); + err => { + assert.equal(err, null, `Error creating bucket: ${err}`); + process.stdout.write('Putting object\n'); + const key = `somekey-${genUniqID()}`; + const params = { Bucket: bucket, Key: key, Body: body }; + return s3.putObject(params, err => { + assert.equal(err, null, `Expected success, got error ${err}`); + return gcpGetCheck(key, correctMD5, correctMD5, undefined, () => done()); + }); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/unknownEndpoint.js b/tests/functional/aws-node-sdk/test/multipleBackend/unknownEndpoint.js index 834af24875..753f7808b9 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/unknownEndpoint.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/unknownEndpoint.js @@ -23,70 +23,70 @@ describe('Requests to ip endpoint not in config', () => { after(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); - it('should accept put bucket request ' + - 'to IP address endpoint that is not in config using ' + - 'path style', + it( + 'should accept put bucket request ' + 'to IP address endpoint that is not in config using ' + 'path style', done => { s3.createBucket({ Bucket: bucket }, err => { assert.ifError(err); done(); }); - }); + } + ); const itSkipIfE2E = process.env.S3_END_TO_END ? it.skip : it; // skipping in E2E since in E2E 127.0.0.3 resolving to // localhost which is in config. Once integration is using // different machines we can update this. 
- itSkipIfE2E('should show us-east-1 as bucket location since' + - 'IP address endpoint was not in config thereby ' + - 'defaulting to us-east-1', + itSkipIfE2E( + 'should show us-east-1 as bucket location since' + + 'IP address endpoint was not in config thereby ' + + 'defaulting to us-east-1', done => { - s3.getBucketLocation({ Bucket: bucket }, - (err, res) => { - assert.ifError(err); - // us-east-1 is returned as empty string - assert.strictEqual(res - .LocationConstraint, ''); - done(); - }); - }); + s3.getBucketLocation({ Bucket: bucket }, (err, res) => { + assert.ifError(err); + // us-east-1 is returned as empty string + assert.strictEqual(res.LocationConstraint, ''); + done(); + }); + } + ); - it('should accept put object request ' + - 'to IP address endpoint that is not in config using ' + - 'path style and use the bucket location for the object', + it( + 'should accept put object request ' + + 'to IP address endpoint that is not in config using ' + + 'path style and use the bucket location for the object', done => { - s3.putObject({ Bucket: bucket, Key: key, Body: body }, - err => { + s3.putObject({ Bucket: bucket, Key: key, Body: body }, err => { + assert.ifError(err); + return s3.headObject({ Bucket: bucket, Key: key }, err => { assert.ifError(err); - return s3.headObject({ Bucket: bucket, Key: key }, - err => { - assert.ifError(err); - done(); - }); + done(); }); - }); + }); + } + ); - it('should accept get object request ' + - 'to IP address endpoint that is not in config using ' + - 'path style', + it( + 'should accept get object request ' + 'to IP address endpoint that is not in config using ' + 'path style', done => { - s3.getObject({ Bucket: bucket, Key: key }, - (err, res) => { - assert.ifError(err); - assert.strictEqual(res.ETag, expectedETag); - done(); - }); - }); + s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { + assert.ifError(err); + assert.strictEqual(res.ETag, expectedETag); + done(); + }); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/utils.js b/tests/functional/aws-node-sdk/test/multipleBackend/utils.js index 142479ef1b..bcce8a3290 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/utils.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/utils.js @@ -54,11 +54,9 @@ if (config.backends.data === 'multiple') { const gcpConfig = getRealAwsConfig(gcpLocation); gcpClient = new GCP(gcpConfig); gcpBucket = config.locationConstraints[gcpLocation].details.bucketName; - gcpBucketMPU = - config.locationConstraints[gcpLocation].details.mpuBucketName; + gcpBucketMPU = config.locationConstraints[gcpLocation].details.mpuBucketName; } - function _assertErrorResult(err, expectedError, desc) { if (!expectedError) { assert.strictEqual(err, null, `got error for ${desc}: ${err}`); @@ -136,11 +134,12 @@ utils.getAzureClient = () => { params[key] = envVariable; return true; } - if (config.locationConstraints[azureLocation] && + if ( + config.locationConstraints[azureLocation] && config.locationConstraints[azureLocation].details && - config.locationConstraints[azureLocation].details[key]) { - params[key] = - config.locationConstraints[azureLocation].details[key]; + config.locationConstraints[azureLocation].details[key] + ) { + params[key] = config.locationConstraints[azureLocation].details[key]; return true; } return false; @@ -150,20 +149,18 @@ utils.getAzureClient = () => { return undefined; } - const cred = new azure.StorageSharedKeyCredential( - params.azureStorageAccountName, - params.azureStorageAccessKey, - ); 
+ const cred = new azure.StorageSharedKeyCredential(params.azureStorageAccountName, params.azureStorageAccessKey); return new azure.BlobServiceClient(params.azureStorageEndpoint, cred); }; utils.getAzureContainerName = azureLocation => { let azureContainerName; - if (config.locationConstraints[azureLocation] && - config.locationConstraints[azureLocation].details && - config.locationConstraints[azureLocation].details.azureContainerName) { - azureContainerName = - config.locationConstraints[azureLocation].details.azureContainerName; + if ( + config.locationConstraints[azureLocation] && + config.locationConstraints[azureLocation].details && + config.locationConstraints[azureLocation].details.azureContainerName + ) { + azureContainerName = config.locationConstraints[azureLocation].details.azureContainerName; } return azureContainerName; }; @@ -194,8 +191,7 @@ utils.getAzureKeys = () => { // For contentMD5, Azure requires base64 but AWS requires hex, so convert // from base64 to hex -utils.convertMD5 = contentMD5 => - Buffer.from(contentMD5, 'base64').toString('hex'); +utils.convertMD5 = contentMD5 => Buffer.from(contentMD5, 'base64').toString('hex'); utils.expectedETag = (body, getStringified = true) => { const eTagValue = crypto.createHash('md5').update(body).digest('hex'); @@ -206,8 +202,8 @@ utils.expectedETag = (body, getStringified = true) => { }; utils.putToAwsBackend = (s3, bucket, key, body, cb) => { - s3.putObject({ Bucket: bucket, Key: key, Body: body, - Metadata: { 'scal-location-constraint': awsLocation } }, + s3.putObject( + { Bucket: bucket, Key: key, Body: body, Metadata: { 'scal-location-constraint': awsLocation } }, (err, result) => { cb(err, result.VersionId); } @@ -215,31 +211,30 @@ utils.putToAwsBackend = (s3, bucket, key, body, cb) => { }; utils.enableVersioning = (s3, bucket, cb) => { - s3.putBucketVersioning({ Bucket: bucket, - VersioningConfiguration: versioningEnabled }, err => { - assert.strictEqual(err, null, 'Expected success ' + - `enabling versioning, got error ${err}`); + s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: versioningEnabled }, err => { + assert.strictEqual(err, null, 'Expected success ' + `enabling versioning, got error ${err}`); cb(); }); }; utils.suspendVersioning = (s3, bucket, cb) => { - s3.putBucketVersioning({ Bucket: bucket, - VersioningConfiguration: versioningSuspended }, err => { - assert.strictEqual(err, null, 'Expected success ' + - `enabling versioning, got error ${err}`); + s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: versioningSuspended }, err => { + assert.strictEqual(err, null, 'Expected success ' + `enabling versioning, got error ${err}`); cb(); }); }; utils.mapToAwsPuts = (s3, bucket, key, dataArray, cb) => { - async.mapSeries(dataArray, (data, next) => { - utils.putToAwsBackend(s3, bucket, key, data, next); - }, (err, results) => { - assert.strictEqual(err, null, 'Expected success ' + - `putting object, got error ${err}`); - cb(null, results); - }); + async.mapSeries( + dataArray, + (data, next) => { + utils.putToAwsBackend(s3, bucket, key, data, next); + }, + (err, results) => { + assert.strictEqual(err, null, 'Expected success ' + `putting object, got error ${err}`); + cb(null, results); + } + ); }; utils.putVersionsToAws = (s3, bucket, key, versions, cb) => { @@ -255,38 +250,38 @@ utils.putNullVersionsToAws = (s3, bucket, key, versions, cb) => { }; utils.getAndAssertResult = (s3, params, cb) => { - const { bucket, key, body, versionId, expectedVersionId, expectedTagCount, - expectedError } 
= params; - s3.getObject({ Bucket: bucket, Key: key, VersionId: versionId }, - (err, data) => { - _assertErrorResult(err, expectedError, 'putting tags'); - if (expectedError) { - return cb(); - } - assert.strictEqual(err, null, 'Expected success ' + - `getting object, got error ${err}`); - if (body) { - assert(data.Body, 'expected object body in response'); - assert.equal(data.Body.length, data.ContentLength, - `received data of length ${data.Body.length} does not ` + - 'equal expected based on ' + - `content length header of ${data.ContentLength}`); - const expectedMD5 = utils.expectedETag(body, false); - const resultMD5 = utils.expectedETag(data.Body, false); - assert.strictEqual(resultMD5, expectedMD5); - } - if (!expectedVersionId) { - assert.strictEqual(data.VersionId, undefined); - } else { - assert.strictEqual(data.VersionId, expectedVersionId); - } - if (expectedTagCount && expectedTagCount === '0') { - assert.strictEqual(data.TagCount, undefined); - } else if (expectedTagCount) { - assert.strictEqual(data.TagCount, parseInt(expectedTagCount, 10)); - } + const { bucket, key, body, versionId, expectedVersionId, expectedTagCount, expectedError } = params; + s3.getObject({ Bucket: bucket, Key: key, VersionId: versionId }, (err, data) => { + _assertErrorResult(err, expectedError, 'putting tags'); + if (expectedError) { return cb(); - }); + } + assert.strictEqual(err, null, 'Expected success ' + `getting object, got error ${err}`); + if (body) { + assert(data.Body, 'expected object body in response'); + assert.equal( + data.Body.length, + data.ContentLength, + `received data of length ${data.Body.length} does not ` + + 'equal expected based on ' + + `content length header of ${data.ContentLength}` + ); + const expectedMD5 = utils.expectedETag(body, false); + const resultMD5 = utils.expectedETag(data.Body, false); + assert.strictEqual(resultMD5, expectedMD5); + } + if (!expectedVersionId) { + assert.strictEqual(data.VersionId, undefined); + } else { + assert.strictEqual(data.VersionId, expectedVersionId); + } + if (expectedTagCount && expectedTagCount === '0') { + assert.strictEqual(data.TagCount, undefined); + } else if (expectedTagCount) { + assert.strictEqual(data.TagCount, parseInt(expectedTagCount, 10)); + } + return cb(); + }); }; utils.getAwsRetry = (params, retryNumber, assertCb) => { @@ -299,27 +294,23 @@ utils.getAwsRetry = (params, retryNumber, assertCb) => { const maxRetries = 2; const getObject = awsS3.getObject.bind(awsS3); const timeout = retryTimeout[retryNumber]; - return setTimeout(getObject, timeout, { Bucket: awsBucket, Key: key, - VersionId: versionId }, - (err, res) => { - try { - // note: this will only catch exceptions thrown before an - // asynchronous call - return assertCb(err, res); - } catch (e) { - if (retryNumber !== maxRetries) { - return utils.getAwsRetry(params, retryNumber + 1, - assertCb); - } - throw e; + return setTimeout(getObject, timeout, { Bucket: awsBucket, Key: key, VersionId: versionId }, (err, res) => { + try { + // note: this will only catch exceptions thrown before an + // asynchronous call + return assertCb(err, res); + } catch (e) { + if (retryNumber !== maxRetries) { + return utils.getAwsRetry(params, retryNumber + 1, assertCb); } - }); + throw e; + } + }); }; utils.awsGetLatestVerId = (key, body, cb) => utils.getAwsRetry({ key }, 0, (err, result) => { - assert.strictEqual(err, null, 'Expected success ' + - `getting object from AWS, got error ${err}`); + assert.strictEqual(err, null, 'Expected success ' + `getting object from AWS, got 
error ${err}`); const resultMD5 = utils.expectedETag(result.Body, false); const expectedMD5 = utils.expectedETag(body, false); assert.strictEqual(resultMD5, expectedMD5, 'expected different body'); @@ -341,56 +332,54 @@ function _getTaggingConfig(tags) { } utils.tagging.putTaggingAndAssert = (s3, params, cb) => { - const { bucket, key, tags, versionId, expectedVersionId, - expectedError } = params; + const { bucket, key, tags, versionId, expectedVersionId, expectedError } = params; const taggingConfig = _getTaggingConfig(tags); - return s3.putObjectTagging({ Bucket: bucket, Key: key, VersionId: versionId, - Tagging: taggingConfig }, (err, data) => { - _assertErrorResult(err, expectedError, 'putting tags'); - if (expectedError) { - return cb(); - } - assert.strictEqual(err, null, `got error for putting tags: ${err}`); - if (expectedVersionId) { - assert.strictEqual(data.VersionId, expectedVersionId); - } else { - assert.strictEqual(data.VersionId, undefined); - } - return cb(null, data.VersionId); - }); -}; - -utils.tagging.getTaggingAndAssert = (s3, params, cb) => { - const { bucket, key, expectedTags, versionId, expectedVersionId, - expectedError, getObject } = params; - s3.getObjectTagging({ Bucket: bucket, Key: key, VersionId: versionId }, + return s3.putObjectTagging( + { Bucket: bucket, Key: key, VersionId: versionId, Tagging: taggingConfig }, (err, data) => { _assertErrorResult(err, expectedError, 'putting tags'); if (expectedError) { return cb(); } - const expectedTagResult = _getTaggingConfig(expectedTags); - const expectedTagCount = `${Object.keys(expectedTags).length}`; assert.strictEqual(err, null, `got error for putting tags: ${err}`); if (expectedVersionId) { assert.strictEqual(data.VersionId, expectedVersionId); } else { assert.strictEqual(data.VersionId, undefined); } - assert.deepStrictEqual(data.TagSet, expectedTagResult.TagSet); - if (getObject === false) { - return process.nextTick(cb, null, data.VersionId); - } - return utils.getAndAssertResult(s3, { bucket, key, versionId, - expectedVersionId, expectedTagCount }, - () => cb(null, data.VersionId)); - }); + return cb(null, data.VersionId); + } + ); +}; + +utils.tagging.getTaggingAndAssert = (s3, params, cb) => { + const { bucket, key, expectedTags, versionId, expectedVersionId, expectedError, getObject } = params; + s3.getObjectTagging({ Bucket: bucket, Key: key, VersionId: versionId }, (err, data) => { + _assertErrorResult(err, expectedError, 'putting tags'); + if (expectedError) { + return cb(); + } + const expectedTagResult = _getTaggingConfig(expectedTags); + const expectedTagCount = `${Object.keys(expectedTags).length}`; + assert.strictEqual(err, null, `got error for putting tags: ${err}`); + if (expectedVersionId) { + assert.strictEqual(data.VersionId, expectedVersionId); + } else { + assert.strictEqual(data.VersionId, undefined); + } + assert.deepStrictEqual(data.TagSet, expectedTagResult.TagSet); + if (getObject === false) { + return process.nextTick(cb, null, data.VersionId); + } + return utils.getAndAssertResult(s3, { bucket, key, versionId, expectedVersionId, expectedTagCount }, () => + cb(null, data.VersionId) + ); + }); }; utils.tagging.delTaggingAndAssert = (s3, params, cb) => { const { bucket, key, versionId, expectedVersionId, expectedError } = params; - return s3.deleteObjectTagging({ Bucket: bucket, Key: key, - VersionId: versionId }, (err, data) => { + return s3.deleteObjectTagging({ Bucket: bucket, Key: key, VersionId: versionId }, (err, data) => { _assertErrorResult(err, expectedError, 'putting 
tags'); if (expectedError) { return cb(); @@ -401,18 +390,19 @@ utils.tagging.delTaggingAndAssert = (s3, params, cb) => { } else { assert.strictEqual(data.VersionId, undefined); } - return utils.tagging.getTaggingAndAssert(s3, { bucket, key, versionId, - expectedVersionId, expectedTags: {} }, () => cb()); + return utils.tagging.getTaggingAndAssert( + s3, + { bucket, key, versionId, expectedVersionId, expectedTags: {} }, + () => cb() + ); }); }; utils.tagging.awsGetAssertTags = (params, cb) => { const { key, versionId, expectedTags } = params; const expectedTagResult = _getTaggingConfig(expectedTags); - awsS3.getObjectTagging({ Bucket: awsBucket, Key: key, - VersionId: versionId }, (err, data) => { - assert.strictEqual(err, null, 'got unexpected error getting ' + - `tags directly from AWS: ${err}`); + awsS3.getObjectTagging({ Bucket: awsBucket, Key: key, VersionId: versionId }, (err, data) => { + assert.strictEqual(err, null, 'got unexpected error getting ' + `tags directly from AWS: ${err}`); assert.deepStrictEqual(data.TagSet, expectedTagResult.TagSet); return cb(); }); diff --git a/tests/functional/aws-node-sdk/test/object/100-continue.js b/tests/functional/aws-node-sdk/test/object/100-continue.js index db76fb7f6b..ecdcdd309d 100644 --- a/tests/functional/aws-node-sdk/test/object/100-continue.js +++ b/tests/functional/aws-node-sdk/test/object/100-continue.js @@ -41,7 +41,7 @@ class ContinueRequestHandler { method: 'PUT', headers: { 'content-length': body.length, - 'Expect': this.expectHeader, + Expect: this.expectHeader, }, }; } @@ -98,8 +98,7 @@ class ContinueRequestHandler { req.flushHeaders(); // At this point we have only sent the header. const headerLen = req._header.length; - req.on('continue', () => - cb('Continue beeing seen when 403 is expected')); + req.on('continue', () => cb('Continue beeing seen when 403 is expected')); req.on('response', res => { res.on('data', () => {}); res.on('end', () => { @@ -132,30 +131,22 @@ describeSkipIfE2E('PUT public object with 100-continue header', () => { return s3.createBucket({ Bucket: bucket }).promise(); }); - afterEach(() => - bucketUtil.empty(bucket) - .then(() => bucketUtil.deleteOne(bucket))); + afterEach(() => bucketUtil.empty(bucket).then(() => bucketUtil.deleteOne(bucket))); - it('should return 200 status code', done => - continueRequest.hasStatusCode(200, done)); + it('should return 200 status code', done => continueRequest.hasStatusCode(200, done)); it('should return 200 status code with upper case value', done => - continueRequest.setExpectHeader('100-CONTINUE') - .hasStatusCode(200, done)); + continueRequest.setExpectHeader('100-CONTINUE').hasStatusCode(200, done)); it('should return 200 status code if incorrect value', done => - continueRequest.setExpectHeader('101-continue') - .hasStatusCode(200, done)); + continueRequest.setExpectHeader('101-continue').hasStatusCode(200, done)); it('should return 403 status code if cannot authenticate', done => - continueRequest.setRequestPath(invalidSignedURL) - .hasStatusCode(403, done)); + continueRequest.setRequestPath(invalidSignedURL).hasStatusCode(403, done)); - it('should wait for continue event before sending body', done => - continueRequest.sendsBodyOnContinue(done)); + it('should wait for continue event before sending body', done => continueRequest.sendsBodyOnContinue(done)); it('should not send continue if denied for a public user', done => - continueRequest.setRequestPath(invalidSignedURL) - .shouldNotGetContinue(done)); + 
continueRequest.setRequestPath(invalidSignedURL).shouldNotGetContinue(done)); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/abortMPU.js b/tests/functional/aws-node-sdk/test/object/abortMPU.js index 787d9d1304..acf8dd5a20 100644 --- a/tests/functional/aws-node-sdk/test/object/abortMPU.js +++ b/tests/functional/aws-node-sdk/test/object/abortMPU.js @@ -24,43 +24,61 @@ describe('Abort MPU', () => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => s3.createMultipartUpload({ - Bucket: bucket, Key: key }).promise()) - .then(res => { - uploadId = res.UploadId; - return s3.uploadPart({ Bucket: bucket, Key: key, - PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart, - }).promise(); - }) - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .then(() => + s3 + .createMultipartUpload({ + Bucket: bucket, + Key: key, + }) + .promise() + ) + .then(res => { + uploadId = res.UploadId; + return s3 + .uploadPart({ + Bucket: bucket, + Key: key, + PartNumber: 1, + UploadId: uploadId, + Body: bodyFirstPart, + }) + .promise(); + }) + .catch(err => { + process.stdout.write(`Error in beforeEach: ${err}\n`); + throw err; + }); }); afterEach(() => - s3.abortMultipartUpload({ - Bucket: bucket, - Key: key, - UploadId: uploadId, - }).promise() - .then(() => bucketUtil.empty(bucket)) - .then(() => bucketUtil.deleteOne(bucket)) + s3 + .abortMultipartUpload({ + Bucket: bucket, + Key: key, + UploadId: uploadId, + }) + .promise() + .then(() => bucketUtil.empty(bucket)) + .then(() => bucketUtil.deleteOne(bucket)) ); // aws-sdk now (v2.363.0) returns 'UriParameterError' error // this test was not replaced in any other suite - it.skip('should return InvalidRequest error if aborting without key', - done => { - s3.abortMultipartUpload({ - Bucket: bucket, - Key: '', - UploadId: uploadId }, - err => { - checkError(err, 'InvalidRequest', 'A key must be specified'); - done(); - }); + it.skip('should return InvalidRequest error if aborting without key', done => { + s3.abortMultipartUpload( + { + Bucket: bucket, + Key: '', + UploadId: uploadId, + }, + err => { + checkError(err, 'InvalidRequest', 'A key must be specified'); + done(); + } + ); }); }); }); @@ -86,20 +104,24 @@ describe('Abort MPU with existing object', function AbortMPUExistingObject() { afterEach(async () => { const data = await s3.listMultipartUploads({ Bucket: bucketName }).promise(); const uploads = data.Uploads; - await Promise.all(uploads.map(async upload => { - try { - await s3.abortMultipartUpload({ - Bucket: bucketName, - Key: upload.Key, - UploadId: upload.UploadId, - }).promise(); - } catch (err) { - if (err.code !== 'NoSuchUpload') { - throw err; + await Promise.all( + uploads.map(async upload => { + try { + await s3 + .abortMultipartUpload({ + Bucket: bucketName, + Key: upload.Key, + UploadId: upload.UploadId, + }) + .promise(); + } catch (err) { + if (err.code !== 'NoSuchUpload') { + throw err; + } + // If NoSuchUpload, swallow error } - // If NoSuchUpload, swallow error - } - })); + }) + ); await bucketUtil.empty(bucketName); await bucketUtil.deleteOne(bucketName); }); @@ -110,69 +132,81 @@ describe('Abort MPU with existing object', function AbortMPUExistingObject() { let uploadId1; let uploadId2; let etag1; - async.waterfall([ - next => { - s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => { - 
assert.ifError(err, `error creating MPU 1: ${err}`); - uploadId1 = data.UploadId; - s3.uploadPart({ - Bucket: bucketName, - Key: objectKey, - PartNumber: 1, - UploadId: uploadId1, - Body: part1, - }, (err, data) => { - assert.ifError(err, `error uploading part for MPU 1: ${err}`); - etag1 = data.ETag; - s3.completeMultipartUpload({ - Bucket: bucketName, - Key: objectKey, - UploadId: uploadId1, - MultipartUpload: { Parts: [{ ETag: etag1, PartNumber: 1 }] }, - }, err => { - assert.ifError(err, `error completing MPU 1: ${err}`); - next(); - }); + async.waterfall( + [ + next => { + s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => { + assert.ifError(err, `error creating MPU 1: ${err}`); + uploadId1 = data.UploadId; + s3.uploadPart( + { + Bucket: bucketName, + Key: objectKey, + PartNumber: 1, + UploadId: uploadId1, + Body: part1, + }, + (err, data) => { + assert.ifError(err, `error uploading part for MPU 1: ${err}`); + etag1 = data.ETag; + s3.completeMultipartUpload( + { + Bucket: bucketName, + Key: objectKey, + UploadId: uploadId1, + MultipartUpload: { Parts: [{ ETag: etag1, PartNumber: 1 }] }, + }, + err => { + assert.ifError(err, `error completing MPU 1: ${err}`); + next(); + } + ); + } + ); }); - }); - }, - next => { - s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => { - assert.ifError(err, `error getting object after MPU 1: ${err}`); - assert.strictEqual(data.Body.toString(), part1.toString()); - next(); - }); - }, - next => { - s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => { - assert.ifError(err, `error creating MPU 2: ${err}`); - uploadId2 = data.UploadId; - s3.uploadPart({ - Bucket: bucketName, - Key: objectKey, - PartNumber: 1, - UploadId: uploadId2, - Body: part2, - }, err => { - assert.ifError(err, `error uploading part for MPU 2: ${err}`); + }, + next => { + s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => { + assert.ifError(err, `error getting object after MPU 1: ${err}`); + assert.strictEqual(data.Body.toString(), part1.toString()); next(); }); - }); - }, - next => { - s3.abortMultipartUpload({ Bucket: bucketName, Key: objectKey, UploadId: uploadId2 }, err => { - assert.ifError(err, `error aborting MPU 2: ${err}`); - next(); - }); - }, - next => { - s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => { - assert.ifError(err, `error getting object after aborting MPU 2: ${err}`); - assert.strictEqual(data.Body.toString(), part1.toString()); - next(); - }); - }, - ], done); + }, + next => { + s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => { + assert.ifError(err, `error creating MPU 2: ${err}`); + uploadId2 = data.UploadId; + s3.uploadPart( + { + Bucket: bucketName, + Key: objectKey, + PartNumber: 1, + UploadId: uploadId2, + Body: part2, + }, + err => { + assert.ifError(err, `error uploading part for MPU 2: ${err}`); + next(); + } + ); + }); + }, + next => { + s3.abortMultipartUpload({ Bucket: bucketName, Key: objectKey, UploadId: uploadId2 }, err => { + assert.ifError(err, `error aborting MPU 2: ${err}`); + next(); + }); + }, + next => { + s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => { + assert.ifError(err, `error getting object after aborting MPU 2: ${err}`); + assert.strictEqual(data.Body.toString(), part1.toString()); + next(); + }); + }, + ], + done + ); }); it('should not delete existing object data when aborting an old MPU for same key', done => { @@ -181,80 +215,106 @@ describe('Abort MPU with existing 
object', function AbortMPUExistingObject() { let uploadId1; let uploadId2; let etag2; - async.waterfall([ - next => { - s3.createMultipartUpload({ - Bucket: bucketName, Key: objectKey, - }, (err, data) => { - assert.ifError(err, `error creating MPU 1: ${err}`); - uploadId1 = data.UploadId; - s3.uploadPart({ - Bucket: bucketName, - Key: objectKey, - PartNumber: 1, - UploadId: uploadId1, - Body: part1, - }, err => { - assert.ifError(err, `error uploading part for MPU 1: ${err}`); - next(); - }); - }); - }, - next => { - s3.createMultipartUpload({ - Bucket: bucketName, Key: objectKey, - }, (err, data) => { - assert.ifError(err, `error creating MPU 2: ${err}`); - uploadId2 = data.UploadId; - s3.uploadPart({ - Bucket: bucketName, - Key: objectKey, - PartNumber: 1, - UploadId: uploadId2, - Body: part2, - }, (err, data) => { - assert.ifError(err, `error uploading part for MPU 2: ${err}`); - etag2 = data.ETag; - s3.completeMultipartUpload({ + async.waterfall( + [ + next => { + s3.createMultipartUpload( + { Bucket: bucketName, Key: objectKey, - UploadId: uploadId2, - MultipartUpload: { Parts: [{ ETag: etag2, PartNumber: 1 }] }, - }, err => { - assert.ifError(err, `error completing MPU 2: ${err}`); + }, + (err, data) => { + assert.ifError(err, `error creating MPU 1: ${err}`); + uploadId1 = data.UploadId; + s3.uploadPart( + { + Bucket: bucketName, + Key: objectKey, + PartNumber: 1, + UploadId: uploadId1, + Body: part1, + }, + err => { + assert.ifError(err, `error uploading part for MPU 1: ${err}`); + next(); + } + ); + } + ); + }, + next => { + s3.createMultipartUpload( + { + Bucket: bucketName, + Key: objectKey, + }, + (err, data) => { + assert.ifError(err, `error creating MPU 2: ${err}`); + uploadId2 = data.UploadId; + s3.uploadPart( + { + Bucket: bucketName, + Key: objectKey, + PartNumber: 1, + UploadId: uploadId2, + Body: part2, + }, + (err, data) => { + assert.ifError(err, `error uploading part for MPU 2: ${err}`); + etag2 = data.ETag; + s3.completeMultipartUpload( + { + Bucket: bucketName, + Key: objectKey, + UploadId: uploadId2, + MultipartUpload: { Parts: [{ ETag: etag2, PartNumber: 1 }] }, + }, + err => { + assert.ifError(err, `error completing MPU 2: ${err}`); + next(); + } + ); + } + ); + } + ); + }, + next => { + s3.getObject( + { + Bucket: bucketName, + Key: objectKey, + }, + (err, data) => { + assert.ifError(err, `error getting object after MPU 2: ${err}`); + assert.strictEqual(data.Body.toString(), part2.toString()); next(); - }); + } + ); + }, + next => { + s3.abortMultipartUpload( + { + Bucket: bucketName, + Key: objectKey, + UploadId: uploadId1, + }, + err => { + assert.ifError(err, `error aborting MPU 1: ${err}`); + next(); + } + ); + }, + next => { + s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => { + assert.ifError(err, `error getting object after aborting MPU 1: ${err}`); + assert.strictEqual(data.Body.toString(), part2.toString()); + next(); }); - }); - }, - next => { - s3.getObject({ - Bucket: bucketName, - Key: objectKey, - }, (err, data) => { - assert.ifError(err, `error getting object after MPU 2: ${err}`); - assert.strictEqual(data.Body.toString(), part2.toString()); - next(); - }); - }, - next => { - s3.abortMultipartUpload({ - Bucket: bucketName, - Key: objectKey, - UploadId: uploadId1, - }, err => { - assert.ifError(err, `error aborting MPU 1: ${err}`); - next(); - }); - }, - next => { - s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => { - assert.ifError(err, `error getting object after aborting MPU 1: ${err}`); - 
assert.strictEqual(data.Body.toString(), part2.toString()); - next(); - }); - }, - ], done); + }, + ], + done + ); }); }); }); @@ -272,17 +332,19 @@ describe('Abort MPU - No Such Upload', () => { afterEach(() => bucketUtil.deleteOne(bucket)); - it('should return NoSuchUpload error when aborting non-existent mpu', - done => { - s3.abortMultipartUpload({ - Bucket: bucket, - Key: key, - UploadId: uuidv4().replace(/-/g, '') }, - err => { - assert.notEqual(err, null, 'Expected failure but got success'); - assert.strictEqual(err.code, 'NoSuchUpload'); - done(); - }); + it('should return NoSuchUpload error when aborting non-existent mpu', done => { + s3.abortMultipartUpload( + { + Bucket: bucket, + Key: key, + UploadId: uuidv4().replace(/-/g, ''), + }, + err => { + assert.notEqual(err, null, 'Expected failure but got success'); + assert.strictEqual(err.code, 'NoSuchUpload'); + done(); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/bigMpu.js b/tests/functional/aws-node-sdk/test/object/bigMpu.js index 1ce32529af..636408172e 100644 --- a/tests/functional/aws-node-sdk/test/object/bigMpu.js +++ b/tests/functional/aws-node-sdk/test/object/bigMpu.js @@ -10,9 +10,10 @@ const key = 'mpuKey'; const body = 'abc'; const partCount = 10000; const eTag = require('crypto').createHash('md5').update(body).digest('hex'); -const finalETag = require('crypto').createHash('md5') - .update(Buffer.from(eTag.repeat(partCount), 'hex').toString('binary'), - 'binary').digest('hex'); +const finalETag = require('crypto') + .createHash('md5') + .update(Buffer.from(eTag.repeat(partCount), 'hex').toString('binary'), 'binary') + .digest('hex'); function uploadPart(n, uploadId, s3, next) { const params = { @@ -66,63 +67,65 @@ describe('large mpu', function tester() { const itSkipIfAWS = process.env.AWS_ON_AIR ? 
it.skip : it; // will fail on AWS because parts too small - itSkipIfAWS('should intiate, put parts and complete mpu ' + - `with ${partCount} parts`, done => { + itSkipIfAWS('should intiate, put parts and complete mpu ' + `with ${partCount} parts`, done => { process.stdout.write('***Running large MPU test***\n'); let uploadId; - return waterfall([ - next => s3.createMultipartUpload({ Bucket: bucket, Key: key }, - (err, data) => { - if (err) { - return done(err); - } - process.stdout.write('initated mpu\n'); - uploadId = data.UploadId; - return next(); - }), - next => { - process.stdout.write('putting parts'); - return timesLimit(partCount, 20, (n, cb) => - uploadPart(n, uploadId, s3, cb), err => - next(err) + return waterfall( + [ + next => + s3.createMultipartUpload({ Bucket: bucket, Key: key }, (err, data) => { + if (err) { + return done(err); + } + process.stdout.write('initated mpu\n'); + uploadId = data.UploadId; + return next(); + }), + next => { + process.stdout.write('putting parts'); + return timesLimit( + partCount, + 20, + (n, cb) => uploadPart(n, uploadId, s3, cb), + err => next(err) ); - }, - next => { - const parts = []; - for (let i = 1; i <= partCount; i++) { - parts.push({ - ETag: eTag, - PartNumber: i, - }); - } - const params = { - Bucket: bucket, - Key: key, - UploadId: uploadId, - MultipartUpload: { - Parts: parts, - }, - }; - return s3.completeMultipartUpload(params, err => { - if (err) { - process.stdout.write('err complting mpu: ', err); - return next(err); + }, + next => { + const parts = []; + for (let i = 1; i <= partCount; i++) { + parts.push({ + ETag: eTag, + PartNumber: i, + }); } - return next(); - }); - }, - next => { - process.stdout.write('about to get object'); - return s3.getObject({ Bucket: bucket, Key: key }, - (err, data) => { + const params = { + Bucket: bucket, + Key: key, + UploadId: uploadId, + MultipartUpload: { + Parts: parts, + }, + }; + return s3.completeMultipartUpload(params, err => { + if (err) { + process.stdout.write('err complting mpu: ', err); + return next(err); + } + return next(); + }); + }, + next => { + process.stdout.write('about to get object'); + return s3.getObject({ Bucket: bucket, Key: key }, (err, data) => { if (err) { return next(err); } - assert.strictEqual(data.ETag, - `"${finalETag}-${partCount}"`); + assert.strictEqual(data.ETag, `"${finalETag}-${partCount}"`); return next(); }); - }, - ], done); + }, + ], + done + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/completeMPU.js b/tests/functional/aws-node-sdk/test/object/completeMPU.js index ee01269148..c490d5cceb 100644 --- a/tests/functional/aws-node-sdk/test/object/completeMPU.js +++ b/tests/functional/aws-node-sdk/test/object/completeMPU.js @@ -2,11 +2,7 @@ const assert = require('assert'); const async = require('async'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); -const { - removeAllVersions, - versioningEnabled, - versioningSuspended, -} = require('../../lib/utility/versioning-util.js'); +const { removeAllVersions, versioningEnabled, versioningSuspended } = require('../../lib/utility/versioning-util.js'); const { taggingTests } = require('../../lib/utility/tagging'); const date = Date.now(); @@ -14,68 +10,77 @@ const bucket = `completempu${date}`; const key = 'key'; function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } - describe('Complete MPU', 
() => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); const s3 = bucketUtil.s3; function _completeMpuAndCheckVid(uploadId, eTag, expectedVid, cb) { - s3.completeMultipartUpload({ - Bucket: bucket, - Key: key, - MultipartUpload: { - Parts: [{ ETag: eTag, PartNumber: 1 }], - }, - UploadId: uploadId }, - (err, data) => { - checkNoError(err); - const versionId = data.VersionId; - if (expectedVid) { - assert.notEqual(versionId, undefined); - } else { - assert.strictEqual(versionId, expectedVid); - } - return s3.getObject({ + s3.completeMultipartUpload( + { Bucket: bucket, Key: key, + MultipartUpload: { + Parts: [{ ETag: eTag, PartNumber: 1 }], + }, + UploadId: uploadId, }, (err, data) => { checkNoError(err); - if (versionId) { - assert.strictEqual(data.VersionId, versionId); + const versionId = data.VersionId; + if (expectedVid) { + assert.notEqual(versionId, undefined); + } else { + assert.strictEqual(versionId, expectedVid); } - cb(); - }); - }); + return s3.getObject( + { + Bucket: bucket, + Key: key, + }, + (err, data) => { + checkNoError(err); + if (versionId) { + assert.strictEqual(data.VersionId, versionId); + } + cb(); + } + ); + } + ); } function _initiateMpuAndPutOnePart() { const result = {}; - return s3.createMultipartUpload({ - Bucket: bucket, Key: key }).promise() - .then(data => { - result.uploadId = data.UploadId; - return s3.uploadPart({ + return s3 + .createMultipartUpload({ Bucket: bucket, Key: key, - PartNumber: 1, - UploadId: data.UploadId, - Body: 'foo', - }).promise(); - }) - .then(data => { - result.eTag = data.ETag; - return result; - }) - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }); + }) + .promise() + .then(data => { + result.uploadId = data.UploadId; + return s3 + .uploadPart({ + Bucket: bucket, + Key: key, + PartNumber: 1, + UploadId: data.UploadId, + Body: 'foo', + }) + .promise(); + }) + .then(data => { + result.eTag = data.ETag; + return result; + }) + .catch(err => { + process.stdout.write(`Error in beforeEach: ${err}\n`); + throw err; + }); } beforeEach(done => { @@ -95,55 +100,66 @@ describe('Complete MPU', () => { let uploadId; let eTag; - beforeEach(() => _initiateMpuAndPutOnePart() - .then(result => { + beforeEach(() => + _initiateMpuAndPutOnePart().then(result => { uploadId = result.uploadId; eTag = result.eTag; }) ); - it('should complete an MPU with fewer parts than were ' + - 'originally put without returning a version id', done => { - _completeMpuAndCheckVid(uploadId, eTag, undefined, done); - }); + it( + 'should complete an MPU with fewer parts than were ' + 'originally put without returning a version id', + done => { + _completeMpuAndCheckVid(uploadId, eTag, undefined, done); + } + ); }); describe('on bucket with enabled versioning', () => { let uploadId; let eTag; - beforeEach(() => s3.putBucketVersioning({ Bucket: bucket, - VersioningConfiguration: versioningEnabled }).promise() - .then(() => _initiateMpuAndPutOnePart()) - .then(result => { - uploadId = result.uploadId; - eTag = result.eTag; - }) + beforeEach(() => + s3 + .putBucketVersioning({ Bucket: bucket, VersioningConfiguration: versioningEnabled }) + .promise() + .then(() => _initiateMpuAndPutOnePart()) + .then(result => { + uploadId = result.uploadId; + eTag = result.eTag; + }) ); - it('should complete an MPU with fewer parts than were ' + - 'originally put and return a version id', done => { - _completeMpuAndCheckVid(uploadId, eTag, true, done); - }); + it( + 'should complete an MPU with fewer parts than were ' + 
'originally put and return a version id', + done => { + _completeMpuAndCheckVid(uploadId, eTag, true, done); + } + ); }); describe('on bucket with suspended versioning', () => { let uploadId; let eTag; - beforeEach(() => s3.putBucketVersioning({ Bucket: bucket, - VersioningConfiguration: versioningSuspended }).promise() - .then(() => _initiateMpuAndPutOnePart()) - .then(result => { - uploadId = result.uploadId; - eTag = result.eTag; - }) + beforeEach(() => + s3 + .putBucketVersioning({ Bucket: bucket, VersioningConfiguration: versioningSuspended }) + .promise() + .then(() => _initiateMpuAndPutOnePart()) + .then(result => { + uploadId = result.uploadId; + eTag = result.eTag; + }) ); - it('should complete an MPU with fewer parts than were ' + - 'originally put and should not return a version id', done => { - _completeMpuAndCheckVid(uploadId, eTag, undefined, done); - }); + it( + 'should complete an MPU with fewer parts than were ' + + 'originally put and should not return a version id', + done => { + _completeMpuAndCheckVid(uploadId, eTag, undefined, done); + } + ); }); describe('with tags set on initiation', () => { @@ -151,62 +167,82 @@ describe('Complete MPU', () => { taggingTests.forEach(test => { it(test.it, done => { - const [key, value] = - [test.tag.key, test.tag.value].map(encodeURIComponent); + const [key, value] = [test.tag.key, test.tag.value].map(encodeURIComponent); const tagging = `${key}=${value}`; - async.waterfall([ - next => s3.createMultipartUpload({ - Bucket: bucket, - Key: tagKey, - Tagging: tagging, - }, (err, data) => { - if (test.error) { - assert.strictEqual(err.code, test.error); - assert.strictEqual(err.statusCode, 400); - return next('expected'); - } - return next(err, data.UploadId); - }), - (uploadId, next) => s3.uploadPart({ - Bucket: bucket, - Key: tagKey, - PartNumber: 1, - UploadId: uploadId, - Body: 'foo', - }, (err, data) => { - next(err, data.ETag, uploadId); - }), - (eTag, uploadId, next) => s3.completeMultipartUpload({ - Bucket: bucket, - Key: tagKey, - UploadId: uploadId, - MultipartUpload: { - Parts: [{ - ETag: eTag, - PartNumber: 1, - }], - }, - }, next), - ], err => { - if (err === 'expected') { - done(); - } else { - assert.ifError(err); - s3.getObjectTagging({ - Bucket: bucket, - Key: tagKey, - }, (err, tagData) => { - assert.ifError(err); - assert.deepStrictEqual(tagData.TagSet, - [{ - Key: test.tag.key, - Value: test.tag.value, - }]); + async.waterfall( + [ + next => + s3.createMultipartUpload( + { + Bucket: bucket, + Key: tagKey, + Tagging: tagging, + }, + (err, data) => { + if (test.error) { + assert.strictEqual(err.code, test.error); + assert.strictEqual(err.statusCode, 400); + return next('expected'); + } + return next(err, data.UploadId); + } + ), + (uploadId, next) => + s3.uploadPart( + { + Bucket: bucket, + Key: tagKey, + PartNumber: 1, + UploadId: uploadId, + Body: 'foo', + }, + (err, data) => { + next(err, data.ETag, uploadId); + } + ), + (eTag, uploadId, next) => + s3.completeMultipartUpload( + { + Bucket: bucket, + Key: tagKey, + UploadId: uploadId, + MultipartUpload: { + Parts: [ + { + ETag: eTag, + PartNumber: 1, + }, + ], + }, + }, + next + ), + ], + err => { + if (err === 'expected') { done(); - }); + } else { + assert.ifError(err); + s3.getObjectTagging( + { + Bucket: bucket, + Key: tagKey, + }, + (err, tagData) => { + assert.ifError(err); + assert.deepStrictEqual(tagData.TagSet, [ + { + Key: test.tag.key, + Value: test.tag.value, + }, + ]); + done(); + } + ); + } } - }); + ); }); }); }); @@ -215,33 +251,39 @@ describe('Complete 
MPU', () => { let uploadId; let eTag; - beforeEach(() => _initiateMpuAndPutOnePart() - .then(result => { + beforeEach(() => + _initiateMpuAndPutOnePart().then(result => { uploadId = result.uploadId; eTag = result.eTag; }) ); it('should complete the MPU successfully and leave a readable object', done => { - async.parallel([ - doneReUpload => s3.uploadPart({ - Bucket: bucket, - Key: key, - PartNumber: 1, - UploadId: uploadId, - Body: 'foo', - }, err => { - // in case the CompleteMPU finished earlier, - // we may get a NoSuchKey error, so just - // ignore it - if (err && err.code === 'NoSuchKey') { - return doneReUpload(); - } - return doneReUpload(err); - }), - doneComplete => _completeMpuAndCheckVid( - uploadId, eTag, undefined, doneComplete), - ], done); + async.parallel( + [ + doneReUpload => + s3.uploadPart( + { + Bucket: bucket, + Key: key, + PartNumber: 1, + UploadId: uploadId, + Body: 'foo', + }, + err => { + // in case the CompleteMPU finished earlier, + // we may get a NoSuchKey error, so just + // ignore it + if (err && err.code === 'NoSuchKey') { + return doneReUpload(); + } + return doneReUpload(err); + } + ), + doneComplete => _completeMpuAndCheckVid(uploadId, eTag, undefined, doneComplete), + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/compluteMpu.js b/tests/functional/aws-node-sdk/test/object/compluteMpu.js index 5bd74287ca..0118872caf 100644 --- a/tests/functional/aws-node-sdk/test/object/compluteMpu.js +++ b/tests/functional/aws-node-sdk/test/object/compluteMpu.js @@ -43,8 +43,7 @@ describe('aws-node-sdk test bucket complete mpu', () => { s3.completeMultipartUpload(params, error => { if (error) { assert.strictEqual(error.statusCode, 400); - assert.strictEqual( - error.code, 'InvalidRequest'); + assert.strictEqual(error.code, 'InvalidRequest'); done(); } else { done('accepted xml body larger than 1 MB'); diff --git a/tests/functional/aws-node-sdk/test/object/copyPart.js b/tests/functional/aws-node-sdk/test/object/copyPart.js index 126c02128b..84470a459b 100644 --- a/tests/functional/aws-node-sdk/test/object/copyPart.js +++ b/tests/functional/aws-node-sdk/test/object/copyPart.js @@ -2,11 +2,9 @@ const { promisify } = require('util'); const assert = require('assert'); const crypto = require('crypto'); - const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); -const { createEncryptedBucketPromise } = - require('../../lib/utility/createEncryptedBucket'); +const { createEncryptedBucketPromise } = require('../../lib/utility/createEncryptedBucket'); const { fakeMetadataTransition, fakeMetadataArchive } = require('../utils/init'); const sourceBucketName = 'supersourcebucket81033016532'; @@ -22,8 +20,7 @@ const otherAccountS3 = otherAccountBucketUtility.s3; const oneHundredMBPlus11 = 110100481; function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } function checkError(err, code) { @@ -45,287 +42,361 @@ describe('Object Part Copy', () => { if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { s3.createBucketPromise = createEncryptedBucketPromise; } - return s3.createBucketPromise({ Bucket: sourceBucketName }) - .catch(err => { - process.stdout.write(`Error creating source bucket: ${err}\n`); - throw err; - }).then(() => - s3.createBucketPromise({ Bucket: destBucketName }) - ).catch(err => { - process.stdout.write(`Error creating dest bucket: ${err}\n`); - throw 
err; - }) - .then(() => - s3.putObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - Body: content, - }).promise()) - .then(res => { - etag = res.ETag; - return s3.headObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - }).promise(); - }).then(() => - s3.createMultipartUpload({ - Bucket: destBucketName, - Key: destObjName, - }).promise()).then(iniateRes => { - uploadId = iniateRes.UploadId; - }).catch(err => { - process.stdout.write(`Error in outer beforeEach: ${err}\n`); - throw err; - }); - }); - - afterEach(() => bucketUtil.empty(sourceBucketName) - .then(() => bucketUtil.empty(destBucketName)) - .then(() => s3.abortMultipartUpload({ - Bucket: destBucketName, - Key: destObjName, - UploadId: uploadId, - }).promise()) - .catch(err => { - if (err.code !== 'NoSuchUpload') { - process.stdout.write(`Error in afterEach: ${err}\n`); + return s3 + .createBucketPromise({ Bucket: sourceBucketName }) + .catch(err => { + process.stdout.write(`Error creating source bucket: ${err}\n`); throw err; - } - }) - .then(() => bucketUtil.deleteMany([sourceBucketName, - destBucketName])) - ); + }) + .then(() => s3.createBucketPromise({ Bucket: destBucketName })) + .catch(err => { + process.stdout.write(`Error creating dest bucket: ${err}\n`); + throw err; + }) + .then(() => + s3 + .putObject({ + Bucket: sourceBucketName, + Key: sourceObjName, + Body: content, + }) + .promise() + ) + .then(res => { + etag = res.ETag; + return s3 + .headObject({ + Bucket: sourceBucketName, + Key: sourceObjName, + }) + .promise(); + }) + .then(() => + s3 + .createMultipartUpload({ + Bucket: destBucketName, + Key: destObjName, + }) + .promise() + ) + .then(iniateRes => { + uploadId = iniateRes.UploadId; + }) + .catch(err => { + process.stdout.write(`Error in outer beforeEach: ${err}\n`); + throw err; + }); + }); + afterEach(() => + bucketUtil + .empty(sourceBucketName) + .then(() => bucketUtil.empty(destBucketName)) + .then(() => + s3 + .abortMultipartUpload({ + Bucket: destBucketName, + Key: destObjName, + UploadId: uploadId, + }) + .promise() + ) + .catch(err => { + if (err.code !== 'NoSuchUpload') { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + } + }) + .then(() => bucketUtil.deleteMany([sourceBucketName, destBucketName])) + ); - it('should copy a part from a source bucket to a different ' + - 'destination bucket', done => { - s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - }, + it('should copy a part from a source bucket to a different ' + 'destination bucket', done => { + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, + }, (err, res) => { checkNoError(err); assert.strictEqual(res.ETag, etag); assert(res.LastModified); done(); - }); + } + ); }); - it('should copy a part from a source bucket to a different ' + - 'destination bucket and complete the MPU', done => { - s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - }, - (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, etag); - assert(res.LastModified); - s3.completeMultipartUpload({ + it( + 'should copy a part from a source bucket to a different ' + 'destination bucket and complete the MPU', + done => { + s3.uploadPartCopy( + { Bucket: destBucketName, Key: destObjName, + CopySource: 
`${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, UploadId: uploadId, - MultipartUpload: { - Parts: [ - { ETag: etag, PartNumber: 1 }, - ], - }, - }, (err, res) => { + }, + (err, res) => { checkNoError(err); - assert.strictEqual(res.Bucket, destBucketName); - assert.strictEqual(res.Key, destObjName); - // AWS confirmed final ETag for MPU - assert.strictEqual(res.ETag, - '"db77ebbae9e9f5a244a26b86193ad818-1"'); - done(); - }); - }); - }); + assert.strictEqual(res.ETag, etag); + assert(res.LastModified); + s3.completeMultipartUpload( + { + Bucket: destBucketName, + Key: destObjName, + UploadId: uploadId, + MultipartUpload: { + Parts: [{ ETag: etag, PartNumber: 1 }], + }, + }, + (err, res) => { + checkNoError(err); + assert.strictEqual(res.Bucket, destBucketName); + assert.strictEqual(res.Key, destObjName); + // AWS confirmed final ETag for MPU + assert.strictEqual(res.ETag, '"db77ebbae9e9f5a244a26b86193ad818-1"'); + done(); + } + ); + } + ); + } + ); it('should return InvalidArgument error given invalid range', done => { - s3.putObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - Body: Buffer.alloc(oneHundredMBPlus11, 'packing'), - }, err => { - checkNoError(err); - s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - CopySourceRange: 'bad-range-parameter', + s3.putObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + Body: Buffer.alloc(oneHundredMBPlus11, 'packing'), }, err => { - checkError(err, 'InvalidArgument'); - done(); - }); - }); + checkNoError(err); + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, + CopySourceRange: 'bad-range-parameter', + }, + err => { + checkError(err, 'InvalidArgument'); + done(); + } + ); + } + ); }); - it('should return EntityTooLarge error if attempt to copy ' + - 'object larger than max and do not specify smaller ' + - 'range in request', done => { - s3.putObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - Body: Buffer.alloc(oneHundredMBPlus11, 'packing'), - }, err => { - checkNoError(err); - s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - }, + it( + 'should return EntityTooLarge error if attempt to copy ' + + 'object larger than max and do not specify smaller ' + + 'range in request', + done => { + s3.putObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + Body: Buffer.alloc(oneHundredMBPlus11, 'packing'), + }, err => { - checkError(err, 'EntityTooLarge'); - done(); - }); - }); - }); + checkNoError(err); + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, + }, + err => { + checkError(err, 'EntityTooLarge'); + done(); + } + ); + } + ); + } + ); - it('should return EntityTooLarge error if attempt to copy ' + - 'object larger than max and specify too large ' + - 'range in request', done => { - s3.putObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - Body: Buffer.alloc(oneHundredMBPlus11, 'packing'), - }, err => { - checkNoError(err); - s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - CopySourceRange: `bytes=0-${oneHundredMBPlus11}`, - }, + it( + 'should return 
EntityTooLarge error if attempt to copy ' + + 'object larger than max and specify too large ' + + 'range in request', + done => { + s3.putObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + Body: Buffer.alloc(oneHundredMBPlus11, 'packing'), + }, err => { - checkError(err, 'EntityTooLarge'); - done(); - }); - }); - }); + checkNoError(err); + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, + CopySourceRange: `bytes=0-${oneHundredMBPlus11}`, + }, + err => { + checkError(err, 'EntityTooLarge'); + done(); + } + ); + } + ); + } + ); - it('should succeed if attempt to copy ' + - 'object larger than max but specify acceptable ' + - 'range in request', done => { - s3.putObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - Body: Buffer.alloc(oneHundredMBPlus11, 'packing'), - }, err => { - checkNoError(err); - s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - CopySourceRange: 'bytes=0-100', - }, + it( + 'should succeed if attempt to copy ' + + 'object larger than max but specify acceptable ' + + 'range in request', + done => { + s3.putObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + Body: Buffer.alloc(oneHundredMBPlus11, 'packing'), + }, err => { checkNoError(err); - done(); - }); - }); - }); + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, + CopySourceRange: 'bytes=0-100', + }, + err => { + checkNoError(err); + done(); + } + ); + } + ); + } + ); - it('should copy a 0 byte object part from a source bucket to a ' + - 'different destination bucket and complete the MPU', done => { - const emptyFileETag = '"d41d8cd98f00b204e9800998ecf8427e"'; - s3.putObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - Body: '', - }, () => { - s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - }, - (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, emptyFileETag); - assert(res.LastModified); - s3.completeMultipartUpload({ - Bucket: destBucketName, - Key: destObjName, - UploadId: uploadId, - MultipartUpload: { - Parts: [ - { ETag: emptyFileETag, PartNumber: 1 }, - ], + it( + 'should copy a 0 byte object part from a source bucket to a ' + + 'different destination bucket and complete the MPU', + done => { + const emptyFileETag = '"d41d8cd98f00b204e9800998ecf8427e"'; + s3.putObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + Body: '', + }, + () => { + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, }, - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.Bucket, destBucketName); - assert.strictEqual(res.Key, destObjName); - // AWS confirmed final ETag for MPU - assert.strictEqual(res.ETag, - '"59adb24ef3cdbe0297f05b395827453f-1"'); - done(); - }); - }); - }); - }); + (err, res) => { + checkNoError(err); + assert.strictEqual(res.ETag, emptyFileETag); + assert(res.LastModified); + s3.completeMultipartUpload( + { + Bucket: destBucketName, + Key: destObjName, + UploadId: uploadId, + MultipartUpload: { + Parts: [{ ETag: emptyFileETag, PartNumber: 1 }], + }, + }, + (err, res) => { + 
checkNoError(err); + assert.strictEqual(res.Bucket, destBucketName); + assert.strictEqual(res.Key, destObjName); + // AWS confirmed final ETag for MPU + assert.strictEqual(res.ETag, '"59adb24ef3cdbe0297f05b395827453f-1"'); + done(); + } + ); + } + ); + } + ); + } + ); - it('should copy a part using a range header from a source bucket ' + - 'to a different destination bucket and complete the MPU', done => { - const rangeETag = '"ac1be00f1f162e20d58099eec2ea1c70"'; - // AWS confirmed final ETag for MPU - const finalMpuETag = '"bff2a6af3adfd8e107a06de01d487176-1"'; - s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - CopySourceRange: 'bytes=0-3', - UploadId: uploadId, - }, - (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, rangeETag); - assert(res.LastModified); - s3.completeMultipartUpload({ + it( + 'should copy a part using a range header from a source bucket ' + + 'to a different destination bucket and complete the MPU', + done => { + const rangeETag = '"ac1be00f1f162e20d58099eec2ea1c70"'; + // AWS confirmed final ETag for MPU + const finalMpuETag = '"bff2a6af3adfd8e107a06de01d487176-1"'; + s3.uploadPartCopy( + { Bucket: destBucketName, Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + CopySourceRange: 'bytes=0-3', UploadId: uploadId, - MultipartUpload: { - Parts: [ - { ETag: rangeETag, PartNumber: 1 }, - ], - }, - }, (err, res) => { + }, + (err, res) => { checkNoError(err); - assert.strictEqual(res.Bucket, destBucketName); - assert.strictEqual(res.Key, destObjName); - assert.strictEqual(res.ETag, finalMpuETag); - s3.getObject({ - Bucket: destBucketName, - Key: destObjName, - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, finalMpuETag); - assert.strictEqual(res.ContentLength, 4); - assert.strictEqual(res.Body.toString(), 'I am'); - done(); - }); - }); - }); - }); + assert.strictEqual(res.ETag, rangeETag); + assert(res.LastModified); + s3.completeMultipartUpload( + { + Bucket: destBucketName, + Key: destObjName, + UploadId: uploadId, + MultipartUpload: { + Parts: [{ ETag: rangeETag, PartNumber: 1 }], + }, + }, + (err, res) => { + checkNoError(err); + assert.strictEqual(res.Bucket, destBucketName); + assert.strictEqual(res.Key, destObjName); + assert.strictEqual(res.ETag, finalMpuETag); + s3.getObject( + { + Bucket: destBucketName, + Key: destObjName, + }, + (err, res) => { + checkNoError(err); + assert.strictEqual(res.ETag, finalMpuETag); + assert.strictEqual(res.ContentLength, 4); + assert.strictEqual(res.Body.toString(), 'I am'); + done(); + } + ); + } + ); + } + ); + } + ); describe('When copy source was put by MPU', () => { let sourceMpuId; const sourceMpuKey = 'sourceMpuKey'; // total hash for sourceMpuKey when MPU completed // (confirmed with AWS) - const totalMpuObjectHash = - '"9b0de95bd76728c778b9e25fd7ce2ef7"'; + const totalMpuObjectHash = '"9b0de95bd76728c778b9e25fd7ce2ef7"'; beforeEach(() => { const parts = []; @@ -337,318 +408,407 @@ describe('Object Part Copy', () => { const otherPartBuff = Buffer.alloc(5242880, 1); otherMd5HashPart.update(otherPartBuff); const otherPartHash = otherMd5HashPart.digest('hex'); - return s3.createMultipartUpload({ - Bucket: sourceBucketName, - Key: sourceMpuKey, - }).promise().then(iniateRes => { - sourceMpuId = iniateRes.UploadId; - }).catch(err => { - process.stdout.write(`Error initiating MPU ' + + return s3 + .createMultipartUpload({ + Bucket: sourceBucketName, + Key: 
sourceMpuKey, + }) + .promise() + .then(iniateRes => { + sourceMpuId = iniateRes.UploadId; + }) + .catch(err => { + process.stdout.write(`Error initiating MPU ' + 'in MPU beforeEach: ${err}\n`); - throw err; - }).then(() => { - const partUploads = []; - for (let i = 1; i < 10; i++) { - const partBuffHere = i % 2 ? partBuff : otherPartBuff; - const partHashHere = i % 2 ? partHash : otherPartHash; - partUploads.push(s3.uploadPart({ - Bucket: sourceBucketName, - Key: sourceMpuKey, - PartNumber: i, - UploadId: sourceMpuId, - Body: partBuffHere, - }).promise()); - parts.push({ - ETag: partHashHere, - PartNumber: i, - }); - } - process.stdout.write('about to put parts'); - return Promise.all(partUploads); - }).catch(err => { - process.stdout.write(`Error putting parts in ' + + throw err; + }) + .then(() => { + const partUploads = []; + for (let i = 1; i < 10; i++) { + const partBuffHere = i % 2 ? partBuff : otherPartBuff; + const partHashHere = i % 2 ? partHash : otherPartHash; + partUploads.push( + s3 + .uploadPart({ + Bucket: sourceBucketName, + Key: sourceMpuKey, + PartNumber: i, + UploadId: sourceMpuId, + Body: partBuffHere, + }) + .promise() + ); + parts.push({ + ETag: partHashHere, + PartNumber: i, + }); + } + process.stdout.write('about to put parts'); + return Promise.all(partUploads); + }) + .catch(err => { + process.stdout.write(`Error putting parts in ' + 'MPU beforeEach: ${err}\n`); - throw err; - }).then(() => { - process.stdout.write('completing mpu'); - return s3.completeMultipartUpload({ + throw err; + }) + .then(() => { + process.stdout.write('completing mpu'); + return s3 + .completeMultipartUpload({ + Bucket: sourceBucketName, + Key: sourceMpuKey, + UploadId: sourceMpuId, + MultipartUpload: { + Parts: parts, + }, + }) + .promise(); + }) + .then(() => { + process.stdout.write('finished completing mpu'); + }) + .catch(err => { + process.stdout.write(`Error in MPU beforeEach: ${err}\n`); + throw err; + }); + }); + + afterEach(() => + s3 + .abortMultipartUpload({ Bucket: sourceBucketName, Key: sourceMpuKey, UploadId: sourceMpuId, - MultipartUpload: { - Parts: parts, - }, - }).promise(); - }).then(() => { - process.stdout.write('finished completing mpu'); - }).catch(err => { - process.stdout.write(`Error in MPU beforeEach: ${err}\n`); - throw err; - }); - }); - - afterEach(() => s3.abortMultipartUpload({ - Bucket: sourceBucketName, - Key: sourceMpuKey, - UploadId: sourceMpuId, - }).promise().catch(err => { - if (err.code !== 'NoSuchUpload' - && err.code !== 'NoSuchBucket') { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - } - })); + }) + .promise() + .catch(err => { + if (err.code !== 'NoSuchUpload' && err.code !== 'NoSuchBucket') { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + } + }) + ); - it('should copy a part from a source bucket to a different ' + - 'destination bucket', done => { + it('should copy a part from a source bucket to a different ' + 'destination bucket', done => { process.stdout.write('Entered first mpu test'); - return s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceMpuKey}`, - PartNumber: 1, - UploadId: uploadId, - }, + return s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceMpuKey}`, + PartNumber: 1, + UploadId: uploadId, + }, (err, res) => { checkNoError(err); - assert.strictEqual(res.ETag, - totalMpuObjectHash); + assert.strictEqual(res.ETag, totalMpuObjectHash); assert(res.LastModified); 
done(); - }); + } + ); }); - it('should copy two parts from a source bucket to a different ' + - 'destination bucket and complete the MPU', () => { - process.stdout.write('Putting first part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceMpuKey}`, - PartNumber: 1, - UploadId: uploadId, - }).promise().then(res => { - assert.strictEqual(res.ETag, totalMpuObjectHash); - assert(res.LastModified); - }).then(() => { - process.stdout.write('Putting second part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceMpuKey}`, - PartNumber: 2, - UploadId: uploadId, - }).promise().then(res => { - assert.strictEqual(res.ETag, totalMpuObjectHash); - assert(res.LastModified); - }).then(() => { - process.stdout.write('Completing MPU'); - return s3.completeMultipartUpload({ + it( + 'should copy two parts from a source bucket to a different ' + + 'destination bucket and complete the MPU', + () => { + process.stdout.write('Putting first part in MPU test'); + return s3 + .uploadPartCopy({ Bucket: destBucketName, Key: destObjName, + CopySource: `${sourceBucketName}/${sourceMpuKey}`, + PartNumber: 1, UploadId: uploadId, - MultipartUpload: { - Parts: [ - { ETag: totalMpuObjectHash, PartNumber: 1 }, - { ETag: totalMpuObjectHash, PartNumber: 2 }, - ], - }, - }).promise(); - }).then(res => { - assert.strictEqual(res.Bucket, destBucketName); - assert.strictEqual(res.Key, destObjName); + }) + .promise() + .then(res => { + assert.strictEqual(res.ETag, totalMpuObjectHash); + assert(res.LastModified); + }) + .then(() => { + process.stdout.write('Putting second part in MPU test'); + return s3 + .uploadPartCopy({ + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceMpuKey}`, + PartNumber: 2, + UploadId: uploadId, + }) + .promise() + .then(res => { + assert.strictEqual(res.ETag, totalMpuObjectHash); + assert(res.LastModified); + }) + .then(() => { + process.stdout.write('Completing MPU'); + return s3 + .completeMultipartUpload({ + Bucket: destBucketName, + Key: destObjName, + UploadId: uploadId, + MultipartUpload: { + Parts: [ + { ETag: totalMpuObjectHash, PartNumber: 1 }, + { ETag: totalMpuObjectHash, PartNumber: 2 }, + ], + }, + }) + .promise(); + }) + .then(res => { + assert.strictEqual(res.Bucket, destBucketName); + assert.strictEqual(res.Key, destObjName); + // combined ETag returned by AWS (combination of part ETags + // with number of parts at the end) + assert.strictEqual(res.ETag, '"5bba96810ff449d94aa8f5c5a859b0cb-2"'); + }) + .catch(err => { + checkNoError(err); + }); + }); + } + ); + + it( + 'should copy two parts with range headers from a source ' + + 'bucket to a different destination bucket and ' + + 'complete the MPU', + () => { + process.stdout.write('Putting first part in MPU range test'); + const part1ETag = '"b1e0d096c8f0670c5367d131e392b84a"'; + const part2ETag = '"a2468d5c0ec2d4d5fc13b73beb63080a"'; // combined ETag returned by AWS (combination of part ETags // with number of parts at the end) - assert.strictEqual(res.ETag, - '"5bba96810ff449d94aa8f5c5a859b0cb-2"'); - }).catch(err => { - checkNoError(err); - }); - }); - }); - - it('should copy two parts with range headers from a source ' + - 'bucket to a different destination bucket and ' + - 'complete the MPU', () => { - process.stdout.write('Putting first part in MPU range test'); - const part1ETag = '"b1e0d096c8f0670c5367d131e392b84a"'; - const part2ETag 
= '"a2468d5c0ec2d4d5fc13b73beb63080a"'; - // combined ETag returned by AWS (combination of part ETags - // with number of parts at the end) - const finalCombinedETag = - '"e08ede4e8b942e18537cb2289f613ae3-2"'; - return s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceMpuKey}`, - PartNumber: 1, - UploadId: uploadId, - CopySourceRange: 'bytes=5242890-15242880', - }).promise().then(res => { - assert.strictEqual(res.ETag, part1ETag); - assert(res.LastModified); - }).then(() => { - process.stdout.write('Putting second part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceMpuKey}`, - PartNumber: 2, - UploadId: uploadId, - CopySourceRange: 'bytes=15242891-30242991', - }).promise().then(res => { - assert.strictEqual(res.ETag, part2ETag); - assert(res.LastModified); - }).then(() => { - process.stdout.write('Completing MPU'); - return s3.completeMultipartUpload({ + const finalCombinedETag = '"e08ede4e8b942e18537cb2289f613ae3-2"'; + return s3 + .uploadPartCopy({ Bucket: destBucketName, Key: destObjName, + CopySource: `${sourceBucketName}/${sourceMpuKey}`, + PartNumber: 1, UploadId: uploadId, - MultipartUpload: { - Parts: [ - { ETag: part1ETag, PartNumber: 1 }, - { ETag: part2ETag, PartNumber: 2 }, - ], - }, - }).promise(); - }).then(res => { - assert.strictEqual(res.Bucket, destBucketName); - assert.strictEqual(res.Key, destObjName); - assert.strictEqual(res.ETag, finalCombinedETag); - }).then(() => { - process.stdout.write('Getting new object'); - return s3.getObject({ - Bucket: destBucketName, - Key: destObjName, - }).promise(); - }).then(res => { - assert.strictEqual(res.ContentLength, 25000092); - assert.strictEqual(res.ETag, finalCombinedETag); - }) - .catch(err => { - checkNoError(err); - }); - }); - }); + CopySourceRange: 'bytes=5242890-15242880', + }) + .promise() + .then(res => { + assert.strictEqual(res.ETag, part1ETag); + assert(res.LastModified); + }) + .then(() => { + process.stdout.write('Putting second part in MPU test'); + return s3 + .uploadPartCopy({ + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceMpuKey}`, + PartNumber: 2, + UploadId: uploadId, + CopySourceRange: 'bytes=15242891-30242991', + }) + .promise() + .then(res => { + assert.strictEqual(res.ETag, part2ETag); + assert(res.LastModified); + }) + .then(() => { + process.stdout.write('Completing MPU'); + return s3 + .completeMultipartUpload({ + Bucket: destBucketName, + Key: destObjName, + UploadId: uploadId, + MultipartUpload: { + Parts: [ + { ETag: part1ETag, PartNumber: 1 }, + { ETag: part2ETag, PartNumber: 2 }, + ], + }, + }) + .promise(); + }) + .then(res => { + assert.strictEqual(res.Bucket, destBucketName); + assert.strictEqual(res.Key, destObjName); + assert.strictEqual(res.ETag, finalCombinedETag); + }) + .then(() => { + process.stdout.write('Getting new object'); + return s3 + .getObject({ + Bucket: destBucketName, + Key: destObjName, + }) + .promise(); + }) + .then(res => { + assert.strictEqual(res.ContentLength, 25000092); + assert.strictEqual(res.ETag, finalCombinedETag); + }) + .catch(err => { + checkNoError(err); + }); + }); + } + ); it('should overwrite an existing part by copying a part', () => { // AWS response etag for this completed MPU const finalObjETag = '"db77ebbae9e9f5a244a26b86193ad818-1"'; process.stdout.write('Putting first part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - 
CopySource: `${sourceBucketName}/${sourceMpuKey}`, - PartNumber: 1, - UploadId: uploadId, - }).promise().then(res => { - assert.strictEqual(res.ETag, totalMpuObjectHash); - assert(res.LastModified); - }).then(() => { - process.stdout.write('Overwriting first part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId }).promise(); - }).then(res => { - assert.strictEqual(res.ETag, etag); - assert(res.LastModified); - }).then(() => { - process.stdout.write('Completing MPU'); - return s3.completeMultipartUpload({ + return s3 + .uploadPartCopy({ Bucket: destBucketName, Key: destObjName, + CopySource: `${sourceBucketName}/${sourceMpuKey}`, + PartNumber: 1, UploadId: uploadId, - MultipartUpload: { - Parts: [ - { ETag: etag, PartNumber: 1 }, - ], - }, - }).promise(); - }).then(res => { - assert.strictEqual(res.Bucket, destBucketName); - assert.strictEqual(res.Key, destObjName); - assert.strictEqual(res.ETag, finalObjETag); - }).then(() => { - process.stdout.write('Getting object put by MPU with ' + - 'overwrite part'); - return s3.getObject({ - Bucket: destBucketName, - Key: destObjName, - }).promise(); - }).then(res => { - assert.strictEqual(res.ETag, finalObjETag); - }).catch(err => { - checkNoError(err); - }); + }) + .promise() + .then(res => { + assert.strictEqual(res.ETag, totalMpuObjectHash); + assert(res.LastModified); + }) + .then(() => { + process.stdout.write('Overwriting first part in MPU test'); + return s3 + .uploadPartCopy({ + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, + }) + .promise(); + }) + .then(res => { + assert.strictEqual(res.ETag, etag); + assert(res.LastModified); + }) + .then(() => { + process.stdout.write('Completing MPU'); + return s3 + .completeMultipartUpload({ + Bucket: destBucketName, + Key: destObjName, + UploadId: uploadId, + MultipartUpload: { + Parts: [{ ETag: etag, PartNumber: 1 }], + }, + }) + .promise(); + }) + .then(res => { + assert.strictEqual(res.Bucket, destBucketName); + assert.strictEqual(res.Key, destObjName); + assert.strictEqual(res.ETag, finalObjETag); + }) + .then(() => { + process.stdout.write('Getting object put by MPU with ' + 'overwrite part'); + return s3 + .getObject({ + Bucket: destBucketName, + Key: destObjName, + }) + .promise(); + }) + .then(res => { + assert.strictEqual(res.ETag, finalObjETag); + }) + .catch(err => { + checkNoError(err); + }); }); - it('should not corrupt object if overwriting an existing part by copying a part ' + - 'while the MPU is being completed', () => { - // AWS response etag for this completed MPU - const finalObjETag = '"db77ebbae9e9f5a244a26b86193ad818-1"'; - process.stdout.write('Putting first part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - }).promise().then(res => { - assert.strictEqual(res.ETag, etag); - assert(res.LastModified); - }).then(() => { - process.stdout.write('Overwriting first part in MPU test and completing MPU ' + - 'at the same time'); - return Promise.all([ - s3.uploadPartCopy({ + it( + 'should not corrupt object if overwriting an existing part by copying a part ' + + 'while the MPU is being completed', + () => { + // AWS response etag for this completed MPU + const finalObjETag = '"db77ebbae9e9f5a244a26b86193ad818-1"'; + 
process.stdout.write('Putting first part in MPU test'); + return s3 + .uploadPartCopy({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: uploadId, - }).promise().catch(err => { - // in case the CompleteMPU finished - // earlier, we may get a NoSuchKey error, - // so just ignore it and resolve with a - // special value, otherwise re-throw the - // error - if (err && err.code === 'NoSuchKey') { - return Promise.resolve(null); + }) + .promise() + .then(res => { + assert.strictEqual(res.ETag, etag); + assert(res.LastModified); + }) + .then(() => { + process.stdout.write( + 'Overwriting first part in MPU test and completing MPU ' + 'at the same time' + ); + return Promise.all([ + s3 + .uploadPartCopy({ + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, + }) + .promise() + .catch(err => { + // in case the CompleteMPU finished + // earlier, we may get a NoSuchKey error, + // so just ignore it and resolve with a + // special value, otherwise re-throw the + // error + if (err && err.code === 'NoSuchKey') { + return Promise.resolve(null); + } + throw err; + }), + s3 + .completeMultipartUpload({ + Bucket: destBucketName, + Key: destObjName, + UploadId: uploadId, + MultipartUpload: { + Parts: [{ ETag: etag, PartNumber: 1 }], + }, + }) + .promise(), + ]); + }) + .then(([uploadRes, completeRes]) => { + // if upload succeeded before CompleteMPU finished + if (uploadRes !== null) { + assert.strictEqual(uploadRes.ETag, etag); + assert(uploadRes.LastModified); } - throw err; - }), - s3.completeMultipartUpload({ - Bucket: destBucketName, - Key: destObjName, - UploadId: uploadId, - MultipartUpload: { - Parts: [ - { ETag: etag, PartNumber: 1 }, - ], - }, - }).promise(), - ]); - }).then(([uploadRes, completeRes]) => { - // if upload succeeded before CompleteMPU finished - if (uploadRes !== null) { - assert.strictEqual(uploadRes.ETag, etag); - assert(uploadRes.LastModified); - } - assert.strictEqual(completeRes.Bucket, destBucketName); - assert.strictEqual(completeRes.Key, destObjName); - assert.strictEqual(completeRes.ETag, finalObjETag); - }).then(() => { - process.stdout.write('Getting object put by MPU with ' + - 'overwrite part'); - return s3.getObject({ - Bucket: destBucketName, - Key: destObjName, - }).promise(); - }).then(res => { - assert.strictEqual(res.ETag, finalObjETag); - }); - }); + assert.strictEqual(completeRes.Bucket, destBucketName); + assert.strictEqual(completeRes.Key, destObjName); + assert.strictEqual(completeRes.ETag, finalObjETag); + }) + .then(() => { + process.stdout.write('Getting object put by MPU with ' + 'overwrite part'); + return s3 + .getObject({ + Bucket: destBucketName, + Key: destObjName, + }) + .promise(); + }) + .then(res => { + assert.strictEqual(res.ETag, finalObjETag); + }); + } + ); }); - it('should return an error if no such upload initiated', - done => { - s3.uploadPartCopy({ Bucket: destBucketName, Key: destObjName, + it('should return an error if no such upload initiated', done => { + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: 'madeupuploadid444233232', @@ -656,12 +816,15 @@ describe('Object Part Copy', () => { err => { checkError(err, 'NoSuchUpload'); done(); - }); - }); + } + ); + }); - it('should return an error if attempt to copy from nonexistent bucket', - done => { - s3.uploadPartCopy({ Bucket: 
destBucketName, Key: destObjName, + it('should return an error if attempt to copy from nonexistent bucket', done => { + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, CopySource: `nobucket453234/${sourceObjName}`, PartNumber: 1, UploadId: uploadId, @@ -669,12 +832,15 @@ describe('Object Part Copy', () => { err => { checkError(err, 'NoSuchBucket'); done(); - }); - }); + } + ); + }); - it('should return an error if attempt to copy to nonexistent bucket', - done => { - s3.uploadPartCopy({ Bucket: 'nobucket453234', Key: destObjName, + it('should return an error if attempt to copy to nonexistent bucket', done => { + s3.uploadPartCopy( + { + Bucket: 'nobucket453234', + Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: uploadId, @@ -682,12 +848,15 @@ describe('Object Part Copy', () => { err => { checkError(err, 'NoSuchBucket'); done(); - }); - }); + } + ); + }); - it('should return an error if attempt to copy nonexistent object', - done => { - s3.uploadPartCopy({ Bucket: destBucketName, Key: destObjName, + it('should return an error if attempt to copy nonexistent object', done => { + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, CopySource: `${sourceBucketName}/nokey`, PartNumber: 1, UploadId: uploadId, @@ -695,12 +864,15 @@ describe('Object Part Copy', () => { err => { checkError(err, 'NoSuchKey'); done(); - }); - }); + } + ); + }); - it('should return an error if use invalid part number', - done => { - s3.uploadPartCopy({ Bucket: destBucketName, Key: destObjName, + it('should return an error if use invalid part number', done => { + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, CopySource: `${sourceBucketName}/nokey`, PartNumber: 10001, UploadId: uploadId, @@ -708,47 +880,54 @@ describe('Object Part Copy', () => { err => { checkError(err, 'InvalidArgument'); done(); - }); - }); + } + ); + }); it('should not copy a part of a cold object', done => { const archive = { archiveInfo: { archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', - archiveVersion: 5577006791947779 + archiveVersion: 5577006791947779, }, }; fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archive, err => { assert.ifError(err); - s3.uploadPartCopy({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - }, err => { + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, + }, + err => { assert.strictEqual(err.code, 'InvalidObjectState'); assert.strictEqual(err.statusCode, 403); done(); - }); + } + ); }); }); - it('should copy a part of an object when it\'s transitioning to cold', done => { + it("should copy a part of an object when it's transitioning to cold", done => { fakeMetadataTransition(sourceBucketName, sourceObjName, undefined, err => { assert.ifError(err); - s3.uploadPartCopy({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, etag); - assert(res.LastModified); - done(); - }); + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, + }, + (err, res) => { + checkNoError(err); + assert.strictEqual(res.ETag, etag); + assert(res.LastModified); + 
done(); + } + ); }); }); @@ -758,22 +937,25 @@ describe('Object Part Copy', () => { restoreRequestedAt: new Date(0), restoreRequestedDays: 5, restoreCompletedAt: new Date(10), - restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)), + restoreWillExpireAt: new Date(10 + 5 * 24 * 60 * 60 * 1000), }; fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archiveCompleted, err => { assert.ifError(err); - s3.uploadPartCopy({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: uploadId, - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, etag); - assert(res.LastModified); - done(); - }); + s3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: uploadId, + }, + (err, res) => { + checkNoError(err); + assert.strictEqual(res.ETag, etag); + assert(res.LastModified); + done(); + } + ); }); }); @@ -784,91 +966,115 @@ describe('Object Part Copy', () => { beforeEach(() => { process.stdout.write('In other account before each'); - return otherAccountS3.createBucket({ Bucket: - otherAccountBucket }).promise() - .catch(err => { - process.stdout.write('Error creating other account ' + - `bucket: ${err}\n`); - throw err; - }).then(() => { - process.stdout.write('Initiating other account MPU'); - return otherAccountS3.createMultipartUpload({ - Bucket: otherAccountBucket, - Key: otherAccountKey, - }).promise(); - }).then(iniateRes => { - otherAccountUploadId = iniateRes.UploadId; - }).catch(err => { - process.stdout.write('Error in other account ' + - `beforeEach: ${err}\n`); - throw err; - }); - }); - - afterEach(() => otherAccountBucketUtility.empty(otherAccountBucket) - .then(() => otherAccountS3.abortMultipartUpload({ - Bucket: otherAccountBucket, - Key: otherAccountKey, - UploadId: otherAccountUploadId, - }).promise()) - .catch(err => { - if (err.code !== 'NoSuchUpload') { - process.stdout.write('Error in other account ' + - `afterEach: ${err}\n`); + return otherAccountS3 + .createBucket({ Bucket: otherAccountBucket }) + .promise() + .catch(err => { + process.stdout.write('Error creating other account ' + `bucket: ${err}\n`); + throw err; + }) + .then(() => { + process.stdout.write('Initiating other account MPU'); + return otherAccountS3 + .createMultipartUpload({ + Bucket: otherAccountBucket, + Key: otherAccountKey, + }) + .promise(); + }) + .then(iniateRes => { + otherAccountUploadId = iniateRes.UploadId; + }) + .catch(err => { + process.stdout.write('Error in other account ' + `beforeEach: ${err}\n`); throw err; - } - }).then(() => otherAccountBucketUtility - .deleteOne(otherAccountBucket)) - ); - - it('should not allow an account without read persmission on the ' + - 'source object to copy the object', done => { - otherAccountS3.uploadPartCopy({ Bucket: otherAccountBucket, - Key: otherAccountKey, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: otherAccountUploadId, - }, - err => { - checkError(err, 'AccessDenied'); - done(); }); }); - it('should not allow an account without write persmission on the ' + - 'destination bucket to upload part copy the object', done => { - otherAccountS3.putObject({ Bucket: otherAccountBucket, - Key: otherAccountKey, Body: '' }, () => { - otherAccountS3.uploadPartCopy({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${otherAccountBucket}/${otherAccountKey}`, - PartNumber: 1, - UploadId: uploadId, - }, + afterEach(() => + 
otherAccountBucketUtility + .empty(otherAccountBucket) + .then(() => + otherAccountS3 + .abortMultipartUpload({ + Bucket: otherAccountBucket, + Key: otherAccountKey, + UploadId: otherAccountUploadId, + }) + .promise() + ) + .catch(err => { + if (err.code !== 'NoSuchUpload') { + process.stdout.write('Error in other account ' + `afterEach: ${err}\n`); + throw err; + } + }) + .then(() => otherAccountBucketUtility.deleteOne(otherAccountBucket)) + ); + + it( + 'should not allow an account without read persmission on the ' + 'source object to copy the object', + done => { + otherAccountS3.uploadPartCopy( + { + Bucket: otherAccountBucket, + Key: otherAccountKey, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: otherAccountUploadId, + }, err => { checkError(err, 'AccessDenied'); done(); - }); - }); - }); + } + ); + } + ); - it('should allow an account with read permission on the ' + - 'source object and write permission on the destination ' + - 'bucket to upload part copy the object', done => { - s3.putObjectAcl({ Bucket: sourceBucketName, - Key: sourceObjName, ACL: 'public-read' }, () => { - otherAccountS3.uploadPartCopy({ Bucket: otherAccountBucket, - Key: otherAccountKey, - CopySource: `${sourceBucketName}/${sourceObjName}`, - PartNumber: 1, - UploadId: otherAccountUploadId, - }, - err => { - checkNoError(err); - done(); - }); - }); - }); + it( + 'should not allow an account without write persmission on the ' + + 'destination bucket to upload part copy the object', + done => { + otherAccountS3.putObject({ Bucket: otherAccountBucket, Key: otherAccountKey, Body: '' }, () => { + otherAccountS3.uploadPartCopy( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${otherAccountBucket}/${otherAccountKey}`, + PartNumber: 1, + UploadId: uploadId, + }, + err => { + checkError(err, 'AccessDenied'); + done(); + } + ); + }); + } + ); + + it( + 'should allow an account with read permission on the ' + + 'source object and write permission on the destination ' + + 'bucket to upload part copy the object', + done => { + s3.putObjectAcl({ Bucket: sourceBucketName, Key: sourceObjName, ACL: 'public-read' }, () => { + otherAccountS3.uploadPartCopy( + { + Bucket: otherAccountBucket, + Key: otherAccountKey, + CopySource: `${sourceBucketName}/${sourceObjName}`, + PartNumber: 1, + UploadId: otherAccountUploadId, + }, + err => { + checkNoError(err); + done(); + } + ); + }); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/corsHeaders.js b/tests/functional/aws-node-sdk/test/object/corsHeaders.js index 2647c7a95b..92b28262ed 100644 --- a/tests/functional/aws-node-sdk/test/object/corsHeaders.js +++ b/tests/functional/aws-node-sdk/test/object/corsHeaders.js @@ -15,8 +15,7 @@ const bucket = 'bucketcorsheadertest'; const objectKey = 'objectKeyName'; const allowedOrigin = 'http://www.allowedwebsite.com'; const notAllowedOrigin = 'http://www.notallowedwebsite.com'; -const vary = 'Origin, Access-Control-Request-Headers, ' + - 'Access-Control-Request-Method'; +const vary = 'Origin, Access-Control-Request-Headers, ' + 'Access-Control-Request-Method'; const defaultOptions = { allowedMethods: ['GET'], allowedOrigins: [allowedOrigin], @@ -119,10 +118,12 @@ const apiMethods = [ params: { Bucket: bucket, CORSConfiguration: { - CORSRules: [{ - AllowedOrigins: [allowedOrigin], - AllowedMethods: ['PUT'], - }], + CORSRules: [ + { + AllowedOrigins: [allowedOrigin], + AllowedMethods: ['PUT'], + }, + ], }, }, }, @@ -186,9 +187,7 @@ const apiMethods = [ params: { Bucket: 
bucket, Delete: { - Objects: [ - { Key: objectKey }, - ], + Objects: [{ Key: objectKey }], }, }, }, @@ -233,19 +232,19 @@ function _checkHeaders(action, params, origin, expectedHeaders, callback) { function _runAssertions(resHeaders, cb) { if (expectedHeaders) { Object.keys(expectedHeaders).forEach(key => { - assert.deepEqual(resHeaders[key], expectedHeaders[key], - `error header: ${key}`); + assert.deepEqual(resHeaders[key], expectedHeaders[key], `error header: ${key}`); }); } else { // if no headersResponse provided, should not have these headers // in the request - ['access-control-allow-origin', + [ + 'access-control-allow-origin', 'access-control-allow-methods', 'access-control-allow-credentials', - 'vary'].forEach(key => { - assert.strictEqual(resHeaders[key], undefined, - `Error: ${key} should not have value`); - }); + 'vary', + ].forEach(key => { + assert.strictEqual(resHeaders[key], undefined, `Error: ${key} should not have value`); + }); } cb(); } @@ -261,8 +260,10 @@ function _checkHeaders(action, params, origin, expectedHeaders, callback) { _runAssertions(resHeaders, () => { if (response.data.UploadId) { // abort multipart upload before deleting bucket in afterEach - return s3.abortMultipartUpload({ Bucket: bucket, Key: objectKey, - UploadId: response.data.UploadId }, callback); + return s3.abortMultipartUpload( + { Bucket: bucket, Key: objectKey, UploadId: response.data.UploadId }, + callback + ); } return callback(); }); @@ -297,31 +298,31 @@ describe('Cross Origin Resource Sharing requests', () => { }); describe('on non-existing bucket', () => { - it('should not respond to request with CORS headers, even ' + - 'if request was sent with Origin header', done => { - _checkHeaders(s3.listObjects, { Bucket: 'nonexistingbucket' }, - allowedOrigin, null, done); - }); + it( + 'should not respond to request with CORS headers, even ' + 'if request was sent with Origin header', + done => { + _checkHeaders(s3.listObjects, { Bucket: 'nonexistingbucket' }, allowedOrigin, null, done); + } + ); }); describe('on bucket without CORS configuration', () => { - it('should not respond to request with CORS headers, even ' + - 'if request was sent with Origin header', done => { - _checkHeaders(s3.listObjects, { Bucket: bucket }, - allowedOrigin, null, done); - }); + it( + 'should not respond to request with CORS headers, even ' + 'if request was sent with Origin header', + done => { + _checkHeaders(s3.listObjects, { Bucket: bucket }, allowedOrigin, null, done); + } + ); }); - describe('on bucket with CORS configuration: ' + - 'allow one origin and all methods', () => { + describe('on bucket with CORS configuration: ' + 'allow one origin and all methods', () => { const corsParams = generateCorsParams(bucket, { allowedMethods: ['GET', 'PUT', 'HEAD', 'POST', 'DELETE'], allowedOrigins: [allowedOrigin], }); const expectedHeaders = { 'access-control-allow-origin': allowedOrigin, - 'access-control-allow-methods': corsParams.CORSConfiguration - .CORSRules[0].AllowedMethods.join(', '), + 'access-control-allow-methods': corsParams.CORSConfiguration.CORSRules[0].AllowedMethods.join(', '), 'access-control-allow-credentials': 'true', vary, }; @@ -330,8 +331,7 @@ describe('Cross Origin Resource Sharing requests', () => { afterEach(done => { removeAllVersions({ Bucket: bucket }, err => { - if (err && err.code !== 'NoSuchKey' && - err.code !== 'NoSuchBucket') { + if (err && err.code !== 'NoSuchKey' && err.code !== 'NoSuchBucket') { process.stdout.write(`Unexpected err in afterEach: ${err}`); return done(err); } @@ 
-340,45 +340,48 @@ describe('Cross Origin Resource Sharing requests', () => { }); describe('when request Origin/method match CORS configuration', () => { - it('should not respond with CORS headers to GET service (list ' + - 'buckets), even if Origin/method match CORS rule', done => { - // no bucket specified in this request - _checkHeaders(s3.listBuckets, {}, allowedOrigin, - null, done); - }); - - it('should not respond with CORS headers after deleting bucket, ' + - 'even if Origin/method match CORS rule', done => { - s3.deleteBucket({ Bucket: bucket }, err => { - assert.strictEqual(err, null, `Unexpected err ${err}`); - _checkHeaders(s3.listObjects, { Bucket: bucket }, - allowedOrigin, null, done); - }); - }); + it( + 'should not respond with CORS headers to GET service (list ' + + 'buckets), even if Origin/method match CORS rule', + done => { + // no bucket specified in this request + _checkHeaders(s3.listBuckets, {}, allowedOrigin, null, done); + } + ); + + it( + 'should not respond with CORS headers after deleting bucket, ' + + 'even if Origin/method match CORS rule', + done => { + s3.deleteBucket({ Bucket: bucket }, err => { + assert.strictEqual(err, null, `Unexpected err ${err}`); + _checkHeaders(s3.listObjects, { Bucket: bucket }, allowedOrigin, null, done); + }); + } + ); apiMethods.forEach(method => { - it(`should respond to ${method.description} with CORS ` + - 'headers (access-control-allow-origin, access-control-allow-' + - 'methods, access-control-allow-credentials and vary)', done => { - _checkHeaders(method.action, method.params, allowedOrigin, - expectedHeaders, done); - }); + it( + `should respond to ${method.description} with CORS ` + + 'headers (access-control-allow-origin, access-control-allow-' + + 'methods, access-control-allow-credentials and vary)', + done => { + _checkHeaders(method.action, method.params, allowedOrigin, expectedHeaders, done); + } + ); }); }); describe('when request Origin does not match CORS rule', () => { apiMethods.forEach(method => { - it(`should not respond to ${method.description} with ` + - 'CORS headers', done => { - _checkHeaders(method.action, method.params, - notAllowedOrigin, null, done); + it(`should not respond to ${method.description} with ` + 'CORS headers', done => { + _checkHeaders(method.action, method.params, notAllowedOrigin, null, done); }); }); }); }); - describe('on bucket with CORS configuration: ' + - 'allow PUT method and one origin', () => { + describe('on bucket with CORS configuration: ' + 'allow PUT method and one origin', () => { const corsParams = generateCorsParams(bucket, { allowedMethods: ['PUT'], allowedOrigins: [allowedOrigin], @@ -392,17 +395,13 @@ describe('Cross Origin Resource Sharing requests', () => { s3.deleteBucketCors({ Bucket: bucket }, done); }); - it('when request method does not match CORS rule ' + - 'should not respond with CORS headers', done => { - _checkHeaders(s3.listObjects, { Bucket: bucket }, - allowedOrigin, null, done); + it('when request method does not match CORS rule ' + 'should not respond with CORS headers', done => { + _checkHeaders(s3.listObjects, { Bucket: bucket }, allowedOrigin, null, done); }); }); - describe('on bucket with CORS configuration and website configuration', - () => { - const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : - 'bucketwebsitetester'; + describe('on bucket with CORS configuration and website configuration', () => { + const bucket = process.env.AWS_ON_AIR ? 
'awsbucketwebsitetester' : 'bucketwebsitetester'; const corsParams = generateCorsParams(bucket, { allowedMethods: ['GET', 'HEAD'], allowedOrigins: [allowedOrigin], @@ -419,30 +418,38 @@ describe('Cross Origin Resource Sharing requests', () => { webConfig.addRoutingRule(redirect, condition); beforeEach(done => - async.series([ - next => s3.createBucket({ - Bucket: bucket, - ACL: 'public-read', - }, next), - next => s3.putBucketCors(corsParams, next), - next => s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, next), - next => s3.putObject({ - Bucket: bucket, - Key: 'index.html', - ACL: 'public-read', - }, next), - ], err => { - assert.strictEqual(err, null, - `Unexpected err ${err} in beforeEach`); - done(err); - }) + async.series( + [ + next => + s3.createBucket( + { + Bucket: bucket, + ACL: 'public-read', + }, + next + ), + next => s3.putBucketCors(corsParams, next), + next => s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, next), + next => + s3.putObject( + { + Bucket: bucket, + Key: 'index.html', + ACL: 'public-read', + }, + next + ), + ], + err => { + assert.strictEqual(err, null, `Unexpected err ${err} in beforeEach`); + done(err); + } + ) ); afterEach(done => s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, err => { - assert.strictEqual(err, null, - `Unexpected err ${err} in afterEach`); + assert.strictEqual(err, null, `Unexpected err ${err} in afterEach`); s3.deleteBucket({ Bucket: bucket }, err => { if (err) { process.stdout.write(`Error in afterEach ${err}`); @@ -453,47 +460,41 @@ describe('Cross Origin Resource Sharing requests', () => { }) ); - it('should respond with CORS headers at website endpoint (GET)', - done => { + it('should respond with CORS headers at website endpoint (GET)', done => { const headers = { Origin: allowedOrigin }; - methodRequest({ method: 'GET', bucket, headers, headersResponse, - code: 200, isWebsite: true }, done); + methodRequest({ method: 'GET', bucket, headers, headersResponse, code: 200, isWebsite: true }, done); }); - it('should respond with CORS headers at website endpoint (GET) ' + - 'even in case of error', - done => { + it('should respond with CORS headers at website endpoint (GET) ' + 'even in case of error', done => { const headers = { Origin: allowedOrigin }; - methodRequest({ method: 'GET', bucket, objectKey: 'test', - headers, headersResponse, code: 404, isWebsite: true }, done); + methodRequest( + { method: 'GET', bucket, objectKey: 'test', headers, headersResponse, code: 404, isWebsite: true }, + done + ); }); - it('should respond with CORS headers at website endpoint (GET) ' + - 'even in case of redirect', - done => { + it('should respond with CORS headers at website endpoint (GET) ' + 'even in case of redirect', done => { const headers = { Origin: allowedOrigin }; - methodRequest({ method: 'GET', bucket, objectKey: 'redirect', - headers, headersResponse, code: 301, isWebsite: true }, done); + methodRequest( + { method: 'GET', bucket, objectKey: 'redirect', headers, headersResponse, code: 301, isWebsite: true }, + done + ); }); - it('should respond with CORS headers at website endpoint (HEAD)', - done => { + it('should respond with CORS headers at website endpoint (HEAD)', done => { const headers = { Origin: allowedOrigin }; - methodRequest({ method: 'HEAD', bucket, headers, headersResponse, - code: 200, isWebsite: true }, done); + methodRequest({ method: 'HEAD', bucket, headers, headersResponse, code: 200, isWebsite: true }, done); }); }); - describe('on bucket with additional 
cors configuration', - () => { + describe('on bucket with additional cors configuration', () => { afterEach(done => { s3.deleteBucketCors({ Bucket: bucket }, done); }); describe('cors configuration : AllowedHeaders', () => { const corsParams = generateCorsParams(bucket, defaultOptions); - corsParams.CORSConfiguration.CORSRules[0] - .AllowedHeaders = ['Content-Type']; + corsParams.CORSConfiguration.CORSRules[0].AllowedHeaders = ['Content-Type']; const headersResponse = { 'access-control-allow-origin': allowedOrigin, @@ -506,30 +507,33 @@ describe('Cross Origin Resource Sharing requests', () => { s3.putBucketCors(corsParams, done); }); - it('should not return access-control-allow-headers response ' + - 'header even if request matches CORS rule and other access-' + - 'control headers are returned', done => { - const headers = { - 'Origin': allowedOrigin, - 'Content-Type': 'testvalue', - }; - const headersOmitted = ['access-control-allow-headers']; - methodRequest({ method: 'GET', bucket, headers, headersResponse, - headersOmitted, code: 200 }, done); - }); - - it('Request with matching Origin/method but additional headers ' + - 'that violate CORS rule:\n\t should still respond with access-' + - 'control headers (headers are only checked in preflight requests)', - done => { - const headers = { - Origin: allowedOrigin, - Test: 'test', - Expires: 86400, - }; - methodRequest({ method: 'GET', bucket, headers, headersResponse, - code: 200 }, done); - }); + it( + 'should not return access-control-allow-headers response ' + + 'header even if request matches CORS rule and other access-' + + 'control headers are returned', + done => { + const headers = { + Origin: allowedOrigin, + 'Content-Type': 'testvalue', + }; + const headersOmitted = ['access-control-allow-headers']; + methodRequest({ method: 'GET', bucket, headers, headersResponse, headersOmitted, code: 200 }, done); + } + ); + + it( + 'Request with matching Origin/method but additional headers ' + + 'that violate CORS rule:\n\t should still respond with access-' + + 'control headers (headers are only checked in preflight requests)', + done => { + const headers = { + Origin: allowedOrigin, + Test: 'test', + Expires: 86400, + }; + methodRequest({ method: 'GET', bucket, headers, headersResponse, code: 200 }, done); + } + ); }); [ @@ -546,15 +550,13 @@ describe('Cross Origin Resource Sharing requests', () => { ].forEach(elem => { describe(`cors configuration : ${elem.name}`, () => { const corsParams = generateCorsParams(bucket, defaultOptions); - corsParams.CORSConfiguration.CORSRules[0][elem.name] = - elem.testValue; + corsParams.CORSConfiguration.CORSRules[0][elem.name] = elem.testValue; beforeEach(done => { s3.putBucketCors(corsParams, done); }); - it(`should respond with ${elem.header} header ` + - 'if request matches CORS rule', done => { + it(`should respond with ${elem.header} header ` + 'if request matches CORS rule', done => { const headers = { Origin: allowedOrigin }; const headersResponse = { 'access-control-allow-origin': allowedOrigin, @@ -562,11 +564,8 @@ describe('Cross Origin Resource Sharing requests', () => { 'access-control-allow-credentials': 'true', vary, }; - headersResponse[elem.header] = - Array.isArray(elem.testValue) ? elem.testValue[0] : - elem.testValue; - methodRequest({ method: 'GET', bucket, headers, - headersResponse, code: 200 }, done); + headersResponse[elem.header] = Array.isArray(elem.testValue) ? 
elem.testValue[0] : elem.testValue; + methodRequest({ method: 'GET', bucket, headers, headersResponse, code: 200 }, done); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/corsPreflight.js b/tests/functional/aws-node-sdk/test/object/corsPreflight.js index 79ca135890..03ba1f56d9 100644 --- a/tests/functional/aws-node-sdk/test/object/corsPreflight.js +++ b/tests/functional/aws-node-sdk/test/object/corsPreflight.js @@ -9,14 +9,9 @@ const s3 = new S3(config); const bucket = 'bucketcorstester'; const methods = ['PUT', 'POST', 'DELETE', 'GET']; -const originsWithWildcards = [ - '*.allowedorigin.com', - 'http://*.allowedorigin.com', - 'http://www.allowedorigin.*', -]; +const originsWithWildcards = ['*.allowedorigin.com', 'http://*.allowedorigin.com', 'http://www.allowedorigin.*']; const allowedOrigin = 'http://www.allowedwebsite.com'; -const vary = 'Origin, Access-Control-Request-Headers, ' + - 'Access-Control-Request-Method'; +const vary = 'Origin, Access-Control-Request-Headers, ' + 'Access-Control-Request-Method'; // AWS seems to take a bit long so sometimes by the time we send the request // the bucket has not yet been created or the bucket has been deleted. @@ -33,21 +28,17 @@ describe('Preflight CORS request on non-existing bucket', () => { const headers = { Origin: allowedOrigin, }; - methodRequest({ method: 'GET', bucket, headers, code: 'NoSuchBucket', - headersResponse: null }, done); + methodRequest({ method: 'GET', bucket, headers, code: 'NoSuchBucket', headersResponse: null }, done); }); it('should return BadRequest for OPTIONS request without origin', done => { const headers = {}; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 'BadRequest', - headersResponse: null }, done); + methodRequest({ method: 'OPTIONS', bucket, headers, code: 'BadRequest', headersResponse: null }, done); }); - it('should return BadRequest for OPTIONS request without ' + - 'Access-Control-Request-Method', done => { + it('should return BadRequest for OPTIONS request without ' + 'Access-Control-Request-Method', done => { const headers = { Origin: allowedOrigin, }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 'BadRequest', - headersResponse: null }, done); + methodRequest({ method: 'OPTIONS', bucket, headers, code: 'BadRequest', headersResponse: null }, done); }); }); @@ -63,45 +54,34 @@ describe('Preflight CORS request with existing bucket', () => { }); }); - it('should allow GET on bucket without cors configuration even if ' + - 'Origin header sent', done => { + it('should allow GET on bucket without cors configuration even if ' + 'Origin header sent', done => { const headers = { Origin: allowedOrigin, }; - methodRequest({ method: 'GET', bucket, headers, code: 200, - headersResponse: null }, done); + methodRequest({ method: 'GET', bucket, headers, code: 200, headersResponse: null }, done); }); - it('should allow HEAD on bucket without cors configuration even if ' + - 'Origin header sent', done => { + it('should allow HEAD on bucket without cors configuration even if ' + 'Origin header sent', done => { const headers = { Origin: allowedOrigin, }; - methodRequest({ method: 'HEAD', bucket, headers, code: 200, - headersResponse: null }, done); + methodRequest({ method: 'HEAD', bucket, headers, code: 200, headersResponse: null }, done); }); - it('should respond AccessForbidden for OPTIONS request on bucket without ' + - 'CORSConfiguration', done => { + it('should respond AccessForbidden for OPTIONS request on bucket without ' + 'CORSConfiguration', done => { const headers 
= { - 'Origin': allowedOrigin, + Origin: allowedOrigin, 'Access-Control-Request-Method': 'GET', }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); + methodRequest({ method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, done); }); - describe('allow PUT, POST, DELETE, GET methods and allow only ' + - 'one origin', () => { + describe('allow PUT, POST, DELETE, GET methods and allow only ' + 'one origin', () => { const corsParams = { Bucket: bucket, CORSConfiguration: { CORSRules: [ { - AllowedMethods: [ - 'PUT', 'POST', 'DELETE', 'GET', - ], - AllowedOrigins: [ - allowedOrigin, - ], + AllowedMethods: ['PUT', 'POST', 'DELETE', 'GET'], + AllowedOrigins: [allowedOrigin], }, ], }, @@ -115,43 +95,46 @@ describe('Preflight CORS request with existing bucket', () => { }); methods.forEach(method => { - it('should respond with 200 and access control headers to ' + - 'OPTIONS request from allowed origin and allowed method ' + - `"${method}"`, done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': method, - }; - const headersResponse = { - 'access-control-allow-origin': allowedOrigin, - 'access-control-allow-methods': 'PUT, POST, DELETE, GET', - 'access-control-allow-credentials': 'true', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); - }); - it('should respond AccessForbidden to OPTIONS request from ' + - 'not allowed origin', done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - 'Access-Control-Request-Headers': 'Origin, Accept, ' + - 'Content-Type', - }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); + it( + 'should respond with 200 and access control headers to ' + + 'OPTIONS request from allowed origin and allowed method ' + + `"${method}"`, + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': method, + }; + const headersResponse = { + 'access-control-allow-origin': allowedOrigin, + 'access-control-allow-methods': 'PUT, POST, DELETE, GET', + 'access-control-allow-credentials': 'true', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); }); - it('should respond AccessForbidden to OPTIONS request with ' + - 'not allowed Access-Control-Request-Headers', done => { + it('should respond AccessForbidden to OPTIONS request from ' + 'not allowed origin', done => { const headers = { - 'Origin': 'http://www.forbiddenwebsite.com', + Origin: allowedOrigin, 'Access-Control-Request-Method': 'GET', + 'Access-Control-Request-Headers': 'Origin, Accept, ' + 'Content-Type', }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); + methodRequest({ method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, done); }); + it( + 'should respond AccessForbidden to OPTIONS request with ' + 'not allowed Access-Control-Request-Headers', + done => { + const headers = { + Origin: 'http://www.forbiddenwebsite.com', + 'Access-Control-Request-Method': 'GET', + }; + methodRequest( + { method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, + done + ); + } + ); }); describe('CORS allows method GET and allows one origin', () => { @@ -160,12 +143,8 @@ describe('Preflight CORS request with existing bucket', () => { 
CORSConfiguration: { CORSRules: [ { - AllowedMethods: [ - 'GET', - ], - AllowedOrigins: [ - allowedOrigin, - ], + AllowedMethods: ['GET'], + AllowedOrigins: [allowedOrigin], }, ], }, @@ -178,52 +157,66 @@ describe('Preflight CORS request with existing bucket', () => { s3.deleteBucketCors({ Bucket: bucket }, done); }); - it('should respond with 200 and access control headers to OPTIONS ' + - 'request from allowed origin and method "GET"', done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - }; - const headersResponse = { - 'access-control-allow-origin': allowedOrigin, - 'access-control-allow-methods': 'GET', - 'access-control-allow-credentials': 'true', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); - it('should respond AccessForbidden to OPTIONS request with allowed ' + - 'method but not from allowed origin', done => { - const headers = { - 'Origin': 'http://www.forbiddenwebsite.com', - 'Access-Control-Request-Method': 'GET', - }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); - }); - it('should respond AccessForbidden to OPTIONS request from allowed ' + - 'origin and method but with not allowed Access-Control-Request-Headers', - done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - 'Access-Control-Request-Headers': 'Origin, Accept, ' + - 'Content-Type', - }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); - }); - ['PUT', 'POST', 'DELETE'].forEach(method => { - it('should respond AccessForbidden to OPTIONS request from ' + - `allowed origin but not allowed method "${method}"`, done => { + it( + 'should respond with 200 and access control headers to OPTIONS ' + + 'request from allowed origin and method "GET"', + done => { const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': method, + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); - }); + const headersResponse = { + 'access-control-allow-origin': allowedOrigin, + 'access-control-allow-methods': 'GET', + 'access-control-allow-credentials': 'true', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); + it( + 'should respond AccessForbidden to OPTIONS request with allowed ' + 'method but not from allowed origin', + done => { + const headers = { + Origin: 'http://www.forbiddenwebsite.com', + 'Access-Control-Request-Method': 'GET', + }; + methodRequest( + { method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, + done + ); + } + ); + it( + 'should respond AccessForbidden to OPTIONS request from allowed ' + + 'origin and method but with not allowed Access-Control-Request-Headers', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + 'Access-Control-Request-Headers': 'Origin, Accept, ' + 'Content-Type', + }; + methodRequest( + { method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, + done + ); + } + ); + ['PUT', 'POST', 'DELETE'].forEach(method => { + it( + 'should respond AccessForbidden to OPTIONS request from ' + + `allowed origin but not allowed method "${method}"`, + done => { + const headers = { + Origin: allowedOrigin, + 
'Access-Control-Request-Method': method, + }; + methodRequest( + { method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, + done + ); + } + ); }); }); @@ -239,8 +232,7 @@ describe('Preflight CORS request with existing bucket', () => { ], }, }; - describe(`CORS allows method "${allowedMethod}" and allows all origins`, - () => { + describe(`CORS allows method "${allowedMethod}" and allows all origins`, () => { beforeEach(done => { s3.putBucketCors(corsParams, done); }); @@ -249,45 +241,56 @@ describe('Preflight CORS request with existing bucket', () => { s3.deleteBucketCors({ Bucket: bucket }, done); }); - it('should respond with 200 and access control headers to ' + - `OPTIONS request from allowed origin and method "${allowedMethod}"`, - done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': allowedMethod, - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': allowedMethod, - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); - it('should respond AccessForbidden to OPTIONS request from ' + - 'allowed origin and method but with not allowed Access-Control-' + - 'Request-Headers', done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': allowedMethod, - 'Access-Control-Request-Headers': 'Origin, Accept, ' + - 'Content-Type', - }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); - }); - methods.filter(method => method !== allowedMethod) - .forEach(method => { - it('should respond AccessForbidden to OPTIONS request from ' + - `allowed origin but not allowed method "${method}"`, done => { + it( + 'should respond with 200 and access control headers to ' + + `OPTIONS request from allowed origin and method "${allowedMethod}"`, + done => { const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': method, + Origin: allowedOrigin, + 'Access-Control-Request-Method': allowedMethod, }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': allowedMethod, + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); + it( + 'should respond AccessForbidden to OPTIONS request from ' + + 'allowed origin and method but with not allowed Access-Control-' + + 'Request-Headers', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': allowedMethod, + 'Access-Control-Request-Headers': 'Origin, Accept, ' + 'Content-Type', + }; + methodRequest( + { method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, + done + ); + } + ); + methods + .filter(method => method !== allowedMethod) + .forEach(method => { + it( + 'should respond AccessForbidden to OPTIONS request from ' + + `allowed origin but not allowed method "${method}"`, + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': method, + }; + methodRequest( + { method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, + done + ); + } + ); }); - }); }); }); @@ -315,54 +318,61 @@ describe('Preflight CORS request with existing bucket', () => { s3.deleteBucketCors({ Bucket: bucket }, done); }); - [originWithoutWildcard, originReplaceWildcard] - 
.forEach(acceptableOrigin => { - it('should return 200 and CORS header to OPTIONS request ' + - `from allowed method and origin "${acceptableOrigin}"`, - done => { - const headers = { - 'Origin': acceptableOrigin, - 'Access-Control-Request-Method': 'GET', - }; - const headersResponse = { - 'access-control-allow-origin': acceptableOrigin, - 'access-control-allow-methods': 'GET', - 'access-control-allow-credentials': 'true', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 200, headersResponse }, done); - }); + [originWithoutWildcard, originReplaceWildcard].forEach(acceptableOrigin => { + it( + 'should return 200 and CORS header to OPTIONS request ' + + `from allowed method and origin "${acceptableOrigin}"`, + done => { + const headers = { + Origin: acceptableOrigin, + 'Access-Control-Request-Method': 'GET', + }; + const headersResponse = { + 'access-control-allow-origin': acceptableOrigin, + 'access-control-allow-methods': 'GET', + 'access-control-allow-credentials': 'true', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); }); if (!origin.endsWith('*')) { - it('should respond AccessForbidden to OPTIONS request from ' + - `allowed method and origin "${originWithoutWildcard}test"`, - done => { - const headers = { - 'Origin': `${originWithoutWildcard}test`, - 'Access-Control-Request-Method': 'GET', - }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); - }); + it( + 'should respond AccessForbidden to OPTIONS request from ' + + `allowed method and origin "${originWithoutWildcard}test"`, + done => { + const headers = { + Origin: `${originWithoutWildcard}test`, + 'Access-Control-Request-Method': 'GET', + }; + methodRequest( + { method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, + done + ); + } + ); } if (!origin.startsWith('*')) { - it('should respond AccessForbidden to OPTIONS request from ' + - `allowed method and origin "test${originWithoutWildcard}"`, - done => { - const headers = { - 'Origin': `test${originWithoutWildcard}`, - 'Access-Control-Request-Method': 'GET', - }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); - }); + it( + 'should respond AccessForbidden to OPTIONS request from ' + + `allowed method and origin "test${originWithoutWildcard}"`, + done => { + const headers = { + Origin: `test${originWithoutWildcard}`, + 'Access-Control-Request-Method': 'GET', + }; + methodRequest( + { method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, + done + ); + } + ); } }); }); - describe('CORS response access-control-allow-origin header value', - () => { + describe('CORS response access-control-allow-origin header value', () => { const anotherOrigin = 'http://www.anotherorigin.com'; const originContainingWildcard = 'http://www.originwith*.com'; const corsParams = { @@ -370,21 +380,12 @@ describe('Preflight CORS request with existing bucket', () => { CORSConfiguration: { CORSRules: [ { - AllowedMethods: [ - 'GET', - ], - AllowedOrigins: [ - allowedOrigin, - originContainingWildcard, - ], + AllowedMethods: ['GET'], + AllowedOrigins: [allowedOrigin, originContainingWildcard], }, { - AllowedMethods: [ - 'GET', - ], - AllowedOrigins: [ - '*', - ], + AllowedMethods: ['GET'], + AllowedOrigins: ['*'], }, ], }, @@ -397,72 +398,71 @@ describe('Preflight CORS request with existing bucket', () => { s3.deleteBucketCors({ 
Bucket: bucket }, done); }); - it('if OPTIONS request matches rule with multiple origins, response ' + - 'access-control-request-origin header value should be request Origin ' + - '(not list of AllowedOrigins)', done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - }; - const headersResponse = { - 'access-control-allow-origin': allowedOrigin, - 'access-control-allow-methods': 'GET', - 'access-control-allow-credentials': 'true', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); - it('if OPTIONS request matches rule with origin containing wildcard, ' + - 'response access-control-request-origin header value should be ' + - 'request Origin (not value containing wildcard)', done => { - const requestOrigin = originContainingWildcard.replace('*', 'test'); - const headers = { - 'Origin': requestOrigin, - 'Access-Control-Request-Method': 'GET', - }; - const headersResponse = { - 'access-control-allow-origin': requestOrigin, - 'access-control-allow-methods': 'GET', - 'access-control-allow-credentials': 'true', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); - it('if OPTIONS request matches rule that allows all origins, ' + - 'e.g. "*", response access-control-request-origin header should ' + - 'return "*"', done => { - const headers = { - 'Origin': anotherOrigin, - 'Access-Control-Request-Method': 'GET', - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); + it( + 'if OPTIONS request matches rule with multiple origins, response ' + + 'access-control-request-origin header value should be request Origin ' + + '(not list of AllowedOrigins)', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + }; + const headersResponse = { + 'access-control-allow-origin': allowedOrigin, + 'access-control-allow-methods': 'GET', + 'access-control-allow-credentials': 'true', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); + it( + 'if OPTIONS request matches rule with origin containing wildcard, ' + + 'response access-control-request-origin header value should be ' + + 'request Origin (not value containing wildcard)', + done => { + const requestOrigin = originContainingWildcard.replace('*', 'test'); + const headers = { + Origin: requestOrigin, + 'Access-Control-Request-Method': 'GET', + }; + const headersResponse = { + 'access-control-allow-origin': requestOrigin, + 'access-control-allow-methods': 'GET', + 'access-control-allow-credentials': 'true', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); + it( + 'if OPTIONS request matches rule that allows all origins, ' + + 'e.g. 
"*", response access-control-request-origin header should ' + + 'return "*"', + done => { + const headers = { + Origin: anotherOrigin, + 'Access-Control-Request-Method': 'GET', + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': 'GET', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); }); - describe('CORS allows method GET, allows all origins and allows ' + - 'header Content-Type', () => { + describe('CORS allows method GET, allows all origins and allows ' + 'header Content-Type', () => { const corsParams = { Bucket: bucket, CORSConfiguration: { CORSRules: [ { - AllowedMethods: [ - 'GET', - ], - AllowedOrigins: [ - '*', - ], - AllowedHeaders: [ - 'content-type', - ], + AllowedMethods: ['GET'], + AllowedOrigins: ['*'], + AllowedHeaders: ['content-type'], }, ], }, @@ -475,80 +475,74 @@ describe('Preflight CORS request with existing bucket', () => { s3.deleteBucketCors({ Bucket: bucket }, done); }); - it('should respond with 200 and access control headers to OPTIONS ' + - 'request from allowed origin and method, even without request ' + - 'Access-Control-Request-Headers header value', done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); - it('should respond with 200 and access control headers to OPTIONS ' + - 'request from allowed origin and method with Access-Control-' + - 'Request-Headers \'Content-Type\'', done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - 'Access-Control-Request-Headers': 'content-type', - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - 'access-control-allow-headers': 'content-type', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); - it('should respond AccessForbidden to OPTIONS request from allowed ' + - 'origin and method but not allowed Access-Control-Request-Headers ' + - 'in addition to Content-Type', - done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - 'Access-Control-Request-Headers': 'Origin, Accept, ' + - 'content-type', - }; - methodRequest({ method: 'OPTIONS', bucket, headers, - code: 'AccessForbidden', headersResponse: null }, done); - }); + it( + 'should respond with 200 and access control headers to OPTIONS ' + + 'request from allowed origin and method, even without request ' + + 'Access-Control-Request-Headers header value', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': 'GET', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); + it( + 'should respond with 200 and access control headers to OPTIONS ' + + 'request from allowed origin and method with Access-Control-' + + "Request-Headers 'Content-Type'", + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + 'Access-Control-Request-Headers': 'content-type', + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 
'access-control-allow-methods': 'GET', + 'access-control-allow-headers': 'content-type', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); + it( + 'should respond AccessForbidden to OPTIONS request from allowed ' + + 'origin and method but not allowed Access-Control-Request-Headers ' + + 'in addition to Content-Type', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + 'Access-Control-Request-Headers': 'Origin, Accept, ' + 'content-type', + }; + methodRequest( + { method: 'OPTIONS', bucket, headers, code: 'AccessForbidden', headersResponse: null }, + done + ); + } + ); }); - describe('CORS response Access-Control-Allow-Headers header value', - () => { + describe('CORS response Access-Control-Allow-Headers header value', () => { const corsParams = { Bucket: bucket, CORSConfiguration: { CORSRules: [ { - AllowedMethods: [ - 'GET', - ], - AllowedOrigins: [ - '*', - ], - AllowedHeaders: [ - 'Content-Type', 'amz-*', 'Expires', - ], + AllowedMethods: ['GET'], + AllowedOrigins: ['*'], + AllowedHeaders: ['Content-Type', 'amz-*', 'Expires'], }, { - AllowedMethods: [ - 'GET', - ], - AllowedOrigins: [ - '*', - ], - AllowedHeaders: [ - '*', - ], + AllowedMethods: ['GET'], + AllowedOrigins: ['*'], + AllowedHeaders: ['*'], }, ], }, @@ -561,82 +555,86 @@ describe('Preflight CORS request with existing bucket', () => { s3.deleteBucketCors({ Bucket: bucket }, done); }); - it('should return request access-control-request-headers value, ' + - 'not list of AllowedHeaders from rule or corresponding AllowedHeader ' + - 'value containing wildcard', - done => { - const requestHeaderValue = 'amz-meta-header-test, content-type'; - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - 'Access-Control-Request-Headers': requestHeaderValue, - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - 'access-control-allow-headers': requestHeaderValue, - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); - it('should return lowercase version of request Access-Control-' + - 'Request-Method header value if it contains any upper-case values', - done => { - const requestHeaderValue = 'Content-Type'; - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - 'Access-Control-Request-Headers': requestHeaderValue, - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - 'access-control-allow-headers': - requestHeaderValue.toLowerCase(), - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); - it('should remove empty comma-separated values derived from request ' + - 'Access-Control-Request-Method header and separate values with ' + - 'spaces when responding with Access-Control-Allow-Headers header', - done => { - const requestHeaderValue = 'content-type,,expires'; - const expectedValue = 'content-type, expires'; - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - 'Access-Control-Request-Headers': requestHeaderValue, - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - 'access-control-allow-headers': expectedValue, - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); - 
it('should return request Access-Control-Request-Headers value ' + - 'even if rule allows all headers (e.g. "*"), unlike access-control-' + - 'allow-origin value', done => { - const requestHeaderValue = 'puppies'; - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - 'Access-Control-Request-Headers': requestHeaderValue, - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - 'access-control-allow-headers': requestHeaderValue, - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); + it( + 'should return request access-control-request-headers value, ' + + 'not list of AllowedHeaders from rule or corresponding AllowedHeader ' + + 'value containing wildcard', + done => { + const requestHeaderValue = 'amz-meta-header-test, content-type'; + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + 'Access-Control-Request-Headers': requestHeaderValue, + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': 'GET', + 'access-control-allow-headers': requestHeaderValue, + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); + it( + 'should return lowercase version of request Access-Control-' + + 'Request-Method header value if it contains any upper-case values', + done => { + const requestHeaderValue = 'Content-Type'; + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + 'Access-Control-Request-Headers': requestHeaderValue, + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': 'GET', + 'access-control-allow-headers': requestHeaderValue.toLowerCase(), + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); + it( + 'should remove empty comma-separated values derived from request ' + + 'Access-Control-Request-Method header and separate values with ' + + 'spaces when responding with Access-Control-Allow-Headers header', + done => { + const requestHeaderValue = 'content-type,,expires'; + const expectedValue = 'content-type, expires'; + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + 'Access-Control-Request-Headers': requestHeaderValue, + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': 'GET', + 'access-control-allow-headers': expectedValue, + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); + it( + 'should return request Access-Control-Request-Headers value ' + + 'even if rule allows all headers (e.g. 
"*"), unlike access-control-' + + 'allow-origin value', + done => { + const requestHeaderValue = 'puppies'; + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + 'Access-Control-Request-Headers': requestHeaderValue, + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': 'GET', + 'access-control-allow-headers': requestHeaderValue, + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); }); describe('CORS and OPTIONS request with object keys', () => { @@ -645,12 +643,8 @@ describe('Preflight CORS request with existing bucket', () => { CORSConfiguration: { CORSRules: [ { - AllowedMethods: [ - 'GET', - ], - AllowedOrigins: [ - allowedOrigin, - ], + AllowedMethods: ['GET'], + AllowedOrigins: [allowedOrigin], }, ], }, @@ -676,38 +670,44 @@ describe('Preflight CORS request with existing bucket', () => { }); }); - it('should respond with 200 and access control headers to OPTIONS ' + - 'request from allowed origin, allowed method and existing object key', - done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - }; - const headersResponse = { - 'access-control-allow-origin': allowedOrigin, - 'access-control-allow-methods': 'GET', - 'access-control-allow-credentials': 'true', - vary, - }; - methodRequest({ method: 'OPTIONS', objectKey, bucket, headers, - code: 200, headersResponse }, done); - }); - it('should respond with 200 and access control headers to OPTIONS ' + - 'request from allowed origin, allowed method, even with non-existing ' + - 'object key', done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - }; - const headersResponse = { - 'access-control-allow-origin': allowedOrigin, - 'access-control-allow-methods': 'GET', - 'access-control-allow-credentials': 'true', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, objectKey: - 'anotherObjectKey', headers, code: 200, headersResponse }, done); - }); + it( + 'should respond with 200 and access control headers to OPTIONS ' + + 'request from allowed origin, allowed method and existing object key', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + }; + const headersResponse = { + 'access-control-allow-origin': allowedOrigin, + 'access-control-allow-methods': 'GET', + 'access-control-allow-credentials': 'true', + vary, + }; + methodRequest({ method: 'OPTIONS', objectKey, bucket, headers, code: 200, headersResponse }, done); + } + ); + it( + 'should respond with 200 and access control headers to OPTIONS ' + + 'request from allowed origin, allowed method, even with non-existing ' + + 'object key', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + }; + const headersResponse = { + 'access-control-allow-origin': allowedOrigin, + 'access-control-allow-methods': 'GET', + 'access-control-allow-credentials': 'true', + vary, + }; + methodRequest( + { method: 'OPTIONS', bucket, objectKey: 'anotherObjectKey', headers, code: 200, headersResponse }, + done + ); + } + ); }); describe('CORS and OPTIONS request', () => { @@ -730,37 +730,41 @@ describe('Preflight CORS request with existing bucket', () => { s3.deleteBucketCors({ Bucket: bucket }, done); }); - it('with fake auth credentials: should respond with 200 and access ' + - 'control headers even if request has fake auth credentials', done => { - const headers = { - 
'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - 'Authorization': 'AWS fakeKey:fakesignature', - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); + it( + 'with fake auth credentials: should respond with 200 and access ' + + 'control headers even if request has fake auth credentials', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + Authorization: 'AWS fakeKey:fakesignature', + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': 'GET', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); - it('with cookies: should send identical response as to request ' + - 'without cookies (200 and access control headers)', done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - 'Cookie': 'testcookie=1', - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); + it( + 'with cookies: should send identical response as to request ' + + 'without cookies (200 and access control headers)', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + Cookie: 'testcookie=1', + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': 'GET', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); }); describe('CORS exposes headers', () => { @@ -769,17 +773,9 @@ describe('Preflight CORS request with existing bucket', () => { CORSConfiguration: { CORSRules: [ { - AllowedMethods: [ - 'GET', - ], - AllowedOrigins: [ - '*', - ], - ExposeHeaders: [ - 'x-amz-server-side-encryption', - 'x-amz-request-id', - 'x-amz-id-2', - ], + AllowedMethods: ['GET'], + AllowedOrigins: ['*'], + ExposeHeaders: ['x-amz-server-side-encryption', 'x-amz-request-id', 'x-amz-id-2'], }, ], }, @@ -792,23 +788,23 @@ describe('Preflight CORS request with existing bucket', () => { s3.deleteBucketCors({ Bucket: bucket }, done); }); - it('if OPTIONS request matches CORS rule with ExposeHeader\'s, ' + - 'response should include Access-Control-Expose-Headers header', - done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - 'access-control-expose-headers': - 'x-amz-server-side-encryption, x-amz-request-id, x-amz-id-2', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); + it( + "if OPTIONS request matches CORS rule with ExposeHeader's, " + + 'response should include Access-Control-Expose-Headers header', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': 'GET', + 'access-control-expose-headers': 'x-amz-server-side-encryption, x-amz-request-id, x-amz-id-2', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); }); describe('CORS max age 
seconds', () => { @@ -817,12 +813,8 @@ describe('Preflight CORS request with existing bucket', () => { CORSConfiguration: { CORSRules: [ { - AllowedMethods: [ - 'GET', - ], - AllowedOrigins: [ - '*', - ], + AllowedMethods: ['GET'], + AllowedOrigins: ['*'], MaxAgeSeconds: 86400, }, ], @@ -836,20 +828,22 @@ describe('Preflight CORS request with existing bucket', () => { s3.deleteBucketCors({ Bucket: bucket }, done); }); - it('if OPTIONS request matches CORS rule with max age seconds, ' + - 'response should include Access-Control-Max-Age header', done => { - const headers = { - 'Origin': allowedOrigin, - 'Access-Control-Request-Method': 'GET', - }; - const headersResponse = { - 'access-control-allow-origin': '*', - 'access-control-allow-methods': 'GET', - 'access-control-max-age': '86400', - vary, - }; - methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, - headersResponse }, done); - }); + it( + 'if OPTIONS request matches CORS rule with max age seconds, ' + + 'response should include Access-Control-Max-Age header', + done => { + const headers = { + Origin: allowedOrigin, + 'Access-Control-Request-Method': 'GET', + }; + const headersResponse = { + 'access-control-allow-origin': '*', + 'access-control-allow-methods': 'GET', + 'access-control-max-age': '86400', + vary, + }; + methodRequest({ method: 'OPTIONS', bucket, headers, code: 200, headersResponse }, done); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/deleteMpu.js b/tests/functional/aws-node-sdk/test/object/deleteMpu.js index 60978a3001..f3194ebd0a 100644 --- a/tests/functional/aws-node-sdk/test/object/deleteMpu.js +++ b/tests/functional/aws-node-sdk/test/object/deleteMpu.js @@ -12,8 +12,8 @@ const westLocation = 'scality-us-west-1'; const eastLocation = 'us-east-1'; const confLocations = [ - { name: 'us-west-1', statusCode: 204, location: westLocation, describe }, - { name: 'us-east-1', statusCode: 404, location: eastLocation, describe }, + { name: 'us-west-1', statusCode: 204, location: westLocation, describe }, + { name: 'us-east-1', statusCode: 404, location: eastLocation, describe }, ]; describe('DELETE multipart', () => { @@ -22,91 +22,87 @@ describe('DELETE multipart', () => { const s3 = bucketUtil.s3; function _assertStatusCode(uploadId, statusCodeExpected, callback) { - const request = - s3.abortMultipartUpload({ Bucket: bucket, Key: key, - UploadId: uploadId }, err => { - const statusCode = - request.response.httpResponse.statusCode; - assert.strictEqual(statusCode, statusCodeExpected, - `Found unexpected statusCode ${statusCode}`); + const request = s3.abortMultipartUpload({ Bucket: bucket, Key: key, UploadId: uploadId }, err => { + const statusCode = request.response.httpResponse.statusCode; + assert.strictEqual(statusCode, statusCodeExpected, `Found unexpected statusCode ${statusCode}`); if (statusCode === 204) { - assert.strictEqual(err, null, - `Expected no err but found ${err}`); + assert.strictEqual(err, null, `Expected no err but found ${err}`); return callback(err); } return callback(); }); } - it('on bucket that does not exist: should return NoSuchBucket', - done => { + it('on bucket that does not exist: should return NoSuchBucket', done => { const uploadId = 'nonexistinguploadid'; - s3.abortMultipartUpload({ Bucket: bucket, Key: key, - UploadId: uploadId }, err => { - assert.notEqual(err, null, - 'Expected NoSuchBucket but found no err'); + s3.abortMultipartUpload({ Bucket: bucket, Key: key, UploadId: uploadId }, err => { + assert.notEqual(err, null, 'Expected NoSuchBucket but 
found no err'); assert.strictEqual(err.code, 'NoSuchBucket'); done(); }); }); confLocations.forEach(confLocation => { - confLocation.describe('on existing bucket with ' + - `${confLocation.name}`, - () => { + confLocation.describe('on existing bucket with ' + `${confLocation.name}`, () => { beforeEach(() => - s3.createBucket({ Bucket: bucket, - CreateBucketConfiguration: { - LocationConstraint: confLocation.location, - } }).promise() - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }) + s3 + .createBucket({ + Bucket: bucket, + CreateBucketConfiguration: { + LocationConstraint: confLocation.location, + }, + }) + .promise() + .catch(err => { + process.stdout.write(`Error in beforeEach: ${err}\n`); + throw err; + }) ); afterEach(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); - itSkipIfAWS(`should return ${confLocation.statusCode} if ` + - 'mpu does not exist with uploadId', - done => { - const uploadId = 'nonexistinguploadid'; - _assertStatusCode(uploadId, confLocation.statusCode, done); - }); + itSkipIfAWS( + `should return ${confLocation.statusCode} if ` + 'mpu does not exist with uploadId', + done => { + const uploadId = 'nonexistinguploadid'; + _assertStatusCode(uploadId, confLocation.statusCode, done); + } + ); - describe('if mpu exists with uploadId + at least one part', - () => { + describe('if mpu exists with uploadId + at least one part', () => { let uploadId; beforeEach(() => - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - }).promise() - .then(res => { - uploadId = res.UploadId; - return s3.uploadPart({ + s3 + .createMultipartUpload({ Bucket: bucket, Key: key, - PartNumber: 1, - UploadId: uploadId, - }); - }) + }) + .promise() + .then(res => { + uploadId = res.UploadId; + return s3.uploadPart({ + Bucket: bucket, + Key: key, + PartNumber: 1, + UploadId: uploadId, + }); + }) ); it('should return 204 for abortMultipartUpload', done => { - _assertStatusCode(uploadId, 204, - done); + _assertStatusCode(uploadId, 204, done); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/deleteObjTagging.js b/tests/functional/aws-node-sdk/test/object/deleteObjTagging.js index 6870de1a16..c215852f58 100644 --- a/tests/functional/aws-node-sdk/test/object/deleteObjTagging.js +++ b/tests/functional/aws-node-sdk/test/object/deleteObjTagging.js @@ -8,16 +8,18 @@ const bucketName = 'testdeletetaggingbucket'; const objectName = 'testtaggingobject'; const objectNameAcl = 'testtaggingobjectacl'; -const taggingConfig = { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }, - { - Key: 'key2', - Value: 'value2', - }, -] }; +const taggingConfig = { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + { + Key: 'key2', + Value: 'value2', + }, + ], +}; function _checkError(err, code, statusCode) { assert(err, 'Expected error but found none'); @@ -32,112 +34,124 @@ describe('DELETE object taggings', () => { const otherAccountBucketUtility = new BucketUtility('lisa', {}); const otherAccountS3 = otherAccountBucketUtility.s3; - beforeEach(done => s3.createBucket({ Bucket: bucketName }, err => { - if 
(err) { - return done(err); - } - return s3.putObject({ Bucket: bucketName, Key: objectName }, done); - })); + beforeEach(done => + s3.createBucket({ Bucket: bucketName }, err => { + if (err) { + return done(err); + } + return s3.putObject({ Bucket: bucketName, Key: objectName }, done); + }) + ); afterEach(() => { process.stdout.write('Emptying bucket'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return bucketUtil + .empty(bucketName) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteOne(bucketName); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); it('should delete tag set', done => { - s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - Tagging: taggingConfig, - }, err => { - assert.ifError(err, `putObjectTagging error: ${err}`); - s3.deleteObjectTagging({ Bucket: bucketName, Key: objectName }, - (err, data) => { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(Object.keys(data).length, 0); - done(); - }); - }); + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + Tagging: taggingConfig, + }, + err => { + assert.ifError(err, `putObjectTagging error: ${err}`); + s3.deleteObjectTagging({ Bucket: bucketName, Key: objectName }, (err, data) => { + assert.ifError(err, `Found unexpected err ${err}`); + assert.strictEqual(Object.keys(data).length, 0); + done(); + }); + } + ); }); it('should delete a non-existing tag set', done => { - s3.deleteObjectTagging({ Bucket: bucketName, Key: objectName }, - (err, data) => { + s3.deleteObjectTagging({ Bucket: bucketName, Key: objectName }, (err, data) => { assert.ifError(err, `Found unexpected err ${err}`); assert.strictEqual(Object.keys(data).length, 0); done(); }); }); - it('should return NoSuchKey deleting tag set to a non-existing object', - done => { - s3.deleteObjectTagging({ - Bucket: bucketName, - Key: 'nonexisting', - }, err => { - _checkError(err, 'NoSuchKey', 404); - done(); - }); + it('should return NoSuchKey deleting tag set to a non-existing object', done => { + s3.deleteObjectTagging( + { + Bucket: bucketName, + Key: 'nonexisting', + }, + err => { + _checkError(err, 'NoSuchKey', 404); + done(); + } + ); }); - it('should return 403 AccessDenied deleting tag set with another ' + - 'account', done => { - otherAccountS3.deleteObjectTagging({ Bucket: bucketName, Key: - objectName }, err => { + it('should return 403 AccessDenied deleting tag set with another ' + 'account', done => { + otherAccountS3.deleteObjectTagging({ Bucket: bucketName, Key: objectName }, err => { _checkError(err, 'AccessDenied', 403); done(); }); }); - it('should return 403 AccessDenied deleting tag set with a different ' + - 'account to an object with ACL "public-read-write"', - done => { - s3.putObjectAcl({ Bucket: bucketName, Key: objectName, - ACL: 'public-read-write' }, err => { - if (err) { - return done(err); - } - return otherAccountS3.deleteObjectTagging({ Bucket: bucketName, - Key: objectName }, err => { - _checkError(err, 'AccessDenied', 403); - done(); + it( + 'should return 403 AccessDenied deleting tag set with a different ' + + 'account to an object with ACL "public-read-write"', + done => { + s3.putObjectAcl({ Bucket: bucketName, Key: objectName, ACL: 'public-read-write' }, err => { + if (err) { + return done(err); + } + return 
otherAccountS3.deleteObjectTagging({ Bucket: bucketName, Key: objectName }, err => { + _checkError(err, 'AccessDenied', 403); + done(); + }); }); - }); - }); + } + ); - it('should return 403 AccessDenied deleting tag set to an object' + - ' in a bucket created with a different account', - done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => otherAccountS3.deleteObjectTagging({ Bucket: bucketName, - Key: objectNameAcl }, err => next(err)), - ], err => { - _checkError(err, 'AccessDenied', 403); - done(); - }); - }); + it( + 'should return 403 AccessDenied deleting tag set to an object' + + ' in a bucket created with a different account', + done => { + async.waterfall( + [ + next => s3.putBucketAcl({ Bucket: bucketName, ACL: 'public-read-write' }, err => next(err)), + next => otherAccountS3.putObject({ Bucket: bucketName, Key: objectNameAcl }, err => next(err)), + next => + otherAccountS3.deleteObjectTagging({ Bucket: bucketName, Key: objectNameAcl }, err => + next(err) + ), + ], + err => { + _checkError(err, 'AccessDenied', 403); + done(); + } + ); + } + ); - it('should delete tag set to an object in a bucket created with same ' + - 'account even though object put by other account', done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => s3.deleteObjectTagging({ Bucket: bucketName, - Key: objectNameAcl }, err => next(err)), - ], done); - }); + it( + 'should delete tag set to an object in a bucket created with same ' + + 'account even though object put by other account', + done => { + async.waterfall( + [ + next => s3.putBucketAcl({ Bucket: bucketName, ACL: 'public-read-write' }, err => next(err)), + next => otherAccountS3.putObject({ Bucket: bucketName, Key: objectNameAcl }, err => next(err)), + next => s3.deleteObjectTagging({ Bucket: bucketName, Key: objectNameAcl }, err => next(err)), + ], + done + ); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/deleteObject.js b/tests/functional/aws-node-sdk/test/object/deleteObject.js index 6a7bffa7fc..78192dd955 100644 --- a/tests/functional/aws-node-sdk/test/object/deleteObject.js +++ b/tests/functional/aws-node-sdk/test/object/deleteObject.js @@ -21,77 +21,81 @@ describe('DELETE object', () => { const bucketName = 'testdeletempu'; before(() => { process.stdout.write('creating bucket\n'); - return s3.createBucket({ Bucket: bucketName }).promise() - .then(() => { - process.stdout.write('initiating multipart upload\n'); - return s3.createMultipartUpload({ - Bucket: bucketName, - Key: objectName, - }).promise(); - }) - .then(res => { - process.stdout.write('uploading parts\n'); - uploadId = res.UploadId; - const uploads = []; - for (let i = 1; i <= 3; i++) { - uploads.push( - s3.uploadPart({ + return s3 + .createBucket({ Bucket: bucketName }) + .promise() + .then(() => { + process.stdout.write('initiating multipart upload\n'); + return s3 + .createMultipartUpload({ + Bucket: bucketName, + Key: objectName, + }) + .promise(); + }) + .then(res => { + process.stdout.write('uploading parts\n'); + uploadId = res.UploadId; + const uploads = []; + for (let i = 1; i <= 3; i++) { + uploads.push( + s3 + .uploadPart({ + Bucket: bucketName, + Key: objectName, + PartNumber: i, + 
Body: testfile, + UploadId: uploadId, + }) + .promise() + ); + } + return Promise.all(uploads); + }) + .catch(err => { + process.stdout.write(`Error with uploadPart ${err}\n`); + throw err; + }) + .then(res => { + process.stdout.write('about to complete multipart ' + 'upload\n'); + return s3 + .completeMultipartUpload({ Bucket: bucketName, Key: objectName, - PartNumber: i, - Body: testfile, UploadId: uploadId, - }).promise() - ); - } - return Promise.all(uploads); - }) - .catch(err => { - process.stdout.write(`Error with uploadPart ${err}\n`); - throw err; - }) - .then(res => { - process.stdout.write('about to complete multipart ' + - 'upload\n'); - return s3.completeMultipartUpload({ - Bucket: bucketName, - Key: objectName, - UploadId: uploadId, - MultipartUpload: { - Parts: [ - { ETag: res[0].ETag, PartNumber: 1 }, - { ETag: res[1].ETag, PartNumber: 2 }, - { ETag: res[2].ETag, PartNumber: 3 }, - ], - }, - }).promise(); - }) - .catch(err => { - process.stdout.write('completeMultipartUpload error: ' + - `${err}\n`); - throw err; - }); + MultipartUpload: { + Parts: [ + { ETag: res[0].ETag, PartNumber: 1 }, + { ETag: res[1].ETag, PartNumber: 2 }, + { ETag: res[2].ETag, PartNumber: 3 }, + ], + }, + }) + .promise(); + }) + .catch(err => { + process.stdout.write('completeMultipartUpload error: ' + `${err}\n`); + throw err; + }); }); after(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write('Error in after\n'); - throw err; - }); + return bucketUtil + .empty(bucketName) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucketName); + }) + .catch(err => { + process.stdout.write('Error in after\n'); + throw err; + }); }); - it('should delete a object uploaded in parts successfully', - done => { - s3.deleteObject({ Bucket: bucketName, Key: objectName }, - err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + it('should delete a object uploaded in parts successfully', done => { + s3.deleteObject({ Bucket: bucketName, Key: objectName }, err => { + assert.strictEqual(err, null, `Expected success, got error ${JSON.stringify(err)}`); done(); }); }); @@ -104,131 +108,157 @@ describe('DELETE object', () => { const retainDate = moment().add(10, 'days').toISOString(); before(() => { process.stdout.write('creating bucket\n'); - return s3.createBucket({ - Bucket: bucketName, - ObjectLockEnabledForBucket: true, - }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket ${err}\n`); - throw err; - }) - .then(() => { - process.stdout.write('putting object\n'); - return s3.putObject({ + return s3 + .createBucket({ Bucket: bucketName, - Key: objectName, - }).promise(); - }) - .catch(err => { - process.stdout.write('Error putting object'); - throw err; - }) - .then(res => { - versionIdOne = res.VersionId; - process.stdout.write('putting object retention\n'); - return s3.putObjectRetention({ - Bucket: bucketName, - Key: objectName, - Retention: { - Mode: 'GOVERNANCE', - RetainUntilDate: retainDate, - }, - }).promise(); - }) - .catch(err => { - process.stdout.write('Err putting object retention\n'); - throw err; - }) - .then(() => { - process.stdout.write('putting object\n'); - return s3.putObject({ - Bucket: bucketName, - Key: objectNameTwo, - }).promise(); - }) - .catch(err => { - process.stdout.write(('Err putting second 
object\n')); - throw err; - }) - .then(res => { - versionIdTwo = res.VersionId; - process.stdout.write('putting object legal hold\n'); - return s3.putObjectLegalHold({ - Bucket: bucketName, - Key: objectNameTwo, - LegalHold: { - Status: 'ON', - }, - }).promise(); - }) - .catch(err => { - process.stdout.write('Err putting object legal hold\n'); - throw err; - }); + ObjectLockEnabledForBucket: true, + }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket ${err}\n`); + throw err; + }) + .then(() => { + process.stdout.write('putting object\n'); + return s3 + .putObject({ + Bucket: bucketName, + Key: objectName, + }) + .promise(); + }) + .catch(err => { + process.stdout.write('Error putting object'); + throw err; + }) + .then(res => { + versionIdOne = res.VersionId; + process.stdout.write('putting object retention\n'); + return s3 + .putObjectRetention({ + Bucket: bucketName, + Key: objectName, + Retention: { + Mode: 'GOVERNANCE', + RetainUntilDate: retainDate, + }, + }) + .promise(); + }) + .catch(err => { + process.stdout.write('Err putting object retention\n'); + throw err; + }) + .then(() => { + process.stdout.write('putting object\n'); + return s3 + .putObject({ + Bucket: bucketName, + Key: objectNameTwo, + }) + .promise(); + }) + .catch(err => { + process.stdout.write('Err putting second object\n'); + throw err; + }) + .then(res => { + versionIdTwo = res.VersionId; + process.stdout.write('putting object legal hold\n'); + return s3 + .putObjectLegalHold({ + Bucket: bucketName, + Key: objectNameTwo, + LegalHold: { + Status: 'ON', + }, + }) + .promise(); + }) + .catch(err => { + process.stdout.write('Err putting object legal hold\n'); + throw err; + }); }); after(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write('Error in after\n'); - throw err; - }); + return bucketUtil + .empty(bucketName) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucketName); + }) + .catch(err => { + process.stdout.write('Error in after\n'); + throw err; + }); }); it('should put delete marker if no version id specified', done => { - s3.deleteObject({ - Bucket: bucketName, - Key: objectName, - }, err => { - assert.ifError(err); - done(); - }); + s3.deleteObject( + { + Bucket: bucketName, + Key: objectName, + }, + err => { + assert.ifError(err); + done(); + } + ); }); - it('should not delete object version locked with object ' + - 'retention', done => { - s3.deleteObject({ - Bucket: bucketName, - Key: objectName, - VersionId: versionIdOne, - }, err => { - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); + it('should not delete object version locked with object ' + 'retention', done => { + s3.deleteObject( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionIdOne, + }, + err => { + assert.strictEqual(err.code, 'AccessDenied'); + done(); + } + ); }); - it('should delete locked object version with GOVERNANCE ' + - 'retention mode and correct header', done => { - s3.deleteObject({ - Bucket: bucketName, - Key: objectName, - VersionId: versionIdOne, - BypassGovernanceRetention: true, - }, err => { - assert.ifError(err); - done(); - }); + it('should delete locked object version with GOVERNANCE ' + 'retention mode and correct header', done => { + s3.deleteObject( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionIdOne, + 
BypassGovernanceRetention: true, + }, + err => { + assert.ifError(err); + done(); + } + ); }); it('should not delete object locked with legal hold', done => { - s3.deleteObject({ - Bucket: bucketName, - Key: objectNameTwo, - VersionId: versionIdTwo, - }, err => { - assert.strictEqual(err.code, 'AccessDenied'); - changeObjectLock( - [{ - bucket: bucketName, - key: objectNameTwo, - versionId: versionIdTwo, - }], '', done); - }); + s3.deleteObject( + { + Bucket: bucketName, + Key: objectNameTwo, + VersionId: versionIdTwo, + }, + err => { + assert.strictEqual(err.code, 'AccessDenied'); + changeObjectLock( + [ + { + bucket: bucketName, + key: objectNameTwo, + versionId: versionIdTwo, + }, + ], + '', + done + ); + } + ); }); }); @@ -238,28 +268,32 @@ describe('DELETE object', () => { let versionId; before(() => { process.stdout.write('creating bucket\n'); - return s3.createBucket({ - Bucket: bucketName, - ObjectLockEnabledForBucket: true, - }).promise() + return s3 + .createBucket({ + Bucket: bucketName, + ObjectLockEnabledForBucket: true, + }) + .promise() .catch(err => { process.stdout.write(`Error creating bucket ${err}\n`); throw err; }) .then(() => { process.stdout.write('putting object lock configuration\n'); - return s3.putObjectLockConfiguration({ - Bucket: bucketName, - ObjectLockConfiguration: { - ObjectLockEnabled: 'Enabled', - Rule: { - DefaultRetention: { - Mode: 'GOVERNANCE', - Days: 1, + return s3 + .putObjectLockConfiguration({ + Bucket: bucketName, + ObjectLockConfiguration: { + ObjectLockEnabled: 'Enabled', + Rule: { + DefaultRetention: { + Mode: 'GOVERNANCE', + Days: 1, + }, }, }, - }, - }).promise(); + }) + .promise(); }) .catch(err => { process.stdout.write('Error putting object lock configuration\n'); @@ -267,10 +301,12 @@ describe('DELETE object', () => { }) .then(() => { process.stdout.write('putting object\n'); - return s3.putObject({ - Bucket: bucketName, - Key: objectName, - }).promise(); + return s3 + .putObject({ + Bucket: bucketName, + Key: objectName, + }) + .promise(); }) .catch(err => { process.stdout.write('Error putting object'); @@ -279,13 +315,15 @@ describe('DELETE object', () => { .then(res => { versionId = res.VersionId; process.stdout.write('putting object legal hold\n'); - return s3.putObjectLegalHold({ - Bucket: bucketName, - Key: objectName, - LegalHold: { - Status: 'ON', - }, - }).promise(); + return s3 + .putObjectLegalHold({ + Bucket: bucketName, + Key: objectName, + LegalHold: { + Status: 'ON', + }, + }) + .promise(); }) .catch(err => { process.stdout.write('Err putting object legal hold\n'); @@ -295,7 +333,8 @@ describe('DELETE object', () => { after(() => { process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucketName) + return bucketUtil + .empty(bucketName) .then(() => { process.stdout.write('Deleting bucket\n'); return bucketUtil.deleteOne(bucketName); @@ -306,23 +345,33 @@ describe('DELETE object', () => { }); }); - it('should not delete locked object version with GOVERNANCE ' + - 'retention mode and bypass header when object is legal-hold enabled', done => - s3.deleteObject({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - BypassGovernanceRetention: true, - }, err => { - assert.strictEqual(err.code, 'AccessDenied'); - changeObjectLock( - [{ - bucket: bucketName, - key: objectName, - versionId, - }], '', done); - } - )); + it( + 'should not delete locked object version with GOVERNANCE ' + + 'retention mode and bypass header when object is legal-hold enabled', + done => + s3.deleteObject( + { + Bucket: 
bucketName, + Key: objectName, + VersionId: versionId, + BypassGovernanceRetention: true, + }, + err => { + assert.strictEqual(err.code, 'AccessDenied'); + changeObjectLock( + [ + { + bucket: bucketName, + key: objectName, + versionId, + }, + ], + '', + done + ); + } + ) + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js b/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js index 0f5cd65342..772b6988fd 100644 --- a/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js +++ b/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js @@ -28,15 +28,16 @@ const testCases = [ }, ]; -function s3NoOp(_, cb) { cb(); } +function s3NoOp(_, cb) { + cb(); +} function getSSEConfig(s3, Bucket, Key, cb) { return s3.headObject({ Bucket, Key }, (err, resp) => { if (err) { return cb(err); } - return cb(null, - JSON.parse(JSON.stringify({ algo: resp.ServerSideEncryption, masterKeyId: resp.SSEKMSKeyId }))); + return cb(null, JSON.parse(JSON.stringify({ algo: resp.ServerSideEncryption, masterKeyId: resp.SSEKMSKeyId }))); }); } @@ -60,9 +61,7 @@ function createExpected(sseConfig, kmsKeyId) { } if (sseConfig.masterKeyId) { - expected.masterKeyId = config.kmsHideScalityArn - ? getKeyIdFromArn(kmsKeyId) - : kmsKeyId; + expected.masterKeyId = config.kmsHideScalityArn ? getKeyIdFromArn(kmsKeyId) : kmsKeyId; } return expected; } @@ -79,8 +78,7 @@ function hydrateSSEConfig({ algo: SSEAlgorithm, masterKeyId: KMSMasterKeyID }) { }, }, ], - } - ) + }) ); } @@ -95,15 +93,12 @@ describe('per object encryption headers', () => { let kmsKeyId; before(done => { - const bucket = new BucketInfo('enc-bucket-test', 'OwnerId', - 'OwnerDisplayName', new Date().toJSON()); - kms.createBucketKey(bucket, log, - (err, { masterKeyArn: keyId }) => { - assert.ifError(err); - kmsKeyId = keyId; - done(); - } - ); + const bucket = new BucketInfo('enc-bucket-test', 'OwnerId', 'OwnerDisplayName', new Date().toJSON()); + kms.createBucketKey(bucket, log, (err, { masterKeyArn: keyId }) => { + assert.ifError(err); + kmsKeyId = keyId; + done(); + }); }); beforeEach(() => { @@ -113,7 +108,9 @@ describe('per object encryption headers', () => { object2 = `enc-object-2-${uuid.v4()}`; bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() + return s3 + .createBucket({ Bucket: bucket }) + .promise() .then(() => s3.createBucket({ Bucket: bucket2 }).promise()) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); @@ -132,23 +129,18 @@ describe('per object encryption headers', () => { it('should put an encrypted object in a unencrypted bucket', done => putEncryptedObject(s3, bucket, object, target, kmsKeyId, error => { assert.ifError(error); - return getSSEConfig( - s3, - bucket, - object, - (error, sseConfig) => { - assert.ifError(error); - const expected = createExpected(target, kmsKeyId); - // We differ from aws behavior and always return a - // masterKeyId even when not explicitly configured. - if (expected.algo === 'aws:kms' && !expected.masterKeyId) { - // eslint-disable-next-line no-param-reassign - delete sseConfig.masterKeyId; - } - assert.deepStrictEqual(sseConfig, expected); - done(); + return getSSEConfig(s3, bucket, object, (error, sseConfig) => { + assert.ifError(error); + const expected = createExpected(target, kmsKeyId); + // We differ from aws behavior and always return a + // masterKeyId even when not explicitly configured. 
+ if (expected.algo === 'aws:kms' && !expected.masterKeyId) { + // eslint-disable-next-line no-param-reassign + delete sseConfig.masterKeyId; } - ); + assert.deepStrictEqual(sseConfig, expected); + done(); + }); })); it('should put two encrypted objects in a unencrypted bucket, reusing the generated config', done => @@ -176,31 +168,28 @@ describe('per object encryption headers', () => { } )); - testCases - .forEach(existing => { + testCases.forEach(existing => { const hasKey = target.masterKeyId ? 'a' : 'no'; const { algo } = target; - it('should override bucket encryption settings with ' - + `algo ${algo || 'none'} with ${hasKey} key id`, done => { - const _existing = Object.assign({}, existing); - if (existing.masterKeyId) { - _existing.masterKeyId = kmsKeyId; - } - const params = { - Bucket: bucket, - ServerSideEncryptionConfiguration: hydrateSSEConfig(_existing), - }; - // no op putBucketNotification for the unencrypted case - const s3Op = existing.algo ? (...args) => s3.putBucketEncryption(...args) : s3NoOp; - s3Op(params, error => { - assert.ifError(error); - return putEncryptedObject(s3, bucket, object, target, kmsKeyId, error => { + it( + 'should override bucket encryption settings with ' + + `algo ${algo || 'none'} with ${hasKey} key id`, + done => { + const _existing = Object.assign({}, existing); + if (existing.masterKeyId) { + _existing.masterKeyId = kmsKeyId; + } + const params = { + Bucket: bucket, + ServerSideEncryptionConfiguration: hydrateSSEConfig(_existing), + }; + // no op putBucketNotification for the unencrypted case + const s3Op = existing.algo ? (...args) => s3.putBucketEncryption(...args) : s3NoOp; + s3Op(params, error => { assert.ifError(error); - return getSSEConfig( - s3, - bucket, - object, - (error, sseConfig) => { + return putEncryptedObject(s3, bucket, object, target, kmsKeyId, error => { + assert.ifError(error); + return getSSEConfig(s3, bucket, object, (error, sseConfig) => { assert.ifError(error); let expected = createExpected(target, kmsKeyId); // In the null case the expected encryption config is @@ -216,21 +205,18 @@ describe('per object encryption headers', () => { } assert.deepStrictEqual(sseConfig, expected); done(); - } - ); + }); + }); }); - }); - }); + } + ); }); - testCases - .forEach(existing => it('should copy an object to an encrypted key overriding bucket settings', - done => { + testCases.forEach(existing => + it('should copy an object to an encrypted key overriding bucket settings', done => { const _existing = Object.assign({}, existing); if (existing.masterKeyId) { - _existing.masterKeyId = config.kmsHideScalityArn - ? getKeyIdFromArn(kmsKeyId) - : kmsKeyId; + _existing.masterKeyId = config.kmsHideScalityArn ? getKeyIdFromArn(kmsKeyId) : kmsKeyId; } const params = { Bucket: bucket2, @@ -255,32 +241,28 @@ describe('per object encryption headers', () => { } return s3.copyObject(copyParams, error => { assert.ifError(error); - return getSSEConfig( - s3, - bucket2, - object2, - (error, sseConfig) => { - assert.ifError(error); - let expected = createExpected(target, kmsKeyId); - // In the null case the expected encryption config is - // the buckets default policy - if (!target.algo) { - expected = _existing; - } - // We differ from aws behavior and always return a - // masterKeyId even when not explicitly configured. 
- if (expected.algo === 'aws:kms' && !expected.masterKeyId) { + return getSSEConfig(s3, bucket2, object2, (error, sseConfig) => { + assert.ifError(error); + let expected = createExpected(target, kmsKeyId); + // In the null case the expected encryption config is + // the buckets default policy + if (!target.algo) { + expected = _existing; + } + // We differ from aws behavior and always return a + // masterKeyId even when not explicitly configured. + if (expected.algo === 'aws:kms' && !expected.masterKeyId) { // eslint-disable-next-line no-param-reassign - delete sseConfig.masterKeyId; - } - assert.deepStrictEqual(sseConfig, expected); - done(); + delete sseConfig.masterKeyId; } - ); + assert.deepStrictEqual(sseConfig, expected); + done(); + }); }); }); }); - })); + }) + ); it('should init an encrypted MPU and put an encrypted part', done => { const params = { diff --git a/tests/functional/aws-node-sdk/test/object/get.js b/tests/functional/aws-node-sdk/test/object/get.js index ef116c2381..0193c4b5f4 100644 --- a/tests/functional/aws-node-sdk/test/object/get.js +++ b/tests/functional/aws-node-sdk/test/object/get.js @@ -27,8 +27,7 @@ const etag = `"${etagTrim}"`; const partSize = 1024 * 1024 * 5; // 5MB minumum required part size. function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } function checkError(err, code) { @@ -47,7 +46,7 @@ function dateFromNow(diff) { } function dateConvert(d) { - return (new Date(d)).toISOString(); + return new Date(d).toISOString(); } describe('GET object', () => { @@ -56,137 +55,174 @@ describe('GET object', () => { let s3; function requestGet(fields, cb) { - s3.getObject(Object.assign({ - Bucket: bucketName, - Key: objectName, - }, fields), cb); + s3.getObject( + Object.assign( + { + Bucket: bucketName, + Key: objectName, + }, + fields + ), + cb + ); } function checkGetObjectPart(key, partNumber, len, body, cb) { - s3.getObject({ - Bucket: bucketName, - Key: key, - PartNumber: partNumber, - }, (err, data) => { - checkNoError(err); - checkIntegerHeader(data.ContentLength, len); - const md5Hash = crypto.createHash('md5'); - const md5HashExpected = crypto.createHash('md5'); - assert.strictEqual( - md5Hash.update(data.Body).digest('hex'), - md5HashExpected.update(body).digest('hex') - ); - return cb(); - }); + s3.getObject( + { + Bucket: bucketName, + Key: key, + PartNumber: partNumber, + }, + (err, data) => { + checkNoError(err); + checkIntegerHeader(data.ContentLength, len); + const md5Hash = crypto.createHash('md5'); + const md5HashExpected = crypto.createHash('md5'); + assert.strictEqual( + md5Hash.update(data.Body).digest('hex'), + md5HashExpected.update(body).digest('hex') + ); + return cb(); + } + ); } // Upload parts with the given partNumbers array and complete MPU. 
function completeMPU(partNumbers, cb) { let ETags = []; - return async.waterfall([ - next => { - const createMpuParams = { - Bucket: bucketName, - Key: objectName, - }; + return async.waterfall( + [ + next => { + const createMpuParams = { + Bucket: bucketName, + Key: objectName, + }; - s3.createMultipartUpload(createMpuParams, (err, data) => { - checkNoError(err); - return next(null, data.UploadId); - }); - }, - (uploadId, next) => - async.eachSeries(partNumbers, (partNumber, callback) => { - const uploadPartParams = { + s3.createMultipartUpload(createMpuParams, (err, data) => { + checkNoError(err); + return next(null, data.UploadId); + }); + }, + (uploadId, next) => + async.eachSeries( + partNumbers, + (partNumber, callback) => { + const uploadPartParams = { + Bucket: bucketName, + Key: objectName, + PartNumber: partNumber, + UploadId: uploadId, + Body: Buffer.alloc(partSize).fill(partNumber), + }; + return s3.uploadPart(uploadPartParams, (err, data) => { + checkNoError(err); + ETags = ETags.concat(data.ETag); + return callback(); + }); + }, + err => next(err, uploadId) + ), + (uploadId, next) => { + const parts = Array.from(Array(partNumbers.length).keys()); + const params = { Bucket: bucketName, Key: objectName, - PartNumber: partNumber, + MultipartUpload: { + Parts: parts.map(n => ({ + ETag: ETags[n], + PartNumber: partNumbers[n], + })), + }, UploadId: uploadId, - Body: Buffer.alloc(partSize).fill(partNumber), }; - return s3.uploadPart(uploadPartParams, (err, data) => { + return s3.completeMultipartUpload(params, err => { checkNoError(err); - ETags = ETags.concat(data.ETag); - return callback(); + return next(null, uploadId); }); - }, err => next(err, uploadId)), - (uploadId, next) => { - const parts = Array.from(Array(partNumbers.length).keys()); - const params = { - Bucket: bucketName, - Key: objectName, - MultipartUpload: { - Parts: parts.map(n => ({ - ETag: ETags[n], - PartNumber: partNumbers[n], - })), - }, - UploadId: uploadId, - }; - return s3.completeMultipartUpload(params, err => { - checkNoError(err); - return next(null, uploadId); - }); - }, - ], (err, uploadId) => { - if (err) { - return s3.abortMultipartUpload({ - Bucket: bucketName, - Key: objectName, - UploadId: uploadId, - }, cb); + }, + ], + (err, uploadId) => { + if (err) { + return s3.abortMultipartUpload( + { + Bucket: bucketName, + Key: objectName, + UploadId: uploadId, + }, + cb + ); + } + return cb(); } - return cb(); - }); + ); } function createMPUAndPutTwoParts(partTwoBody, cb) { let uploadId; const ETags = []; - return async.waterfall([ - next => s3.createMultipartUpload({ - Bucket: bucketName, - Key: copyPartKey, - }, (err, data) => { - checkNoError(err); - uploadId = data.UploadId; - return next(); - }), - // Copy an object with three parts. - next => s3.uploadPartCopy({ - Bucket: bucketName, - CopySource: `/${bucketName}/${objectName}`, - Key: copyPartKey, - PartNumber: 1, - UploadId: uploadId, - }, (err, data) => { - checkNoError(err); - ETags[0] = data.ETag; - return next(); - }), - // Put an object with one part. 
- next => s3.uploadPart({ - Bucket: bucketName, - Key: copyPartKey, - PartNumber: 2, - UploadId: uploadId, - Body: partTwoBody, - }, (err, data) => { - checkNoError(err); - ETags[1] = data.ETag; - return next(); - }), - ], err => { - if (err) { - return s3.abortMultipartUpload({ - Bucket: bucketName, - Key: copyPartKey, - UploadId: uploadId, - }, cb); + return async.waterfall( + [ + next => + s3.createMultipartUpload( + { + Bucket: bucketName, + Key: copyPartKey, + }, + (err, data) => { + checkNoError(err); + uploadId = data.UploadId; + return next(); + } + ), + // Copy an object with three parts. + next => + s3.uploadPartCopy( + { + Bucket: bucketName, + CopySource: `/${bucketName}/${objectName}`, + Key: copyPartKey, + PartNumber: 1, + UploadId: uploadId, + }, + (err, data) => { + checkNoError(err); + ETags[0] = data.ETag; + return next(); + } + ), + // Put an object with one part. + next => + s3.uploadPart( + { + Bucket: bucketName, + Key: copyPartKey, + PartNumber: 2, + UploadId: uploadId, + Body: partTwoBody, + }, + (err, data) => { + checkNoError(err); + ETags[1] = data.ETag; + return next(); + } + ), + ], + err => { + if (err) { + return s3.abortMultipartUpload( + { + Bucket: bucketName, + Key: copyPartKey, + UploadId: uploadId, + }, + cb + ); + } + return cb(null, uploadId, ETags); } - return cb(null, uploadId, ETags); - }); + ); } before(done => { @@ -206,103 +242,88 @@ describe('GET object', () => { }); // aws-sdk now (v2.363.0) returns 'UriParameterError' error - it.skip('should return an error to get request without a valid ' + - 'bucket name', - done => { - s3.getObject({ Bucket: '', Key: 'somekey' }, err => { - assert.notEqual(err, null, - 'Expected failure but got success'); - assert.strictEqual(err.code, 'MethodNotAllowed'); - return done(); - }); + it.skip('should return an error to get request without a valid ' + 'bucket name', done => { + s3.getObject({ Bucket: '', Key: 'somekey' }, err => { + assert.notEqual(err, null, 'Expected failure but got success'); + assert.strictEqual(err.code, 'MethodNotAllowed'); + return done(); }); + }); - it('should return NoSuchKey error when no such object', - done => { - s3.getObject({ Bucket: bucketName, Key: 'nope' }, err => { - assert.notEqual(err, null, - 'Expected failure but got success'); - assert.strictEqual(err.code, 'NoSuchKey'); - return done(); - }); + it('should return NoSuchKey error when no such object', done => { + s3.getObject({ Bucket: bucketName, Key: 'nope' }, err => { + assert.notEqual(err, null, 'Expected failure but got success'); + assert.strictEqual(err.code, 'NoSuchKey'); + return done(); }); + }); - describe('Additional headers: [Cache-Control, Content-Disposition, ' + - 'Content-Encoding, Expires, Accept-Ranges]', () => { - describe('if specified in put object request', () => { - before(done => { - const params = { - Bucket: bucketName, - Key: objectName, - CacheControl: cacheControl, - ContentDisposition: contentDisposition, - ContentEncoding: contentEncoding, - ContentType: contentType, - Expires: expires, - }; - s3.putObject(params, err => done(err)); - }); - it('should return additional headers', done => { - s3.getObject({ Bucket: bucketName, Key: objectName }, - (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.CacheControl, - cacheControl); - assert.strictEqual(res.ContentDisposition, - contentDisposition); - // Should remove V4 streaming value 'aws-chunked' - // to be compatible with AWS behavior - assert.strictEqual(res.ContentEncoding, - 'gzip'); - 
assert.strictEqual(res.ContentType, contentType); - assert.strictEqual(res.Expires.toGMTString(), - new Date(expires).toGMTString()); - assert.strictEqual(res.AcceptRanges, 'bytes'); - return done(); - }); + describe( + 'Additional headers: [Cache-Control, Content-Disposition, ' + 'Content-Encoding, Expires, Accept-Ranges]', + () => { + describe('if specified in put object request', () => { + before(done => { + const params = { + Bucket: bucketName, + Key: objectName, + CacheControl: cacheControl, + ContentDisposition: contentDisposition, + ContentEncoding: contentEncoding, + ContentType: contentType, + Expires: expires, + }; + s3.putObject(params, err => done(err)); + }); + it('should return additional headers', done => { + s3.getObject({ Bucket: bucketName, Key: objectName }, (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.CacheControl, cacheControl); + assert.strictEqual(res.ContentDisposition, contentDisposition); + // Should remove V4 streaming value 'aws-chunked' + // to be compatible with AWS behavior + assert.strictEqual(res.ContentEncoding, 'gzip'); + assert.strictEqual(res.ContentType, contentType); + assert.strictEqual(res.Expires.toGMTString(), new Date(expires).toGMTString()); + assert.strictEqual(res.AcceptRanges, 'bytes'); + return done(); + }); + }); }); - }); - describe('if response content headers are set in query', () => { - before(done => { - s3.putObject({ Bucket: bucketName, Key: objectName }, - err => done(err)); - }); + describe('if response content headers are set in query', () => { + before(done => { + s3.putObject({ Bucket: bucketName, Key: objectName }, err => done(err)); + }); - it('should return additional headers even if not set in ' + - 'put object request', done => { - const params = { - Bucket: bucketName, - Key: objectName, - ResponseCacheControl: cacheControl, - ResponseContentDisposition: contentDisposition, - ResponseContentEncoding: contentEncoding, - ResponseContentLanguage: contentLanguage, - ResponseContentType: contentType, - ResponseExpires: expires, - }; - s3.getObject(params, (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.CacheControl, - cacheControl); - assert.strictEqual(res.ContentDisposition, - contentDisposition); - assert.strictEqual(res.ContentEncoding, - contentEncoding); - assert.strictEqual(res.ContentLanguage, - contentLanguage); - assert.strictEqual(res.ContentType, contentType); - assert.strictEqual(res.Expires.toGMTString(), - new Date(expires).toGMTString()); - return done(); + it('should return additional headers even if not set in ' + 'put object request', done => { + const params = { + Bucket: bucketName, + Key: objectName, + ResponseCacheControl: cacheControl, + ResponseContentDisposition: contentDisposition, + ResponseContentEncoding: contentEncoding, + ResponseContentLanguage: contentLanguage, + ResponseContentType: contentType, + ResponseExpires: expires, + }; + s3.getObject(params, (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.CacheControl, cacheControl); + assert.strictEqual(res.ContentDisposition, contentDisposition); + assert.strictEqual(res.ContentEncoding, contentEncoding); + assert.strictEqual(res.ContentLanguage, contentLanguage); + assert.strictEqual(res.ContentType, contentType); + assert.strictEqual(res.Expires.toGMTString(), new Date(expires).toGMTString()); + return done(); + }); }); }); - }); - }); + } + ); describe('x-amz-website-redirect-location header', () => { before(done => { @@ -313,16 +334,14 @@ describe('GET 
object', () => { }; s3.putObject(params, err => done(err)); }); - it('should return website redirect header if specified in ' + - 'objectPUT request', done => { - s3.getObject({ Bucket: bucketName, Key: objectName }, - (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.WebsiteRedirectLocation, '/'); - return done(); - }); + it('should return website redirect header if specified in ' + 'objectPUT request', done => { + s3.getObject({ Bucket: bucketName, Key: objectName }, (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.WebsiteRedirectLocation, '/'); + return done(); + }); }); }); @@ -347,9 +366,7 @@ describe('GET object', () => { s3.putObject(params, done); }); - it('should not return "x-amz-tagging-count" if no tag ' + - 'associated with the object', - done => { + it('should not return "x-amz-tagging-count" if no tag ' + 'associated with the object', done => { s3.getObject(params, (err, data) => { if (err) { return done(err); @@ -363,17 +380,19 @@ describe('GET object', () => { beforeEach(done => { s3.putObjectTagging(paramsTagging, done); }); - it('should return "x-amz-tagging-count" header that provides ' + - 'the count of number of tags associated with the object', - done => { - s3.getObject(params, (err, data) => { - if (err) { - return done(err); - } - assert.equal(data.TagCount, 1); - return done(); - }); - }); + it( + 'should return "x-amz-tagging-count" header that provides ' + + 'the count of number of tags associated with the object', + done => { + s3.getObject(params, (err, data) => { + if (err) { + return done(err); + } + assert.equal(data.TagCount, 1); + return done(); + }); + } + ); }); }); @@ -382,43 +401,33 @@ describe('GET object', () => { beforeEach(done => { s3.putObject(params, done); }); - it('If-Match: returns no error when ETag match, with double ' + - 'quotes around ETag', - done => { - requestGet({ IfMatch: etag }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when ETag match, with double ' + 'quotes around ETag', done => { + requestGet({ IfMatch: etag }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when one of ETags match, with ' + - 'double quotes around ETag', - done => { - requestGet({ IfMatch: - `non-matching,${etag}` }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when one of ETags match, with ' + 'double quotes around ETag', done => { + requestGet({ IfMatch: `non-matching,${etag}` }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when ETag match, without double ' + - 'quotes around ETag', - done => { - requestGet({ IfMatch: etagTrim }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when ETag match, without double ' + 'quotes around ETag', done => { + requestGet({ IfMatch: etagTrim }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when one of ETags match, without ' + - 'double quotes around ETag', - done => { - requestGet({ IfMatch: - `non-matching,${etagTrim}` }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when one of ETags match, without ' + 'double quotes around ETag', done => { + requestGet({ IfMatch: `non-matching,${etagTrim}` }, err => { + checkNoError(err); + done(); }); + }); it('If-Match: returns no error when ETag match with *', done => { requestGet({ IfMatch: '*' }, err => { @@ -427,330 +436,368 @@ describe('GET object', () => { }); }); - it('If-Match: 
returns PreconditionFailed when ETag does not match', - done => { - requestGet({ + it('If-Match: returns PreconditionFailed when ETag does not match', done => { + requestGet( + { IfMatch: 'non-matching ETag', - }, err => { + }, + err => { checkError(err, 'PreconditionFailed'); done(); - }); - }); + } + ); + }); - it('If-None-Match: returns no error when ETag does not match', - done => { + it('If-None-Match: returns no error when ETag does not match', done => { requestGet({ IfNoneMatch: 'non-matching' }, err => { checkNoError(err); done(); }); }); - it('If-None-Match: returns no error when all ETags do not match', - done => { - requestGet({ - IfNoneMatch: 'non-matching,' + - 'non-matching-either', - }, err => { + it('If-None-Match: returns no error when all ETags do not match', done => { + requestGet( + { + IfNoneMatch: 'non-matching,' + 'non-matching-either', + }, + err => { checkNoError(err); done(); - }); - }); + } + ); + }); - it('If-None-Match: returns NotModified when ETag match, with ' + - 'double quotes around ETag', - done => { - requestGet({ IfNoneMatch: etag }, err => { - checkError(err, 'NotModified'); - done(); - }); + it('If-None-Match: returns NotModified when ETag match, with ' + 'double quotes around ETag', done => { + requestGet({ IfNoneMatch: etag }, err => { + checkError(err, 'NotModified'); + done(); }); + }); - it('If-None-Match: returns NotModified when one of ETags match, ' + - 'with double quotes around ETag', + it( + 'If-None-Match: returns NotModified when one of ETags match, ' + 'with double quotes around ETag', done => { - requestGet({ - IfNoneMatch: `non-matching,${etag}`, - }, err => { - checkError(err, 'NotModified'); - done(); - }); - }); + requestGet( + { + IfNoneMatch: `non-matching,${etag}`, + }, + err => { + checkError(err, 'NotModified'); + done(); + } + ); + } + ); - it('If-None-Match: returns NotModified when value is "*"', - done => { - requestGet({ + it('If-None-Match: returns NotModified when value is "*"', done => { + requestGet( + { IfNoneMatch: '*', - }, err => { - checkError(err, 'NotModified'); - done(); - }); - }); - - it('If-None-Match: returns NotModified when ETag match, without ' + - 'double quotes around ETag', - done => { - requestGet({ IfNoneMatch: etagTrim }, err => { - checkError(err, 'NotModified'); - done(); - }); - }); - - it('If-None-Match: returns NotModified when one of ETags match, ' + - 'without double quotes around ETag', - done => { - requestGet({ - IfNoneMatch: `non-matching,${etagTrim}`, - }, err => { + }, + err => { checkError(err, 'NotModified'); done(); - }); - }); + } + ); + }); - it('If-Modified-Since: returns no error if Last modified date is ' + - 'greater', - done => { - requestGet({ IfModifiedSince: dateFromNow(-1) }, - err => { - checkNoError(err); - done(); - }); + it('If-None-Match: returns NotModified when ETag match, without ' + 'double quotes around ETag', done => { + requestGet({ IfNoneMatch: etagTrim }, err => { + checkError(err, 'NotModified'); + done(); }); + }); - // Skipping this test, because real AWS does not provide error as - // expected - it.skip('If-Modified-Since: returns NotModified if Last modified ' + - 'date is lesser', + it( + 'If-None-Match: returns NotModified when one of ETags match, ' + 'without double quotes around ETag', done => { - requestGet({ IfModifiedSince: dateFromNow(1) }, + requestGet( + { + IfNoneMatch: `non-matching,${etagTrim}`, + }, err => { checkError(err, 'NotModified'); done(); - }); - }); - - it('If-Modified-Since: returns NotModified if Last modified ' + - 'date is 
equal', - done => { - s3.headObject({ Bucket: bucketName, Key: objectName }, - (err, data) => { - checkNoError(err); - const lastModified = dateConvert(data.LastModified); - requestGet({ IfModifiedSince: lastModified }, err => { - checkError(err, 'NotModified'); - done(); - }); - }); - }); + } + ); + } + ); - it('If-Unmodified-Since: returns no error when lastModified date ' + - 'is greater', - done => { - requestGet({ IfUnmodifiedSince: dateFromNow(1) }, - err => { - checkNoError(err); - done(); - }); + it('If-Modified-Since: returns no error if Last modified date is ' + 'greater', done => { + requestGet({ IfModifiedSince: dateFromNow(-1) }, err => { + checkNoError(err); + done(); }); - - it('If-Unmodified-Since: returns no error when lastModified ' + - 'date is equal', done => { - s3.headObject({ Bucket: bucketName, Key: objectName }, - (err, data) => { - checkNoError(err); - const lastModified = dateConvert(data.LastModified); - requestGet({ IfUnmodifiedSince: lastModified }, - err => { - checkNoError(err); - done(); - }); - }); }); - it('If-Unmodified-Since: returns PreconditionFailed when ' + - 'lastModified date is lesser', - done => { - requestGet({ IfUnmodifiedSince: dateFromNow(-1) }, - err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + // Skipping this test, because real AWS does not provide error as + // expected + it.skip('If-Modified-Since: returns NotModified if Last modified ' + 'date is lesser', done => { + requestGet({ IfModifiedSince: dateFromNow(1) }, err => { + checkError(err, 'NotModified'); + done(); }); + }); - it('If-Match & If-Unmodified-Since: returns no error when match ' + - 'Etag and lastModified is greater', - done => { - requestGet({ - IfMatch: etagTrim, - IfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkNoError(err); + it('If-Modified-Since: returns NotModified if Last modified ' + 'date is equal', done => { + s3.headObject({ Bucket: bucketName, Key: objectName }, (err, data) => { + checkNoError(err); + const lastModified = dateConvert(data.LastModified); + requestGet({ IfModifiedSince: lastModified }, err => { + checkError(err, 'NotModified'); done(); }); }); + }); - it('If-Match match & If-Unmodified-Since match', done => { - requestGet({ - IfMatch: etagTrim, - IfUnmodifiedSince: dateFromNow(1), - }, err => { + it('If-Unmodified-Since: returns no error when lastModified date ' + 'is greater', done => { + requestGet({ IfUnmodifiedSince: dateFromNow(1) }, err => { checkNoError(err); done(); }); }); - it('If-Match not match & If-Unmodified-Since not match', done => { - requestGet({ - IfMatch: 'non-matching', - IfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); + it('If-Unmodified-Since: returns no error when lastModified ' + 'date is equal', done => { + s3.headObject({ Bucket: bucketName, Key: objectName }, (err, data) => { + checkNoError(err); + const lastModified = dateConvert(data.LastModified); + requestGet({ IfUnmodifiedSince: lastModified }, err => { + checkNoError(err); + done(); + }); }); }); - it('If-Match not match & If-Unmodified-Since match', done => { - requestGet({ - IfMatch: 'non-matching', - IfUnmodifiedSince: dateFromNow(1), - }, err => { + it('If-Unmodified-Since: returns PreconditionFailed when ' + 'lastModified date is lesser', done => { + requestGet({ IfUnmodifiedSince: dateFromNow(-1) }, err => { checkError(err, 'PreconditionFailed'); done(); }); }); - // Skipping this test, because real AWS does not provide error as - // expected - it.skip('If-Match match & 
If-Modified-Since not match', done => { - requestGet({ - IfMatch: etagTrim, - IfModifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + it( + 'If-Match & If-Unmodified-Since: returns no error when match ' + 'Etag and lastModified is greater', + done => { + requestGet( + { + IfMatch: etagTrim, + IfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); + } + ); + + it('If-Match match & If-Unmodified-Since match', done => { + requestGet( + { + IfMatch: etagTrim, + IfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkNoError(err); + done(); + } + ); + }); + + it('If-Match not match & If-Unmodified-Since not match', done => { + requestGet( + { + IfMatch: 'non-matching', + IfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); + }); + + it('If-Match not match & If-Unmodified-Since match', done => { + requestGet( + { + IfMatch: 'non-matching', + IfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); + }); + + // Skipping this test, because real AWS does not provide error as + // expected + it.skip('If-Match match & If-Modified-Since not match', done => { + requestGet( + { + IfMatch: etagTrim, + IfModifiedSince: dateFromNow(1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-Match match & If-Modified-Since match', done => { - requestGet({ - IfMatch: etagTrim, - IfModifiedSince: dateFromNow(-1), - }, err => { - checkNoError(err); - done(); - }); + requestGet( + { + IfMatch: etagTrim, + IfModifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-Match not match & If-Modified-Since not match', done => { - requestGet({ - IfMatch: 'non-matching', - IfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestGet( + { + IfMatch: 'non-matching', + IfModifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-Match not match & If-Modified-Since match', done => { - requestGet({ - IfMatch: 'non-matching', - IfModifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestGet( + { + IfMatch: 'non-matching', + IfModifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); - it('If-None-Match & If-Modified-Since: returns NotModified when ' + - 'Etag does not match and lastModified is greater', + it( + 'If-None-Match & If-Modified-Since: returns NotModified when ' + + 'Etag does not match and lastModified is greater', done => { - const req = s3.getObject({ - Bucket: bucketName, - Key: objectName, - IfNoneMatch: etagTrim, - IfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'NotModified'); - done(); - }); + const req = s3.getObject( + { + Bucket: bucketName, + Key: objectName, + IfNoneMatch: etagTrim, + IfModifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'NotModified'); + done(); + } + ); req.on('httpHeaders', (code, headers) => { assert(!headers['content-type']); assert(!headers['content-length']); }); - }); + } + ); - it('If-None-Match not match & If-Modified-Since not match', - done => { - requestGet({ - IfNoneMatch: etagTrim, - IfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'NotModified'); - done(); - }); + it('If-None-Match not match & If-Modified-Since not match', done => { + requestGet( + { + IfNoneMatch: etagTrim, + 
IfModifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'NotModified'); + done(); + } + ); }); it('If-None-Match match & If-Modified-Since match', done => { - requestGet({ - IfNoneMatch: 'non-matching', - IfModifiedSince: dateFromNow(-1), - }, err => { - checkNoError(err); - done(); - }); + requestGet( + { + IfNoneMatch: 'non-matching', + IfModifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); }); // Skipping this test, because real AWS does not provide error as // expected - it.skip('If-None-Match match & If-Modified-Since not match', - done => { - requestGet({ - IfNoneMatch: 'non-matching', - IfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it.skip('If-None-Match match & If-Modified-Since not match', done => { + requestGet( + { + IfNoneMatch: 'non-matching', + IfModifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-None-Match match & If-Unmodified-Since match', done => { - requestGet({ - IfNoneMatch: 'non-matching', - IfUnmodifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + requestGet( + { + IfNoneMatch: 'non-matching', + IfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-None-Match match & If-Unmodified-Since not match', done => { - requestGet({ - IfNoneMatch: 'non-matching', - IfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestGet( + { + IfNoneMatch: 'non-matching', + IfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-None-Match not match & If-Unmodified-Since match', done => { - requestGet({ - IfNoneMatch: etagTrim, - IfUnmodifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'NotModified'); - done(); - }); + requestGet( + { + IfNoneMatch: etagTrim, + IfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'NotModified'); + done(); + } + ); }); - it('If-None-Match not match & If-Unmodified-Since not match', - done => { - requestGet({ - IfNoneMatch: etagTrim, - IfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it('If-None-Match not match & If-Unmodified-Since not match', done => { + requestGet( + { + IfNoneMatch: etagTrim, + IfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); }); @@ -760,8 +807,8 @@ describe('GET object', () => { const invalidPartNumbers = [-1, 0, 10001]; orderedPartNumbers.forEach(num => - it(`should get the body of part ${num} when ordered MPU`, - done => completeMPU(orderedPartNumbers, err => { + it(`should get the body of part ${num} when ordered MPU`, done => + completeMPU(orderedPartNumbers, err => { checkNoError(err); return requestGet({ PartNumber: num }, (err, data) => { checkNoError(err); @@ -775,132 +822,146 @@ describe('GET object', () => { ); return done(); }); - }))); + })) + ); // Use the orderedPartNumbers to retrieve parts with GetObject. 
orderedPartNumbers.forEach(num => - it(`should get the body of part ${num} when unordered MPU`, - done => completeMPU(unOrderedPartNumbers, err => { + it(`should get the body of part ${num} when unordered MPU`, done => + completeMPU(unOrderedPartNumbers, err => { checkNoError(err); return requestGet({ PartNumber: num }, (err, data) => { checkNoError(err); checkIntegerHeader(data.ContentLength, partSize); const md5Hash = crypto.createHash('md5'); const md5HashExpected = crypto.createHash('md5'); - const expected = Buffer.alloc(partSize) - .fill(unOrderedPartNumbers[num - 1]); + const expected = Buffer.alloc(partSize).fill(unOrderedPartNumbers[num - 1]); assert.strictEqual( md5Hash.update(data.Body).digest('hex'), md5HashExpected.update(expected).digest('hex') ); return done(); }); - }))); + })) + ); invalidPartNumbers.forEach(num => - it(`should not accept a partNumber that is not 1-10000: ${num}`, - done => completeMPU(orderedPartNumbers, err => { - checkNoError(err); - return requestGet({ PartNumber: num }, err => { - checkError(err, 'InvalidArgument'); - done(); - }); - }))); + it(`should not accept a partNumber that is not 1-10000: ${num}`, done => + completeMPU(orderedPartNumbers, err => { + checkNoError(err); + return requestGet({ PartNumber: num }, err => { + checkError(err, 'InvalidArgument'); + done(); + }); + })) + ); - it('should not accept a part number greater than the total parts ' + - 'uploaded for an MPU', done => + it('should not accept a part number greater than the total parts ' + 'uploaded for an MPU', done => completeMPU(orderedPartNumbers, err => { checkNoError(err); return requestGet({ PartNumber: 11 }, err => { checkError(err, 'InvalidPartNumber'); done(); }); - })); + }) + ); - it('should accept a part number of 1 for regular put object', - done => s3.putObject({ - Bucket: bucketName, - Key: objectName, - Body: Buffer.alloc(10), - }, err => { - checkNoError(err); - return requestGet({ PartNumber: 1 }, (err, data) => { - const md5Hash = crypto.createHash('md5'); - const md5HashExpected = crypto.createHash('md5'); - const expected = Buffer.alloc(10); - assert.strictEqual( - md5Hash.update(data.Body).digest('hex'), - md5HashExpected.update(expected).digest('hex') - ); - done(); - }); - })); + it('should accept a part number of 1 for regular put object', done => + s3.putObject( + { + Bucket: bucketName, + Key: objectName, + Body: Buffer.alloc(10), + }, + err => { + checkNoError(err); + return requestGet({ PartNumber: 1 }, (err, data) => { + const md5Hash = crypto.createHash('md5'); + const md5HashExpected = crypto.createHash('md5'); + const expected = Buffer.alloc(10); + assert.strictEqual( + md5Hash.update(data.Body).digest('hex'), + md5HashExpected.update(expected).digest('hex') + ); + done(); + }); + } + )); it('should accept a part number that is a string', done => - s3.putObject({ - Bucket: bucketName, - Key: objectName, - Body: Buffer.alloc(10), - }, err => { - checkNoError(err); - return requestGet({ PartNumber: '1' }, (err, data) => { - checkIntegerHeader(data.ContentLength, 10); - const md5Hash = crypto.createHash('md5'); - const md5HashExpected = crypto.createHash('md5'); - const expected = Buffer.alloc(10); - assert.strictEqual( - md5Hash.update(data.Body).digest('hex'), - md5HashExpected.update(expected).digest('hex') - ); - done(); - }); - })); + s3.putObject( + { + Bucket: bucketName, + Key: objectName, + Body: Buffer.alloc(10), + }, + err => { + checkNoError(err); + return requestGet({ PartNumber: '1' }, (err, data) => { + 
checkIntegerHeader(data.ContentLength, 10); + const md5Hash = crypto.createHash('md5'); + const md5HashExpected = crypto.createHash('md5'); + const expected = Buffer.alloc(10); + assert.strictEqual( + md5Hash.update(data.Body).digest('hex'), + md5HashExpected.update(expected).digest('hex') + ); + done(); + }); + } + )); - it('should not accept a part number greater than 1 for regular ' + - 'put object', done => - s3.putObject({ - Bucket: bucketName, - Key: objectName, - Body: Buffer.alloc(10), - }, err => { - checkNoError(err); - return requestGet({ PartNumber: 2 }, err => { - checkError(err, 'InvalidPartNumber'); - done(); - }); - })); + it('should not accept a part number greater than 1 for regular ' + 'put object', done => + s3.putObject( + { + Bucket: bucketName, + Key: objectName, + Body: Buffer.alloc(10), + }, + err => { + checkNoError(err); + return requestGet({ PartNumber: 2 }, err => { + checkError(err, 'InvalidPartNumber'); + done(); + }); + } + ) + ); it('should not accept both PartNumber and Range as params', done => completeMPU(orderedPartNumbers, err => { checkNoError(err); - return requestGet({ - PartNumber: 1, - Range: 'bytes=0-10', - }, err => { - checkError(err, 'InvalidRequest'); - done(); - }); + return requestGet( + { + PartNumber: 1, + Range: 'bytes=0-10', + }, + err => { + checkError(err, 'InvalidRequest'); + done(); + } + ); })); - it('should not include PartsCount response header for regular ' + - 'put object', done => { - s3.putObject({ - Bucket: bucketName, - Key: objectName, - Body: Buffer.alloc(10), - }, err => { - assert.ifError(err); - requestGet({ PartNumber: 1 }, (err, data) => { + it('should not include PartsCount response header for regular ' + 'put object', done => { + s3.putObject( + { + Bucket: bucketName, + Key: objectName, + Body: Buffer.alloc(10), + }, + err => { assert.ifError(err); - assert.strictEqual('PartsCount' in data, false, - 'PartsCount header is present.'); - done(); - }); - }); + requestGet({ PartNumber: 1 }, (err, data) => { + assert.ifError(err); + assert.strictEqual('PartsCount' in data, false, 'PartsCount header is present.'); + done(); + }); + } + ); }); - it('should include PartsCount response header for mpu object', - done => { + it('should include PartsCount response header for mpu object', done => { completeMPU(orderedPartNumbers, err => { assert.ifError(err); return requestGet({ PartNumber: 1 }, (err, data) => { @@ -914,121 +975,147 @@ describe('GET object', () => { describe('uploadPartCopy', () => { // The original object was composed of three parts const partOneSize = partSize * 10; - const bufs = orderedPartNumbers.map(n => - Buffer.alloc(partSize, n)); + const bufs = orderedPartNumbers.map(n => Buffer.alloc(partSize, n)); const partOneBody = Buffer.concat(bufs, partOneSize); const partTwoBody = Buffer.alloc(partSize, 4); - beforeEach(done => async.waterfall([ - next => completeMPU(orderedPartNumbers, next), - next => createMPUAndPutTwoParts(partTwoBody, next), - (uploadId, ETags, next) => - s3.completeMultipartUpload({ - Bucket: bucketName, - Key: copyPartKey, - MultipartUpload: { - Parts: [ - { - ETag: ETags[0], - PartNumber: 1, - }, + beforeEach(done => + async.waterfall( + [ + next => completeMPU(orderedPartNumbers, next), + next => createMPUAndPutTwoParts(partTwoBody, next), + (uploadId, ETags, next) => + s3.completeMultipartUpload( { - ETag: ETags[1], - PartNumber: 2, + Bucket: bucketName, + Key: copyPartKey, + MultipartUpload: { + Parts: [ + { + ETag: ETags[0], + PartNumber: 1, + }, + { + ETag: ETags[1], + PartNumber: 
2, + }, + ], + }, + UploadId: uploadId, }, - ], - }, - UploadId: uploadId, - }, next), - ], done)); + next + ), + ], + done + ) + ); - afterEach(done => s3.deleteObject({ - Bucket: bucketName, - Key: copyPartKey, - }, done)); + afterEach(done => + s3.deleteObject( + { + Bucket: bucketName, + Key: copyPartKey, + }, + done + ) + ); it('should retrieve a part copied from an MPU', done => - checkGetObjectPart(copyPartKey, 1, partOneSize, partOneBody, - done)); + checkGetObjectPart(copyPartKey, 1, partOneSize, partOneBody, done)); - it('should retrieve a part put after part copied from MPU', - done => checkGetObjectPart(copyPartKey, 2, partSize, - partTwoBody, done)); + it('should retrieve a part put after part copied from MPU', done => + checkGetObjectPart(copyPartKey, 2, partSize, partTwoBody, done)); }); describe('uploadPartCopy overwrite', () => { const partOneBody = Buffer.alloc(partSize, 1); // The original object was composed of three parts const partTwoSize = partSize * 10; - const bufs = orderedPartNumbers.map(n => - Buffer.alloc(partSize, n)); + const bufs = orderedPartNumbers.map(n => Buffer.alloc(partSize, n)); const partTwoBody = Buffer.concat(bufs, partTwoSize); - beforeEach(done => async.waterfall([ - next => completeMPU(orderedPartNumbers, next), - next => createMPUAndPutTwoParts(partTwoBody, next), - /* eslint-disable no-param-reassign */ - // Overwrite part one. - (uploadId, ETags, next) => - s3.uploadPart({ - Bucket: bucketName, - Key: copyPartKey, - PartNumber: 1, - UploadId: uploadId, - Body: partOneBody, - }, (err, data) => { - checkNoError(err); - ETags[0] = data.ETag; - return next(null, uploadId, ETags); - }), - // Overwrite part one with an three-part object. - (uploadId, ETags, next) => - s3.uploadPartCopy({ - Bucket: bucketName, - CopySource: `/${bucketName}/${objectName}`, - Key: copyPartKey, - PartNumber: 2, - UploadId: uploadId, - }, (err, data) => { - checkNoError(err); - ETags[1] = data.ETag; - return next(null, uploadId, ETags); - }), - /* eslint-enable no-param-reassign */ - (uploadId, ETags, next) => - s3.completeMultipartUpload({ - Bucket: bucketName, - Key: copyPartKey, - MultipartUpload: { - Parts: [ + beforeEach(done => + async.waterfall( + [ + next => completeMPU(orderedPartNumbers, next), + next => createMPUAndPutTwoParts(partTwoBody, next), + /* eslint-disable no-param-reassign */ + // Overwrite part one. + (uploadId, ETags, next) => + s3.uploadPart( { - ETag: ETags[0], + Bucket: bucketName, + Key: copyPartKey, PartNumber: 1, + UploadId: uploadId, + Body: partOneBody, }, + (err, data) => { + checkNoError(err); + ETags[0] = data.ETag; + return next(null, uploadId, ETags); + } + ), + // Overwrite part one with an three-part object. 
+ (uploadId, ETags, next) => + s3.uploadPartCopy( { - ETag: ETags[1], + Bucket: bucketName, + CopySource: `/${bucketName}/${objectName}`, + Key: copyPartKey, PartNumber: 2, + UploadId: uploadId, }, - ], - }, - UploadId: uploadId, - }, next), - ], done)); + (err, data) => { + checkNoError(err); + ETags[1] = data.ETag; + return next(null, uploadId, ETags); + } + ), + /* eslint-enable no-param-reassign */ + (uploadId, ETags, next) => + s3.completeMultipartUpload( + { + Bucket: bucketName, + Key: copyPartKey, + MultipartUpload: { + Parts: [ + { + ETag: ETags[0], + PartNumber: 1, + }, + { + ETag: ETags[1], + PartNumber: 2, + }, + ], + }, + UploadId: uploadId, + }, + next + ), + ], + done + ) + ); - afterEach(done => s3.deleteObject({ - Bucket: bucketName, - Key: copyPartKey, - }, done)); - - it('should retrieve a part that overwrote another part ' + - 'originally copied from an MPU', done => - checkGetObjectPart(copyPartKey, 1, partSize, partOneBody, - done)); - - it('should retrieve a part copied from an MPU after the ' + - 'original part was overwritten', - done => checkGetObjectPart(copyPartKey, 2, partTwoSize, - partTwoBody, done)); + afterEach(done => + s3.deleteObject( + { + Bucket: bucketName, + Key: copyPartKey, + }, + done + ) + ); + + it('should retrieve a part that overwrote another part ' + 'originally copied from an MPU', done => + checkGetObjectPart(copyPartKey, 1, partSize, partOneBody, done) + ); + + it('should retrieve a part copied from an MPU after the ' + 'original part was overwritten', done => + checkGetObjectPart(copyPartKey, 2, partTwoSize, partTwoBody, done) + ); }); }); @@ -1040,17 +1127,14 @@ describe('GET object', () => { }; s3.putObject(params, err => done(err)); }); - it('should return website redirect header if specified in ' + - 'objectPUT request', done => { - s3.getObject({ Bucket: bucketName, Key: objectName }, - (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.WebsiteRedirectLocation, - undefined); - return done(); - }); + it('should return website redirect header if specified in ' + 'objectPUT request', done => { + s3.getObject({ Bucket: bucketName, Key: objectName }, (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.WebsiteRedirectLocation, undefined); + return done(); + }); }); }); }); @@ -1078,48 +1162,55 @@ describeSkipIfCeph('GET object with object lock', () => { ObjectLockMode: mockMode, ObjectLockLegalHoldStatus: 'ON', }; - return s3.createBucket({ - Bucket: bucket, - ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.putObject(params).promise()) - .then(() => s3.getObject({ Bucket: bucket, Key: key }).promise()) - /* eslint-disable no-return-assign */ - .then(res => versionId = res.VersionId) - .catch(err => { - process.stdout.write('Error in before\n'); - throw err; - }); + return ( + s3 + .createBucket({ + Bucket: bucket, + ObjectLockEnabledForBucket: true, + }) + .promise() + .then(() => s3.putObject(params).promise()) + .then(() => s3.getObject({ Bucket: bucket, Key: key }).promise()) + /* eslint-disable no-return-assign */ + .then(res => (versionId = res.VersionId)) + .catch(err => { + process.stdout.write('Error in before\n'); + throw err; + }) + ); }); - afterEach(() => changeLockPromise([{ bucket, key, versionId }], '') - .then(() => s3.listObjectVersions({ Bucket: bucket }).promise()) - .then(res => res.Versions.forEach(object => { - const params = [ - { - bucket, - key: object.Key, - versionId: object.VersionId, - }, - ]; - changeLockPromise(params, ''); - })) - 
.then(() => { - process.stdout.write('Emptying and deleting buckets\n'); - return bucketUtil.empty(bucket); - }) - .then(() => s3.deleteBucket({ Bucket: bucket }).promise()) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - })); + afterEach(() => + changeLockPromise([{ bucket, key, versionId }], '') + .then(() => s3.listObjectVersions({ Bucket: bucket }).promise()) + .then(res => + res.Versions.forEach(object => { + const params = [ + { + bucket, + key: object.Key, + versionId: object.VersionId, + }, + ]; + changeLockPromise(params, ''); + }) + ) + .then(() => { + process.stdout.write('Emptying and deleting buckets\n'); + return bucketUtil.empty(bucket); + }) + .then(() => s3.deleteBucket({ Bucket: bucket }).promise()) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }) + ); it('should return object lock headers if set on the object', done => { s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { assert.ifError(err); assert.strictEqual(res.ObjectLockMode, mockMode); - const responseDate - = formatDate(res.ObjectLockRetainUntilDate.toISOString()); + const responseDate = formatDate(res.ObjectLockRetainUntilDate.toISOString()); const expectedDate = formatDate(mockDate); assert.strictEqual(responseDate, expectedDate); assert.strictEqual(res.ObjectLockLegalHoldStatus, 'ON'); diff --git a/tests/functional/aws-node-sdk/test/object/getMPU_compatibleHeaders.js b/tests/functional/aws-node-sdk/test/object/getMPU_compatibleHeaders.js index 7f3758fa1f..fb907f3efb 100644 --- a/tests/functional/aws-node-sdk/test/object/getMPU_compatibleHeaders.js +++ b/tests/functional/aws-node-sdk/test/object/getMPU_compatibleHeaders.js @@ -6,114 +6,123 @@ const BucketUtility = require('../../lib/utility/bucket-util'); const bucketName = 'testgetmpubucket'; const objectName = 'key'; -describe('GET multipart upload object [Cache-Control, Content-Disposition, ' + -'Content-Encoding, Expires headers]', () => { - withV4(sigCfg => { - let bucketUtil; - let s3; - let uploadId; - const cacheControl = 'max-age=86400'; - const contentDisposition = 'attachment; filename="fname.ext";'; - const contentEncoding = 'aws-chunked,gzip'; - // AWS Node SDK requires Date object, ISO-8601 string, or - // a UNIX timestamp for Expires header - const expires = new Date(); +describe( + 'GET multipart upload object [Cache-Control, Content-Disposition, ' + 'Content-Encoding, Expires headers]', + () => { + withV4(sigCfg => { + let bucketUtil; + let s3; + let uploadId; + const cacheControl = 'max-age=86400'; + const contentDisposition = 'attachment; filename="fname.ext";'; + const contentEncoding = 'aws-chunked,gzip'; + // AWS Node SDK requires Date object, ISO-8601 string, or + // a UNIX timestamp for Expires header + const expires = new Date(); - before(() => { - const params = { - Bucket: bucketName, - Key: objectName, - CacheControl: cacheControl, - ContentDisposition: contentDisposition, - ContentEncoding: contentEncoding, - Expires: expires, - }; - bucketUtil = new BucketUtility('default', sigCfg); - s3 = bucketUtil.s3; - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('deleting bucket, just in case\n'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - if (err.code !== 'NoSuchBucket') { - process.stdout.write(`${err}\n`); - throw err; - } - }) - .then(() => { - process.stdout.write('creating bucket\n'); - return s3.createBucket({ Bucket: bucketName }).promise(); - }) - .then(() => { - process.stdout.write('initiating multipart 
upload\n'); - return s3.createMultipartUpload(params).promise(); - }) - .then(res => { - uploadId = res.UploadId; - return uploadId; - }) - .catch(err => { - process.stdout.write(`Error in before: ${err}\n`); - throw err; - }); - }); - after(() => { - process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write('Error in after\n'); - throw err; - }); - }); - it('should return additional headers when get request is performed ' + - 'on MPU, when they are specified in creation of MPU', - () => { - const params = { Bucket: bucketName, Key: 'key', PartNumber: 1, - UploadId: uploadId }; - return s3.uploadPart(params).promise() - .catch(err => { - process.stdout.write(`Error in uploadPart ${err}\n`); - throw err; - }) - .then(res => { - process.stdout.write('about to complete multipart upload\n'); - return s3.completeMultipartUpload({ + before(() => { + const params = { Bucket: bucketName, Key: objectName, - UploadId: uploadId, - MultipartUpload: { - Parts: [ - { ETag: res.ETag, PartNumber: 1 }, - ], - }, - }).promise(); - }) - .catch(err => { - process.stdout.write(`Error completing upload ${err}\n`); - throw err; - }) - .then(() => { - process.stdout.write('about to get object\n'); - return s3.getObject({ - Bucket: bucketName, Key: objectName, - }).promise(); - }) - .catch(err => { - process.stdout.write(`Error getting object ${err}\n`); - throw err; - }) - .then(res => { - assert.strictEqual(res.CacheControl, cacheControl); - assert.strictEqual(res.ContentDisposition, contentDisposition); - assert.strictEqual(res.ContentEncoding, 'gzip'); - assert.strictEqual(res.Expires.toGMTString(), - expires.toGMTString()); + CacheControl: cacheControl, + ContentDisposition: contentDisposition, + ContentEncoding: contentEncoding, + Expires: expires, + }; + bucketUtil = new BucketUtility('default', sigCfg); + s3 = bucketUtil.s3; + return bucketUtil + .empty(bucketName) + .then(() => { + process.stdout.write('deleting bucket, just in case\n'); + return bucketUtil.deleteOne(bucketName); + }) + .catch(err => { + if (err.code !== 'NoSuchBucket') { + process.stdout.write(`${err}\n`); + throw err; + } + }) + .then(() => { + process.stdout.write('creating bucket\n'); + return s3.createBucket({ Bucket: bucketName }).promise(); + }) + .then(() => { + process.stdout.write('initiating multipart upload\n'); + return s3.createMultipartUpload(params).promise(); + }) + .then(res => { + uploadId = res.UploadId; + return uploadId; + }) + .catch(err => { + process.stdout.write(`Error in before: ${err}\n`); + throw err; + }); + }); + after(() => { + process.stdout.write('Emptying bucket\n'); + return bucketUtil + .empty(bucketName) + .then(() => { + process.stdout.write('Deleting bucket\n'); + return bucketUtil.deleteOne(bucketName); + }) + .catch(err => { + process.stdout.write('Error in after\n'); + throw err; + }); }); + it( + 'should return additional headers when get request is performed ' + + 'on MPU, when they are specified in creation of MPU', + () => { + const params = { Bucket: bucketName, Key: 'key', PartNumber: 1, UploadId: uploadId }; + return s3 + .uploadPart(params) + .promise() + .catch(err => { + process.stdout.write(`Error in uploadPart ${err}\n`); + throw err; + }) + .then(res => { + process.stdout.write('about to complete multipart upload\n'); + return s3 + .completeMultipartUpload({ + Bucket: bucketName, + Key: objectName, + UploadId: 
uploadId, + MultipartUpload: { + Parts: [{ ETag: res.ETag, PartNumber: 1 }], + }, + }) + .promise(); + }) + .catch(err => { + process.stdout.write(`Error completing upload ${err}\n`); + throw err; + }) + .then(() => { + process.stdout.write('about to get object\n'); + return s3 + .getObject({ + Bucket: bucketName, + Key: objectName, + }) + .promise(); + }) + .catch(err => { + process.stdout.write(`Error getting object ${err}\n`); + throw err; + }) + .then(res => { + assert.strictEqual(res.CacheControl, cacheControl); + assert.strictEqual(res.ContentDisposition, contentDisposition); + assert.strictEqual(res.ContentEncoding, 'gzip'); + assert.strictEqual(res.Expires.toGMTString(), expires.toGMTString()); + }); + } + ); }); - }); -}); + } +); diff --git a/tests/functional/aws-node-sdk/test/object/getObjTagging.js b/tests/functional/aws-node-sdk/test/object/getObjTagging.js index 73972590b1..8b4b009817 100644 --- a/tests/functional/aws-node-sdk/test/object/getObjTagging.js +++ b/tests/functional/aws-node-sdk/test/object/getObjTagging.js @@ -8,16 +8,18 @@ const bucketName = 'testtaggingbucket'; const objectName = 'testtaggingobject'; const objectNameAcl = 'testtaggingobjectacl'; -const taggingConfig = { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }, - { - Key: 'key2', - Value: 'value2', - }, -] }; +const taggingConfig = { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + { + Key: 'key2', + Value: 'value2', + }, + ], +}; function _checkError(err, code, statusCode) { assert(err, 'Expected error but found none'); @@ -33,132 +35,144 @@ describe('GET object taggings', () => { const otherAccountS3 = otherAccountBucketUtility.s3; beforeEach(done => { - async.waterfall([ - next => s3.createBucket({ Bucket: bucketName }, err => - next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucketName }, err => next(err)), + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + ], + done + ); }); afterEach(() => { process.stdout.write('Emptying bucket'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); - }); - - it('should return appropriate tags after putting tags', done => { - s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - Tagging: taggingConfig, - }, err => { - assert.ifError(err, `putObjectTagging error: ${err}`); - s3.getObjectTagging({ Bucket: bucketName, Key: objectName }, - (err, data) => { - assert.ifError(err, `getObjectTagging error: ${err}`); - assert.deepStrictEqual(data, taggingConfig); - done(); + return bucketUtil + .empty(bucketName) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteOne(bucketName); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; }); - }); }); - it('should return no tag after putting and deleting tags', done => { - async.waterfall([ - next => s3.putObjectTagging({ + it('should return appropriate tags after putting tags', done => { + s3.putObjectTagging( + { Bucket: bucketName, Key: objectName, Tagging: taggingConfig, - }, err => next(err)), - next => s3.deleteObjectTagging({ Bucket: bucketName, - Key: objectName }, err => next(err)), - next => s3.getObjectTagging({ Bucket: bucketName, - Key: objectName }, (err, data) => next(err, data)), - ], (err, data) => { - 
assert.ifError(err, `error: ${err}`); - assert.deepStrictEqual(data.TagSet, []); - return done(); - }); + }, + err => { + assert.ifError(err, `putObjectTagging error: ${err}`); + s3.getObjectTagging({ Bucket: bucketName, Key: objectName }, (err, data) => { + assert.ifError(err, `getObjectTagging error: ${err}`); + assert.deepStrictEqual(data, taggingConfig); + done(); + }); + } + ); + }); + + it('should return no tag after putting and deleting tags', done => { + async.waterfall( + [ + next => + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + Tagging: taggingConfig, + }, + err => next(err) + ), + next => s3.deleteObjectTagging({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.getObjectTagging({ Bucket: bucketName, Key: objectName }, (err, data) => next(err, data)), + ], + (err, data) => { + assert.ifError(err, `error: ${err}`); + assert.deepStrictEqual(data.TagSet, []); + return done(); + } + ); }); it('should return empty array after putting no tag', done => { - s3.getObjectTagging({ Bucket: bucketName, Key: objectName }, - (err, data) => { + s3.getObjectTagging({ Bucket: bucketName, Key: objectName }, (err, data) => { assert.ifError(err, `getObjectTagging error: ${err}`); assert.deepStrictEqual(data.TagSet, []); done(); }); }); - it('should return NoSuchKey getting tag to a non-existing object', - done => { - s3.getObjectTagging({ - Bucket: bucketName, - Key: 'nonexisting', - }, err => { - _checkError(err, 'NoSuchKey', 404); - done(); - }); + it('should return NoSuchKey getting tag to a non-existing object', done => { + s3.getObjectTagging( + { + Bucket: bucketName, + Key: 'nonexisting', + }, + err => { + _checkError(err, 'NoSuchKey', 404); + done(); + } + ); }); - it('should return 403 AccessDenied getting tag with another account', - done => { - otherAccountS3.getObjectTagging({ Bucket: bucketName, Key: - objectName }, err => { + it('should return 403 AccessDenied getting tag with another account', done => { + otherAccountS3.getObjectTagging({ Bucket: bucketName, Key: objectName }, err => { _checkError(err, 'AccessDenied', 403); done(); }); }); - it('should return 403 AccessDenied getting tag with a different ' + - 'account to an object with ACL "public-read-write"', - done => { - s3.putObjectAcl({ Bucket: bucketName, Key: objectName, - ACL: 'public-read-write' }, err => { - if (err) { - return done(err); - } - return otherAccountS3.getObjectTagging({ Bucket: bucketName, - Key: objectName }, err => { - _checkError(err, 'AccessDenied', 403); - done(); + it( + 'should return 403 AccessDenied getting tag with a different ' + + 'account to an object with ACL "public-read-write"', + done => { + s3.putObjectAcl({ Bucket: bucketName, Key: objectName, ACL: 'public-read-write' }, err => { + if (err) { + return done(err); + } + return otherAccountS3.getObjectTagging({ Bucket: bucketName, Key: objectName }, err => { + _checkError(err, 'AccessDenied', 403); + done(); + }); }); - }); - }); + } + ); - it('should return 403 AccessDenied getting tag to an object ' + - 'in a bucket created with a different account', - done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => otherAccountS3.getObjectTagging({ Bucket: bucketName, - Key: objectNameAcl }, err => next(err)), - ], err => { - _checkError(err, 'AccessDenied', 403); - done(); - }); - }); + it( + 'should return 403 
AccessDenied getting tag to an object ' + 'in a bucket created with a different account', + done => { + async.waterfall( + [ + next => s3.putBucketAcl({ Bucket: bucketName, ACL: 'public-read-write' }, err => next(err)), + next => otherAccountS3.putObject({ Bucket: bucketName, Key: objectNameAcl }, err => next(err)), + next => + otherAccountS3.getObjectTagging({ Bucket: bucketName, Key: objectNameAcl }, err => + next(err) + ), + ], + err => { + _checkError(err, 'AccessDenied', 403); + done(); + } + ); + } + ); - it('should get tag to an object in a bucket created with same ' + - 'account', done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => s3.getObjectTagging({ Bucket: bucketName, - Key: objectNameAcl }, err => next(err)), - ], done); + it('should get tag to an object in a bucket created with same ' + 'account', done => { + async.waterfall( + [ + next => s3.putBucketAcl({ Bucket: bucketName, ACL: 'public-read-write' }, err => next(err)), + next => otherAccountS3.putObject({ Bucket: bucketName, Key: objectNameAcl }, err => next(err)), + next => s3.getObjectTagging({ Bucket: bucketName, Key: objectNameAcl }, err => next(err)), + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/getObjectLegalHold.js b/tests/functional/aws-node-sdk/test/object/getObjectLegalHold.js index 71fcfa4a7e..0fce1455c7 100644 --- a/tests/functional/aws-node-sdk/test/object/getObjectLegalHold.js +++ b/tests/functional/aws-node-sdk/test/object/getObjectLegalHold.js @@ -26,133 +26,162 @@ describeSkipIfCeph('GET object legal hold', () => { beforeEach(() => { process.stdout.write('Putting buckets and objects\n'); - return s3.createBucket({ - Bucket: bucket, - ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise()) - .then(() => s3.putObject({ Bucket: unlockedBucket, Key: key }).promise()) - .then(() => s3.putObject({ Bucket: bucket, Key: keyNoHold }).promise()) - .then(() => s3.putObject({ Bucket: bucket, Key: key }).promise()) - .then(res => { - versionId = res.VersionId; - process.stdout.write('Putting object legal hold\n'); - return s3.putObjectLegalHold({ + return s3 + .createBucket({ Bucket: bucket, - Key: key, - LegalHold: { Status: 'ON' }, - }).promise(); - }) - .catch(err => { - process.stdout.write('Error in beforeEach\n'); - throw err; - }); + ObjectLockEnabledForBucket: true, + }) + .promise() + .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise()) + .then(() => s3.putObject({ Bucket: unlockedBucket, Key: key }).promise()) + .then(() => s3.putObject({ Bucket: bucket, Key: keyNoHold }).promise()) + .then(() => s3.putObject({ Bucket: bucket, Key: key }).promise()) + .then(res => { + versionId = res.VersionId; + process.stdout.write('Putting object legal hold\n'); + return s3 + .putObjectLegalHold({ + Bucket: bucket, + Key: key, + LegalHold: { Status: 'ON' }, + }) + .promise(); + }) + .catch(err => { + process.stdout.write('Error in beforeEach\n'); + throw err; + }); }); afterEach(() => { process.stdout.write('Removing object lock\n'); return changeLockPromise([{ bucket, key, versionId }], '') - .then(() => { - process.stdout.write('Emptying and deleting buckets\n'); - return bucketUtil.empty(bucket); - }) - .then(() => bucketUtil.empty(unlockedBucket)) - .then(() => bucketUtil.deleteMany([bucket, unlockedBucket])) - .catch(err 
=> { - process.stdout.write('Error in afterEach'); - throw err; - }); + .then(() => { + process.stdout.write('Emptying and deleting buckets\n'); + return bucketUtil.empty(bucket); + }) + .then(() => bucketUtil.empty(unlockedBucket)) + .then(() => bucketUtil.deleteMany([bucket, unlockedBucket])) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); - it('should return AccessDenied getting legal hold with another account', - done => { - otherAccountS3.getObjectLegalHold({ + it('should return AccessDenied getting legal hold with another account', done => { + otherAccountS3.getObjectLegalHold( + { Bucket: bucket, Key: key, - }, err => { + }, + err => { checkError(err, 'AccessDenied', 403); done(); - }); - }); + } + ); + }); it('should return NoSuchKey error if key does not exist', done => { - s3.getObjectLegalHold({ - Bucket: bucket, - Key: 'thiskeydoesnotexist', - }, err => { - checkError(err, 'NoSuchKey', 404); - done(); - }); + s3.getObjectLegalHold( + { + Bucket: bucket, + Key: 'thiskeydoesnotexist', + }, + err => { + checkError(err, 'NoSuchKey', 404); + done(); + } + ); }); it('should return NoSuchVersion error if version does not exist', done => { - s3.getObjectLegalHold({ - Bucket: bucket, - Key: key, - VersionId: '012345678901234567890123456789012', - }, err => { - checkError(err, 'NoSuchVersion', 404); - done(); - }); + s3.getObjectLegalHold( + { + Bucket: bucket, + Key: key, + VersionId: '012345678901234567890123456789012', + }, + err => { + checkError(err, 'NoSuchVersion', 404); + done(); + } + ); }); it('should return MethodNotAllowed if object version is delete marker', done => { s3.deleteObject({ Bucket: bucket, Key: key }, (err, res) => { assert.ifError(err); - s3.getObjectLegalHold({ - Bucket: bucket, - Key: key, - VersionId: res.VersionId, - }, err => { - checkError(err, 'MethodNotAllowed', 405); - done(); - }); + s3.getObjectLegalHold( + { + Bucket: bucket, + Key: key, + VersionId: res.VersionId, + }, + err => { + checkError(err, 'MethodNotAllowed', 405); + done(); + } + ); }); }); it('should return NoSuchKey if latest version is delete marker', done => { s3.deleteObject({ Bucket: bucket, Key: key }, err => { assert.ifError(err); - s3.getObjectLegalHold({ - Bucket: bucket, - Key: key, - }, err => { - checkError(err, 'NoSuchKey', 404); - done(); - }); + s3.getObjectLegalHold( + { + Bucket: bucket, + Key: key, + }, + err => { + checkError(err, 'NoSuchKey', 404); + done(); + } + ); }); }); - it('should return InvalidRequest error getting legal hold of object ' + - 'inside object lock disabled bucket', done => { - s3.getObjectLegalHold({ - Bucket: unlockedBucket, - Key: key, - }, err => { - checkError(err, 'InvalidRequest', 400); - done(); - }); - }); + it( + 'should return InvalidRequest error getting legal hold of object ' + 'inside object lock disabled bucket', + done => { + s3.getObjectLegalHold( + { + Bucket: unlockedBucket, + Key: key, + }, + err => { + checkError(err, 'InvalidRequest', 400); + done(); + } + ); + } + ); it('should return NoSuchObjectLockConfiguration if no legal hold set', done => { - s3.getObjectLegalHold({ - Bucket: bucket, - Key: keyNoHold, - }, err => { - checkError(err, 'NoSuchObjectLockConfiguration', 404); - done(); - }); + s3.getObjectLegalHold( + { + Bucket: bucket, + Key: keyNoHold, + }, + err => { + checkError(err, 'NoSuchObjectLockConfiguration', 404); + done(); + } + ); }); it('should get object legal hold', done => { - s3.getObjectLegalHold({ - Bucket: bucket, - Key: key, - }, (err, res) => { - 
assert.ifError(err); - assert.deepStrictEqual(res.LegalHold, { Status: 'ON' }); - changeObjectLock([{ bucket, key, versionId }], '', done); - }); + s3.getObjectLegalHold( + { + Bucket: bucket, + Key: key, + }, + (err, res) => { + assert.ifError(err); + assert.deepStrictEqual(res.LegalHold, { Status: 'ON' }); + changeObjectLock([{ bucket, key, versionId }], '', done); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/getPartSize.js b/tests/functional/aws-node-sdk/test/object/getPartSize.js index 32e7040f4e..6cc585238f 100644 --- a/tests/functional/aws-node-sdk/test/object/getPartSize.js +++ b/tests/functional/aws-node-sdk/test/object/getPartSize.js @@ -24,8 +24,7 @@ function checkError(err, statusCode, code) { } function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } function generateContent(partNumber) { @@ -38,101 +37,123 @@ describe('Part size tests with object head', () => { let s3; function headObject(fields, cb) { - s3.headObject({ - Bucket: bucket, - Key: object, - ...fields, - }, cb); + s3.headObject( + { + Bucket: bucket, + Key: object, + ...fields, + }, + cb + ); } before(function beforeF(done) { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - async.series([ - next => s3.createBucket({ Bucket: bucket }, err => next(err)), - next => s3.createMultipartUpload({ - Bucket: bucket, - Key: object - }, (err, data) => { - checkNoError(err); - this.currentTest.UploadId = data.UploadId; - return next(); - }), - next => async.mapSeries(partNumbers, (partNumber, callback) => { - const uploadPartParams = { - Bucket: bucket, - Key: object, - PartNumber: partNumber + 1, - UploadId: this.currentTest.UploadId, - Body: generateContent(partNumber + 1), - }; - - return s3.uploadPart(uploadPartParams, - (err, data) => { - if (err) { - return callback(err); + async.series( + [ + next => s3.createBucket({ Bucket: bucket }, err => next(err)), + next => + s3.createMultipartUpload( + { + Bucket: bucket, + Key: object, + }, + (err, data) => { + checkNoError(err); + this.currentTest.UploadId = data.UploadId; + return next(); + } + ), + next => + async.mapSeries( + partNumbers, + (partNumber, callback) => { + const uploadPartParams = { + Bucket: bucket, + Key: object, + PartNumber: partNumber + 1, + UploadId: this.currentTest.UploadId, + Body: generateContent(partNumber + 1), + }; + + return s3.uploadPart(uploadPartParams, (err, data) => { + if (err) { + return callback(err); + } + return callback(null, data.ETag); + }); + }, + (err, results) => { + checkNoError(err); + ETags = results; + return next(); } - return callback(null, data.ETag); - }); - }, (err, results) => { + ), + next => { + const params = { + Bucket: bucket, + Key: object, + MultipartUpload: { + Parts: partNumbers.map(partNumber => ({ + ETag: ETags[partNumber], + PartNumber: partNumber + 1, + })), + }, + UploadId: this.currentTest.UploadId, + }; + return s3.completeMultipartUpload(params, next); + }, + next => + s3.putObject( + { + Bucket: bucket, + Key: emptyObject, + Body: '', + }, + next + ), + next => + s3.putObject( + { + Bucket: bucket, + Key: nonMpuObject, + Body: generateContent(0), + }, + next + ), + ], + err => { checkNoError(err); - ETags = results; - return next(); - }), - next => { - const params = { - Bucket: bucket, - Key: object, - MultipartUpload: { - Parts: partNumbers.map(partNumber => ({ - ETag: ETags[partNumber], - PartNumber: partNumber 
+ 1, - })), - }, - UploadId: this.currentTest.UploadId, - }; - return s3.completeMultipartUpload(params, next); - }, - next => s3.putObject({ - Bucket: bucket, - Key: emptyObject, - Body: '', - }, next), - next => s3.putObject({ - Bucket: bucket, - Key: nonMpuObject, - Body: generateContent(0), - }, next), - ], err => { - checkNoError(err); - done(); - }); + done(); + } + ); }); after(done => { - async.series([ - next => s3.deleteObject({ Bucket: bucket, Key: object }, next), - next => s3.deleteObject({ Bucket: bucket, Key: emptyObject }, next), - next => s3.deleteObject({ Bucket: bucket, Key: nonMpuObject }, next), - next => s3.deleteBucket({ Bucket: bucket }, next), - ], done); + async.series( + [ + next => s3.deleteObject({ Bucket: bucket, Key: object }, next), + next => s3.deleteObject({ Bucket: bucket, Key: emptyObject }, next), + next => s3.deleteObject({ Bucket: bucket, Key: nonMpuObject }, next), + next => s3.deleteBucket({ Bucket: bucket }, next), + ], + done + ); }); - it('should return the total size of the object ' + - 'when --part-number is not used', done => { - const totalSize = partNumbers.reduce((total, current) => - total + (bodySize + current + 1), 0); - headObject({}, (err, data) => { - checkNoError(err); - assert.equal(totalSize, data.ContentLength); - done(); - }); + it('should return the total size of the object ' + 'when --part-number is not used', done => { + const totalSize = partNumbers.reduce((total, current) => total + (bodySize + current + 1), 0); + headObject({}, (err, data) => { + checkNoError(err); + assert.equal(totalSize, data.ContentLength); + done(); }); + }); partNumbers.forEach(part => { - it(`should return the size of part ${part + 1} ` + - `when --part-number is set to ${part + 1}`, done => { + it(`should return the size of part ${part + 1} ` + `when --part-number is set to ${part + 1}`, done => { const partNumber = Number.parseInt(part, 10) + 1; const partSize = bodySize + partNumber; headObject({ PartNumber: partNumber }, (err, data) => { @@ -144,8 +165,7 @@ describe('Part size tests with object head', () => { }); invalidPartNumbers.forEach(part => { - it(`should return an error when --part-number is set to ${part}`, - done => { + it(`should return an error when --part-number is set to ${part}`, done => { headObject({ PartNumber: part }, (err, data) => { checkError(err, 400, 'BadRequest'); assert.strictEqual(data, null); @@ -154,15 +174,13 @@ describe('Part size tests with object head', () => { }); }); - it('should return an error when incorrect --part-number is used', - done => { - headObject({ PartNumber: partNumbers.length + 1 }, - (err, data) => { - checkError(err, 416, 416); - assert.strictEqual(data, null); - done(); - }); + it('should return an error when incorrect --part-number is used', done => { + headObject({ PartNumber: partNumbers.length + 1 }, (err, data) => { + checkError(err, 416, 416); + assert.strictEqual(data, null); + done(); }); + }); it('should return content-length 0 when requesting part 1 of empty object', done => { headObject({ Key: emptyObject, PartNumber: 1 }, (err, data) => { diff --git a/tests/functional/aws-node-sdk/test/object/getRange.js b/tests/functional/aws-node-sdk/test/object/getRange.js index d95cb19b19..cdbf8b37bc 100644 --- a/tests/functional/aws-node-sdk/test/object/getRange.js +++ b/tests/functional/aws-node-sdk/test/object/getRange.js @@ -28,45 +28,49 @@ describe('aws-node-sdk range test of large end position', () => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = 
bucketUtil.s3; - return s3.createBucket({ Bucket: bucketName }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }).then(() => - s3.putObject({ - Bucket: bucketName, - Key: objName, - Body: Buffer.allocUnsafe(2890).fill(0, 0, 2800) - .fill(1, 2800), - }).promise()) - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucketName }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }) + .then(() => + s3 + .putObject({ + Bucket: bucketName, + Key: objName, + Body: Buffer.allocUnsafe(2890).fill(0, 0, 2800).fill(1, 2800), + }) + .promise() + ) + .catch(err => { + process.stdout.write(`Error in beforeEach: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + return bucketUtil + .empty(bucketName) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteOne(bucketName); + }) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + throw err; + }); }); - it('should get the final 90 bytes of a 2890 byte object for a byte ' + - 'range of 2800-', - done => endRangeTest('bytes=2800-', 'bytes 2800-2889/2890', done) + it('should get the final 90 bytes of a 2890 byte object for a byte ' + 'range of 2800-', done => + endRangeTest('bytes=2800-', 'bytes 2800-2889/2890', done) ); - it('should get the final 90 bytes of a 2890 byte object for a byte ' + - 'range of 2800-Number.MAX_SAFE_INTEGER', - done => endRangeTest(`bytes=2800-${Number.MAX_SAFE_INTEGER}`, - 'bytes 2800-2889/2890', done) + it( + 'should get the final 90 bytes of a 2890 byte object for a byte ' + 'range of 2800-Number.MAX_SAFE_INTEGER', + done => endRangeTest(`bytes=2800-${Number.MAX_SAFE_INTEGER}`, 'bytes 2800-2889/2890', done) ); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/getRetention.js b/tests/functional/aws-node-sdk/test/object/getRetention.js index 21e60e0b0e..c1753f08fc 100644 --- a/tests/functional/aws-node-sdk/test/object/getRetention.js +++ b/tests/functional/aws-node-sdk/test/object/getRetention.js @@ -45,123 +45,146 @@ describeSkipIfCeph('GET object retention', () => { beforeEach(() => { process.stdout.write('Putting buckets and objects\n'); - return s3.createBucket({ - Bucket: bucketName, - ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise()) - .then(() => s3.putObject({ Bucket: unlockedBucket, Key: objectName }).promise()) - .then(() => s3.putObject({ Bucket: bucketName, Key: noRetentionObject }).promise()) - .then(() => s3.putObject({ Bucket: bucketName, Key: objectName }).promise()) - .then(res => { - versionId = res.VersionId; - process.stdout.write('Putting object retention\n'); - return s3.putObjectRetention({ + return s3 + .createBucket({ Bucket: bucketName, - Key: objectName, - Retention: retentionConfig, - }).promise(); - }) - .catch(err => { - process.stdout.write('Error in beforeEach\n'); - throw err; - }); + ObjectLockEnabledForBucket: true, + }) + .promise() + .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise()) + .then(() => s3.putObject({ Bucket: unlockedBucket, Key: objectName }).promise()) + .then(() => 
s3.putObject({ Bucket: bucketName, Key: noRetentionObject }).promise()) + .then(() => s3.putObject({ Bucket: bucketName, Key: objectName }).promise()) + .then(res => { + versionId = res.VersionId; + process.stdout.write('Putting object retention\n'); + return s3 + .putObjectRetention({ + Bucket: bucketName, + Key: objectName, + Retention: retentionConfig, + }) + .promise(); + }) + .catch(err => { + process.stdout.write('Error in beforeEach\n'); + throw err; + }); }); afterEach(() => { process.stdout.write('Removing object lock\n'); return changeLockPromise([{ bucket: bucketName, key: objectName, versionId }], '') - .then(() => { - process.stdout.write('Emptying and deleting buckets\n'); - return bucketUtil.empty(bucketName); - }) - .then(() => bucketUtil.empty(unlockedBucket)) - .then(() => bucketUtil.deleteMany([bucketName, unlockedBucket])) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + .then(() => { + process.stdout.write('Emptying and deleting buckets\n'); + return bucketUtil.empty(bucketName); + }) + .then(() => bucketUtil.empty(unlockedBucket)) + .then(() => bucketUtil.deleteMany([bucketName, unlockedBucket])) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); - it('should return AccessDenied putting retention with another account', - done => { - otherAccountS3.getObjectRetention({ - Bucket: bucketName, - Key: objectName, - }, err => { - checkError(err, 'AccessDenied', 403); - done(); - }); + it('should return AccessDenied putting retention with another account', done => { + otherAccountS3.getObjectRetention( + { + Bucket: bucketName, + Key: objectName, + }, + err => { + checkError(err, 'AccessDenied', 403); + done(); + } + ); }); it('should return NoSuchKey error if key does not exist', done => { - s3.getObjectRetention({ - Bucket: bucketName, - Key: 'thiskeydoesnotexist', - }, err => { - checkError(err, 'NoSuchKey', 404); - done(); - }); + s3.getObjectRetention( + { + Bucket: bucketName, + Key: 'thiskeydoesnotexist', + }, + err => { + checkError(err, 'NoSuchKey', 404); + done(); + } + ); }); it('should return NoSuchVersion error if version does not exist', done => { - s3.getObjectRetention({ - Bucket: bucketName, - Key: objectName, - VersionId: '012345678901234567890123456789012', - }, err => { - checkError(err, 'NoSuchVersion', 404); - done(); - }); - }); - - it('should return MethodNotAllowed if object version is delete marker', - done => { - s3.deleteObject({ Bucket: bucketName, Key: objectName }, (err, res) => { - assert.ifError(err); - s3.getObjectRetention({ + s3.getObjectRetention( + { Bucket: bucketName, Key: objectName, - VersionId: res.VersionId, - }, err => { - checkError(err, 'MethodNotAllowed', 405); + VersionId: '012345678901234567890123456789012', + }, + err => { + checkError(err, 'NoSuchVersion', 404); done(); - }); - }); + } + ); }); - it('should return InvalidRequest error getting retention to object ' + - 'in bucket with no object lock enabled', done => { - s3.getObjectRetention({ - Bucket: unlockedBucket, - Key: objectName, - }, err => { - checkError(err, 'InvalidRequest', 400); - done(); + it('should return MethodNotAllowed if object version is delete marker', done => { + s3.deleteObject({ Bucket: bucketName, Key: objectName }, (err, res) => { + assert.ifError(err); + s3.getObjectRetention( + { + Bucket: bucketName, + Key: objectName, + VersionId: res.VersionId, + }, + err => { + checkError(err, 'MethodNotAllowed', 405); + done(); + } + ); }); }); - it('should return 
NoSuchObjectLockConfiguration if no retention set', - done => { - s3.getObjectRetention({ - Bucket: bucketName, - Key: noRetentionObject, - }, err => { - checkError(err, 'NoSuchObjectLockConfiguration', 404); - done(); - }); + it( + 'should return InvalidRequest error getting retention to object ' + 'in bucket with no object lock enabled', + done => { + s3.getObjectRetention( + { + Bucket: unlockedBucket, + Key: objectName, + }, + err => { + checkError(err, 'InvalidRequest', 400); + done(); + } + ); + } + ); + + it('should return NoSuchObjectLockConfiguration if no retention set', done => { + s3.getObjectRetention( + { + Bucket: bucketName, + Key: noRetentionObject, + }, + err => { + checkError(err, 'NoSuchObjectLockConfiguration', 404); + done(); + } + ); }); it('should get object retention', done => { - s3.getObjectRetention({ - Bucket: bucketName, - Key: objectName, - }, (err, res) => { - assert.ifError(err); - assert.deepStrictEqual(res.Retention, expectedConfig); - changeObjectLock([ - { bucket: bucketName, key: objectName, versionId }], '', done); - }); + s3.getObjectRetention( + { + Bucket: bucketName, + Key: objectName, + }, + (err, res) => { + assert.ifError(err); + assert.deepStrictEqual(res.Retention, expectedConfig); + changeObjectLock([{ bucket: bucketName, key: objectName, versionId }], '', done); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/initiateMPU.js b/tests/functional/aws-node-sdk/test/object/initiateMPU.js index 44f7087d2e..c0da541dd6 100644 --- a/tests/functional/aws-node-sdk/test/object/initiateMPU.js +++ b/tests/functional/aws-node-sdk/test/object/initiateMPU.js @@ -3,8 +3,7 @@ const async = require('async'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); -const genMaxSizeMetaHeaders - = require('../../lib/utility/genMaxSizeMetaHeaders'); +const genMaxSizeMetaHeaders = require('../../lib/utility/genMaxSizeMetaHeaders'); const { generateMultipleTagQuery } = require('../../lib/utility/tagging'); const bucket = `initiatempubucket${Date.now()}`; @@ -18,178 +17,211 @@ describe('Initiate MPU', () => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => bucketUtil.deleteOne(bucket)); - it('should return InvalidRedirectLocation if initiate MPU ' + - 'with x-amz-website-redirect-location header that does not start ' + - 'with \'http://\', \'https://\' or \'/\'', done => { - const params = { Bucket: bucket, Key: key, - WebsiteRedirectLocation: 'google.com' }; - s3.createMultipartUpload(params, err => { - assert.strictEqual(err.code, 'InvalidRedirectLocation'); - assert.strictEqual(err.statusCode, 400); - done(); - }); - }); + it( + 'should return InvalidRedirectLocation if initiate MPU ' + + 'with x-amz-website-redirect-location header that does not start ' + + "with 'http://', 'https://' or '/'", + done => { + const params = { Bucket: bucket, Key: key, WebsiteRedirectLocation: 'google.com' }; + s3.createMultipartUpload(params, err => { + assert.strictEqual(err.code, 'InvalidRedirectLocation'); + assert.strictEqual(err.statusCode, 400); + done(); + }); + } + ); - it('should return InvalidStorageClass error when 
x-amz-storage-class header is provided ' + - 'and not equal to STANDARD', done => - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - StorageClass: 'COLD', - }, err => { - assert.strictEqual(err.code, 'InvalidStorageClass'); - assert.strictEqual(err.statusCode, 400); - done(); - }) + it( + 'should return InvalidStorageClass error when x-amz-storage-class header is provided ' + + 'and not equal to STANDARD', + done => + s3.createMultipartUpload( + { + Bucket: bucket, + Key: key, + StorageClass: 'COLD', + }, + err => { + assert.strictEqual(err.code, 'InvalidStorageClass'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ) ); - it('should return error if initiating MPU w/ > 2KB user-defined md', - done => { + it('should return error if initiating MPU w/ > 2KB user-defined md', done => { const metadata = genMaxSizeMetaHeaders(); const params = { Bucket: bucket, Key: key, Metadata: metadata }; - async.waterfall([ - next => s3.createMultipartUpload(params, (err, data) => { - assert.strictEqual(err, null, `Unexpected err: ${err}`); - next(null, data.UploadId); - }), - (uploadId, next) => s3.abortMultipartUpload({ - Bucket: bucket, - Key: key, - UploadId: uploadId, - }, err => { - assert.strictEqual(err, null, `Unexpected err: ${err}`); - // add one more byte to push over limit for next call - metadata.header0 = `${metadata.header0}${'0'}`; - next(); - }), - next => s3.createMultipartUpload(params, next), - ], err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'MetadataTooLarge'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + async.waterfall( + [ + next => + s3.createMultipartUpload(params, (err, data) => { + assert.strictEqual(err, null, `Unexpected err: ${err}`); + next(null, data.UploadId); + }), + (uploadId, next) => + s3.abortMultipartUpload( + { + Bucket: bucket, + Key: key, + UploadId: uploadId, + }, + err => { + assert.strictEqual(err, null, `Unexpected err: ${err}`); + // add one more byte to push over limit for next call + metadata.header0 = `${metadata.header0}${'0'}`; + next(); + } + ), + next => s3.createMultipartUpload(params, next), + ], + err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'MetadataTooLarge'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); }); describe('with tag set', () => { - it('should be able to put object with 10 tags', - done => { + it('should be able to put object with 10 tags', done => { const taggingConfig = generateMultipleTagQuery(10); - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - Tagging: taggingConfig, - }, err => { - assert.ifError(err); - done(); - }); + s3.createMultipartUpload( + { + Bucket: bucket, + Key: key, + Tagging: taggingConfig, + }, + err => { + assert.ifError(err); + done(); + } + ); }); it('should allow putting 50 tags', done => { const taggingConfig = generateMultipleTagQuery(50); - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - Tagging: taggingConfig, - }, err => { - assert.ifError(err); - done(); - }); + s3.createMultipartUpload( + { + Bucket: bucket, + Key: key, + Tagging: taggingConfig, + }, + err => { + assert.ifError(err); + done(); + } + ); }); - it('should return BadRequest if putting more that 50 tags', - done => { + it('should return BadRequest if putting more that 50 tags', done => { const taggingConfig = generateMultipleTagQuery(51); - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - Tagging: taggingConfig, - }, err => { - assert(err, 'Expected err but did not 
find one'); - assert.strictEqual(err.code, 'BadRequest'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + s3.createMultipartUpload( + { + Bucket: bucket, + Key: key, + Tagging: taggingConfig, + }, + err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'BadRequest'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); }); - it('should return InvalidArgument creating mpu tag with ' + - 'invalid characters: %', done => { + it('should return InvalidArgument creating mpu tag with ' + 'invalid characters: %', done => { const value = 'value1%'; - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - Tagging: `key1=${value}`, - }, err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + s3.createMultipartUpload( + { + Bucket: bucket, + Key: key, + Tagging: `key1=${value}`, + }, + err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'InvalidArgument'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); }); - it('should return InvalidArgument creating mpu with ' + - 'bad encoded tags', done => { - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - Tagging: 'key1==value1', - }, err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + it('should return InvalidArgument creating mpu with ' + 'bad encoded tags', done => { + s3.createMultipartUpload( + { + Bucket: bucket, + Key: key, + Tagging: 'key1==value1', + }, + err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'InvalidArgument'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); }); it('should return InvalidArgument if tag with no key', done => { - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - Tagging: '=value1', - }, err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + s3.createMultipartUpload( + { + Bucket: bucket, + Key: key, + Tagging: '=value1', + }, + err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'InvalidArgument'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); }); - it('should return InvalidArgument if using the same key twice', - done => { - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - Tagging: 'key1=value1&key1=value2', - }, err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + it('should return InvalidArgument if using the same key twice', done => { + s3.createMultipartUpload( + { + Bucket: bucket, + Key: key, + Tagging: 'key1=value1&key1=value2', + }, + err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'InvalidArgument'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); }); - it('should return InvalidArgument if using the same key twice ' + - 'and empty tags', done => { - s3.putObject({ - Bucket: bucket, - Key: key, - Tagging: '&&&&&&&&&&&&&&&&&key1=value1&key1=value2', - }, - err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + it('should 
return InvalidArgument if using the same key twice ' + 'and empty tags', done => { + s3.putObject( + { + Bucket: bucket, + Key: key, + Tagging: '&&&&&&&&&&&&&&&&&key1=value1&key1=value2', + }, + err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'InvalidArgument'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/listParts.js b/tests/functional/aws-node-sdk/test/object/listParts.js index 8ed7474224..81ff705555 100644 --- a/tests/functional/aws-node-sdk/test/object/listParts.js +++ b/tests/functional/aws-node-sdk/test/object/listParts.js @@ -9,8 +9,7 @@ const bodyFirstPart = Buffer.allocUnsafe(10).fill(0); const bodySecondPart = Buffer.allocUnsafe(20).fill(0); function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } describe('List parts', () => { @@ -23,56 +22,86 @@ describe('List parts', () => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => s3.createMultipartUpload({ - Bucket: bucket, Key: key }).promise()) - .then(res => { - uploadId = res.UploadId; - return s3.uploadPart({ Bucket: bucket, Key: key, - PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart, - }).promise(); - }).then(() => s3.uploadPart({ - Bucket: bucket, Key: key, - PartNumber: 2, UploadId: uploadId, Body: bodySecondPart, - }).promise()).then(res => { - secondEtag = res.ETag; - return secondEtag; - }) - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .then(() => + s3 + .createMultipartUpload({ + Bucket: bucket, + Key: key, + }) + .promise() + ) + .then(res => { + uploadId = res.UploadId; + return s3 + .uploadPart({ + Bucket: bucket, + Key: key, + PartNumber: 1, + UploadId: uploadId, + Body: bodyFirstPart, + }) + .promise(); + }) + .then(() => + s3 + .uploadPart({ + Bucket: bucket, + Key: key, + PartNumber: 2, + UploadId: uploadId, + Body: bodySecondPart, + }) + .promise() + ) + .then(res => { + secondEtag = res.ETag; + return secondEtag; + }) + .catch(err => { + process.stdout.write(`Error in beforeEach: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket'); - return s3.abortMultipartUpload({ - Bucket: bucket, Key: key, UploadId: uploadId, - }).promise() - .then(() => bucketUtil.empty(bucket)) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return s3 + .abortMultipartUpload({ + Bucket: bucket, + Key: key, + UploadId: uploadId, + }) + .promise() + .then(() => bucketUtil.empty(bucket)) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); it('should only list the second part', done => { - s3.listParts({ - Bucket: bucket, - Key: key, - PartNumberMarker: 1, - UploadId: uploadId }, - (err, data) => { - checkNoError(err); - assert.strictEqual(data.Parts[0].PartNumber, 2); - assert.strictEqual(data.Parts[0].Size, 20); - assert.strictEqual(`${data.Parts[0].ETag}`, secondEtag); - done(); - }); + s3.listParts( + { + Bucket: bucket, + Key: key, + 
PartNumberMarker: 1, + UploadId: uploadId, + }, + (err, data) => { + checkNoError(err); + assert.strictEqual(data.Parts[0].PartNumber, 2); + assert.strictEqual(data.Parts[0].Size, 20); + assert.strictEqual(`${data.Parts[0].ETag}`, secondEtag); + done(); + } + ); }); }); }); @@ -81,40 +110,56 @@ describe('List parts', () => { function createPart(sigCfg, bucketUtil, s3, key) { let uploadId; - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => s3.createMultipartUpload({ - Bucket: bucket, Key: key }).promise()) - .then(res => { - uploadId = res.UploadId; - return s3.uploadPart({ Bucket: bucket, Key: key, - PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart }).promise(); - }) - .then(() => Promise.resolve(uploadId)); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .then(() => + s3 + .createMultipartUpload({ + Bucket: bucket, + Key: key, + }) + .promise() + ) + .then(res => { + uploadId = res.UploadId; + return s3 + .uploadPart({ Bucket: bucket, Key: key, PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart }) + .promise(); + }) + .then(() => Promise.resolve(uploadId)); } function deletePart(s3, bucketUtil, key, uploadId) { process.stdout.write('Emptying bucket'); - return s3.abortMultipartUpload({ - Bucket: bucket, Key: key, UploadId: uploadId, - }).promise() - .then(() => bucketUtil.empty(bucket)) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucket); - }); + return s3 + .abortMultipartUpload({ + Bucket: bucket, + Key: key, + UploadId: uploadId, + }) + .promise() + .then(() => bucketUtil.empty(bucket)) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteOne(bucket); + }); } function testFunc(s3, bucket, key, uploadId, cb) { - s3.listParts({ - Bucket: bucket, - Key: key, - UploadId: uploadId }, - (err, data) => { - checkNoError(err); - assert.strictEqual(data.Key, key); - cb(); - }); + s3.listParts( + { + Bucket: bucket, + Key: key, + UploadId: uploadId, + }, + (err, data) => { + checkNoError(err); + assert.strictEqual(data.Key, key); + cb(); + } + ); } describe('List parts - object keys with special characters: `&`', () => { @@ -125,8 +170,7 @@ describe('List parts - object keys with special characters: `&`', () => { const key = '&'; beforeEach(() => - createPart(sigCfg, bucketUtil, s3, key) - .then(res => { + createPart(sigCfg, bucketUtil, s3, key).then(res => { uploadId = res; return Promise.resolve(); }) @@ -134,8 +178,7 @@ describe('List parts - object keys with special characters: `&`', () => { afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); - it('should list parts of an object with `&` in its key', - done => testFunc(s3, bucket, key, uploadId, done)); + it('should list parts of an object with `&` in its key', done => testFunc(s3, bucket, key, uploadId, done)); }); }); @@ -147,8 +190,7 @@ describe('List parts - object keys with special characters: `"`', () => { const key = '"quot'; beforeEach(() => - createPart(sigCfg, bucketUtil, s3, key) - .then(res => { + createPart(sigCfg, bucketUtil, s3, key).then(res => { uploadId = res; return Promise.resolve(); }) @@ -156,21 +198,19 @@ describe('List parts - object keys with special characters: `"`', () => { afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); - it('should list parts of an object with `"` in its key', - done => testFunc(s3, bucket, key, uploadId, done)); + it('should list parts of an object with `"` in its key', done => testFunc(s3, bucket, key, uploadId, done)); }); }); -describe('List parts - object 
keys with special characters: `\'`', () => { +describe("List parts - object keys with special characters: `'`", () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); const s3 = bucketUtil.s3; let uploadId; - const key = '\'apos'; + const key = "'apos"; beforeEach(() => - createPart(sigCfg, bucketUtil, s3, key) - .then(res => { + createPart(sigCfg, bucketUtil, s3, key).then(res => { uploadId = res; return Promise.resolve(); }) @@ -178,8 +218,7 @@ describe('List parts - object keys with special characters: `\'`', () => { afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); - it('should list parts of an object with `\'` in its key', - done => testFunc(s3, bucket, key, uploadId, done)); + it("should list parts of an object with `'` in its key", done => testFunc(s3, bucket, key, uploadId, done)); }); }); @@ -191,8 +230,7 @@ describe('List parts - object keys with special characters: `<`', () => { const key = ' - createPart(sigCfg, bucketUtil, s3, key) - .then(res => { + createPart(sigCfg, bucketUtil, s3, key).then(res => { uploadId = res; return Promise.resolve(); }) @@ -200,8 +238,7 @@ describe('List parts - object keys with special characters: `<`', () => { afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); - it('should list parts of an object with `<` in its key', - done => testFunc(s3, bucket, key, uploadId, done)); + it('should list parts of an object with `<` in its key', done => testFunc(s3, bucket, key, uploadId, done)); }); }); @@ -213,8 +250,7 @@ describe('List parts - object keys with special characters: `>`', () => { const key = '>gt'; beforeEach(() => - createPart(sigCfg, bucketUtil, s3, key) - .then(res => { + createPart(sigCfg, bucketUtil, s3, key).then(res => { uploadId = res; return Promise.resolve(); }) @@ -222,7 +258,6 @@ describe('List parts - object keys with special characters: `>`', () => { afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); - it('should list parts of an object with `>` in its key', - done => testFunc(s3, bucket, key, uploadId, done)); + it('should list parts of an object with `>` in its key', done => testFunc(s3, bucket, key, uploadId, done)); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/mpu.js b/tests/functional/aws-node-sdk/test/object/mpu.js index 9b256ef7d7..1c70a17e61 100644 --- a/tests/functional/aws-node-sdk/test/object/mpu.js +++ b/tests/functional/aws-node-sdk/test/object/mpu.js @@ -37,22 +37,22 @@ function getExpectedObj(res, data) { NextUploadIdMarker: uploadId, MaxUploads: maxUploads, IsTruncated: false, - Uploads: [{ - UploadId: uploadId, - Key: objectKey, - Initiated: initiated, - StorageClass: 'STANDARD', - Owner: + Uploads: [ { - DisplayName: displayName, - ID: userId, - }, - Initiator: - { - DisplayName: displayName, - ID: userId, + UploadId: uploadId, + Key: objectKey, + Initiated: initiated, + StorageClass: 'STANDARD', + Owner: { + DisplayName: displayName, + ID: userId, + }, + Initiator: { + DisplayName: displayName, + ID: userId, + }, }, - }], + ], CommonPrefixes: [], }; @@ -85,59 +85,71 @@ describe('aws-node-sdk test suite of listMultipartUploads', () => bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => bucketUtil.getOwner()) - .then(res => { - // The owner of the bucket will also be the MPU upload owner. 
- data.displayName = res.DisplayName; - data.userId = res.ID; - }) - .then(() => s3.createMultipartUpload({ - Bucket: bucket, - Key: objectKey, - }).promise()) - .then(res => { - data.uploadId = res.UploadId; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .then(() => bucketUtil.getOwner()) + .then(res => { + // The owner of the bucket will also be the MPU upload owner. + data.displayName = res.DisplayName; + data.userId = res.ID; + }) + .then(() => + s3 + .createMultipartUpload({ + Bucket: bucket, + Key: objectKey, + }) + .promise() + ) + .then(res => { + data.uploadId = res.UploadId; + }); }); afterEach(() => - s3.abortMultipartUpload({ - Bucket: bucket, - Key: objectKey, - UploadId: data.uploadId, - }).promise() - .then(() => bucketUtil.empty(bucket)) - .then(() => bucketUtil.deleteOne(bucket)) + s3 + .abortMultipartUpload({ + Bucket: bucket, + Key: objectKey, + UploadId: data.uploadId, + }) + .promise() + .then(() => bucketUtil.empty(bucket)) + .then(() => bucketUtil.deleteOne(bucket)) ); it('should list ongoing multipart uploads', () => - s3.listMultipartUploads({ Bucket: bucket }).promise() - .then(res => checkValues(res, data)) - ); + s3 + .listMultipartUploads({ Bucket: bucket }) + .promise() + .then(res => checkValues(res, data))); it('should list ongoing multipart uploads with params', () => { data.prefixVal = 'to'; data.delimiter = 'test-delimiter'; data.maxUploads = 1; - return s3.listMultipartUploads({ - Bucket: bucket, - Prefix: 'to', - Delimiter: 'test-delimiter', - MaxUploads: 1, - }).promise() - .then(res => checkValues(res, data)); + return s3 + .listMultipartUploads({ + Bucket: bucket, + Prefix: 'to', + Delimiter: 'test-delimiter', + MaxUploads: 1, + }) + .promise() + .then(res => checkValues(res, data)); }); it('should list 0 multipart uploads when MaxUploads is 0', () => { data.maxUploads = 0; - return s3.listMultipartUploads({ - Bucket: bucket, - MaxUploads: 0, - }).promise() - .then(res => checkValues(res, data)); + return s3 + .listMultipartUploads({ + Bucket: bucket, + MaxUploads: 0, + }) + .promise() + .then(res => checkValues(res, data)); }); - }) -); + })); diff --git a/tests/functional/aws-node-sdk/test/object/mpuOrder.js b/tests/functional/aws-node-sdk/test/object/mpuOrder.js index a80dfe3a60..442c978c5b 100644 --- a/tests/functional/aws-node-sdk/test/object/mpuOrder.js +++ b/tests/functional/aws-node-sdk/test/object/mpuOrder.js @@ -13,20 +13,19 @@ function checkError(err, statusCode, code) { } function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } const body = Buffer.alloc(1024 * 1024 * 5, 'a'); const testsOrder = [ - { values: [3, 8, 1000], err: false }, - { values: [8, 3, 1000], err: true }, - { values: [8, 1000, 3], err: true }, - { values: [1000, 3, 8], err: true }, - { values: [3, 1000, 8], err: true }, - { values: [1000, 8, 3], err: true }, - { values: [3, 3, 1000], err: true }, + { values: [3, 8, 1000], err: false }, + { values: [8, 3, 1000], err: true }, + { values: [8, 1000, 3], err: true }, + { values: [1000, 3, 8], err: true }, + { values: [3, 1000, 8], err: true }, + { values: [1000, 8, 3], err: true }, + { values: [3, 3, 1000], err: true }, ]; describe('More MPU tests', () => { @@ -37,83 +36,116 @@ describe('More MPU tests', () => { beforeEach(function beforeEachF(done) { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - async.waterfall([ - next => s3.createBucket({ 
Bucket: bucket }, err => next(err)), - next => s3.createMultipartUpload({ Bucket: bucket, - Key: object }, (err, data) => { - checkNoError(err); - this.currentTest.UploadId = data.UploadId; - return next(); - }), - next => s3.uploadPart({ - Bucket: bucket, - Key: object, - PartNumber: 1000, - Body: body, - UploadId: this.currentTest.UploadId }, (err, data) => { - checkNoError(err); - this.currentTest.Etag = data.ETag; - return next(); - }), - next => s3.uploadPart({ - Bucket: bucket, - Key: object, - PartNumber: 3, - Body: body, - UploadId: this.currentTest.UploadId }, err => next(err)), - next => s3.uploadPart({ - Bucket: bucket, - Key: object, - PartNumber: 8, - Body: body, - UploadId: this.currentTest.UploadId }, err => next(err)), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucket }, err => next(err)), + next => + s3.createMultipartUpload({ Bucket: bucket, Key: object }, (err, data) => { + checkNoError(err); + this.currentTest.UploadId = data.UploadId; + return next(); + }), + next => + s3.uploadPart( + { + Bucket: bucket, + Key: object, + PartNumber: 1000, + Body: body, + UploadId: this.currentTest.UploadId, + }, + (err, data) => { + checkNoError(err); + this.currentTest.Etag = data.ETag; + return next(); + } + ), + next => + s3.uploadPart( + { + Bucket: bucket, + Key: object, + PartNumber: 3, + Body: body, + UploadId: this.currentTest.UploadId, + }, + err => next(err) + ), + next => + s3.uploadPart( + { + Bucket: bucket, + Key: object, + PartNumber: 8, + Body: body, + UploadId: this.currentTest.UploadId, + }, + err => next(err) + ), + ], + done + ); }); afterEach(done => { - async.waterfall([ - next => s3.deleteObject({ Bucket: bucket, Key: object }, - err => next(err)), - next => s3.deleteBucket({ Bucket: bucket }, err => next(err)), - ], done); + async.waterfall( + [ + next => s3.deleteObject({ Bucket: bucket, Key: object }, err => next(err)), + next => s3.deleteBucket({ Bucket: bucket }, err => next(err)), + ], + done + ); }); testsOrder.forEach(testOrder => { - it('should complete MPU by concatenating the parts in ' + - `the following order: ${testOrder.values}`, function itF(done) { - async.waterfall([ - next => s3.completeMultipartUpload({ - Bucket: bucket, - Key: object, - MultipartUpload: { - Parts: [ - { - ETag: this.test.Etag, - PartNumber: testOrder.values[0], - }, - { - ETag: this.test.Etag, - PartNumber: testOrder.values[1], - }, - { - ETag: this.test.Etag, - PartNumber: testOrder.values[2], - }, - ], - }, - UploadId: this.test.UploadId }, next), - ], err => { - if (testOrder.err) { - checkError(err, 400, 'InvalidPartOrder'); - return s3.abortMultipartUpload({ - Bucket: bucket, - Key: object, - UploadId: this.test.UploadId, - }, done); - } - checkNoError(err); - return done(); - }); - }); + it( + 'should complete MPU by concatenating the parts in ' + `the following order: ${testOrder.values}`, + function itF(done) { + async.waterfall( + [ + next => + s3.completeMultipartUpload( + { + Bucket: bucket, + Key: object, + MultipartUpload: { + Parts: [ + { + ETag: this.test.Etag, + PartNumber: testOrder.values[0], + }, + { + ETag: this.test.Etag, + PartNumber: testOrder.values[1], + }, + { + ETag: this.test.Etag, + PartNumber: testOrder.values[2], + }, + ], + }, + UploadId: this.test.UploadId, + }, + next + ), + ], + err => { + if (testOrder.err) { + checkError(err, 400, 'InvalidPartOrder'); + return s3.abortMultipartUpload( + { + Bucket: bucket, + Key: object, + UploadId: this.test.UploadId, + }, + done + ); + } + checkNoError(err); + return done(); 
+ } + ); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/mpuVersion.js b/tests/functional/aws-node-sdk/test/object/mpuVersion.js index e175750a28..1cb9aabc17 100644 --- a/tests/functional/aws-node-sdk/test/object/mpuVersion.js +++ b/tests/functional/aws-node-sdk/test/object/mpuVersion.js @@ -21,57 +21,60 @@ const archive = { }; function putMPUVersion(s3, bucketName, objectName, vId, cb) { - async.waterfall([ - next => { - const params = { Bucket: bucketName, Key: objectName }; - const request = s3.createMultipartUpload(params); - if (vId !== undefined) { - request.on('build', () => { - request.httpRequest.headers['x-scal-s3-version-id'] = vId; - }); - } - return request.send(next); - }, - (resCreation, next) => { - const uploadId = resCreation.UploadId; - const params = { - Body: 'okok', - Bucket: bucketName, - Key: objectName, - PartNumber: 1, - UploadId: uploadId, - }; - const request = s3.uploadPart(params); - if (vId !== undefined) { - request.on('build', () => { - request.httpRequest.headers['x-scal-s3-version-id'] = vId; - }); - } - return request.send((err, res) => next(err, res, uploadId)); - }, - (res, uploadId, next) => { - const params = { - Bucket: bucketName, - Key: objectName, - MultipartUpload: { - Parts: [ - { - ETag: res.ETag, - PartNumber: 1 - }, - ] - }, - UploadId: uploadId, - }; - const request = s3.completeMultipartUpload(params); - if (vId !== undefined) { - request.on('build', () => { - request.httpRequest.headers['x-scal-s3-version-id'] = vId; - }); - } - return request.send(next); - }, - ], err => cb(err)); + async.waterfall( + [ + next => { + const params = { Bucket: bucketName, Key: objectName }; + const request = s3.createMultipartUpload(params); + if (vId !== undefined) { + request.on('build', () => { + request.httpRequest.headers['x-scal-s3-version-id'] = vId; + }); + } + return request.send(next); + }, + (resCreation, next) => { + const uploadId = resCreation.UploadId; + const params = { + Body: 'okok', + Bucket: bucketName, + Key: objectName, + PartNumber: 1, + UploadId: uploadId, + }; + const request = s3.uploadPart(params); + if (vId !== undefined) { + request.on('build', () => { + request.httpRequest.headers['x-scal-s3-version-id'] = vId; + }); + } + return request.send((err, res) => next(err, res, uploadId)); + }, + (res, uploadId, next) => { + const params = { + Bucket: bucketName, + Key: objectName, + MultipartUpload: { + Parts: [ + { + ETag: res.ETag, + PartNumber: 1, + }, + ], + }, + UploadId: uploadId, + }; + const request = s3.completeMultipartUpload(params); + if (vId !== undefined) { + request.on('build', () => { + request.httpRequest.headers['x-scal-s3-version-id'] = vId; + }); + } + return request.send(next); + }, + ], + err => cb(err) + ); } function putMPU(s3, bucketName, objectName, cb) { @@ -104,24 +107,28 @@ describe('MPU with x-scal-s3-version-id header', () => { beforeEach(done => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - async.series([ - next => metadata.setup(next), - next => s3.createBucket({ Bucket: bucketName }, next), - next => s3.createBucket({ Bucket: bucketNameMD, ObjectLockEnabledForBucket: true, }, next), - ], done); + async.series( + [ + next => metadata.setup(next), + next => s3.createBucket({ Bucket: bucketName }, next), + next => s3.createBucket({ Bucket: bucketNameMD, ObjectLockEnabledForBucket: true }, next), + ], + done + ); }); afterEach(() => { process.stdout.write('Emptying bucket'); - return bucketUtil.emptyMany([bucketName, bucketNameMD]) - .then(() => { 
- process.stdout.write('Deleting bucket'); - return bucketUtil.deleteMany([bucketName, bucketNameMD]); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return bucketUtil + .emptyMany([bucketName, bucketNameMD]) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteMany([bucketName, bucketNameMD]); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); it('should overwrite an MPU object', done => { @@ -130,38 +137,51 @@ describe('MPU with x-scal-s3-version-id header', () => { let versionsBefore; let versionsAfter; - async.series([ - next => putMPU(s3, bucketName, objectName, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, '', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'uploadId', 'microVersionId', 'x-amz-restore', - 'archive', 'dataStoreName', 'originOp']); - - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => putMPU(s3, bucketName, objectName, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => putMPUVersion(s3, bucketName, objectName, '', next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'originOp', + ]); + + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite an object', done => { @@ -171,39 +191,53 @@ describe('MPU with x-scal-s3-version-id header', () => { let versionsBefore; let versionsAfter; - async.series([ - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, '', next), - next => 
getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => putMPUVersion(s3, bucketName, objectName, '', next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite a version', done => { @@ -211,7 +245,7 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -220,42 +254,57 @@ describe('MPU with x-scal-s3-version-id header', () => { let versionsAfter; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - 
assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => putMPUVersion(s3, bucketName, objectName, vId, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite the current version if empty version id header', done => { @@ -263,7 +312,7 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -272,77 +321,99 @@ describe('MPU with x-scal-s3-version-id header', () => { let versionsAfter; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, '', next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + 
objMDBefore = objMD; + return next(err); + }), + next => putMPUVersion(s3, bucketName, objectName, '', next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); + }); it('should fail if version is invalid', done => { const vParams = { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => putMPUVersion(s3, bucketName, objectName, 'aJLWKz4Ko9IjBBgXKj5KQT.G9UHv0g7P', err => { - checkError(err, 'InvalidArgument', 400); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + putMPUVersion(s3, bucketName, objectName, 'aJLWKz4Ko9IjBBgXKj5KQT.G9UHv0g7P', err => { + checkError(err, 'InvalidArgument', 400); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + return done(); + } + ); }); it('should fail if key does not exist', done => { - async.series([ - next => putMPUVersion(s3, bucketName, objectName, '', err => { - checkError(err, 'NoSuchKey', 404); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => + putMPUVersion(s3, bucketName, objectName, '', err => { + checkError(err, 'NoSuchKey', 404); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + return done(); + } + ); }); it('should fail if version does not exist', done => { @@ -350,22 +421,31 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => putMPUVersion(s3, bucketName, objectName, - '393833343735313131383832343239393939393952473030312020313031', err => { - checkError(err, 'NoSuchVersion', 404); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + putMPUVersion( + s3, + bucketName, + objectName, + '393833343735313131383832343239393939393952473030312020313031', + err => { + checkError(err, 'NoSuchVersion', 404); + return next(); + } + ), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + 
return done(); + } + ); }); it('should overwrite a non-current null version', done => { @@ -373,7 +453,7 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let versionsBefore; @@ -381,40 +461,54 @@ describe('MPU with x-scal-s3-version-id header', () => { let objMDBefore; let objMDAfter; - async.series([ - next => s3.putObject(params, next), - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, 'null', archive, next), - next => getMetadata(bucketName, objectName, 'null', (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, 'null', next), - next => getMetadata(bucketName, objectName, 'null', (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, 'null', archive, next), + next => + getMetadata(bucketName, objectName, 'null', (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => putMPUVersion(s3, bucketName, objectName, 'null', next), + next => + getMetadata(bucketName, objectName, 'null', (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite the lastest version and keep nullVersionId', done => { @@ -422,7 +516,7 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let versionsBefore; @@ -431,43 +525,58 @@ describe('MPU with x-scal-s3-version-id header', () => { let objMDAfter; let vId; - async.series([ - next => s3.putObject(params, next), - next => s3.putBucketVersioning(vParams, next), - next => 
s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => s3.putBucketVersioning(vParams, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + next(err); + }), + next => putMPUVersion(s3, bucketName, objectName, vId, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite a current null version', done => { @@ -475,13 +584,13 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const sParams = { Bucket: bucketName, VersioningConfiguration: { Status: 'Suspended', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -489,41 +598,55 @@ describe('MPU with x-scal-s3-version-id header', () => { let versionsBefore; let versionsAfter; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putBucketVersioning(sParams, next), - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = 
res.Versions; - next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, '', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => s3.putBucketVersioning(sParams, next), + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + next(err); + }), + next => putMPUVersion(s3, bucketName, objectName, '', next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite a non-current version', done => { @@ -531,7 +654,7 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -540,44 +663,59 @@ describe('MPU with x-scal-s3-version-id header', () => { let versionsAfter; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error 
${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => putMPUVersion(s3, bucketName, objectName, vId, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite the current version', done => { @@ -585,7 +723,7 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -594,43 +732,58 @@ describe('MPU with x-scal-s3-version-id header', () => { let versionsAfter; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => 
s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => putMPUVersion(s3, bucketName, objectName, vId, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite the current version after bucket version suspended', done => { @@ -638,13 +791,13 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const sParams = { Bucket: bucketName, VersioningConfiguration: { Status: 'Suspended', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -653,44 +806,59 @@ describe('MPU with x-scal-s3-version-id header', () => { let versionsAfter; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => s3.putBucketVersioning(sParams, next), - next => putMPUVersion(s3, bucketName, objectName, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + metadata.listObject(bucketName, 
mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => s3.putBucketVersioning(sParams, next), + next => putMPUVersion(s3, bucketName, objectName, vId, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite the current null version after bucket version enabled', done => { @@ -698,7 +866,7 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -706,55 +874,73 @@ describe('MPU with x-scal-s3-version-id header', () => { let versionsBefore; let versionsAfter; - async.series([ - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => s3.putBucketVersioning(vParams, next), - next => putMPUVersion(s3, bucketName, objectName, 'null', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => s3.putBucketVersioning(vParams, next), + next => putMPUVersion(s3, bucketName, objectName, 'null', next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], 
+ err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'uploadId', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should fail if archiving is not in progress', done => { const params = { Bucket: bucketName, Key: objectName }; - async.series([ - next => s3.putObject(params, next), - next => putMPUVersion(s3, bucketName, objectName, '', err => { - checkError(err, 'InvalidObjectState', 403); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => + putMPUVersion(s3, bucketName, objectName, '', err => { + checkError(err, 'InvalidObjectState', 403); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + return done(); + } + ); }); it('should fail if trying to overwrite a delete marker', done => { @@ -763,25 +949,30 @@ describe('MPU with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.deleteObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, vId, err => { - checkError(err, 'MethodNotAllowed', 405); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + s3.deleteObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => + putMPUVersion(s3, bucketName, objectName, vId, err => { + checkError(err, 'MethodNotAllowed', 405); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + return done(); + } + ); }); it('should fail if restore is already completed', done => { @@ -791,107 +982,125 @@ describe('MPU with x-scal-s3-version-id header', () => { restoreRequestedAt: new Date(0), restoreRequestedDays: 5, restoreCompletedAt: new Date(10), - restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)), + restoreWillExpireAt: new Date(10 + 5 * 24 * 60 * 60 * 1000), }; - async.series([ - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archiveCompleted, next), - next => putMPUVersion(s3, bucketName, objectName, '', err => { - checkError(err, 'InvalidObjectState', 403); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archiveCompleted, next), + next => + putMPUVersion(s3, bucketName, objectName, '', err => { + checkError(err, 'InvalidObjectState', 403); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error 
${JSON.stringify(err)}`); + return done(); + } + ); }); - [ - 'non versioned', - 'versioned', - 'suspended' - ].forEach(versioning => { + ['non versioned', 'versioned', 'suspended'].forEach(versioning => { it(`should update restore metadata while keeping storage class (${versioning})`, done => { const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; let objMDAfter; - async.series([ - next => { - if (versioning === 'versioned') { - return s3.putBucketVersioning({ - Bucket: bucketName, - VersioningConfiguration: { Status: 'Enabled' } - }, next); - } else if (versioning === 'suspended') { - return s3.putBucketVersioning({ - Bucket: bucketName, - VersioningConfiguration: { Status: 'Suspended' } - }, next); - } - return next(); - }, - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, next), - next => putMPUVersion(s3, bucketName, objectName, '', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => s3.listObjects({ Bucket: bucketName }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.Contents.length, 1); - assert.strictEqual(res.Contents[0].StorageClass, 'location-dmf-v1'); - return next(); - }), - next => s3.headObject(params, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.StorageClass, 'location-dmf-v1'); - return next(); - }), - next => s3.getObject(params, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.StorageClass, 'location-dmf-v1'); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - // Make sure object data location is set back to its bucket data location. 
- assert.deepStrictEqual(objMDAfter.dataStoreName, 'us-east-1'); - - assert.deepStrictEqual(objMDAfter.archive.archiveInfo, objMDBefore.archive.archiveInfo); - assert.deepStrictEqual(objMDAfter.archive.restoreRequestedAt, - objMDBefore.archive.restoreRequestedAt); - assert.deepStrictEqual(objMDAfter.archive.restoreRequestedDays, - objMDBefore.archive.restoreRequestedDays); - assert.deepStrictEqual(objMDAfter['x-amz-restore']['ongoing-request'], false); - - assert(objMDAfter.archive.restoreCompletedAt); - assert(objMDAfter.archive.restoreWillExpireAt); - assert(objMDAfter['x-amz-restore']['expiry-date']); - return done(); - }); + async.series( + [ + next => { + if (versioning === 'versioned') { + return s3.putBucketVersioning( + { + Bucket: bucketName, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ); + } else if (versioning === 'suspended') { + return s3.putBucketVersioning( + { + Bucket: bucketName, + VersioningConfiguration: { Status: 'Suspended' }, + }, + next + ); + } + return next(); + }, + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => metadata.listObject(bucketName, mdListingParams, log, next), + next => putMPUVersion(s3, bucketName, objectName, '', next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + s3.listObjects({ Bucket: bucketName }, (err, res) => { + assert.ifError(err); + assert.strictEqual(res.Contents.length, 1); + assert.strictEqual(res.Contents[0].StorageClass, 'location-dmf-v1'); + return next(); + }), + next => + s3.headObject(params, (err, res) => { + assert.ifError(err); + assert.strictEqual(res.StorageClass, 'location-dmf-v1'); + return next(); + }), + next => + s3.getObject(params, (err, res) => { + assert.ifError(err); + assert.strictEqual(res.StorageClass, 'location-dmf-v1'); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + // Make sure object data location is set back to its bucket data location. 
+ assert.deepStrictEqual(objMDAfter.dataStoreName, 'us-east-1'); + + assert.deepStrictEqual(objMDAfter.archive.archiveInfo, objMDBefore.archive.archiveInfo); + assert.deepStrictEqual( + objMDAfter.archive.restoreRequestedAt, + objMDBefore.archive.restoreRequestedAt + ); + assert.deepStrictEqual( + objMDAfter.archive.restoreRequestedDays, + objMDBefore.archive.restoreRequestedDays + ); + assert.deepStrictEqual(objMDAfter['x-amz-restore']['ongoing-request'], false); + + assert(objMDAfter.archive.restoreCompletedAt); + assert(objMDAfter.archive.restoreWillExpireAt); + assert(objMDAfter['x-amz-restore']['expiry-date']); + return done(); + } + ); }); }); it('should "copy" all but non data-related metadata (data encryption, data size...)', done => { const params = { Bucket: bucketNameMD, - Key: objectName + Key: objectName, }; const putParams = { ...params, Metadata: { 'custom-user-md': 'custom-md', }, - WebsiteRedirectLocation: 'http://custom-redirect' + WebsiteRedirectLocation: 'http://custom-redirect', }; const aclParams = { ...params, @@ -901,102 +1110,107 @@ describe('MPU with x-scal-s3-version-id header', () => { const tagParams = { ...params, Tagging: { - TagSet: [{ - Key: 'tag1', - Value: 'value1' - }, { - Key: 'tag2', - Value: 'value2' - }] - } + TagSet: [ + { + Key: 'tag1', + Value: 'value1', + }, + { + Key: 'tag2', + Value: 'value2', + }, + ], + }, }; const legalHoldParams = { ...params, LegalHold: { - Status: 'ON' - }, + Status: 'ON', + }, }; const acl = { - 'Canned': '', - 'FULL_CONTROL': [ + Canned: '', + FULL_CONTROL: [ // canonicalID of user Bart '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be', ], - 'WRITE_ACP': [], - 'READ': [], - 'READ_ACP': [], + WRITE_ACP: [], + READ: [], + READ_ACP: [], }; const tags = { tag1: 'value1', tag2: 'value2' }; const replicationInfo = { - 'status': 'COMPLETED', - 'backends': [ - { - 'site': 'azure-normal', - 'status': 'COMPLETED', - 'dataStoreVersionId': '', - }, - ], - 'content': [ - 'DATA', - 'METADATA', + status: 'COMPLETED', + backends: [ + { + site: 'azure-normal', + status: 'COMPLETED', + dataStoreVersionId: '', + }, ], - 'destination': 'arn:aws:s3:::versioned', - 'storageClass': 'azure-normal', - 'role': 'arn:aws:iam::root:role/s3-replication-role', - 'storageType': 'azure', - 'dataStoreVersionId': '', - 'isNFS': null, + content: ['DATA', 'METADATA'], + destination: 'arn:aws:s3:::versioned', + storageClass: 'azure-normal', + role: 'arn:aws:iam::root:role/s3-replication-role', + storageType: 'azure', + dataStoreVersionId: '', + isNFS: null, }; - async.series([ - next => s3.putObject(putParams, next), - next => s3.putObjectAcl(aclParams, next), - next => s3.putObjectTagging(tagParams, next), - next => s3.putObjectLegalHold(legalHoldParams, next), - next => getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => { - if (err) { - return next(err); - } - /* eslint-disable no-param-reassign */ - objMD.dataStoreName = 'location-dmf-v1'; - objMD.archive = archive; - objMD.replicationInfo = replicationInfo; - // data related - objMD['content-length'] = 99; - objMD['content-type'] = 'testtype'; - objMD['content-md5'] = 'testmd5'; - objMD['content-encoding'] = 'testencoding'; - objMD['x-amz-server-side-encryption'] = 'aws:kms'; - /* eslint-enable no-param-reassign */ - return metadata.putObjectMD(bucketNameMD, objectName, objMD, undefined, log, next); - }), - next => putMPUVersion(s3, bucketNameMD, objectName, '', next), - next => getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => { - if (err) { - return 
next(err); - } - assert.deepStrictEqual(objMD.acl, acl); - assert.deepStrictEqual(objMD.tags, tags); - assert.deepStrictEqual(objMD.replicationInfo, replicationInfo); - assert.deepStrictEqual(objMD.legalHold, true); - assert.strictEqual(objMD['x-amz-meta-custom-user-md'], 'custom-md'); - assert.strictEqual(objMD['x-amz-website-redirect-location'], 'http://custom-redirect'); - // make sure data related metadatas ar not the same before and after - assert.notStrictEqual(objMD['x-amz-server-side-encryption'], 'aws:kms'); - assert.notStrictEqual(objMD['content-length'], 99); - assert.notStrictEqual(objMD['content-encoding'], 'testencoding'); - assert.notStrictEqual(objMD['content-type'], 'testtype'); - // make sure we keep the same etag and add the new restored - // data's etag inside x-amz-restore - assert.strictEqual(objMD['content-md5'], 'testmd5'); - assert.strictEqual(typeof objMD['x-amz-restore']['content-md5'], 'string'); - return next(); - }), - // removing legal hold to be able to clean the bucket after the test - next => { - legalHoldParams.LegalHold.Status = 'OFF'; - return s3.putObjectLegalHold(legalHoldParams, next); - }, - ], done); + async.series( + [ + next => s3.putObject(putParams, next), + next => s3.putObjectAcl(aclParams, next), + next => s3.putObjectTagging(tagParams, next), + next => s3.putObjectLegalHold(legalHoldParams, next), + next => + getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => { + if (err) { + return next(err); + } + /* eslint-disable no-param-reassign */ + objMD.dataStoreName = 'location-dmf-v1'; + objMD.archive = archive; + objMD.replicationInfo = replicationInfo; + // data related + objMD['content-length'] = 99; + objMD['content-type'] = 'testtype'; + objMD['content-md5'] = 'testmd5'; + objMD['content-encoding'] = 'testencoding'; + objMD['x-amz-server-side-encryption'] = 'aws:kms'; + /* eslint-enable no-param-reassign */ + return metadata.putObjectMD(bucketNameMD, objectName, objMD, undefined, log, next); + }), + next => putMPUVersion(s3, bucketNameMD, objectName, '', next), + next => + getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => { + if (err) { + return next(err); + } + assert.deepStrictEqual(objMD.acl, acl); + assert.deepStrictEqual(objMD.tags, tags); + assert.deepStrictEqual(objMD.replicationInfo, replicationInfo); + assert.deepStrictEqual(objMD.legalHold, true); + assert.strictEqual(objMD['x-amz-meta-custom-user-md'], 'custom-md'); + assert.strictEqual(objMD['x-amz-website-redirect-location'], 'http://custom-redirect'); + // make sure data related metadatas ar not the same before and after + assert.notStrictEqual(objMD['x-amz-server-side-encryption'], 'aws:kms'); + assert.notStrictEqual(objMD['content-length'], 99); + assert.notStrictEqual(objMD['content-encoding'], 'testencoding'); + assert.notStrictEqual(objMD['content-type'], 'testtype'); + // make sure we keep the same etag and add the new restored + // data's etag inside x-amz-restore + assert.strictEqual(objMD['content-md5'], 'testmd5'); + assert.strictEqual(typeof objMD['x-amz-restore']['content-md5'], 'string'); + return next(); + }), + // removing legal hold to be able to clean the bucket after the test + next => { + legalHoldParams.LegalHold.Status = 'OFF'; + return s3.putObjectLegalHold(legalHoldParams, next); + }, + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/multiObjectDelete.js b/tests/functional/aws-node-sdk/test/object/multiObjectDelete.js index 10de2996e1..992c98f3f4 100644 --- 
a/tests/functional/aws-node-sdk/test/object/multiObjectDelete.js +++ b/tests/functional/aws-node-sdk/test/object/multiObjectDelete.js @@ -14,8 +14,7 @@ const bucketName = 'multi-object-delete-234-634'; const key = 'key'; function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } function checkError(err, code) { @@ -37,7 +36,7 @@ function sortList(list) { function createObjectsList(size, versionIds) { const objects = []; - for (let i = 1; i < (size + 1); i++) { + for (let i = 1; i < size + 1; i++) { objects.push({ Key: `${key}${i}`, }); @@ -74,11 +73,13 @@ describe('Multi-Object Delete Success', function success() { await Promise.race(queued); queued.splice(0, queued.findIndex(p => p === queued[0]) + 1); } - const result = s3.putObject({ - Bucket: bucketName, - Key: key, - Body: 'somebody', - }).promise(); + const result = s3 + .putObject({ + Bucket: bucketName, + Key: key, + Body: 'somebody', + }) + .promise(); queued.push(result); return result; }; @@ -94,44 +95,48 @@ describe('Multi-Object Delete Success', function success() { it('should batch delete 1000 objects', done => { const objects = createObjectsList(1000); - s3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - Quiet: false, + s3.deleteObjects( + { + Bucket: bucketName, + Delete: { + Objects: objects, + Quiet: false, + }, }, - }, function result(err, res) { - checkNoError(err); - if (this.httpResponse.body.toString() - .indexOf(' { const objects = createObjectsList(1000); - s3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - Quiet: true, + s3.deleteObjects( + { + Bucket: bucketName, + Delete: { + Objects: objects, + Quiet: true, + }, }, - }, function result(err, res) { - checkNoError(err); - if (this.httpResponse.body.toString() - .indexOf(' { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucketName }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucketName }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => s3.deleteBucket({ Bucket: bucketName }).promise()); - it('should return error if request deletion of more than 1000 objects', - () => { - const objects = createObjectsList(1001); - return s3.deleteObjects({ + it('should return error if request deletion of more than 1000 objects', () => { + const objects = createObjectsList(1001); + return s3 + .deleteObjects({ Bucket: bucketName, Delete: { Objects: objects, }, - }).promise().catch(err => { + }) + .promise() + .catch(err => { checkError(err, 'MalformedXML'); }); - }); + }); - it('should return error if request deletion of 0 objects', - () => { - const objects = createObjectsList(0); - return s3.deleteObjects({ + it('should return error if request deletion of 0 objects', () => { + const objects = createObjectsList(0); + return s3 + .deleteObjects({ Bucket: bucketName, Delete: { Objects: objects, }, - }).promise().catch(err => { + }) + .promise() + .catch(err => { checkError(err, 'MalformedXML'); }); - }); + }); - it('should return no error if try to delete non-existent objects', - () => { - const objects = createObjectsList(1000); - return s3.deleteObjects({ + it('should return no error if try to delete non-existent objects', () => { + const 
objects = createObjectsList(1000); + return s3 + .deleteObjects({ Bucket: bucketName, Delete: { Objects: objects, }, - }).promise().then(res => { + }) + .promise() + .then(res => { assert.strictEqual(res.Deleted.length, 1000); assert.strictEqual(res.Errors.length, 0); - }).catch(err => { + }) + .catch(err => { checkNoError(err); }); - }); + }); it('should return error if no such bucket', () => { const objects = createObjectsList(1); - return s3.deleteObjects({ - Bucket: 'nosuchbucket2323292093', - Delete: { - Objects: objects, - }, - }).promise().catch(err => { - checkError(err, 'NoSuchBucket'); - }); + return s3 + .deleteObjects({ + Bucket: 'nosuchbucket2323292093', + Delete: { + Objects: objects, + }, + }) + .promise() + .catch(err => { + checkError(err, 'NoSuchBucket'); + }); }); }); }); @@ -219,31 +236,35 @@ describe('Multi-Object Delete Access', function access() { signatureVersion: 'v4', }); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucketName }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }) - .then(() => { - for (let i = 1; i < 501; i++) { - createObjects.push(s3.putObject({ - Bucket: bucketName, - Key: `${key}${i}`, - Body: 'somebody', - }).promise()); - } - return Promise.all(createObjects) + return s3 + .createBucket({ Bucket: bucketName }) + .promise() .catch(err => { - process.stdout.write(`Error creating objects: ${err}\n`); + process.stdout.write(`Error creating bucket: ${err}\n`); throw err; + }) + .then(() => { + for (let i = 1; i < 501; i++) { + createObjects.push( + s3 + .putObject({ + Bucket: bucketName, + Key: `${key}${i}`, + Body: 'somebody', + }) + .promise() + ); + } + return Promise.all(createObjects).catch(err => { + process.stdout.write(`Error creating objects: ${err}\n`); + throw err; + }); }); - }); }); after(() => s3.deleteBucket({ Bucket: bucketName }).promise()); - it('should return access denied error for each object where no acl ' + - 'permission', () => { + it('should return access denied error for each object where no acl ' + 'permission', () => { const objects = createObjectsList(500); const errorList = createObjectsList(500); errorList.forEach(obj => { @@ -251,36 +272,43 @@ describe('Multi-Object Delete Access', function access() { item.Code = 'AccessDenied'; item.Message = 'Access Denied'; }); - return otherAccountS3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - Quiet: false, - }, - }).promise().then(res => { - assert.strictEqual(res.Deleted.length, 0); - assert.deepStrictEqual(sortList(res.Errors), sortList(errorList)); - assert.strictEqual(res.Errors.length, 500); - }).catch(err => { - checkNoError(err); - }); + return otherAccountS3 + .deleteObjects({ + Bucket: bucketName, + Delete: { + Objects: objects, + Quiet: false, + }, + }) + .promise() + .then(res => { + assert.strictEqual(res.Deleted.length, 0); + assert.deepStrictEqual(sortList(res.Errors), sortList(errorList)); + assert.strictEqual(res.Errors.length, 500); + }) + .catch(err => { + checkNoError(err); + }); }); - it('should batch delete objects where requester has permission', () => { const objects = createObjectsList(500); - return s3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - Quiet: false, - }, - }).promise().then(res => { - assert.strictEqual(res.Deleted.length, 500); - assert.strictEqual(res.Errors.length, 0); - }).catch(err => { - checkNoError(err); - }); + return s3 + .deleteObjects({ + Bucket: bucketName, + Delete: { + Objects: objects, + Quiet: false, + }, + 
}) + .promise() + .then(res => { + assert.strictEqual(res.Deleted.length, 500); + assert.strictEqual(res.Errors.length, 0); + }) + .catch(err => { + checkNoError(err); + }); }); }); @@ -298,90 +326,115 @@ describeSkipIfCeph('Multi-Object Delete with Object Lock', () => { signatureVersion: 'v4', }); s3 = bucketUtil.s3; - return s3.createBucket({ - Bucket: bucketName, - ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.putObjectLockConfiguration({ - Bucket: bucketName, - ObjectLockConfiguration: { - ObjectLockEnabled: 'Enabled', - Rule: { - DefaultRetention: { - Days: 1, - Mode: 'GOVERNANCE', - }, - }, - }, - }).promise()) - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }) - .then(() => { - for (let i = 1; i < 6; i++) { - createObjects.push(s3.putObject({ - Bucket: bucketName, - Key: `${key}${i}`, - Body: 'somebody', - }).promise()); - } - return Promise.all(createObjects) - .then(res => { - res.forEach(r => { - versionIds.push(r.VersionId); - }); + return s3 + .createBucket({ + Bucket: bucketName, + ObjectLockEnabledForBucket: true, }) + .promise() + .then(() => + s3 + .putObjectLockConfiguration({ + Bucket: bucketName, + ObjectLockConfiguration: { + ObjectLockEnabled: 'Enabled', + Rule: { + DefaultRetention: { + Days: 1, + Mode: 'GOVERNANCE', + }, + }, + }, + }) + .promise() + ) .catch(err => { - process.stdout.write(`Error creating objects: ${err}\n`); + process.stdout.write(`Error creating bucket: ${err}\n`); throw err; + }) + .then(() => { + for (let i = 1; i < 6; i++) { + createObjects.push( + s3 + .putObject({ + Bucket: bucketName, + Key: `${key}${i}`, + Body: 'somebody', + }) + .promise() + ); + } + return Promise.all(createObjects) + .then(res => { + res.forEach(r => { + versionIds.push(r.VersionId); + }); + }) + .catch(err => { + process.stdout.write(`Error creating objects: ${err}\n`); + throw err; + }); }); - }); }); after(() => s3.deleteBucket({ Bucket: bucketName }).promise()); it('should not delete locked objects', () => { const objects = createObjectsList(5, versionIds); - return s3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - Quiet: false, - }, - }).promise().then(res => { - assert.strictEqual(res.Errors.length, 5); - res.Errors.forEach(err => assert.strictEqual(err.Code, 'AccessDenied')); - }); - }); - - it('should not delete locked objects with GOVERNANCE ' + - 'retention mode and bypass header when object is legal hold enabled', () => { - const objects = createObjectsList(5, versionIds); - const putObjectLegalHolds = []; - for (let i = 1; i < 6; i++) { - putObjectLegalHolds.push(s3.putObjectLegalHold({ - Bucket: bucketName, - Key: `${key}${i}`, - LegalHold: { - Status: 'ON', - }, - }).promise()); - } - return Promise.all(putObjectLegalHolds) - .then(() => s3.deleteObjects({ + return s3 + .deleteObjects({ Bucket: bucketName, Delete: { Objects: objects, Quiet: false, }, - BypassGovernanceRetention: true, - }).promise()).then(res => { + }) + .promise() + .then(res => { assert.strictEqual(res.Errors.length, 5); res.Errors.forEach(err => assert.strictEqual(err.Code, 'AccessDenied')); }); }); + it( + 'should not delete locked objects with GOVERNANCE ' + + 'retention mode and bypass header when object is legal hold enabled', + () => { + const objects = createObjectsList(5, versionIds); + const putObjectLegalHolds = []; + for (let i = 1; i < 6; i++) { + putObjectLegalHolds.push( + s3 + .putObjectLegalHold({ + Bucket: bucketName, + Key: `${key}${i}`, + LegalHold: { + Status: 'ON', + }, + }) + 
.promise() + ); + } + return Promise.all(putObjectLegalHolds) + .then(() => + s3 + .deleteObjects({ + Bucket: bucketName, + Delete: { + Objects: objects, + Quiet: false, + }, + BypassGovernanceRetention: true, + }) + .promise() + ) + .then(res => { + assert.strictEqual(res.Errors.length, 5); + res.Errors.forEach(err => assert.strictEqual(err.Code, 'AccessDenied')); + }); + } + ); + it('should delete locked objects after retention period has expired', () => { const objects = createObjectsList(5, versionIds); const objectsCopy = JSON.parse(JSON.stringify(objects)); @@ -397,35 +450,44 @@ describeSkipIfCeph('Multi-Object Delete with Object Lock', () => { date: moment().subtract(10, 'days').toISOString(), }; return changeLockPromise(objectsCopy, newRetention) - .then(() => s3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - Quiet: false, - }, - }).promise()).then(res => { - assert.strictEqual(res.Deleted.length, 5); - assert.strictEqual(res.Errors.length, 0); - }).catch(err => { - checkNoError(err); - }); + .then(() => + s3 + .deleteObjects({ + Bucket: bucketName, + Delete: { + Objects: objects, + Quiet: false, + }, + }) + .promise() + ) + .then(res => { + assert.strictEqual(res.Deleted.length, 5); + assert.strictEqual(res.Errors.length, 0); + }) + .catch(err => { + checkNoError(err); + }); }); - it('should delete locked objects with GOVERNANCE ' + - 'retention mode and bypass header', () => { + it('should delete locked objects with GOVERNANCE ' + 'retention mode and bypass header', () => { const objects = createObjectsList(5, versionIds); - return s3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - Quiet: false, - }, - BypassGovernanceRetention: true, - }).promise().then(res => { - assert.strictEqual(res.Deleted.length, 5); - assert.strictEqual(res.Errors.length, 0); - }).catch(err => { - checkNoError(err); - }); + return s3 + .deleteObjects({ + Bucket: bucketName, + Delete: { + Objects: objects, + Quiet: false, + }, + BypassGovernanceRetention: true, + }) + .promise() + .then(res => { + assert.strictEqual(res.Deleted.length, 5); + assert.strictEqual(res.Errors.length, 0); + }) + .catch(err => { + checkNoError(err); + }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/objectCopy.js b/tests/functional/aws-node-sdk/test/object/objectCopy.js index c56a24682c..28f1671052 100644 --- a/tests/functional/aws-node-sdk/test/object/objectCopy.js +++ b/tests/functional/aws-node-sdk/test/object/objectCopy.js @@ -5,8 +5,7 @@ const changeObjectLock = require('../../../../utilities/objectLock-util'); const { fakeMetadataTransition, fakeMetadataArchive } = require('../utils/init'); const { taggingTests } = require('../../lib/utility/tagging'); -const genMaxSizeMetaHeaders - = require('../../lib/utility/genMaxSizeMetaHeaders'); +const genMaxSizeMetaHeaders = require('../../lib/utility/genMaxSizeMetaHeaders'); const constants = require('../../../../constants'); const sourceBucketName = 'supersourcebucket8102016'; @@ -48,8 +47,7 @@ const otherAccountS3 = otherAccountBucketUtility.s3; const itSkipIfE2E = process.env.S3_END_TO_END ? 
it.skip : it; function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } function checkError(err, code) { @@ -64,10 +62,9 @@ function dateFromNow(diff) { } function dateConvert(d) { - return (new Date(d)).toISOString(); + return new Date(d).toISOString(); } - describe('Object Copy', () => { withV4(sigCfg => { let bucketUtil; @@ -79,86 +76,86 @@ describe('Object Copy', () => { before(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return bucketUtil.empty(sourceBucketName) - .then(() => - bucketUtil.empty(destBucketName) - ) - .then(() => - bucketUtil.deleteMany([sourceBucketName, destBucketName]) - ) - .catch(err => { - if (err.code !== 'NoSuchBucket') { - process.stdout.write(`${err}\n`); + return bucketUtil + .empty(sourceBucketName) + .then(() => bucketUtil.empty(destBucketName)) + .then(() => bucketUtil.deleteMany([sourceBucketName, destBucketName])) + .catch(err => { + if (err.code !== 'NoSuchBucket') { + process.stdout.write(`${err}\n`); + throw err; + } + }) + .then(() => bucketUtil.createOne(sourceBucketName)) + .then(() => bucketUtil.createOne(destBucketName)) + .catch(err => { throw err; - } - }) - .then(() => bucketUtil.createOne(sourceBucketName) - ) - .then(() => bucketUtil.createOne(destBucketName) - ) - .catch(err => { - throw err; - }); + }); }); - beforeEach(() => s3.putObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - Body: content, - Metadata: originalMetadata, - CacheControl: originalCacheControl, - ContentDisposition: originalContentDisposition, - ContentEncoding: originalContentEncoding, - Expires: originalExpires, - Tagging: originalTagging, - }).promise().then(res => { - etag = res.ETag; - etagTrim = etag.substring(1, etag.length - 1); - return s3.headObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - }).promise(); - }).then(res => { - lastModified = res.LastModified; - })); - - afterEach(() => bucketUtil.empty(sourceBucketName) - .then(() => bucketUtil.empty(destBucketName)) + beforeEach(() => + s3 + .putObject({ + Bucket: sourceBucketName, + Key: sourceObjName, + Body: content, + Metadata: originalMetadata, + CacheControl: originalCacheControl, + ContentDisposition: originalContentDisposition, + ContentEncoding: originalContentEncoding, + Expires: originalExpires, + Tagging: originalTagging, + }) + .promise() + .then(res => { + etag = res.ETag; + etagTrim = etag.substring(1, etag.length - 1); + return s3 + .headObject({ + Bucket: sourceBucketName, + Key: sourceObjName, + }) + .promise(); + }) + .then(res => { + lastModified = res.LastModified; + }) ); + afterEach(() => bucketUtil.empty(sourceBucketName).then(() => bucketUtil.empty(destBucketName))); + after(() => bucketUtil.deleteMany([sourceBucketName, destBucketName])); function requestCopy(fields, cb) { - s3.copyObject(Object.assign({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, fields), cb); + s3.copyObject( + Object.assign( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + }, + fields + ), + cb + ); } - function successCopyCheck(error, response, copyVersionMetadata, - destBucketName, destObjName, done) { + function successCopyCheck(error, response, copyVersionMetadata, destBucketName, destObjName, done) { checkNoError(error); assert.strictEqual(response.ETag, etag); - const copyLastModified = new 
Date(response.LastModified) - .toGMTString(); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { + const copyLastModified = new Date(response.LastModified).toGMTString(); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { checkNoError(err); assert.strictEqual(res.StorageClass, undefined); - assert.strictEqual(res.Body.toString(), - content); - assert.deepStrictEqual(res.Metadata, - copyVersionMetadata); - assert.strictEqual(res.LastModified.toGMTString(), - copyLastModified); + assert.strictEqual(res.Body.toString(), content); + assert.deepStrictEqual(res.Metadata, copyVersionMetadata); + assert.strictEqual(res.LastModified.toGMTString(), copyLastModified); done(); }); } function checkSuccessTagging(key, value, cb) { - s3.getObjectTagging({ Bucket: destBucketName, Key: destObjName }, - (err, data) => { + s3.getObjectTagging({ Bucket: destBucketName, Key: destObjName }, (err, data) => { checkNoError(err); assert.strictEqual(data.TagSet[0].Key, key); assert.strictEqual(data.TagSet[0].Value, value); @@ -167,128 +164,181 @@ describe('Object Copy', () => { } function checkNoTagging(cb) { - s3.getObjectTagging({ Bucket: destBucketName, Key: destObjName }, - (err, data) => { + s3.getObjectTagging({ Bucket: destBucketName, Key: destObjName }, (err, data) => { checkNoError(err); assert.strictEqual(data.TagSet.length, 0); cb(); }); } - it('should copy an object from a source bucket to a different ' + - 'destination bucket and copy the metadata if no metadata directve' + - 'header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}` }, - (err, res) => - successCopyCheck(err, res, originalMetadata, - destBucketName, destObjName, done) + it( + 'should copy an object from a source bucket to a different ' + + 'destination bucket and copy the metadata if no metadata directve' + + 'header provided', + done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}` }, + (err, res) => successCopyCheck(err, res, originalMetadata, destBucketName, destObjName, done) ); - }); + } + ); - it('should copy an object from a source bucket to a different ' + - 'destination bucket and copy the tag set if no tagging directive' + - 'header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}` }, - err => { - checkNoError(err); - checkSuccessTagging(originalTagKey, originalTagValue, done); - }); - }); + it( + 'should copy an object from a source bucket to a different ' + + 'destination bucket and copy the tag set if no tagging directive' + + 'header provided', + done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}` }, + err => { + checkNoError(err); + checkSuccessTagging(originalTagKey, originalTagValue, done); + } + ); + } + ); - it('should return 400 InvalidArgument if invalid tagging ' + - 'directive', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'COCO' }, + it('should return 400 InvalidArgument if invalid tagging ' + 'directive', done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + TaggingDirective: 'COCO', + }, err => { checkError(err, 'InvalidArgument'); done(); - }); - }); - - it('should copy an object 
from a source bucket to a different ' + - 'destination bucket and copy the tag set if COPY tagging ' + - 'directive header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'COPY' }, - err => { - checkNoError(err); - checkSuccessTagging(originalTagKey, originalTagValue, done); - }); + } + ); }); - it('should copy an object and tag set if COPY ' + - 'included as tag directive header (and ignore any new ' + - 'tag set sent with copy request)', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'COPY', - Tagging: newTagging, - }, - err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - assert.deepStrictEqual(res.Metadata, originalMetadata); - done(); - }); - }); - }); + it( + 'should copy an object from a source bucket to a different ' + + 'destination bucket and copy the tag set if COPY tagging ' + + 'directive header provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + TaggingDirective: 'COPY', + }, + err => { + checkNoError(err); + checkSuccessTagging(originalTagKey, originalTagValue, done); + } + ); + } + ); - it('should copy an object from a source to the same destination ' + - 'updating tag if REPLACE tagging directive header provided', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'REPLACE', Tagging: newTagging }, - err => { - checkNoError(err); - checkSuccessTagging(newTagKey, newTagValue, done); - }); - }); + it( + 'should copy an object and tag set if COPY ' + + 'included as tag directive header (and ignore any new ' + + 'tag set sent with copy request)', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + TaggingDirective: 'COPY', + Tagging: newTagging, + }, + err => { + checkNoError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + assert.deepStrictEqual(res.Metadata, originalMetadata); + done(); + }); + } + ); + } + ); - it('should copy an object from a source to the same destination ' + - 'return no tag if REPLACE tagging directive header provided but ' + - '"x-amz-tagging" header is not specified', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'REPLACE' }, - err => { - checkNoError(err); - checkNoTagging(done); - }); - }); + it( + 'should copy an object from a source to the same destination ' + + 'updating tag if REPLACE tagging directive header provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + TaggingDirective: 'REPLACE', + Tagging: newTagging, + }, + err => { + checkNoError(err); + checkSuccessTagging(newTagKey, newTagValue, done); + } + ); + } + ); - it('should copy an object from a source to the same destination ' + - 'return no tag if COPY tagging directive header but provided from ' + - 'an empty object', done => { - s3.putObject({ Bucket: sourceBucketName, Key: 'emptyobject' }, - err => { - checkNoError(err); - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/emptyobject`, - 
TaggingDirective: 'COPY' }, + it( + 'should copy an object from a source to the same destination ' + + 'return no tag if REPLACE tagging directive header provided but ' + + '"x-amz-tagging" header is not specified', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + TaggingDirective: 'REPLACE', + }, err => { checkNoError(err); checkNoTagging(done); - }); - }); - }); + } + ); + } + ); - it('should copy an object from a source to the same destination ' + - 'updating tag if REPLACE tagging directive header provided', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'REPLACE', Tagging: newTagging }, - err => { + it( + 'should copy an object from a source to the same destination ' + + 'return no tag if COPY tagging directive header but provided from ' + + 'an empty object', + done => { + s3.putObject({ Bucket: sourceBucketName, Key: 'emptyobject' }, err => { checkNoError(err); - checkSuccessTagging(newTagKey, newTagValue, done); + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/emptyobject`, + TaggingDirective: 'COPY', + }, + err => { + checkNoError(err); + checkNoTagging(done); + } + ); }); - }); + } + ); + + it( + 'should copy an object from a source to the same destination ' + + 'updating tag if REPLACE tagging directive header provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + TaggingDirective: 'REPLACE', + Tagging: newTagging, + }, + err => { + checkNoError(err); + checkSuccessTagging(newTagKey, newTagValue, done); + } + ); + } + ); describe('Copy object updating tag set', () => { taggingTests.forEach(taggingTest => { @@ -296,66 +346,61 @@ describe('Object Copy', () => { const key = encodeURIComponent(taggingTest.tag.key); const value = encodeURIComponent(taggingTest.tag.value); const tagging = `${key}=${value}`; - const params = { Bucket: destBucketName, Key: destObjName, + const params = { + Bucket: destBucketName, + Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'REPLACE', Tagging: tagging }; + TaggingDirective: 'REPLACE', + Tagging: tagging, + }; s3.copyObject(params, err => { if (taggingTest.error) { checkError(err, taggingTest.error); return done(); } - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - return checkSuccessTagging(taggingTest.tag.key, - taggingTest.tag.value, done); + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + return checkSuccessTagging(taggingTest.tag.key, taggingTest.tag.value, done); }); }); }); }); - it('should also copy additional headers (CacheControl, ' + - 'ContentDisposition, ContentEncoding, Expires) when copying an ' + - 'object from a source bucket to a different destination bucket', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}` }, - err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, Key: destObjName }, - (err, res) => { + it( + 'should also copy additional headers (CacheControl, ' + + 'ContentDisposition, ContentEncoding, Expires) when copying an ' + + 'object from a source bucket to a different destination bucket', + done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: 
`${sourceBucketName}/${sourceObjName}` }, + err => { + checkNoError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { if (err) { done(err); } - assert.strictEqual(res.CacheControl, - originalCacheControl); - assert.strictEqual(res.ContentDisposition, - originalContentDisposition); + assert.strictEqual(res.CacheControl, originalCacheControl); + assert.strictEqual(res.ContentDisposition, originalContentDisposition); // Should remove V4 streaming value 'aws-chunked' // to be compatible with AWS behavior - assert.strictEqual(res.ContentEncoding, - 'base64,' - ); - assert.strictEqual(res.Expires.toGMTString(), - originalExpires.toGMTString()); + assert.strictEqual(res.ContentEncoding, 'base64,'); + assert.strictEqual(res.Expires.toGMTString(), originalExpires.toGMTString()); done(); }); - }); - }); + } + ); + } + ); - it('should copy an object from a source bucket to a different ' + - 'key in the same bucket', - done => { - s3.copyObject({ Bucket: sourceBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}` }, - (err, res) => - successCopyCheck(err, res, originalMetadata, - sourceBucketName, destObjName, done) - ); - }); + it('should copy an object from a source bucket to a different ' + 'key in the same bucket', done => { + s3.copyObject( + { Bucket: sourceBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}` }, + (err, res) => successCopyCheck(err, res, originalMetadata, sourceBucketName, destObjName, done) + ); + }); // TODO: see S3C-3482, figure out why this test fails in Integration builds - itSkipIfE2E('should not return error if copying object w/ > ' + - '2KB user-defined md and COPY directive', + itSkipIfE2E( + 'should not return error if copying object w/ > ' + '2KB user-defined md and COPY directive', done => { const metadata = genMaxSizeMetaHeaders(); const params = { @@ -374,11 +419,12 @@ describe('Object Copy', () => { done(); }); }); - }); + } + ); // TODO: see S3C-3482, figure out why this test fails in Integration builds - itSkipIfE2E('should return error if copying object w/ > 2KB ' + - 'user-defined md and REPLACE directive', + itSkipIfE2E( + 'should return error if copying object w/ > 2KB ' + 'user-defined md and REPLACE directive', done => { const metadata = genMaxSizeMetaHeaders(); const params = { @@ -399,169 +445,198 @@ describe('Object Copy', () => { done(); }); }); - }); + } + ); - it('should copy an object from a source to the same destination ' + - '(update metadata)', done => { - s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - MetadataDirective: 'REPLACE', - Metadata: newMetadata }, - (err, res) => - successCopyCheck(err, res, newMetadata, - sourceBucketName, sourceObjName, done) - ); + it('should copy an object from a source to the same destination ' + '(update metadata)', done => { + s3.copyObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + MetadataDirective: 'REPLACE', + Metadata: newMetadata, + }, + (err, res) => successCopyCheck(err, res, newMetadata, sourceBucketName, sourceObjName, done) + ); }); - it('should copy an object and replace the metadata if replace ' + - 'included as metadata directive header', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - MetadataDirective: 'REPLACE', - Metadata: newMetadata, - }, - (err, res) => - successCopyCheck(err, res, 
newMetadata, - destBucketName, destObjName, done) + it( + 'should copy an object and replace the metadata if replace ' + 'included as metadata directive header', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + MetadataDirective: 'REPLACE', + Metadata: newMetadata, + }, + (err, res) => successCopyCheck(err, res, newMetadata, destBucketName, destObjName, done) ); - }); + } + ); - it('should copy an object and replace ContentType if replace ' + - 'included as a metadata directive header, and new ContentType is ' + - 'provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - MetadataDirective: 'REPLACE', - ContentType: 'image', - }, () => { - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - if (err) { - return done(err); + it( + 'should copy an object and replace ContentType if replace ' + + 'included as a metadata directive header, and new ContentType is ' + + 'provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + MetadataDirective: 'REPLACE', + ContentType: 'image', + }, + () => { + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.ContentType, 'image'); + return done(); + }); } - assert.strictEqual(res.ContentType, 'image'); - return done(); - }); - }); - }); + ); + } + ); - it('should copy an object and keep ContentType if replace ' + - 'included as a metadata directive header, but no new ContentType ' + - 'is provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - MetadataDirective: 'REPLACE', - }, () => { - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - if (err) { - return done(err); + it( + 'should copy an object and keep ContentType if replace ' + + 'included as a metadata directive header, but no new ContentType ' + + 'is provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + MetadataDirective: 'REPLACE', + }, + () => { + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.ContentType, 'application/octet-stream'); + return done(); + }); } - assert.strictEqual(res.ContentType, - 'application/octet-stream'); - return done(); - }); - }); - }); + ); + } + ); - it('should also replace additional headers if replace ' + - 'included as metadata directive header and new headers are ' + - 'specified', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - MetadataDirective: 'REPLACE', - CacheControl: newCacheControl, - ContentDisposition: newContentDisposition, - ContentEncoding: newContentEncoding, - Expires: newExpires, - }, err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - if (err) { - done(err); + it( + 'should also replace additional headers if replace ' + + 'included as metadata directive header and new headers are ' + + 'specified', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + MetadataDirective: 'REPLACE', + 
CacheControl: newCacheControl, + ContentDisposition: newContentDisposition, + ContentEncoding: newContentEncoding, + Expires: newExpires, + }, + err => { + checkNoError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + if (err) { + done(err); + } + assert.strictEqual(res.CacheControl, newCacheControl); + assert.strictEqual(res.ContentDisposition, newContentDisposition); + // Should remove V4 streaming value 'aws-chunked' + // to be compatible with AWS behavior + assert.strictEqual(res.ContentEncoding, 'gzip,'); + assert.strictEqual(res.Expires.toGMTString(), newExpires.toGMTString()); + done(); + }); } - assert.strictEqual(res.CacheControl, newCacheControl); - assert.strictEqual(res.ContentDisposition, - newContentDisposition); - // Should remove V4 streaming value 'aws-chunked' - // to be compatible with AWS behavior - assert.strictEqual(res.ContentEncoding, 'gzip,'); - assert.strictEqual(res.Expires.toGMTString(), - newExpires.toGMTString()); - done(); - }); - }); - }); + ); + } + ); - it('should copy an object and the metadata if copy ' + - 'included as metadata directive header (and ignore any new ' + - 'metadata sent with copy request)', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - MetadataDirective: 'COPY', - Metadata: newMetadata, - }, - err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - assert.deepStrictEqual(res.Metadata, originalMetadata); - done(); - }); - }); - }); + it( + 'should copy an object and the metadata if copy ' + + 'included as metadata directive header (and ignore any new ' + + 'metadata sent with copy request)', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + MetadataDirective: 'COPY', + Metadata: newMetadata, + }, + err => { + checkNoError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + assert.deepStrictEqual(res.Metadata, originalMetadata); + done(); + }); + } + ); + } + ); - it('should copy an object and its additional headers if copy ' + - 'included as metadata directive header (and ignore any new ' + - 'headers sent with copy request)', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - MetadataDirective: 'COPY', - Metadata: newMetadata, - CacheControl: newCacheControl, - ContentDisposition: newContentDisposition, - ContentEncoding: newContentEncoding, - Expires: newExpires, - }, err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, Key: destObjName }, - (err, res) => { - if (err) { - done(err); - } - assert.strictEqual(res.CacheControl, - originalCacheControl); - assert.strictEqual(res.ContentDisposition, - originalContentDisposition); - assert.strictEqual(res.ContentEncoding, - 'base64,'); - assert.strictEqual(res.Expires.toGMTString(), - originalExpires.toGMTString()); - done(); - }); - }); - }); + it( + 'should copy an object and its additional headers if copy ' + + 'included as metadata directive header (and ignore any new ' + + 'headers sent with copy request)', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + MetadataDirective: 'COPY', + Metadata: newMetadata, + CacheControl: newCacheControl, + ContentDisposition: newContentDisposition, + ContentEncoding: newContentEncoding, + 
Expires: newExpires, + }, + err => { + checkNoError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + if (err) { + done(err); + } + assert.strictEqual(res.CacheControl, originalCacheControl); + assert.strictEqual(res.ContentDisposition, originalContentDisposition); + assert.strictEqual(res.ContentEncoding, 'base64,'); + assert.strictEqual(res.Expires.toGMTString(), originalExpires.toGMTString()); + done(); + }); + } + ); + } + ); it('should copy a 0 byte object to different destination', done => { const emptyFileETag = '"d41d8cd98f00b204e9800998ecf8427e"'; - s3.putObject({ Bucket: sourceBucketName, Key: sourceObjName, - Body: '', Metadata: originalMetadata }, () => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, + s3.putObject({ Bucket: sourceBucketName, Key: sourceObjName, Body: '', Metadata: originalMetadata }, () => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}` }, (err, res) => { checkNoError(err); assert.strictEqual(res.ETag, emptyFileETag); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { checkNoError(err); - assert.deepStrictEqual(res.Metadata, - originalMetadata); + assert.deepStrictEqual(res.Metadata, originalMetadata); assert.strictEqual(res.ETag, emptyFileETag); done(); }); - }); + } + ); }); }); @@ -570,351 +645,412 @@ describe('Object Copy', () => { it('should copy a 0 byte object to same destination', done => { const emptyFileETag = '"d41d8cd98f00b204e9800998ecf8427e"'; s3.putObject({ Bucket: sourceBucketName, Key: sourceObjName, Body: '' }, () => { - s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - StorageClass: 'REDUCED_REDUNDANCY', - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, emptyFileETag); - s3.getObject({ Bucket: sourceBucketName, - Key: sourceObjName }, (err, res) => { - assert.deepStrictEqual(res.Metadata, - {}); - assert.deepStrictEqual(res.StorageClass, - 'REDUCED_REDUNDANCY'); + s3.copyObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + StorageClass: 'REDUCED_REDUNDANCY', + }, + (err, res) => { + checkNoError(err); assert.strictEqual(res.ETag, emptyFileETag); - done(); - }); - }); + s3.getObject({ Bucket: sourceBucketName, Key: sourceObjName }, (err, res) => { + assert.deepStrictEqual(res.Metadata, {}); + assert.deepStrictEqual(res.StorageClass, 'REDUCED_REDUNDANCY'); + assert.strictEqual(res.ETag, emptyFileETag); + done(); + }); + } + ); }); }); - it('should copy an object to a different destination and change ' + - 'the storage class if storage class header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - StorageClass: 'REDUCED_REDUNDANCY', - }, err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - assert.strictEqual(res.StorageClass, - 'REDUCED_REDUNDANCY'); - done(); - }); - }); - }); + it( + 'should copy an object to a different destination and change ' + + 'the storage class if storage class header provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + StorageClass: 'REDUCED_REDUNDANCY', + 
}, + err => { + checkNoError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + assert.strictEqual(res.StorageClass, 'REDUCED_REDUNDANCY'); + done(); + }); + } + ); + } + ); - it('should copy an object to the same destination and change the ' + - 'storage class if the storage class header provided', done => { - s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - StorageClass: 'REDUCED_REDUNDANCY', - }, err => { - checkNoError(err); - s3.getObject({ Bucket: sourceBucketName, - Key: sourceObjName }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.StorageClass, - 'REDUCED_REDUNDANCY'); - done(); - }); - }); - }); + it( + 'should copy an object to the same destination and change the ' + + 'storage class if the storage class header provided', + done => { + s3.copyObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + StorageClass: 'REDUCED_REDUNDANCY', + }, + err => { + checkNoError(err); + s3.getObject({ Bucket: sourceBucketName, Key: sourceObjName }, (err, res) => { + checkNoError(err); + assert.strictEqual(res.StorageClass, 'REDUCED_REDUNDANCY'); + done(); + }); + } + ); + } + ); } - it('should copy an object to a new bucket and overwrite an already ' + - 'existing object in the destination bucket', done => { - s3.putObject({ Bucket: destBucketName, Key: destObjName, - Body: 'overwrite me', Metadata: originalMetadata }, () => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - MetadataDirective: 'REPLACE', - Metadata: newMetadata, - }, - (err, res) => { + it( + 'should copy an object to a new bucket and overwrite an already ' + + 'existing object in the destination bucket', + done => { + s3.putObject( + { Bucket: destBucketName, Key: destObjName, Body: 'overwrite me', Metadata: originalMetadata }, + () => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + MetadataDirective: 'REPLACE', + Metadata: newMetadata, + }, + (err, res) => { + checkNoError(err); + assert.strictEqual(res.ETag, etag); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + assert.deepStrictEqual(res.Metadata, newMetadata); + assert.strictEqual(res.ETag, etag); + assert.strictEqual(res.Body.toString(), content); + done(); + }); + } + ); + } + ); + } + ); + + // skipping test as object level encryption is not implemented yet + it.skip( + 'should copy an object and change the server side encryption' + + 'option if server side encryption header provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + ServerSideEncryption: 'AES256', + }, + err => { checkNoError(err); - assert.strictEqual(res.ETag, etag); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - assert.deepStrictEqual(res.Metadata, - newMetadata); - assert.strictEqual(res.ETag, etag); - assert.strictEqual(res.Body.toString(), content); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + assert.strictEqual(res.ServerSideEncryption, 'AES256'); done(); }); - }); - }); - }); + } + ); + } + ); - // skipping test as object level encryption is not implemented yet - it.skip('should copy an object and change the server side encryption' + - 'option if server side encryption header provided', done => 
{ - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - ServerSideEncryption: 'AES256', - }, - err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - assert.strictEqual(res.ServerSideEncryption, - 'AES256'); - done(); - }); + it( + 'should return Not Implemented error for obj. encryption using ' + 'customer-provided encryption keys', + done => { + const params = { + Bucket: destBucketName, + Key: 'key', + CopySource: `${sourceBucketName}/${sourceObjName}`, + SSECustomerAlgorithm: 'AES256', + }; + s3.copyObject(params, err => { + assert.strictEqual(err.code, 'NotImplemented'); + done(); }); - }); - - it('should return Not Implemented error for obj. encryption using ' + - 'customer-provided encryption keys', done => { - const params = { Bucket: destBucketName, Key: 'key', - CopySource: `${sourceBucketName}/${sourceObjName}`, - SSECustomerAlgorithm: 'AES256' }; - s3.copyObject(params, err => { - assert.strictEqual(err.code, 'NotImplemented'); - done(); - }); - }); + } + ); it('should copy an object and set the acl on the new object', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - ACL: 'authenticated-read', - }, + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + ACL: 'authenticated-read', + }, err => { checkNoError(err); - s3.getObjectAcl({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { + s3.getObjectAcl({ Bucket: destBucketName, Key: destObjName }, (err, res) => { // With authenticated-read ACL, there are two // grants: // (1) FULL_CONTROL to the object owner // (2) READ to the authenticated-read assert.strictEqual(res.Grants.length, 2); - assert.strictEqual(res.Grants[0].Permission, - 'FULL_CONTROL'); - assert.strictEqual(res.Grants[1].Permission, - 'READ'); - assert.strictEqual(res.Grants[1].Grantee.URI, - 'http://acs.amazonaws.com/groups/' + - 'global/AuthenticatedUsers'); + assert.strictEqual(res.Grants[0].Permission, 'FULL_CONTROL'); + assert.strictEqual(res.Grants[1].Permission, 'READ'); + assert.strictEqual( + res.Grants[1].Grantee.URI, + 'http://acs.amazonaws.com/groups/' + 'global/AuthenticatedUsers' + ); done(); }); - }); - }); - - it('should copy an object and default the acl on the new object ' + - 'to private even if the copied object had a ' + - 'different acl', done => { - s3.putObjectAcl({ Bucket: sourceBucketName, Key: sourceObjName, - ACL: 'authenticated-read' }, () => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, - () => { - s3.getObjectAcl({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - // With private ACL, there is only one grant - // of FULL_CONTROL to the object owner - assert.strictEqual(res.Grants.length, 1); - assert.strictEqual(res.Grants[0].Permission, - 'FULL_CONTROL'); - done(); - }); - }); - }); + } + ); }); - it('should return an error if attempt to copy with same source as' + - 'destination and do not change any metadata', done => { - s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, - err => { - checkError(err, 'InvalidRequest'); - done(); + it( + 'should copy an object and default the acl on the new object ' + + 'to private even if the copied object had a ' + + 'different acl', + done => { + s3.putObjectAcl({ Bucket: 
sourceBucketName, Key: sourceObjName, ACL: 'authenticated-read' }, () => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + }, + () => { + s3.getObjectAcl({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + // With private ACL, there is only one grant + // of FULL_CONTROL to the object owner + assert.strictEqual(res.Grants.length, 1); + assert.strictEqual(res.Grants[0].Permission, 'FULL_CONTROL'); + done(); + }); + } + ); }); - }); + } + ); - it('should return an error if attempt to copy from nonexistent bucket', + it( + 'should return an error if attempt to copy with same source as' + + 'destination and do not change any metadata', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `nobucket453234/${sourceObjName}`, - }, + s3.copyObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + }, + err => { + checkError(err, 'InvalidRequest'); + done(); + } + ); + } + ); + + it('should return an error if attempt to copy from nonexistent bucket', done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: `nobucket453234/${sourceObjName}` }, err => { checkError(err, 'NoSuchBucket'); done(); - }); - }); + } + ); + }); - it('should return an error if use invalid redirect location', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + it('should return an error if use invalid redirect location', done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, WebsiteRedirectLocation: 'google.com', }, err => { checkError(err, 'InvalidRedirectLocation'); done(); - }); - }); + } + ); + }); - it('should return an error if copy request has object lock legal ' + - 'hold header but object lock is not enabled on destination bucket', + it( + 'should return an error if copy request has object lock legal ' + + 'hold header but object lock is not enabled on destination bucket', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - ObjectLockLegalHoldStatus: 'ON', - }, - err => { - checkError(err, 'InvalidRequest'); - done(); - }); - }); + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + ObjectLockLegalHoldStatus: 'ON', + }, + err => { + checkError(err, 'InvalidRequest'); + done(); + } + ); + } + ); - it('should return an error if copy request has retention headers ' + - 'but object lock is not enabled on destination bucket', + it( + 'should return an error if copy request has retention headers ' + + 'but object lock is not enabled on destination bucket', done => { const mockDate = new Date(2050, 10, 12); - s3.copyObject({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - ObjectLockMode: 'GOVERNANCE', - ObjectLockRetainUntilDate: mockDate, - }, - err => { - checkError(err, 'InvalidRequest'); - done(); - }); - }); + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + ObjectLockMode: 'GOVERNANCE', + ObjectLockRetainUntilDate: mockDate, + }, + err => { + checkError(err, 'InvalidRequest'); + done(); + } + ); + } + ); - it('should return an error if attempt to copy to nonexistent bucket', - done => { - s3.copyObject({ Bucket: 'nobucket453234', Key: destObjName, - 
CopySource: `${sourceBucketName}/${sourceObjName}`, - }, + it('should return an error if attempt to copy to nonexistent bucket', done => { + s3.copyObject( + { Bucket: 'nobucket453234', Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}` }, err => { checkError(err, 'NoSuchBucket'); done(); - }); - }); + } + ); + }); - it('should return an error if attempt to copy nonexistent object', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/nokey`, - }, + it('should return an error if attempt to copy nonexistent object', done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/nokey` }, err => { checkError(err, 'NoSuchKey'); done(); - }); - }); + } + ); + }); - it('should return an error if send invalid metadata directive header', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + it('should return an error if send invalid metadata directive header', done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, MetadataDirective: 'copyHalf', }, err => { checkError(err, 'InvalidArgument'); done(); - }); - }); + } + ); + }); describe('copying by another account', () => { const otherAccountBucket = 'otheraccountbucket42342342342'; const otherAccountKey = 'key'; - beforeEach(() => otherAccountBucketUtility - .createOne(otherAccountBucket) - ); + beforeEach(() => otherAccountBucketUtility.createOne(otherAccountBucket)); - afterEach(() => otherAccountBucketUtility.empty(otherAccountBucket) - .then(() => otherAccountBucketUtility - .deleteOne(otherAccountBucket)) + afterEach(() => + otherAccountBucketUtility + .empty(otherAccountBucket) + .then(() => otherAccountBucketUtility.deleteOne(otherAccountBucket)) ); - it('should not allow an account without read persmission on the ' + - 'source object to copy the object', done => { - otherAccountS3.copyObject({ Bucket: otherAccountBucket, - Key: otherAccountKey, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, - err => { - checkError(err, 'AccessDenied'); - done(); - }); - }); - - it('should not allow an account without write persmission on the ' + - 'destination bucket to copy the object', done => { - otherAccountS3.putObject({ Bucket: otherAccountBucket, - Key: otherAccountKey, Body: '' }, () => { - otherAccountS3.copyObject({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${otherAccountBucket}/${otherAccountKey}`, - }, + it( + 'should not allow an account without read persmission on the ' + 'source object to copy the object', + done => { + otherAccountS3.copyObject( + { + Bucket: otherAccountBucket, + Key: otherAccountKey, + CopySource: `${sourceBucketName}/${sourceObjName}`, + }, err => { checkError(err, 'AccessDenied'); done(); - }); - }); - }); + } + ); + } + ); - it('should allow an account with read permission on the ' + - 'source object and write permission on the destination ' + - 'bucket to copy the object', done => { - s3.putObjectAcl({ Bucket: sourceBucketName, - Key: sourceObjName, ACL: 'public-read' }, () => { - otherAccountS3.copyObject({ Bucket: otherAccountBucket, - Key: otherAccountKey, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, - err => { - checkNoError(err); - done(); - }); - }); - }); + it( + 'should not allow an account without write persmission on the ' + + 'destination bucket to copy the object', + done => { + otherAccountS3.putObject({ Bucket: otherAccountBucket, Key: otherAccountKey, 
Body: '' }, () => { + otherAccountS3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${otherAccountBucket}/${otherAccountKey}`, + }, + err => { + checkError(err, 'AccessDenied'); + done(); + } + ); + }); + } + ); + + it( + 'should allow an account with read permission on the ' + + 'source object and write permission on the destination ' + + 'bucket to copy the object', + done => { + s3.putObjectAcl({ Bucket: sourceBucketName, Key: sourceObjName, ACL: 'public-read' }, () => { + otherAccountS3.copyObject( + { + Bucket: otherAccountBucket, + Key: otherAccountKey, + CopySource: `${sourceBucketName}/${sourceObjName}`, + }, + err => { + checkNoError(err); + done(); + } + ); + }); + } + ); }); - it('If-Match: returns no error when ETag match, with double quotes ' + - 'around ETag', - done => { - requestCopy({ CopySourceIfMatch: etag }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when ETag match, with double quotes ' + 'around ETag', done => { + requestCopy({ CopySourceIfMatch: etag }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when one of ETags match, with double ' + - 'quotes around ETag', - done => { - requestCopy({ CopySourceIfMatch: - `non-matching,${etag}` }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when one of ETags match, with double ' + 'quotes around ETag', done => { + requestCopy({ CopySourceIfMatch: `non-matching,${etag}` }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when ETag match, without double ' + - 'quotes around ETag', - done => { - requestCopy({ CopySourceIfMatch: etagTrim }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when ETag match, without double ' + 'quotes around ETag', done => { + requestCopy({ CopySourceIfMatch: etagTrim }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when one of ETags match, without ' + - 'double quotes around ETag', - done => { - requestCopy({ CopySourceIfMatch: - `non-matching,${etagTrim}` }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when one of ETags match, without ' + 'double quotes around ETag', done => { + requestCopy({ CopySourceIfMatch: `non-matching,${etagTrim}` }, err => { + checkNoError(err); + done(); }); + }); it('If-Match: returns no error when ETag match with *', done => { requestCopy({ CopySourceIfMatch: '*' }, err => { @@ -923,13 +1059,12 @@ describe('Object Copy', () => { }); }); - it('If-Match: returns PreconditionFailed when ETag does not match', - done => { - requestCopy({ CopySourceIfMatch: 'non-matching ETag' }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it('If-Match: returns PreconditionFailed when ETag does not match', done => { + requestCopy({ CopySourceIfMatch: 'non-matching ETag' }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); it('If-None-Match: returns no error when ETag does not match', done => { requestCopy({ CopySourceIfNoneMatch: 'non-matching' }, err => { @@ -938,334 +1073,385 @@ describe('Object Copy', () => { }); }); - it('If-None-Match: returns no error when all ETags do not match', - done => { - requestCopy({ + it('If-None-Match: returns no error when all ETags do not match', done => { + requestCopy( + { CopySourceIfNoneMatch: 'non-matching,non-matching-either', - }, err => { + }, + err => { checkNoError(err); done(); - }); - }); + } + ); + }); - it('If-None-Match: returns 
PreconditionFailed when ETag match, with' + - 'double quotes around ETag', - done => { - requestCopy({ CopySourceIfNoneMatch: etag }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it('If-None-Match: returns PreconditionFailed when ETag match, with' + 'double quotes around ETag', done => { + requestCopy({ CopySourceIfNoneMatch: etag }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); - it('If-None-Match: returns PreconditionFailed when one of ETags ' + - 'match, with double quotes around ETag', + it( + 'If-None-Match: returns PreconditionFailed when one of ETags ' + 'match, with double quotes around ETag', done => { - requestCopy({ - CopySourceIfNoneMatch: `non-matching,${etag}`, - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); - }); + requestCopy( + { + CopySourceIfNoneMatch: `non-matching,${etag}`, + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); + } + ); - it('If-None-Match: returns PreconditionFailed when ETag match, ' + - 'without double quotes around ETag', + it( + 'If-None-Match: returns PreconditionFailed when ETag match, ' + 'without double quotes around ETag', done => { requestCopy({ CopySourceIfNoneMatch: etagTrim }, err => { checkError(err, 'PreconditionFailed'); done(); }); - }); - - it('If-None-Match: returns PreconditionFailed when one of ETags ' + - 'match, without double quotes around ETag', - done => { - requestCopy({ - CopySourceIfNoneMatch: `non-matching,${etagTrim}`, - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); - }); + } + ); - it('If-Modified-Since: returns no error if Last modified date is ' + - 'greater', + it( + 'If-None-Match: returns PreconditionFailed when one of ETags ' + 'match, without double quotes around ETag', done => { - requestCopy({ CopySourceIfModifiedSince: dateFromNow(-1) }, + requestCopy( + { + CopySourceIfNoneMatch: `non-matching,${etagTrim}`, + }, err => { - checkNoError(err); + checkError(err, 'PreconditionFailed'); done(); - }); + } + ); + } + ); + + it('If-Modified-Since: returns no error if Last modified date is ' + 'greater', done => { + requestCopy({ CopySourceIfModifiedSince: dateFromNow(-1) }, err => { + checkNoError(err); + done(); }); + }); // Skipping this test, because real AWS does not provide error as // expected - it.skip('If-Modified-Since: returns PreconditionFailed if Last ' + - 'modified date is lesser', - done => { - requestCopy({ CopySourceIfModifiedSince: dateFromNow(1) }, - err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it.skip('If-Modified-Since: returns PreconditionFailed if Last ' + 'modified date is lesser', done => { + requestCopy({ CopySourceIfModifiedSince: dateFromNow(1) }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); - it('If-Modified-Since: returns PreconditionFailed if Last modified ' + - 'date is equal', - done => { - requestCopy({ CopySourceIfModifiedSince: - dateConvert(lastModified) }, - err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it('If-Modified-Since: returns PreconditionFailed if Last modified ' + 'date is equal', done => { + requestCopy({ CopySourceIfModifiedSince: dateConvert(lastModified) }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); - it('If-Unmodified-Since: returns no error when lastModified date is ' + - 'greater', - done => { - requestCopy({ CopySourceIfUnmodifiedSince: dateFromNow(1) }, - err => { - checkNoError(err); - done(); - }); + it('If-Unmodified-Since: returns no error when 
lastModified date is ' + 'greater', done => { + requestCopy({ CopySourceIfUnmodifiedSince: dateFromNow(1) }, err => { + checkNoError(err); + done(); }); + }); - it('If-Unmodified-Since: returns no error when lastModified ' + - 'date is equal', - done => { - requestCopy({ CopySourceIfUnmodifiedSince: - dateConvert(lastModified) }, - err => { - checkNoError(err); - done(); - }); + it('If-Unmodified-Since: returns no error when lastModified ' + 'date is equal', done => { + requestCopy({ CopySourceIfUnmodifiedSince: dateConvert(lastModified) }, err => { + checkNoError(err); + done(); }); + }); - it('If-Unmodified-Since: returns PreconditionFailed when ' + - 'lastModified date is lesser', - done => { - requestCopy({ CopySourceIfUnmodifiedSince: dateFromNow(-1) }, - err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it('If-Unmodified-Since: returns PreconditionFailed when ' + 'lastModified date is lesser', done => { + requestCopy({ CopySourceIfUnmodifiedSince: dateFromNow(-1) }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); + + it( + 'If-Match & If-Unmodified-Since: returns no error when match Etag ' + 'and lastModified is greater', + done => { + requestCopy( + { + CopySourceIfMatch: etagTrim, + CopySourceIfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); + } + ); - it('If-Match & If-Unmodified-Since: returns no error when match Etag ' + - 'and lastModified is greater', - done => { - requestCopy({ + it('If-Match match & If-Unmodified-Since match', done => { + requestCopy( + { CopySourceIfMatch: etagTrim, - CopySourceIfUnmodifiedSince: dateFromNow(-1), - }, err => { + CopySourceIfUnmodifiedSince: dateFromNow(1), + }, + err => { checkNoError(err); done(); - }); - }); - - it('If-Match match & If-Unmodified-Since match', done => { - requestCopy({ - CopySourceIfMatch: etagTrim, - CopySourceIfUnmodifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + } + ); }); it('If-Match not match & If-Unmodified-Since not match', done => { - requestCopy({ - CopySourceIfMatch: 'non-matching', - CopySourceIfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfMatch: 'non-matching', + CopySourceIfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-Match not match & If-Unmodified-Since match', done => { - requestCopy({ - CopySourceIfMatch: 'non-matching', - CopySourceIfUnmodifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfMatch: 'non-matching', + CopySourceIfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); // Skipping this test, because real AWS does not provide error as // expected it.skip('If-Match match & If-Modified-Since not match', done => { - requestCopy({ - CopySourceIfMatch: etagTrim, - CopySourceIfModifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + requestCopy( + { + CopySourceIfMatch: etagTrim, + CopySourceIfModifiedSince: dateFromNow(1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-Match match & If-Modified-Since match', done => { - requestCopy({ - CopySourceIfMatch: etagTrim, - CopySourceIfModifiedSince: dateFromNow(-1), - }, err => { - checkNoError(err); - done(); - }); + requestCopy( + { + CopySourceIfMatch: etagTrim, + CopySourceIfModifiedSince: 
dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-Match not match & If-Modified-Since not match', done => { - requestCopy({ - CopySourceIfMatch: 'non-matching', - CopySourceIfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfMatch: 'non-matching', + CopySourceIfModifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-Match not match & If-Modified-Since match', done => { - requestCopy({ - CopySourceIfMatch: 'non-matching', - CopySourceIfModifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfMatch: 'non-matching', + CopySourceIfModifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); - it('If-None-Match & If-Modified-Since: returns PreconditionFailed ' + - 'when Etag does not match and lastModified is greater', + it( + 'If-None-Match & If-Modified-Since: returns PreconditionFailed ' + + 'when Etag does not match and lastModified is greater', done => { - requestCopy({ + requestCopy( + { + CopySourceIfNoneMatch: etagTrim, + CopySourceIfModifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); + } + ); + + it('If-None-Match not match & If-Modified-Since not match', done => { + requestCopy( + { CopySourceIfNoneMatch: etagTrim, - CopySourceIfModifiedSince: dateFromNow(-1), - }, err => { + CopySourceIfModifiedSince: dateFromNow(1), + }, + err => { checkError(err, 'PreconditionFailed'); done(); - }); - }); - - it('If-None-Match not match & If-Modified-Since not match', done => { - requestCopy({ - CopySourceIfNoneMatch: etagTrim, - CopySourceIfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + } + ); }); it('If-None-Match match & If-Modified-Since match', done => { - requestCopy({ - CopySourceIfNoneMatch: 'non-matching', - CopySourceIfModifiedSince: dateFromNow(-1), - }, err => { - checkNoError(err); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: 'non-matching', + CopySourceIfModifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); }); // Skipping this test, because real AWS does not provide error as // expected it.skip('If-None-Match match & If-Modified-Since not match', done => { - requestCopy({ - CopySourceIfNoneMatch: 'non-matching', - CopySourceIfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: 'non-matching', + CopySourceIfModifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-None-Match match & If-Unmodified-Since match', done => { - requestCopy({ - CopySourceIfNoneMatch: 'non-matching', - CopySourceIfUnmodifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: 'non-matching', + CopySourceIfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-None-Match match & If-Unmodified-Since not match', done => { - requestCopy({ - CopySourceIfNoneMatch: 'non-matching', - CopySourceIfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: 'non-matching', + CopySourceIfUnmodifiedSince: 
dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-None-Match not match & If-Unmodified-Since match', done => { - requestCopy({ - CopySourceIfNoneMatch: etagTrim, - CopySourceIfUnmodifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: etagTrim, + CopySourceIfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-None-Match not match & If-Unmodified-Since not match', done => { - requestCopy({ - CopySourceIfNoneMatch: etagTrim, - CopySourceIfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); - }); - - it('should return InvalidStorageClass error when x-amz-storage-class header is provided ' + - 'and not equal to STANDARD', done => { - s3.copyObject({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - StorageClass: 'COLD', - }, err => { - assert.strictEqual(err.code, 'InvalidStorageClass'); - assert.strictEqual(err.statusCode, 400); + requestCopy( + { + CopySourceIfNoneMatch: etagTrim, + CopySourceIfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); done(); - }); + } + ); }); + it( + 'should return InvalidStorageClass error when x-amz-storage-class header is provided ' + + 'and not equal to STANDARD', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + StorageClass: 'COLD', + }, + err => { + assert.strictEqual(err.code, 'InvalidStorageClass'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); + } + ); + it('should not copy a cold object', done => { const archive = { archiveInfo: { archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', - archiveVersion: 5577006791947779 + archiveVersion: 5577006791947779, }, }; fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archive, err => { assert.ifError(err); - s3.copyObject({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, err => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + }, + err => { assert.strictEqual(err.code, 'InvalidObjectState'); assert.strictEqual(err.statusCode, 403); done(); - }); + } + ); }); }); - it('should copy an object when it\'s transitioning to cold', done => { + it("should copy an object when it's transitioning to cold", done => { fakeMetadataTransition(sourceBucketName, sourceObjName, undefined, err => { assert.ifError(err); - s3.copyObject({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, (err, res) => { - successCopyCheck(err, res, originalMetadata, - destBucketName, destObjName, done); - }); + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + }, + (err, res) => { + successCopyCheck(err, res, originalMetadata, destBucketName, destObjName, done); + } + ); }); }); @@ -1275,18 +1461,20 @@ describe('Object Copy', () => { restoreRequestedAt: new Date(0), restoreRequestedDays: 5, restoreCompletedAt: new Date(10), - restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)), + restoreWillExpireAt: new Date(10 + 5 * 24 * 60 * 60 * 1000), }; fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archiveCompleted, err 
=> { assert.ifError(err); - s3.copyObject({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, (err, res) => { - successCopyCheck(err, res, originalMetadata, - destBucketName, destObjName, done); - }); + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + }, + (err, res) => { + successCopyCheck(err, res, originalMetadata, destBucketName, destObjName, done); + } + ); }); }); }); @@ -1295,8 +1483,7 @@ describe('Object Copy', () => { const isCEPH = process.env.CI_CEPH !== undefined; const describeSkipIfCeph = isCEPH ? describe.skip : describe; -describeSkipIfCeph('Object Copy with object lock enabled on both destination ' + - 'bucket and source bucket', () => { +describeSkipIfCeph('Object Copy with object lock enabled on both destination ' + 'bucket and source bucket', () => { withV4(sigCfg => { let bucketUtil; let s3; @@ -1305,10 +1492,10 @@ describeSkipIfCeph('Object Copy with object lock enabled on both destination ' + before(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return bucketUtil.empty(sourceBucketName) + return bucketUtil + .empty(sourceBucketName) .then(() => bucketUtil.empty(destBucketName)) - .then(() => - bucketUtil.deleteMany([sourceBucketName, destBucketName])) + .then(() => bucketUtil.deleteMany([sourceBucketName, destBucketName])) .catch(err => { if (err.code !== 'NoSuchBucket') { process.stdout.write(`${err}\n`); @@ -1322,51 +1509,55 @@ describeSkipIfCeph('Object Copy with object lock enabled on both destination ' + }); }); - beforeEach(() => s3.putObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - Body: content, - Metadata: originalMetadata, - ObjectLockMode: 'GOVERNANCE', - ObjectLockRetainUntilDate: new Date(2050, 1, 1), - }).promise().then(res => { - versionId = res.VersionId; - s3.headObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - }).promise(); - })); - - afterEach(() => bucketUtil.empty(sourceBucketName) - .then(() => bucketUtil.empty(destBucketName))); + beforeEach(() => + s3 + .putObject({ + Bucket: sourceBucketName, + Key: sourceObjName, + Body: content, + Metadata: originalMetadata, + ObjectLockMode: 'GOVERNANCE', + ObjectLockRetainUntilDate: new Date(2050, 1, 1), + }) + .promise() + .then(res => { + versionId = res.VersionId; + s3.headObject({ + Bucket: sourceBucketName, + Key: sourceObjName, + }).promise(); + }) + ); + + afterEach(() => bucketUtil.empty(sourceBucketName).then(() => bucketUtil.empty(destBucketName))); after(() => bucketUtil.deleteMany([sourceBucketName, destBucketName])); - it('should not copy default retention info of the destination ' + - 'bucket if legal hold header is passed with copy object request', + it( + 'should not copy default retention info of the destination ' + + 'bucket if legal hold header is passed with copy object request', done => { - s3.copyObject({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - ObjectLockLegalHoldStatus: 'ON', - }, - err => { - assert.ifError(err); - s3.getObject({ Bucket: destBucketName, Key: destObjName }, - (err, res) => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + ObjectLockLegalHoldStatus: 'ON', + }, + err => { + assert.ifError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { assert.ifError(err); assert.strictEqual(res.ObjectLockMode, undefined); - 
assert.strictEqual(res.ObjectLockRetainUntilDate, - undefined); - assert.strictEqual(res.ObjectLockLegalHoldStatus, - 'ON'); + assert.strictEqual(res.ObjectLockRetainUntilDate, undefined); + assert.strictEqual(res.ObjectLockLegalHoldStatus, 'ON'); const removeLockObjs = [ { bucket: sourceBucketName, key: sourceObjName, versionId, - }, { + }, + { bucket: destBucketName, key: destObjName, versionId: res.VersionId, @@ -1374,28 +1565,29 @@ describeSkipIfCeph('Object Copy with object lock enabled on both destination ' + ]; changeObjectLock(removeLockObjs, '', done); }); - }); - }); + } + ); + } + ); - it('should not copy default retention info of the destination ' + - 'bucket if legal hold header is passed with copy object request', + it( + 'should not copy default retention info of the destination ' + + 'bucket if legal hold header is passed with copy object request', done => { - s3.copyObject({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - ObjectLockLegalHoldStatus: 'on', - }, - err => { - assert.ifError(err); - s3.getObject({ Bucket: destBucketName, Key: destObjName }, - (err, res) => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + ObjectLockLegalHoldStatus: 'on', + }, + err => { + assert.ifError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { assert.ifError(err); assert.strictEqual(res.ObjectLockMode, undefined); - assert.strictEqual(res.ObjectLockRetainUntilDate, - undefined); - assert.strictEqual(res.ObjectLockLegalHoldStatus, - 'OFF'); + assert.strictEqual(res.ObjectLockRetainUntilDate, undefined); + assert.strictEqual(res.ObjectLockLegalHoldStatus, 'OFF'); const removeLockObjs = [ { bucket: sourceBucketName, @@ -1405,33 +1597,39 @@ describeSkipIfCeph('Object Copy with object lock enabled on both destination ' + ]; changeObjectLock(removeLockObjs, '', done); }); - }); - }); + } + ); + } + ); - it('should overwrite default retention info of the destination ' + - 'bucket if retention headers passed with copy object request', + it( + 'should overwrite default retention info of the destination ' + + 'bucket if retention headers passed with copy object request', done => { - s3.copyObject({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - ObjectLockMode: 'COMPLIANCE', - ObjectLockRetainUntilDate: new Date(2055, 2, 3), - }, - err => { - assert.ifError(err); - s3.getObject({ Bucket: destBucketName, Key: destObjName }, - (err, res) => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + ObjectLockMode: 'COMPLIANCE', + ObjectLockRetainUntilDate: new Date(2055, 2, 3), + }, + err => { + assert.ifError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { assert.ifError(err); assert.strictEqual(res.ObjectLockMode, 'COMPLIANCE'); - assert.strictEqual(res.ObjectLockRetainUntilDate.toGMTString(), - new Date(2055, 2, 3).toGMTString()); + assert.strictEqual( + res.ObjectLockRetainUntilDate.toGMTString(), + new Date(2055, 2, 3).toGMTString() + ); const removeLockObjs = [ { bucket: sourceBucketName, key: sourceObjName, versionId, - }, { + }, + { bucket: destBucketName, key: destObjName, versionId: res.VersionId, @@ -1439,7 +1637,9 @@ describeSkipIfCeph('Object Copy with object lock enabled on both destination ' + ]; changeObjectLock(removeLockObjs, '', done); }); - }); - }); + } + ); + } + 
); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/objectHead.js b/tests/functional/aws-node-sdk/test/object/objectHead.js index bca6a0e13b..4ad50c197d 100644 --- a/tests/functional/aws-node-sdk/test/object/objectHead.js +++ b/tests/functional/aws-node-sdk/test/object/objectHead.js @@ -15,8 +15,7 @@ const objectName = 'someObject'; const partSize = 1024 * 1024 * 5; // 5MB minumum required part size. function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } function checkError(err, code) { @@ -31,7 +30,7 @@ function dateFromNow(diff) { } function dateConvert(d) { - return (new Date(d)).toISOString(); + return new Date(d).toISOString(); } describe('HEAD object, conditions', () => { @@ -45,77 +44,80 @@ describe('HEAD object, conditions', () => { before(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return bucketUtil.empty(bucketName).then(() => - bucketUtil.deleteOne(bucketName) - ) - .catch(err => { - if (err.code !== 'NoSuchBucket') { - process.stdout.write(`${err}\n`); - throw err; - } - }) - .then(() => bucketUtil.createOne(bucketName)); + return bucketUtil + .empty(bucketName) + .then(() => bucketUtil.deleteOne(bucketName)) + .catch(err => { + if (err.code !== 'NoSuchBucket') { + process.stdout.write(`${err}\n`); + throw err; + } + }) + .then(() => bucketUtil.createOne(bucketName)); }); function requestHead(fields, cb) { - s3.headObject(Object.assign({ - Bucket: bucketName, - Key: objectName, - }, fields), cb); + s3.headObject( + Object.assign( + { + Bucket: bucketName, + Key: objectName, + }, + fields + ), + cb + ); } - beforeEach(() => s3.putObject({ - Bucket: bucketName, - Key: objectName, - Body: 'I am the best content ever', - }).promise().then(res => { - etag = res.ETag; - etagTrim = etag.substring(1, etag.length - 1); - return s3.headObject( - { Bucket: bucketName, Key: objectName }).promise(); - }).then(res => { - lastModified = res.LastModified; - })); + beforeEach(() => + s3 + .putObject({ + Bucket: bucketName, + Key: objectName, + Body: 'I am the best content ever', + }) + .promise() + .then(res => { + etag = res.ETag; + etagTrim = etag.substring(1, etag.length - 1); + return s3.headObject({ Bucket: bucketName, Key: objectName }).promise(); + }) + .then(res => { + lastModified = res.LastModified; + }) + ); afterEach(() => bucketUtil.empty(bucketName)); after(() => bucketUtil.deleteOne(bucketName)); - it('If-Match: returns no error when ETag match, with double quotes ' + - 'around ETag', - done => { - requestHead({ IfMatch: etag }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when ETag match, with double quotes ' + 'around ETag', done => { + requestHead({ IfMatch: etag }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when one of ETags match, with double ' + - 'quotes around ETag', - done => { - requestHead({ IfMatch: `non-matching,${etag}` }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when one of ETags match, with double ' + 'quotes around ETag', done => { + requestHead({ IfMatch: `non-matching,${etag}` }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when ETag match, without double ' + - 'quotes around ETag', - done => { - requestHead({ IfMatch: etagTrim }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when ETag match, 
without double ' + 'quotes around ETag', done => { + requestHead({ IfMatch: etagTrim }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when one of ETags match, without ' + - 'double quotes around ETag', - done => { - requestHead({ IfMatch: `non-matching,${etagTrim}` }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when one of ETags match, without ' + 'double quotes around ETag', done => { + requestHead({ IfMatch: `non-matching,${etagTrim}` }, err => { + checkNoError(err); + done(); }); + }); it('If-Match: returns no error when ETag match with *', done => { requestHead({ IfMatch: '*' }, err => { @@ -124,13 +126,12 @@ describe('HEAD object, conditions', () => { }); }); - it('If-Match: returns PreconditionFailed when ETag does not match', - done => { - requestHead({ IfMatch: 'non-matching ETag' }, err => { - checkError(err, errorInstances.PreconditionFailed.code); - done(); - }); + it('If-Match: returns PreconditionFailed when ETag does not match', done => { + requestHead({ IfMatch: 'non-matching ETag' }, err => { + checkError(err, errorInstances.PreconditionFailed.code); + done(); }); + }); it('If-None-Match: returns no error when ETag does not match', done => { requestHead({ IfNoneMatch: 'non-matching' }, err => { @@ -139,282 +140,320 @@ describe('HEAD object, conditions', () => { }); }); - it('If-None-Match: returns no error when all ETags do not match', - done => { - requestHead({ + it('If-None-Match: returns no error when all ETags do not match', done => { + requestHead( + { IfNoneMatch: 'non-matching,non-matching-either', - }, err => { + }, + err => { checkNoError(err); done(); - }); - }); + } + ); + }); - it('If-None-Match: returns NotModified when ETag match, with double ' + - 'quotes around ETag', - done => { - requestHead({ IfNoneMatch: etag }, err => { - checkError(err, 'NotModified'); - done(); - }); + it('If-None-Match: returns NotModified when ETag match, with double ' + 'quotes around ETag', done => { + requestHead({ IfNoneMatch: etag }, err => { + checkError(err, 'NotModified'); + done(); }); + }); - it('If-None-Match: returns NotModified when one of ETags match, with ' + - 'double quotes around ETag', - done => { - requestHead({ + it('If-None-Match: returns NotModified when one of ETags match, with ' + 'double quotes around ETag', done => { + requestHead( + { IfNoneMatch: `non-matching,${etag}`, - }, err => { - checkError(err, 'NotModified'); - done(); - }); - }); - - it('If-None-Match: returns NotModified when ETag match, without ' + - 'double quotes around ETag', - done => { - requestHead({ IfNoneMatch: etagTrim }, err => { + }, + err => { checkError(err, 'NotModified'); done(); - }); - }); + } + ); + }); - it('If-None-Match: returns NotModified when one of ETags match, ' + - 'without double quotes around ETag', - done => { - requestHead({ - IfNoneMatch: `non-matching,${etagTrim}`, - }, err => { - checkError(err, 'NotModified'); - done(); - }); + it('If-None-Match: returns NotModified when ETag match, without ' + 'double quotes around ETag', done => { + requestHead({ IfNoneMatch: etagTrim }, err => { + checkError(err, 'NotModified'); + done(); }); + }); - it('If-Modified-Since: returns no error if Last modified date is ' + - 'greater', + it( + 'If-None-Match: returns NotModified when one of ETags match, ' + 'without double quotes around ETag', done => { - requestHead({ IfModifiedSince: dateFromNow(-1) }, + requestHead( + { + IfNoneMatch: `non-matching,${etagTrim}`, + }, err => { - checkNoError(err); + 
checkError(err, 'NotModified'); done(); - }); + } + ); + } + ); + + it('If-Modified-Since: returns no error if Last modified date is ' + 'greater', done => { + requestHead({ IfModifiedSince: dateFromNow(-1) }, err => { + checkNoError(err); + done(); }); + }); // Skipping this test, because real AWS does not provide error as // expected - it.skip('If-Modified-Since: returns NotModified if Last modified ' + - 'date is lesser', - done => { - requestHead({ IfModifiedSince: dateFromNow(1) }, - err => { - checkError(err, 'NotModified'); - done(); - }); + it.skip('If-Modified-Since: returns NotModified if Last modified ' + 'date is lesser', done => { + requestHead({ IfModifiedSince: dateFromNow(1) }, err => { + checkError(err, 'NotModified'); + done(); }); + }); - it('If-Modified-Since: returns NotModified if Last modified ' + - 'date is equal', - done => { - requestHead({ IfModifiedSince: dateConvert(lastModified) }, - err => { - checkError(err, 'NotModified'); - done(); - }); + it('If-Modified-Since: returns NotModified if Last modified ' + 'date is equal', done => { + requestHead({ IfModifiedSince: dateConvert(lastModified) }, err => { + checkError(err, 'NotModified'); + done(); }); + }); - it('If-Unmodified-Since: returns no error when lastModified date is ' + - 'greater', - done => { - requestHead({ IfUnmodifiedSince: dateFromNow(1) }, err => { - checkNoError(err); - done(); - }); + it('If-Unmodified-Since: returns no error when lastModified date is ' + 'greater', done => { + requestHead({ IfUnmodifiedSince: dateFromNow(1) }, err => { + checkNoError(err); + done(); }); + }); - it('If-Unmodified-Since: returns no error when lastModified ' + - 'date is equal', - done => { - requestHead({ IfUnmodifiedSince: dateConvert(lastModified) }, - err => { - checkNoError(err); - done(); - }); + it('If-Unmodified-Since: returns no error when lastModified ' + 'date is equal', done => { + requestHead({ IfUnmodifiedSince: dateConvert(lastModified) }, err => { + checkNoError(err); + done(); }); + }); - it('If-Unmodified-Since: returns PreconditionFailed when ' + - 'lastModified date is lesser', - done => { - requestHead({ IfUnmodifiedSince: dateFromNow(-1) }, err => { - checkError(err, errorInstances.PreconditionFailed.code); - done(); - }); + it('If-Unmodified-Since: returns PreconditionFailed when ' + 'lastModified date is lesser', done => { + requestHead({ IfUnmodifiedSince: dateFromNow(-1) }, err => { + checkError(err, errorInstances.PreconditionFailed.code); + done(); }); + }); - it('If-Match & If-Unmodified-Since: returns no error when match Etag ' + - 'and lastModified is greater', + it( + 'If-Match & If-Unmodified-Since: returns no error when match Etag ' + 'and lastModified is greater', done => { - requestHead({ + requestHead( + { + IfMatch: etagTrim, + IfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); + } + ); + + it('If-Match match & If-Unmodified-Since match', done => { + requestHead( + { IfMatch: etagTrim, - IfUnmodifiedSince: dateFromNow(-1), - }, err => { + IfUnmodifiedSince: dateFromNow(1), + }, + err => { checkNoError(err); done(); - }); - }); - - it('If-Match match & If-Unmodified-Since match', done => { - requestHead({ - IfMatch: etagTrim, - IfUnmodifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + } + ); }); it('If-Match not match & If-Unmodified-Since not match', done => { - requestHead({ - IfMatch: 'non-matching', - IfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 
errorInstances.PreconditionFailed.code); - done(); - }); + requestHead( + { + IfMatch: 'non-matching', + IfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, errorInstances.PreconditionFailed.code); + done(); + } + ); }); it('If-Match not match & If-Unmodified-Since match', done => { - requestHead({ - IfMatch: 'non-matching', - IfUnmodifiedSince: dateFromNow(1), - }, err => { - checkError(err, errorInstances.PreconditionFailed.code); - done(); - }); + requestHead( + { + IfMatch: 'non-matching', + IfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkError(err, errorInstances.PreconditionFailed.code); + done(); + } + ); }); // Skipping this test, because real AWS does not provide error as // expected it.skip('If-Match match & If-Modified-Since not match', done => { - requestHead({ - IfMatch: etagTrim, - IfModifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + requestHead( + { + IfMatch: etagTrim, + IfModifiedSince: dateFromNow(1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-Match match & If-Modified-Since match', done => { - requestHead({ - IfMatch: etagTrim, - IfModifiedSince: dateFromNow(-1), - }, err => { - checkNoError(err); - done(); - }); + requestHead( + { + IfMatch: etagTrim, + IfModifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-Match not match & If-Modified-Since not match', done => { - requestHead({ - IfMatch: 'non-matching', - IfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, errorInstances.PreconditionFailed.code); - done(); - }); + requestHead( + { + IfMatch: 'non-matching', + IfModifiedSince: dateFromNow(1), + }, + err => { + checkError(err, errorInstances.PreconditionFailed.code); + done(); + } + ); }); it('If-Match not match & If-Modified-Since match', done => { - requestHead({ - IfMatch: 'non-matching', - IfModifiedSince: dateFromNow(-1), - }, err => { - checkError(err, errorInstances.PreconditionFailed.code); - done(); - }); + requestHead( + { + IfMatch: 'non-matching', + IfModifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, errorInstances.PreconditionFailed.code); + done(); + } + ); }); - it('If-None-Match & If-Modified-Since: returns NotModified when Etag ' + - 'does not match and lastModified is greater', + it( + 'If-None-Match & If-Modified-Since: returns NotModified when Etag ' + + 'does not match and lastModified is greater', done => { - requestHead({ + requestHead( + { + IfNoneMatch: etagTrim, + IfModifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'NotModified'); + done(); + } + ); + } + ); + + it('If-None-Match not match & If-Modified-Since not match', done => { + requestHead( + { IfNoneMatch: etagTrim, - IfModifiedSince: dateFromNow(-1), - }, err => { + IfModifiedSince: dateFromNow(1), + }, + err => { checkError(err, 'NotModified'); done(); - }); - }); - - it('If-None-Match not match & If-Modified-Since not match', done => { - requestHead({ - IfNoneMatch: etagTrim, - IfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'NotModified'); - done(); - }); + } + ); }); it('If-None-Match match & If-Modified-Since match', done => { - requestHead({ - IfNoneMatch: 'non-matching', - IfModifiedSince: dateFromNow(-1), - }, err => { - checkNoError(err); - done(); - }); + requestHead( + { + IfNoneMatch: 'non-matching', + IfModifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); }); // Skipping this test, because real AWS does not provide error as // expected 
it.skip('If-None-Match match & If-Modified-Since not match', done => { - requestHead({ - IfNoneMatch: 'non-matching', - IfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'NotModified'); - done(); - }); + requestHead( + { + IfNoneMatch: 'non-matching', + IfModifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'NotModified'); + done(); + } + ); }); it('If-None-Match match & If-Unmodified-Since match', done => { - requestHead({ - IfNoneMatch: 'non-matching', - IfUnmodifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + requestHead( + { + IfNoneMatch: 'non-matching', + IfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-None-Match match & If-Unmodified-Since not match', done => { - requestHead({ - IfNoneMatch: 'non-matching', - IfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, errorInstances.PreconditionFailed.code); - done(); - }); + requestHead( + { + IfNoneMatch: 'non-matching', + IfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, errorInstances.PreconditionFailed.code); + done(); + } + ); }); it('If-None-Match not match & If-Unmodified-Since match', done => { - requestHead({ - IfNoneMatch: etagTrim, - IfUnmodifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'NotModified'); - done(); - }); + requestHead( + { + IfNoneMatch: etagTrim, + IfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'NotModified'); + done(); + } + ); }); it('If-None-Match not match & If-Unmodified-Since not match', done => { - requestHead({ - IfNoneMatch: etagTrim, - IfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, errorInstances.PreconditionFailed.code); - done(); - }); + requestHead( + { + IfNoneMatch: etagTrim, + IfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, errorInstances.PreconditionFailed.code); + done(); + } + ); }); it('WebsiteRedirectLocation is set & it appears in response', done => { @@ -432,8 +471,7 @@ describe('HEAD object, conditions', () => { checkNoError(err); s3.headObject(redirBkt, (err, data) => { checkNoError(err); - assert.strictEqual(data.WebsiteRedirectLocation, - 'http://google.com'); + assert.strictEqual(data.WebsiteRedirectLocation, 'http://google.com'); return done(); }); }); @@ -463,74 +501,97 @@ describe('HEAD object, conditions', () => { it('WebsiteRedirectLocation is not set & is absent', done => { requestHead({}, (err, data) => { checkNoError(err); - assert.strictEqual('WebsiteRedirectLocation' in data, - false, 'WebsiteRedirectLocation header is present.'); + assert.strictEqual( + 'WebsiteRedirectLocation' in data, + false, + 'WebsiteRedirectLocation header is present.' 
+ ); done(); }); }); - it('PartNumber is set & PartsCount is absent because object is not ' + - 'multipart', done => { + it('PartNumber is set & PartsCount is absent because object is not ' + 'multipart', done => { requestHead({ PartNumber: 1 }, (err, data) => { assert.ifError(err); - assert.strictEqual('PartsCount' in data, false, - 'PartsCount header is present.'); + assert.strictEqual('PartsCount' in data, false, 'PartsCount header is present.'); done(); }); }); - it('PartNumber is set & PartsCount appears in response for ' + - 'multipart object', done => { + it('PartNumber is set & PartsCount appears in response for ' + 'multipart object', done => { const mpuKey = 'mpukey'; - async.waterfall([ - next => s3.createMultipartUpload({ - Bucket: bucketName, - Key: mpuKey, - }, next), - (data, next) => { - const uploadId = data.UploadId; - s3.uploadPart({ - Bucket: bucketName, - Key: mpuKey, - UploadId: uploadId, - PartNumber: 1, - Body: Buffer.alloc(partSize).fill('a'), - }, (err, data) => next(err, uploadId, data.ETag)); - }, - (uploadId, etagOne, next) => s3.uploadPart({ - Bucket: bucketName, - Key: mpuKey, - UploadId: uploadId, - PartNumber: 2, - Body: Buffer.alloc(partSize).fill('z'), - }, (err, data) => next(err, uploadId, etagOne, data.ETag)), - (uploadId, etagOne, etagTwo, next) => - s3.completeMultipartUpload({ - Bucket: bucketName, - Key: mpuKey, - UploadId: uploadId, - MultipartUpload: { - Parts: [{ - PartNumber: 1, - ETag: etagOne, - }, { - PartNumber: 2, - ETag: etagTwo, - }], + async.waterfall( + [ + next => + s3.createMultipartUpload( + { + Bucket: bucketName, + Key: mpuKey, + }, + next + ), + (data, next) => { + const uploadId = data.UploadId; + s3.uploadPart( + { + Bucket: bucketName, + Key: mpuKey, + UploadId: uploadId, + PartNumber: 1, + Body: Buffer.alloc(partSize).fill('a'), + }, + (err, data) => next(err, uploadId, data.ETag) + ); }, - }, next), - ], err => { - assert.ifError(err); - s3.headObject({ - Bucket: bucketName, - Key: mpuKey, - PartNumber: 1, - }, (err, data) => { + (uploadId, etagOne, next) => + s3.uploadPart( + { + Bucket: bucketName, + Key: mpuKey, + UploadId: uploadId, + PartNumber: 2, + Body: Buffer.alloc(partSize).fill('z'), + }, + (err, data) => next(err, uploadId, etagOne, data.ETag) + ), + (uploadId, etagOne, etagTwo, next) => + s3.completeMultipartUpload( + { + Bucket: bucketName, + Key: mpuKey, + UploadId: uploadId, + MultipartUpload: { + Parts: [ + { + PartNumber: 1, + ETag: etagOne, + }, + { + PartNumber: 2, + ETag: etagTwo, + }, + ], + }, + }, + next + ), + ], + err => { assert.ifError(err); - assert.strictEqual(data.PartsCount, 2); - done(); - }); - }); + s3.headObject( + { + Bucket: bucketName, + Key: mpuKey, + PartNumber: 1, + }, + (err, data) => { + assert.ifError(err); + assert.strictEqual(data.PartsCount, 2); + done(); + } + ); + } + ); }); }); }); @@ -557,48 +618,55 @@ describeSkipIfCeph('HEAD object with object lock', () => { ObjectLockMode: mockMode, ObjectLockLegalHoldStatus: 'ON', }; - return s3.createBucket({ - Bucket: bucket, - ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.putObject(params).promise()) - .then(() => s3.getObject({ Bucket: bucket, Key: key }).promise()) - /* eslint-disable no-return-assign */ - .then(res => versionId = res.VersionId) - .catch(err => { - process.stdout.write('Error in before\n'); - throw err; - }); + return ( + s3 + .createBucket({ + Bucket: bucket, + ObjectLockEnabledForBucket: true, + }) + .promise() + .then(() => s3.putObject(params).promise()) + .then(() => s3.getObject({ Bucket: 
bucket, Key: key }).promise()) + /* eslint-disable no-return-assign */ + .then(res => (versionId = res.VersionId)) + .catch(err => { + process.stdout.write('Error in before\n'); + throw err; + }) + ); }); - afterEach(() => changeLockPromise([{ bucket, key, versionId }], '') - .then(() => s3.listObjectVersions({ Bucket: bucket }).promise()) - .then(res => res.Versions.forEach(object => { - const params = [ - { - bucket, - key: object.Key, - versionId: object.VersionId, - }, - ]; - changeLockPromise(params, ''); - })) - .then(() => { - process.stdout.write('Emptying and deleting buckets\n'); - return bucketUtil.empty(bucket); - }) - .then(() => s3.deleteBucket({ Bucket: bucket }).promise()) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - })); + afterEach(() => + changeLockPromise([{ bucket, key, versionId }], '') + .then(() => s3.listObjectVersions({ Bucket: bucket }).promise()) + .then(res => + res.Versions.forEach(object => { + const params = [ + { + bucket, + key: object.Key, + versionId: object.VersionId, + }, + ]; + changeLockPromise(params, ''); + }) + ) + .then(() => { + process.stdout.write('Emptying and deleting buckets\n'); + return bucketUtil.empty(bucket); + }) + .then(() => s3.deleteBucket({ Bucket: bucket }).promise()) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }) + ); it('should return object lock headers if set on the object', done => { s3.headObject({ Bucket: bucket, Key: key }, (err, res) => { assert.ifError(err); assert.strictEqual(res.ObjectLockMode, mockMode); - const responseDate - = formatDate(res.ObjectLockRetainUntilDate.toISOString()); + const responseDate = formatDate(res.ObjectLockRetainUntilDate.toISOString()); const expectedDate = formatDate(mockDate); assert.strictEqual(responseDate, expectedDate); assert.strictEqual(res.ObjectLockLegalHoldStatus, 'ON'); diff --git a/tests/functional/aws-node-sdk/test/object/objectHead_compatibleHeaders.js b/tests/functional/aws-node-sdk/test/object/objectHead_compatibleHeaders.js index 1b55458666..9f7a34f264 100644 --- a/tests/functional/aws-node-sdk/test/object/objectHead_compatibleHeaders.js +++ b/tests/functional/aws-node-sdk/test/object/objectHead_compatibleHeaders.js @@ -6,73 +6,68 @@ const BucketUtility = require('../../lib/utility/bucket-util'); const bucketName = 'objectheadtestheaders'; const objectName = 'someObject'; -describe('HEAD object, compatibility headers [Cache-Control, ' + - 'Content-Disposition, Content-Encoding, Expires]', () => { - withV4(sigCfg => { - let bucketUtil; - let s3; - const cacheControl = 'max-age=86400'; - const contentDisposition = 'attachment; filename="fname.ext";'; - const contentEncoding = 'gzip,aws-chunked'; - // AWS Node SDK requires Date object, ISO-8601 string, or - // a UNIX timestamp for Expires header - const expires = new Date(); +describe( + 'HEAD object, compatibility headers [Cache-Control, ' + 'Content-Disposition, Content-Encoding, Expires]', + () => { + withV4(sigCfg => { + let bucketUtil; + let s3; + const cacheControl = 'max-age=86400'; + const contentDisposition = 'attachment; filename="fname.ext";'; + const contentEncoding = 'gzip,aws-chunked'; + // AWS Node SDK requires Date object, ISO-8601 string, or + // a UNIX timestamp for Expires header + const expires = new Date(); - before(() => { - bucketUtil = new BucketUtility('default', sigCfg); - s3 = bucketUtil.s3; - return bucketUtil.empty(bucketName).then(() => - bucketUtil.deleteOne(bucketName) - ) - .catch(err => { - if (err.code !== 'NoSuchBucket') { 
- process.stdout.write(`${err}\n`); - throw err; - } - }) - .then(() => bucketUtil.createOne(bucketName)) - .then(() => { - const params = { - Bucket: bucketName, - Key: objectName, - CacheControl: cacheControl, - ContentDisposition: contentDisposition, - ContentEncoding: contentEncoding, - Expires: expires, - }; - return s3.putObject(params).promise(); - }) - .catch(err => { - process.stdout.write(`Error with putObject: ${err}\n`); - throw err; + before(() => { + bucketUtil = new BucketUtility('default', sigCfg); + s3 = bucketUtil.s3; + return bucketUtil + .empty(bucketName) + .then(() => bucketUtil.deleteOne(bucketName)) + .catch(err => { + if (err.code !== 'NoSuchBucket') { + process.stdout.write(`${err}\n`); + throw err; + } + }) + .then(() => bucketUtil.createOne(bucketName)) + .then(() => { + const params = { + Bucket: bucketName, + Key: objectName, + CacheControl: cacheControl, + ContentDisposition: contentDisposition, + ContentEncoding: contentEncoding, + Expires: expires, + }; + return s3.putObject(params).promise(); + }) + .catch(err => { + process.stdout.write(`Error with putObject: ${err}\n`); + throw err; + }); }); - }); - after(() => { - process.stdout.write('deleting bucket'); - return bucketUtil.empty(bucketName).then(() => - bucketUtil.deleteOne(bucketName)); - }); + after(() => { + process.stdout.write('deleting bucket'); + return bucketUtil.empty(bucketName).then(() => bucketUtil.deleteOne(bucketName)); + }); - it('should return additional headers if specified in objectPUT ' + - 'request', done => { - s3.headObject({ Bucket: bucketName, Key: objectName }, - (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.CacheControl, - cacheControl); - assert.strictEqual(res.ContentDisposition, - contentDisposition); - // Should remove V4 streaming value 'aws-chunked' - // to be compatible with AWS behavior - assert.strictEqual(res.ContentEncoding, - 'gzip,'); - assert.strictEqual(res.Expires.toGMTString(), - expires.toGMTString()); - return done(); - }); + it('should return additional headers if specified in objectPUT ' + 'request', done => { + s3.headObject({ Bucket: bucketName, Key: objectName }, (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.CacheControl, cacheControl); + assert.strictEqual(res.ContentDisposition, contentDisposition); + // Should remove V4 streaming value 'aws-chunked' + // to be compatible with AWS behavior + assert.strictEqual(res.ContentEncoding, 'gzip,'); + assert.strictEqual(res.Expires.toGMTString(), expires.toGMTString()); + return done(); + }); + }); }); - }); -}); + } +); diff --git a/tests/functional/aws-node-sdk/test/object/objectHead_replication.js b/tests/functional/aws-node-sdk/test/object/objectHead_replication.js index 71fc50fab0..74707e7618 100644 --- a/tests/functional/aws-node-sdk/test/object/objectHead_replication.js +++ b/tests/functional/aws-node-sdk/test/object/objectHead_replication.js @@ -3,8 +3,7 @@ const async = require('async'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); -const { removeAllVersions, versioningEnabled } = - require('../../lib/utility/versioning-util'); +const { removeAllVersions, versioningEnabled } = require('../../lib/utility/versioning-util'); const sourceBucket = 'source-bucket'; const keyPrefix = 'test-prefix'; @@ -16,55 +15,77 @@ describe("Head object 'ReplicationStatus' value", () => { function checkHeadObj(key, expectedStatus, cb) { const params = { Bucket: sourceBucket, Key: key }; - return 
async.series([ - next => s3.putObject(params, next), - next => s3.headObject(params, (err, res) => { - if (err) { - return next(err); - } - assert.strictEqual(res.ReplicationStatus, expectedStatus); - return next(); - }), - ], cb); + return async.series( + [ + next => s3.putObject(params, next), + next => + s3.headObject(params, (err, res) => { + if (err) { + return next(err); + } + assert.strictEqual(res.ReplicationStatus, expectedStatus); + return next(); + }), + ], + cb + ); } - beforeEach(done => async.series([ - next => s3.createBucket({ Bucket: sourceBucket }, next), - next => s3.putBucketVersioning({ - Bucket: sourceBucket, - VersioningConfiguration: versioningEnabled, - }, next), - ], done)); + beforeEach(done => + async.series( + [ + next => s3.createBucket({ Bucket: sourceBucket }, next), + next => + s3.putBucketVersioning( + { + Bucket: sourceBucket, + VersioningConfiguration: versioningEnabled, + }, + next + ), + ], + done + ) + ); - afterEach(done => async.series([ - next => removeAllVersions({ Bucket: sourceBucket }, next), - next => s3.deleteBucket({ Bucket: sourceBucket }, next), - ], done)); + afterEach(done => + async.series( + [ + next => removeAllVersions({ Bucket: sourceBucket }, next), + next => s3.deleteBucket({ Bucket: sourceBucket }, next), + ], + done + ) + ); - it('should be `undefined` when there is no bucket replication config', - done => checkHeadObj(`${keyPrefix}-foobar`, undefined, done)); + it('should be `undefined` when there is no bucket replication config', done => + checkHeadObj(`${keyPrefix}-foobar`, undefined, done)); describe('With bucket replication config', () => { - beforeEach(done => s3.putBucketReplication({ - Bucket: sourceBucket, - ReplicationConfiguration: { - Role: 'arn:aws:iam::123456789012:role/src-resource', - Rules: [ - { - Destination: { StorageClass: 'us-east-2', - Bucket: 'arn:aws:s3:::dest-bucket' }, - Prefix: keyPrefix, - Status: 'Enabled', + beforeEach(done => + s3.putBucketReplication( + { + Bucket: sourceBucket, + ReplicationConfiguration: { + Role: 'arn:aws:iam::123456789012:role/src-resource', + Rules: [ + { + Destination: { StorageClass: 'us-east-2', Bucket: 'arn:aws:s3:::dest-bucket' }, + Prefix: keyPrefix, + Status: 'Enabled', + }, + ], }, - ], - }, - }, done)); + }, + done + ) + ); - it("should be 'PENDING' when object key prefix applies", - done => checkHeadObj(`${keyPrefix}-foobar`, 'PENDING', done)); + it("should be 'PENDING' when object key prefix applies", done => + checkHeadObj(`${keyPrefix}-foobar`, 'PENDING', done)); - it('should be `undefined` when object key prefix does not apply', - done => checkHeadObj(`foobar-${keyPrefix}`, undefined, done)); + it('should be `undefined` when object key prefix does not apply', done => + checkHeadObj(`foobar-${keyPrefix}`, undefined, done)); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/objectOverwrite.js b/tests/functional/aws-node-sdk/test/object/objectOverwrite.js index f4ff972968..c64e024a69 100644 --- a/tests/functional/aws-node-sdk/test/object/objectOverwrite.js +++ b/tests/functional/aws-node-sdk/test/object/objectOverwrite.js @@ -14,7 +14,6 @@ const secondPutMetadata = { secondputagain: 'secondValue', }; - describe('Put object with same key as prior object', () => { withV4(sigCfg => { let bucketUtil; @@ -24,41 +23,47 @@ describe('Put object with same key as prior object', () => { before(done => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - bucketUtil.createRandom(1) - .then(created => { - bucketName = created; - done(); - 
}) - .catch(done); + bucketUtil + .createRandom(1) + .then(created => { + bucketName = created; + done(); + }) + .catch(done); }); - beforeEach(() => s3.putObject({ - Bucket: bucketName, - Key: objectName, - Body: 'I am the best content ever', - Metadata: firstPutMetadata, - }).promise().then(() => - s3.headObject({ Bucket: bucketName, Key: objectName }).promise() - ).then(res => { - assert.deepStrictEqual(res.Metadata, firstPutMetadata); - })); + beforeEach(() => + s3 + .putObject({ + Bucket: bucketName, + Key: objectName, + Body: 'I am the best content ever', + Metadata: firstPutMetadata, + }) + .promise() + .then(() => s3.headObject({ Bucket: bucketName, Key: objectName }).promise()) + .then(res => { + assert.deepStrictEqual(res.Metadata, firstPutMetadata); + }) + ); afterEach(() => bucketUtil.empty(bucketName)); after(() => bucketUtil.deleteOne(bucketName)); - it('should overwrite all user metadata and data on overwrite put', - () => s3.putObject({ - Bucket: bucketName, - Key: objectName, - Body: 'Much different', - Metadata: secondPutMetadata, - }).promise().then(() => - s3.getObject({ Bucket: bucketName, Key: objectName }).promise() - ).then(res => { - assert.deepStrictEqual(res.Metadata, secondPutMetadata); - assert.deepStrictEqual(res.Body.toString(), - 'Much different'); - })); + it('should overwrite all user metadata and data on overwrite put', () => + s3 + .putObject({ + Bucket: bucketName, + Key: objectName, + Body: 'Much different', + Metadata: secondPutMetadata, + }) + .promise() + .then(() => s3.getObject({ Bucket: bucketName, Key: objectName }).promise()) + .then(res => { + assert.deepStrictEqual(res.Metadata, secondPutMetadata); + assert.deepStrictEqual(res.Body.toString(), 'Much different'); + })); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/put.js b/tests/functional/aws-node-sdk/test/object/put.js index 9b1bae8942..7cda97aa90 100644 --- a/tests/functional/aws-node-sdk/test/object/put.js +++ b/tests/functional/aws-node-sdk/test/object/put.js @@ -3,10 +3,8 @@ const assert = require('assert'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const provideRawOutput = require('../../lib/utility/provideRawOutput'); -const { taggingTests, generateMultipleTagQuery } - = require('../../lib/utility/tagging'); -const genMaxSizeMetaHeaders - = require('../../lib/utility/genMaxSizeMetaHeaders'); +const { taggingTests, generateMultipleTagQuery } = require('../../lib/utility/tagging'); +const genMaxSizeMetaHeaders = require('../../lib/utility/genMaxSizeMetaHeaders'); const changeObjectLock = require('../../../../utilities/objectLock-util'); const bucket = 'bucket2putstuffin4324242'; @@ -26,232 +24,209 @@ describe('PUT object', () => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket'); + return 
bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); - it('should put an object and set the acl via query param', - done => { - const params = { Bucket: bucket, Key: 'key', - ACL: 'public-read', StorageClass: 'STANDARD' }; - const url = s3.getSignedUrl('putObject', params); - provideRawOutput(['-verbose', '-X', 'PUT', url, - '--upload-file', 'uploadFile'], httpCode => { - assert.strictEqual(httpCode, '200 OK'); - s3.getObjectAcl({ Bucket: bucket, Key: 'key' }, - (err, result) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - assert.deepStrictEqual(result.Grants[1], { Grantee: - { Type: 'Group', URI: - 'http://acs.amazonaws.com/groups/global/AllUsers', - }, Permission: 'READ' }); - done(); + it('should put an object and set the acl via query param', done => { + const params = { Bucket: bucket, Key: 'key', ACL: 'public-read', StorageClass: 'STANDARD' }; + const url = s3.getSignedUrl('putObject', params); + provideRawOutput(['-verbose', '-X', 'PUT', url, '--upload-file', 'uploadFile'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + s3.getObjectAcl({ Bucket: bucket, Key: 'key' }, (err, result) => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + assert.deepStrictEqual(result.Grants[1], { + Grantee: { Type: 'Group', URI: 'http://acs.amazonaws.com/groups/global/AllUsers' }, + Permission: 'READ', }); - }); - }); - - it('should put an object with key slash', - done => { - const params = { Bucket: bucket, Key: '/' }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); done(); }); }); - - it('should return error if putting object w/ > 2KB user-defined md', - done => { - const metadata = genMaxSizeMetaHeaders(); - const params = { Bucket: bucket, Key: '/', Metadata: metadata }; - s3.putObject(params, err => { - assert.strictEqual(err, null, `Unexpected err: ${err}`); - // add one more byte to be over the limit - metadata.header0 = `${metadata.header0}${'0'}`; - s3.putObject(params, err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'MetadataTooLarge'); - assert.strictEqual(err.statusCode, 400); - done(); - }); - }); - }); - - it('should return InvalidRequest error if putting object with ' + - 'object lock retention date and mode when object lock is not ' + - 'enabled on the bucket', done => { - const date = new Date(2050, 10, 10); - const params = { - Bucket: bucket, - Key: 'key', - ObjectLockRetainUntilDate: date, - ObjectLockMode: 'GOVERNANCE', - }; - s3.putObject(params, err => { - const expectedErrMessage - = 'Bucket is missing ObjectLockConfiguration'; - assert.strictEqual(err.code, 'InvalidRequest'); - assert.strictEqual(err.message, expectedErrMessage); - done(); - }); }); - it('should return Not Implemented error for obj. 
encryption using ' + - 'customer-provided encryption keys', done => { - const params = { Bucket: bucket, Key: 'key', - SSECustomerAlgorithm: 'AES256' }; + it('should put an object with key slash', done => { + const params = { Bucket: bucket, Key: '/' }; s3.putObject(params, err => { - assert.strictEqual(err.code, 'NotImplemented'); + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); done(); }); }); - it('should return InvalidRedirectLocation if putting object ' + - 'with x-amz-website-redirect-location header that does not start ' + - 'with \'http://\', \'https://\' or \'/\'', done => { - const params = { Bucket: bucket, Key: 'key', - WebsiteRedirectLocation: 'google.com' }; + it('should return error if putting object w/ > 2KB user-defined md', done => { + const metadata = genMaxSizeMetaHeaders(); + const params = { Bucket: bucket, Key: '/', Metadata: metadata }; s3.putObject(params, err => { - assert.strictEqual(err.code, 'InvalidRedirectLocation'); - assert.strictEqual(err.statusCode, 400); - done(); + assert.strictEqual(err, null, `Unexpected err: ${err}`); + // add one more byte to be over the limit + metadata.header0 = `${metadata.header0}${'0'}`; + s3.putObject(params, err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'MetadataTooLarge'); + assert.strictEqual(err.statusCode, 400); + done(); + }); }); }); + it( + 'should return InvalidRequest error if putting object with ' + + 'object lock retention date and mode when object lock is not ' + + 'enabled on the bucket', + done => { + const date = new Date(2050, 10, 10); + const params = { + Bucket: bucket, + Key: 'key', + ObjectLockRetainUntilDate: date, + ObjectLockMode: 'GOVERNANCE', + }; + s3.putObject(params, err => { + const expectedErrMessage = 'Bucket is missing ObjectLockConfiguration'; + assert.strictEqual(err.code, 'InvalidRequest'); + assert.strictEqual(err.message, expectedErrMessage); + done(); + }); + } + ); + + it( + 'should return Not Implemented error for obj. 
encryption using ' + 'customer-provided encryption keys', + done => { + const params = { Bucket: bucket, Key: 'key', SSECustomerAlgorithm: 'AES256' }; + s3.putObject(params, err => { + assert.strictEqual(err.code, 'NotImplemented'); + done(); + }); + } + ); + + it( + 'should return InvalidRedirectLocation if putting object ' + + 'with x-amz-website-redirect-location header that does not start ' + + "with 'http://', 'https://' or '/'", + done => { + const params = { Bucket: bucket, Key: 'key', WebsiteRedirectLocation: 'google.com' }; + s3.putObject(params, err => { + assert.strictEqual(err.code, 'InvalidRedirectLocation'); + assert.strictEqual(err.statusCode, 400); + done(); + }); + } + ); + describe('Put object with tag set', () => { taggingTests.forEach(taggingTest => { it(taggingTest.it, done => { const key = encodeURIComponent(taggingTest.tag.key); const value = encodeURIComponent(taggingTest.tag.value); const tagging = `${key}=${value}`; - const params = { Bucket: bucket, Key: object, - Tagging: tagging }; + const params = { Bucket: bucket, Key: object, Tagging: tagging }; s3.putObject(params, err => { if (taggingTest.error) { _checkError(err, taggingTest.error, 400); return done(); } - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - return s3.getObjectTagging({ Bucket: bucket, - Key: object }, (err, data) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + return s3.getObjectTagging({ Bucket: bucket, Key: object }, (err, data) => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); assert.deepStrictEqual(data.TagSet[0], { Key: taggingTest.tag.key, - Value: taggingTest.tag.value }); + Value: taggingTest.tag.value, + }); done(); }); }); }); }); - it('should be able to put object with 10 tags', - done => { + it('should be able to put object with 10 tags', done => { const taggingConfig = generateMultipleTagQuery(10); - s3.putObject({ Bucket: bucket, Key: object, - Tagging: taggingConfig }, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); + s3.putObject({ Bucket: bucket, Key: object, Tagging: taggingConfig }, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); done(); }); }); it('should be able to put an empty Tag set', done => { - s3.putObject({ Bucket: bucket, Key: object, - Tagging: '', - }, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); + s3.putObject({ Bucket: bucket, Key: object, Tagging: '' }, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); done(); }); }); - it('should be able to put object with empty tags', - done => { - s3.putObject({ Bucket: bucket, Key: object, - Tagging: '&&&&&&&&&&&&&&&&&key1=value1' }, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); + it('should be able to put object with empty tags', done => { + s3.putObject({ Bucket: bucket, Key: object, Tagging: '&&&&&&&&&&&&&&&&&key1=value1' }, err => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); done(); }); }); it('should allow putting 50 tags', done => { const taggingConfig = generateMultipleTagQuery(50); - s3.putObject({ Bucket: bucket, Key: object, - Tagging: taggingConfig }, done); + s3.putObject({ Bucket: bucket, Key: object, Tagging: 
taggingConfig }, done); }); - it('should return BadRequest if putting more that 50 tags', - done => { + it('should return BadRequest if putting more that 50 tags', done => { const taggingConfig = generateMultipleTagQuery(51); - s3.putObject({ Bucket: bucket, Key: object, - Tagging: taggingConfig }, err => { + s3.putObject({ Bucket: bucket, Key: object, Tagging: taggingConfig }, err => { _checkError(err, 'BadRequest', 400); done(); }); }); - it('should return InvalidArgument if using the same key twice', - done => { - s3.putObject({ Bucket: bucket, Key: object, - Tagging: 'key1=value1&key1=value2' }, err => { + it('should return InvalidArgument if using the same key twice', done => { + s3.putObject({ Bucket: bucket, Key: object, Tagging: 'key1=value1&key1=value2' }, err => { _checkError(err, 'InvalidArgument', 400); done(); }); }); - it('should return InvalidArgument if using the same key twice ' + - 'and empty tags', done => { - s3.putObject({ Bucket: bucket, Key: object, - Tagging: '&&&&&&&&&&&&&&&&&key1=value1&key1=value2' }, - err => { - _checkError(err, 'InvalidArgument', 400); - done(); - }); + it('should return InvalidArgument if using the same key twice ' + 'and empty tags', done => { + s3.putObject( + { Bucket: bucket, Key: object, Tagging: '&&&&&&&&&&&&&&&&&key1=value1&key1=value2' }, + err => { + _checkError(err, 'InvalidArgument', 400); + done(); + } + ); }); it('should return InvalidArgument if tag with no key', done => { - s3.putObject({ Bucket: bucket, Key: object, - Tagging: '=value1', - }, err => { + s3.putObject({ Bucket: bucket, Key: object, Tagging: '=value1' }, err => { _checkError(err, 'InvalidArgument', 400); done(); }); }); - it('should return InvalidArgument putting object with ' + - 'bad encoded tags', done => { - s3.putObject({ Bucket: bucket, Key: object, Tagging: - 'key1==value1' }, err => { + it('should return InvalidArgument putting object with ' + 'bad encoded tags', done => { + s3.putObject({ Bucket: bucket, Key: object, Tagging: 'key1==value1' }, err => { _checkError(err, 'InvalidArgument', 400); done(); }); }); - it('should return InvalidArgument putting object tag with ' + - 'invalid characters: %', done => { + it('should return InvalidArgument putting object tag with ' + 'invalid characters: %', done => { const value = 'value1%'; - s3.putObject({ Bucket: bucket, Key: object, Tagging: - `key1=${value}` }, err => { + s3.putObject({ Bucket: bucket, Key: object, Tagging: `key1=${value}` }, err => { _checkError(err, 'InvalidArgument', 400); done(); }); @@ -271,60 +246,67 @@ describeSkipIfCeph('PUT object with object lock', () => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ - Bucket: bucket, - ObjectLockEnabledForBucket: true, - }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ + Bucket: bucket, + ObjectLockEnabledForBucket: true, + }) + .promise() + .catch(err => { + process.stdout.write(`Error creating bucket: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + 
process.stdout.write('Error in afterEach'); + throw err; + }); }); - it('should put object with valid object lock retention date and ' + - 'mode when object lock is enabled on the bucket', done => { - const date = new Date(2050, 10, 10); - const params = { - Bucket: bucket, - Key: 'key1', - ObjectLockRetainUntilDate: date, - ObjectLockMode: 'COMPLIANCE', - }; - s3.putObject(params, (err, res) => { - assert.ifError(err); - changeObjectLock( - [{ bucket, key: 'key1', versionId: res.VersionId }], '', done); - }); - }); + it( + 'should put object with valid object lock retention date and ' + + 'mode when object lock is enabled on the bucket', + done => { + const date = new Date(2050, 10, 10); + const params = { + Bucket: bucket, + Key: 'key1', + ObjectLockRetainUntilDate: date, + ObjectLockMode: 'COMPLIANCE', + }; + s3.putObject(params, (err, res) => { + assert.ifError(err); + changeObjectLock([{ bucket, key: 'key1', versionId: res.VersionId }], '', done); + }); + } + ); - it('should put object with valid object lock retention date and ' + - 'mode when object lock is enabled on the bucket', done => { - const date = new Date(2050, 10, 10); - const params = { - Bucket: bucket, - Key: 'key2', - ObjectLockRetainUntilDate: date, - ObjectLockMode: 'GOVERNANCE', - }; - s3.putObject(params, (err, res) => { - assert.ifError(err); - changeObjectLock( - [{ bucket, key: 'key2', versionId: res.VersionId }], '', done); - }); - }); + it( + 'should put object with valid object lock retention date and ' + + 'mode when object lock is enabled on the bucket', + done => { + const date = new Date(2050, 10, 10); + const params = { + Bucket: bucket, + Key: 'key2', + ObjectLockRetainUntilDate: date, + ObjectLockMode: 'GOVERNANCE', + }; + s3.putObject(params, (err, res) => { + assert.ifError(err); + changeObjectLock([{ bucket, key: 'key2', versionId: res.VersionId }], '', done); + }); + } + ); it('should error with invalid object lock mode header', done => { const date = new Date(2050, 10, 10); @@ -349,8 +331,7 @@ describeSkipIfCeph('PUT object with object lock', () => { }; s3.putObject(params, (err, res) => { assert.ifError(err); - changeObjectLock( - [{ bucket, key: 'key4', versionId: res.VersionId }], '', done); + changeObjectLock([{ bucket, key: 'key4', versionId: res.VersionId }], '', done); }); }); @@ -374,59 +355,65 @@ describeSkipIfCeph('PUT object with object lock', () => { }; s3.putObject(params, err => { assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.message, - 'Legal hold status must be one of "ON", "OFF"'); + assert.strictEqual(err.message, 'Legal hold status must be one of "ON", "OFF"'); done(); }); }); - it('should return error when object lock retain until date header is ' + - 'provided but object lock mode header is missing', done => { - const date = new Date(2050, 10, 10); - const params = { - Bucket: bucket, - Key: 'key7', - ObjectLockRetainUntilDate: date, - }; - s3.putObject(params, err => { - const expectedErrMessage - = 'x-amz-object-lock-retain-until-date and ' + - 'x-amz-object-lock-mode must both be supplied'; - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.message, expectedErrMessage); - done(); - }); - }); + it( + 'should return error when object lock retain until date header is ' + + 'provided but object lock mode header is missing', + done => { + const date = new Date(2050, 10, 10); + const params = { + Bucket: bucket, + Key: 'key7', + ObjectLockRetainUntilDate: date, + }; + s3.putObject(params, err => { + const expectedErrMessage = + 
'x-amz-object-lock-retain-until-date and ' + 'x-amz-object-lock-mode must both be supplied'; + assert.strictEqual(err.code, 'InvalidArgument'); + assert.strictEqual(err.message, expectedErrMessage); + done(); + }); + } + ); - it('should return error when object lock mode header is provided ' + - 'but object lock retain until date header is missing', done => { - const params = { - Bucket: bucket, - Key: 'key8', - ObjectLockMode: 'GOVERNANCE', - }; - s3.putObject(params, err => { - const expectedErrMessage - = 'x-amz-object-lock-retain-until-date and ' + - 'x-amz-object-lock-mode must both be supplied'; - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.message, expectedErrMessage); - done(); - }); - }); + it( + 'should return error when object lock mode header is provided ' + + 'but object lock retain until date header is missing', + done => { + const params = { + Bucket: bucket, + Key: 'key8', + ObjectLockMode: 'GOVERNANCE', + }; + s3.putObject(params, err => { + const expectedErrMessage = + 'x-amz-object-lock-retain-until-date and ' + 'x-amz-object-lock-mode must both be supplied'; + assert.strictEqual(err.code, 'InvalidArgument'); + assert.strictEqual(err.message, expectedErrMessage); + done(); + }); + } + ); - it('should return InvalidStorageClass error when x-amz-storage-class header is provided ' + - 'and not equal to STANDARD', done => { - const params = { - Bucket: bucket, - Key: 'key8', - StorageClass: 'COLD', - }; - s3.putObject(params, err => { - assert.strictEqual(err.code, 'InvalidStorageClass'); - assert.strictEqual(err.statusCode, 400); - done(); - }); - }); + it( + 'should return InvalidStorageClass error when x-amz-storage-class header is provided ' + + 'and not equal to STANDARD', + done => { + const params = { + Bucket: bucket, + Key: 'key8', + StorageClass: 'COLD', + }; + s3.putObject(params, err => { + assert.strictEqual(err.code, 'InvalidStorageClass'); + assert.strictEqual(err.statusCode, 400); + done(); + }); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/putObjAcl.js b/tests/functional/aws-node-sdk/test/object/putObjAcl.js index 0899409130..85e74116b7 100644 --- a/tests/functional/aws-node-sdk/test/object/putObjAcl.js +++ b/tests/functional/aws-node-sdk/test/object/putObjAcl.js @@ -4,8 +4,7 @@ const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const constants = require('../../../../../constants'); -const notOwnerCanonicalID = '79a59df900b949e55d96a1e698fba' + - 'cedfd6e09d98eacf8f8d5218e7cd47ef2bf'; +const notOwnerCanonicalID = '79a59df900b949e55d96a1e698fba' + 'cedfd6e09d98eacf8f8d5218e7cd47ef2bf'; const itSkipIfAWS = process.env.AWS_ON_AIR ? 
it.skip : it; class _AccessControlPolicy { @@ -46,12 +45,13 @@ describe('PUT Object ACL', () => { const Key = 'aclTest'; before(done => { - bucketUtil.createRandom(1) - .then(created => { - bucketName = created; - done(); - }) - .catch(done); + bucketUtil + .createRandom(1) + .then(created => { + bucketName = created; + done(); + }) + .catch(done); }); afterEach(() => { @@ -67,30 +67,31 @@ describe('PUT Object ACL', () => { it('should put object ACLs', async () => { const s3 = bucketUtil.s3; const Bucket = bucketName; - const objects = [ - { Bucket, Key }, - ]; + const objects = [{ Bucket, Key }]; for (const param of objects) { await s3.putObject(param).promise(); } const data = await s3.putObjectAcl({ Bucket, Key, ACL: 'public-read' }).promise(); assert(data); - }); + }); - it('should return NoSuchKey if try to put object ACLs ' + - 'for nonexistent object', done => { + it('should return NoSuchKey if try to put object ACLs ' + 'for nonexistent object', done => { const s3 = bucketUtil.s3; const Bucket = bucketName; - s3.putObjectAcl({ - Bucket, - Key, - ACL: 'public-read' }, err => { - assert(err); - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NoSuchKey'); - done(); - }); + s3.putObjectAcl( + { + Bucket, + Key, + ACL: 'public-read', + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NoSuchKey'); + done(); + } + ); }); describe('on an object', () => { @@ -101,10 +102,8 @@ describe('PUT Object ACL', () => { }); // The supplied canonical ID is not associated with a real AWS // account, so AWS_ON_AIR will raise a 400 InvalidArgument - itSkipIfAWS('should return AccessDenied if try to change owner ' + - 'ID in ACL request body', done => { - const acp = new _AccessControlPolicy( - { ownerID: notOwnerCanonicalID }); + itSkipIfAWS('should return AccessDenied if try to change owner ' + 'ID in ACL request body', done => { + const acp = new _AccessControlPolicy({ ownerID: notOwnerCanonicalID }); acp.addGrantee('Group', constants.publicId, 'READ'); const putAclParams = { Bucket: bucketName, diff --git a/tests/functional/aws-node-sdk/test/object/putObjTagging.js b/tests/functional/aws-node-sdk/test/object/putObjTagging.js index 9912c9ff2c..16c1a3bd2a 100644 --- a/tests/functional/aws-node-sdk/test/object/putObjTagging.js +++ b/tests/functional/aws-node-sdk/test/object/putObjTagging.js @@ -9,11 +9,14 @@ const bucketName = 'testtaggingbucket'; const objectName = 'testtaggingobject'; const objectNameAcl = 'testtaggingobjectacl'; -const taggingConfig = { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }] }; +const taggingConfig = { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + ], +}; function generateMultipleTagConfig(number) { const tags = []; @@ -48,32 +51,33 @@ describe('PUT object taggings', () => { const otherAccountBucketUtility = new BucketUtility('lisa', {}); const otherAccountS3 = otherAccountBucketUtility.s3; - beforeEach(done => s3.createBucket({ Bucket: bucketName }, err => { - if (err) { - return done(err); - } - return s3.putObject({ Bucket: bucketName, Key: objectName }, done); - })); + beforeEach(done => + s3.createBucket({ Bucket: bucketName }, err => { + if (err) { + return done(err); + } + return s3.putObject({ Bucket: bucketName, Key: objectName }, done); + }) + ); afterEach(() => { process.stdout.write('Emptying bucket'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - 
process.stdout.write('Error in afterEach'); - throw err; - }); + return bucketUtil + .empty(bucketName) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteOne(bucketName); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); taggingTests.forEach(taggingTest => { it(taggingTest.it, done => { - const taggingConfig = generateTaggingConfig(taggingTest.tag.key, - taggingTest.tag.value); - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: taggingConfig }, (err, data) => { + const taggingConfig = generateTaggingConfig(taggingTest.tag.key, taggingTest.tag.value); + s3.putObjectTagging({ Bucket: bucketName, Key: objectName, Tagging: taggingConfig }, (err, data) => { if (taggingTest.error) { _checkError(err, taggingTest.error, 400); } else { @@ -87,55 +91,65 @@ describe('PUT object taggings', () => { it('should allow putting 50 tags', done => { const taggingConfig = generateMultipleTagConfig(50); - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: taggingConfig }, done); + s3.putObjectTagging({ Bucket: bucketName, Key: objectName, Tagging: taggingConfig }, done); }); it('should return BadRequest if putting more that 50 tags', done => { const taggingConfig = generateMultipleTagConfig(51); - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: taggingConfig }, err => { + s3.putObjectTagging({ Bucket: bucketName, Key: objectName, Tagging: taggingConfig }, err => { _checkError(err, 'BadRequest', 400); done(); }); }); it('should return InvalidTag if using the same key twice', done => { - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: { TagSet: [ - { - Key: 'key1', - Value: 'value1', + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + Tagging: { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + { + Key: 'key1', + Value: 'value2', + }, + ], }, - { - Key: 'key1', - Value: 'value2', - }, - ] }, - }, err => { - _checkError(err, 'InvalidTag', 400); - done(); - }); + }, + err => { + _checkError(err, 'InvalidTag', 400); + done(); + } + ); }); it('should return InvalidTag if key is an empty string', done => { - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: { TagSet: [ - { - Key: '', - Value: 'value1', + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + Tagging: { + TagSet: [ + { + Key: '', + Value: 'value1', + }, + ], }, - ] }, - }, err => { - _checkError(err, 'InvalidTag', 400); - done(); - }); + }, + err => { + _checkError(err, 'InvalidTag', 400); + done(); + } + ); }); it('should be able to put an empty Tag set', done => { - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: { TagSet: [] }, - }, (err, data) => { + s3.putObjectTagging({ Bucket: bucketName, Key: objectName, Tagging: { TagSet: [] } }, (err, data) => { assert.ifError(err, `Found unexpected err ${err}`); assert.strictEqual(Object.keys(data).length, 0); done(); @@ -143,71 +157,78 @@ describe('PUT object taggings', () => { }); it('should return NoSuchKey put tag to a non-existing object', done => { - s3.putObjectTagging({ - Bucket: bucketName, - Key: 'nonexisting', - Tagging: taggingConfig, - }, err => { - _checkError(err, 'NoSuchKey', 404); - done(); - }); - }); - - it('should return 403 AccessDenied putting tag with another account', - done => { - otherAccountS3.putObjectTagging({ Bucket: bucketName, Key: - objectName, Tagging: taggingConfig, - }, err => { - _checkError(err, 'AccessDenied', 403); - done(); - }); - 
}); - - it('should return 403 AccessDenied putting tag with a different ' + - 'account to an object with ACL "public-read-write"', - done => { - s3.putObjectAcl({ Bucket: bucketName, Key: objectName, - ACL: 'public-read-write' }, err => { - if (err) { - return done(err); - } - return otherAccountS3.putObjectTagging({ Bucket: bucketName, - Key: objectName, Tagging: taggingConfig, - }, err => { - _checkError(err, 'AccessDenied', 403); + s3.putObjectTagging( + { + Bucket: bucketName, + Key: 'nonexisting', + Tagging: taggingConfig, + }, + err => { + _checkError(err, 'NoSuchKey', 404); done(); - }); - }); + } + ); }); - it('should return 403 AccessDenied putting tag to an object ' + - 'in a bucket created with a different account', - done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => otherAccountS3.putObjectTagging({ Bucket: bucketName, - Key: objectNameAcl, Tagging: taggingConfig, - }, err => next(err)), - ], err => { + it('should return 403 AccessDenied putting tag with another account', done => { + otherAccountS3.putObjectTagging({ Bucket: bucketName, Key: objectName, Tagging: taggingConfig }, err => { _checkError(err, 'AccessDenied', 403); done(); }); }); - it('should put tag to an object in a bucket created with same ' + - 'account', done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => s3.putObjectTagging({ Bucket: bucketName, - Key: objectNameAcl, Tagging: taggingConfig, - }, err => next(err)), - ], done); + it( + 'should return 403 AccessDenied putting tag with a different ' + + 'account to an object with ACL "public-read-write"', + done => { + s3.putObjectAcl({ Bucket: bucketName, Key: objectName, ACL: 'public-read-write' }, err => { + if (err) { + return done(err); + } + return otherAccountS3.putObjectTagging( + { Bucket: bucketName, Key: objectName, Tagging: taggingConfig }, + err => { + _checkError(err, 'AccessDenied', 403); + done(); + } + ); + }); + } + ); + + it( + 'should return 403 AccessDenied putting tag to an object ' + 'in a bucket created with a different account', + done => { + async.waterfall( + [ + next => s3.putBucketAcl({ Bucket: bucketName, ACL: 'public-read-write' }, err => next(err)), + next => otherAccountS3.putObject({ Bucket: bucketName, Key: objectNameAcl }, err => next(err)), + next => + otherAccountS3.putObjectTagging( + { Bucket: bucketName, Key: objectNameAcl, Tagging: taggingConfig }, + err => next(err) + ), + ], + err => { + _checkError(err, 'AccessDenied', 403); + done(); + } + ); + } + ); + + it('should put tag to an object in a bucket created with same ' + 'account', done => { + async.waterfall( + [ + next => s3.putBucketAcl({ Bucket: bucketName, ACL: 'public-read-write' }, err => next(err)), + next => otherAccountS3.putObject({ Bucket: bucketName, Key: objectNameAcl }, err => next(err)), + next => + s3.putObjectTagging({ Bucket: bucketName, Key: objectNameAcl, Tagging: taggingConfig }, err => + next(err) + ), + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/putObjectLegalHold.js b/tests/functional/aws-node-sdk/test/object/putObjectLegalHold.js index f8492a7609..911df94210 100644 --- a/tests/functional/aws-node-sdk/test/object/putObjectLegalHold.js +++ 
b/tests/functional/aws-node-sdk/test/object/putObjectLegalHold.js @@ -53,35 +53,37 @@ describeSkipIfCeph('PUT object legal hold', () => { beforeEach(() => { process.stdout.write('Putting buckets and objects\n'); - return s3.createBucket({ - Bucket: bucket, - ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise()) - .then(() => s3.putObject({ Bucket: unlockedBucket, Key: key }).promise()) - .then(() => s3.putObject({ Bucket: bucket, Key: key }).promise()) - .then(res => { - versionId = res.VersionId; - }) - .catch(err => { - process.stdout.write('Error in beforeEach\n'); - throw err; - }); + return s3 + .createBucket({ + Bucket: bucket, + ObjectLockEnabledForBucket: true, + }) + .promise() + .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise()) + .then(() => s3.putObject({ Bucket: unlockedBucket, Key: key }).promise()) + .then(() => s3.putObject({ Bucket: bucket, Key: key }).promise()) + .then(res => { + versionId = res.VersionId; + }) + .catch(err => { + process.stdout.write('Error in beforeEach\n'); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying and deleting buckets\n'); - return bucketUtil.empty(bucket) - .then(() => bucketUtil.empty(unlockedBucket)) - .then(() => bucketUtil.deleteMany([bucket, unlockedBucket])) - .catch(err => { - process.stdout.write('Error in afterEach\n'); - throw err; - }); + return bucketUtil + .empty(bucket) + .then(() => bucketUtil.empty(unlockedBucket)) + .then(() => bucketUtil.deleteMany([bucket, unlockedBucket])) + .catch(err => { + process.stdout.write('Error in afterEach\n'); + throw err; + }); }); - it('should return AccessDenied putting legal hold with another account', - done => { + it('should return AccessDenied putting legal hold with another account', done => { const params = createLegalHoldParams(bucket, key, 'ON'); otherAccountS3.putObjectLegalHold(params, err => { checkError(err, 'AccessDenied', 403); @@ -98,28 +100,33 @@ describeSkipIfCeph('PUT object legal hold', () => { }); it('should return NoSuchVersion error if version does not exist', done => { - s3.putObjectLegalHold({ - Bucket: bucket, - Key: key, - VersionId: '012345678901234567890123456789012', - LegalHold: mockLegalHold.on, - }, err => { - checkError(err, 'NoSuchVersion', 404); - done(); - }); + s3.putObjectLegalHold( + { + Bucket: bucket, + Key: key, + VersionId: '012345678901234567890123456789012', + LegalHold: mockLegalHold.on, + }, + err => { + checkError(err, 'NoSuchVersion', 404); + done(); + } + ); }); - it('should return InvalidRequest error putting legal hold to object ' + - 'in bucket with no object lock enabled', done => { - const params = createLegalHoldParams(unlockedBucket, key, 'ON'); - s3.putObjectLegalHold(params, err => { - checkError(err, 'InvalidRequest', 400); - done(); - }); - }); + it( + 'should return InvalidRequest error putting legal hold to object ' + + 'in bucket with no object lock enabled', + done => { + const params = createLegalHoldParams(unlockedBucket, key, 'ON'); + s3.putObjectLegalHold(params, err => { + checkError(err, 'InvalidRequest', 400); + done(); + }); + } + ); - it('should return MethodNotAllowed if object version is delete marker', - done => { + it('should return MethodNotAllowed if object version is delete marker', done => { s3.deleteObject({ Bucket: bucket, Key: key }, err => { assert.ifError(err); const params = createLegalHoldParams(bucket, key, 'ON'); @@ -155,14 +162,17 @@ describeSkipIfCeph('PUT object legal hold', () => { }); it('should 
return error if request does not contain Status', done => { - s3.putObjectLegalHold({ - Bucket: bucket, - Key: key, - LegalHold: {}, - }, err => { - checkError(err, 'MalformedXML', 400); - changeObjectLock([{ bucket, key, versionId }], '', done); - }); + s3.putObjectLegalHold( + { + Bucket: bucket, + Key: key, + LegalHold: {}, + }, + err => { + checkError(err, 'MalformedXML', 400); + changeObjectLock([{ bucket, key, versionId }], '', done); + } + ); }); it('expects params.LegalHold.Status to be a string', done => { diff --git a/tests/functional/aws-node-sdk/test/object/putPart.js b/tests/functional/aws-node-sdk/test/object/putPart.js index c847e9e194..498a27ac32 100644 --- a/tests/functional/aws-node-sdk/test/object/putPart.js +++ b/tests/functional/aws-node-sdk/test/object/putPart.js @@ -15,48 +15,66 @@ describe('PUT object', () => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => s3.createMultipartUpload({ - Bucket: bucket, Key: key }).promise()) - .then(res => { - uploadId = res.UploadId; - return uploadId; - }) - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }); + return s3 + .createBucket({ Bucket: bucket }) + .promise() + .then(() => + s3 + .createMultipartUpload({ + Bucket: bucket, + Key: key, + }) + .promise() + ) + .then(res => { + uploadId = res.UploadId; + return uploadId; + }) + .catch(err => { + process.stdout.write(`Error in beforeEach: ${err}\n`); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying bucket'); - return s3.abortMultipartUpload({ - Bucket: bucket, Key: key, UploadId: uploadId, - }).promise() - .then(() => bucketUtil.empty(bucket)) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return s3 + .abortMultipartUpload({ + Bucket: bucket, + Key: key, + UploadId: uploadId, + }) + .promise() + .then(() => bucketUtil.empty(bucket)) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteOne(bucket); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); - it('should return Not Implemented error for obj. encryption using ' + - 'customer-provided encryption keys', done => { - const params = { Bucket: bucket, Key: 'key', PartNumber: 0, - UploadId: uploadId, SSECustomerAlgorithm: 'AES256' }; - s3.uploadPart(params, err => { - assert.strictEqual(err.code, 'NotImplemented'); - done(); - }); - }); + it( + 'should return Not Implemented error for obj. 
encryption using ' + 'customer-provided encryption keys', + done => { + const params = { + Bucket: bucket, + Key: 'key', + PartNumber: 0, + UploadId: uploadId, + SSECustomerAlgorithm: 'AES256', + }; + s3.uploadPart(params, err => { + assert.strictEqual(err.code, 'NotImplemented'); + done(); + }); + } + ); it('should return InvalidArgument if negative PartNumber', done => { - const params = { Bucket: bucket, Key: 'key', PartNumber: -1, - UploadId: uploadId }; + const params = { Bucket: bucket, Key: 'key', PartNumber: -1, UploadId: uploadId }; s3.uploadPart(params, err => { assert.strictEqual(err.code, 'InvalidArgument'); done(); diff --git a/tests/functional/aws-node-sdk/test/object/putRetention.js b/tests/functional/aws-node-sdk/test/object/putRetention.js index 909234620f..14e7200e0f 100644 --- a/tests/functional/aws-node-sdk/test/object/putRetention.js +++ b/tests/functional/aws-node-sdk/test/object/putRetention.js @@ -28,119 +28,140 @@ describeSkipIfCeph('PUT object retention', () => { beforeEach(() => { process.stdout.write('Putting buckets and objects\n'); - return s3.createBucket({ - Bucket: bucketName, - ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise()) - .then(() => s3.putObject({ Bucket: unlockedBucket, Key: objectName }).promise()) - .then(() => s3.putObject({ Bucket: bucketName, Key: objectName }).promise()) - .then(res => { - versionId = res.VersionId; - }) - .catch(err => { - process.stdout.write('Error in beforeEach\n'); - throw err; - }); + return s3 + .createBucket({ + Bucket: bucketName, + ObjectLockEnabledForBucket: true, + }) + .promise() + .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise()) + .then(() => s3.putObject({ Bucket: unlockedBucket, Key: objectName }).promise()) + .then(() => s3.putObject({ Bucket: bucketName, Key: objectName }).promise()) + .then(res => { + versionId = res.VersionId; + }) + .catch(err => { + process.stdout.write('Error in beforeEach\n'); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying and deleting buckets\n'); - return bucketUtil.empty(bucketName) - .then(() => bucketUtil.empty(unlockedBucket)) - .then(() => bucketUtil.deleteMany([bucketName, unlockedBucket])) - .catch(err => { - process.stdout.write('Error in afterEach\n'); - throw err; - }); + return bucketUtil + .empty(bucketName) + .then(() => bucketUtil.empty(unlockedBucket)) + .then(() => bucketUtil.deleteMany([bucketName, unlockedBucket])) + .catch(err => { + process.stdout.write('Error in afterEach\n'); + throw err; + }); }); - it('should return AccessDenied putting retention with another account', - done => { - otherAccountS3.putObjectRetention({ - Bucket: bucketName, - Key: objectName, - Retention: retentionConfig, - }, err => { - checkError(err, 'AccessDenied', 403); - done(); - }); + it('should return AccessDenied putting retention with another account', done => { + otherAccountS3.putObjectRetention( + { + Bucket: bucketName, + Key: objectName, + Retention: retentionConfig, + }, + err => { + checkError(err, 'AccessDenied', 403); + done(); + } + ); }); it('should return NoSuchKey error if key does not exist', done => { - s3.putObjectRetention({ - Bucket: bucketName, - Key: 'thiskeydoesnotexist', - Retention: retentionConfig, - }, err => { - checkError(err, 'NoSuchKey', 404); - done(); - }); + s3.putObjectRetention( + { + Bucket: bucketName, + Key: 'thiskeydoesnotexist', + Retention: retentionConfig, + }, + err => { + checkError(err, 'NoSuchKey', 404); + done(); + } + ); }); 
it('should return NoSuchVersion error if version does not exist', done => { - s3.putObjectRetention({ - Bucket: bucketName, - Key: objectName, - VersionId: '012345678901234567890123456789012', - Retention: retentionConfig, - }, err => { - checkError(err, 'NoSuchVersion', 404); - done(); - }); + s3.putObjectRetention( + { + Bucket: bucketName, + Key: objectName, + VersionId: '012345678901234567890123456789012', + Retention: retentionConfig, + }, + err => { + checkError(err, 'NoSuchVersion', 404); + done(); + } + ); }); - it('should return InvalidRequest error putting retention to object ' + - 'in bucket with no object lock enabled', done => { - s3.putObjectRetention({ - Bucket: unlockedBucket, - Key: objectName, - Retention: retentionConfig, - }, err => { - checkError(err, 'InvalidRequest', 400); - done(); - }); - }); + it( + 'should return InvalidRequest error putting retention to object ' + 'in bucket with no object lock enabled', + done => { + s3.putObjectRetention( + { + Bucket: unlockedBucket, + Key: objectName, + Retention: retentionConfig, + }, + err => { + checkError(err, 'InvalidRequest', 400); + done(); + } + ); + } + ); - it('should return MethodNotAllowed if object version is delete marker', - done => { + it('should return MethodNotAllowed if object version is delete marker', done => { s3.deleteObject({ Bucket: bucketName, Key: objectName }, err => { assert.ifError(err); - s3.putObjectRetention({ - Bucket: bucketName, - Key: objectName, - Retention: retentionConfig, - }, err => { - checkError(err, 'MethodNotAllowed', 405); - done(); - }); + s3.putObjectRetention( + { + Bucket: bucketName, + Key: objectName, + Retention: retentionConfig, + }, + err => { + checkError(err, 'MethodNotAllowed', 405); + done(); + } + ); }); }); it('should put object retention', done => { - s3.putObjectRetention({ - Bucket: bucketName, - Key: objectName, - Retention: retentionConfig, - }, err => { - assert.ifError(err); - changeObjectLock([ - { bucket: bucketName, key: objectName, versionId }], '', done); - }); + s3.putObjectRetention( + { + Bucket: bucketName, + Key: objectName, + Retention: retentionConfig, + }, + err => { + assert.ifError(err); + changeObjectLock([{ bucket: bucketName, key: objectName, versionId }], '', done); + } + ); }); it('should support request with versionId parameter', done => { - s3.putObjectRetention({ - Bucket: bucketName, - Key: objectName, - Retention: retentionConfig, - VersionId: versionId, - }, err => { - assert.ifError(err); - changeObjectLock([ - { bucket: bucketName, key: objectName, versionId }, - ], '', done); - }); + s3.putObjectRetention( + { + Bucket: bucketName, + Key: objectName, + Retention: retentionConfig, + VersionId: versionId, + }, + err => { + assert.ifError(err); + changeObjectLock([{ bucket: bucketName, key: objectName, versionId }], '', done); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/putVersion.js b/tests/functional/aws-node-sdk/test/object/putVersion.js index f6c74009c7..e2600efe40 100644 --- a/tests/functional/aws-node-sdk/test/object/putVersion.js +++ b/tests/functional/aws-node-sdk/test/object/putVersion.js @@ -55,24 +55,28 @@ describe('PUT object with x-scal-s3-version-id header', () => { beforeEach(done => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - async.series([ - next => metadata.setup(next), - next => s3.createBucket({ Bucket: bucketName }, next), - next => s3.createBucket({ Bucket: bucketNameMD, ObjectLockEnabledForBucket: true, }, next), - ], done); + async.series( + [ + 
next => metadata.setup(next), + next => s3.createBucket({ Bucket: bucketName }, next), + next => s3.createBucket({ Bucket: bucketNameMD, ObjectLockEnabledForBucket: true }, next), + ], + done + ); }); afterEach(() => { process.stdout.write('Emptying bucket'); - return bucketUtil.emptyMany([bucketName, bucketNameMD]) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteMany([bucketName, bucketNameMD]); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return bucketUtil + .emptyMany([bucketName, bucketNameMD]) + .then(() => { + process.stdout.write('Deleting bucket'); + return bucketUtil.deleteMany([bucketName, bucketNameMD]); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); it('should overwrite an object', done => { @@ -82,37 +86,51 @@ describe('PUT object with x-scal-s3-version-id header', () => { let versionsBefore; let versionsAfter; - async.series([ - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => putObjectVersion(s3, params, '', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', - 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName', 'originOp']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => putObjectVersion(s3, params, '', next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + 'originOp', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite a version', done => { @@ -120,7 +138,7 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { 
Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -129,41 +147,56 @@ describe('PUT object with x-scal-s3-version-id header', () => { let versionsAfter; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => putObjectVersion(s3, params, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', - 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => putObjectVersion(s3, params, vId, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite the current version if empty version id header', done => { @@ -171,7 +204,7 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -180,83 +213,106 @@ describe('PUT object with x-scal-s3-version-id header', () => { let versionsAfter; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => 
getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => putObjectVersion(s3, params, '', next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', - 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => putObjectVersion(s3, params, '', next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); + }); it('should fail if version id is invalid', done => { const vParams = { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => putObjectVersion(s3, params, 'aJLWKz4Ko9IjBBgXKj5KQT.G9UHv0g7P', err => { - checkError(err, 'InvalidArgument', 400); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + putObjectVersion(s3, params, 'aJLWKz4Ko9IjBBgXKj5KQT.G9UHv0g7P', err => { + checkError(err, 'InvalidArgument', 400); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + return done(); + } + ); }); it('should fail if key does not exist', done => { const params = { Bucket: bucketName, Key: objectName }; - async.series([ - next => 
putObjectVersion(s3, params, '', err => { - checkError(err, 'NoSuchKey', 404); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => + putObjectVersion(s3, params, '', err => { + checkError(err, 'NoSuchKey', 404); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + return done(); + } + ); }); it('should fail if version does not exist', done => { @@ -264,22 +320,30 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => putObjectVersion(s3, params, - '393833343735313131383832343239393939393952473030312020313031', err => { - checkError(err, 'NoSuchVersion', 404); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + putObjectVersion( + s3, + params, + '393833343735313131383832343239393939393952473030312020313031', + err => { + checkError(err, 'NoSuchVersion', 404); + return next(); + } + ), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + return done(); + } + ); }); it('should overwrite a non-current null version', done => { @@ -287,7 +351,7 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let versionsBefore; @@ -295,39 +359,53 @@ describe('PUT object with x-scal-s3-version-id header', () => { let objMDBefore; let objMDAfter; - async.series([ - next => s3.putObject(params, next), - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, 'null', archive, next), - next => getMetadata(bucketName, objectName, 'null', (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - next(err); - }), - next => putObjectVersion(s3, params, 'null', next), - next => getMetadata(bucketName, objectName, 'null', (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', - 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, 'null', archive, next), + next => + getMetadata(bucketName, objectName, 'null', (err, objMD) => { + 
objMDBefore = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + next(err); + }), + next => putObjectVersion(s3, params, 'null', next), + next => + getMetadata(bucketName, objectName, 'null', (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite the lastest version and keep nullVersionId', done => { @@ -335,7 +413,7 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let versionsBefore; @@ -344,42 +422,57 @@ describe('PUT object with x-scal-s3-version-id header', () => { let objMDAfter; let vId; - async.series([ - next => s3.putObject(params, next), - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => putObjectVersion(s3, params, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', - 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => s3.putBucketVersioning(vParams, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => putObjectVersion(s3, params, vId, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + 
err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite a current null version', done => { @@ -387,13 +480,13 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const sParams = { Bucket: bucketName, VersioningConfiguration: { Status: 'Suspended', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -401,40 +494,54 @@ describe('PUT object with x-scal-s3-version-id header', () => { let versionsBefore; let versionsAfter; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putBucketVersioning(sParams, next), - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => putObjectVersion(s3, params, '', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', - 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => s3.putBucketVersioning(sParams, next), + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => putObjectVersion(s3, params, '', next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + 
assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite a non-current version', done => { @@ -442,7 +549,7 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -451,43 +558,58 @@ describe('PUT object with x-scal-s3-version-id header', () => { let versionsAfter; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => s3.putObject(params, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => putObjectVersion(s3, params, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', - 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => s3.putObject(params, next), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => putObjectVersion(s3, params, vId, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite the current version', done => { @@ -495,7 +617,7 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -504,42 +626,57 @@ describe('PUT object with x-scal-s3-version-id header', () => 
{ let versionsAfter; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => putObjectVersion(s3, params, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', - 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => putObjectVersion(s3, params, vId, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite the current version after bucket version suspended', done => { @@ -547,13 +684,13 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const sParams = { Bucket: bucketName, VersioningConfiguration: { Status: 'Suspended', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -562,43 +699,58 @@ describe('PUT object with x-scal-s3-version-id header', () => { let versionsAfter; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - 
versionsBefore = res.Versions; - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => s3.putBucketVersioning(sParams, next), - next => putObjectVersion(s3, params, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', - 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + s3.putObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => s3.putBucketVersioning(sParams, next), + next => putObjectVersion(s3, params, vId, next), + next => + getMetadata(bucketName, objectName, vId, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should overwrite the current null version after bucket version enabled', done => { @@ -606,7 +758,7 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; @@ -614,53 +766,71 @@ describe('PUT object with x-scal-s3-version-id header', () => { let versionsBefore; let versionsAfter; - async.series([ - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = res.Versions; - return next(err); - }), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => s3.putBucketVersioning(vParams, next), - next => putObjectVersion(s3, params, 'null', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = res.Versions; - 
return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); - - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', - 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsBefore = res.Versions; + return next(err); + }), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => s3.putBucketVersioning(vParams, next), + next => putObjectVersion(s3, params, 'null', next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + metadata.listObject(bucketName, mdListingParams, log, (err, res) => { + versionsAfter = res.Versions; + return next(err); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, [ + 'location', + 'content-length', + 'originOp', + 'microVersionId', + 'x-amz-restore', + 'archive', + 'dataStoreName', + ]); + assert.deepStrictEqual(objMDAfter, objMDBefore); + return done(); + } + ); }); it('should fail if archiving is not in progress', done => { const params = { Bucket: bucketName, Key: objectName }; - async.series([ - next => s3.putObject(params, next), - next => putObjectVersion(s3, params, '', err => { - checkError(err, 'InvalidObjectState', 403); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => + putObjectVersion(s3, params, '', err => { + checkError(err, 'InvalidObjectState', 403); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + return done(); + } + ); }); it('should fail if trying to overwrite a delete marker', done => { @@ -669,25 +839,30 @@ describe('PUT object with x-scal-s3-version-id header', () => { Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', - } + }, }; let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.deleteObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => putObjectVersion(s3, params, vId, err => { - checkError(err, 'MethodNotAllowed', 405); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => s3.putBucketVersioning(vParams, next), + next => s3.putObject(params, next), + next => + s3.deleteObject(params, (err, res) => { + vId = res.VersionId; + return next(err); + }), + next => + putObjectVersion(s3, params, vId, err => { + checkError(err, 'MethodNotAllowed', 405); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error 
${JSON.stringify(err)}`); + return done(); + } + ); }); it('should fail if restore is already completed', done => { @@ -697,110 +872,128 @@ describe('PUT object with x-scal-s3-version-id header', () => { restoreRequestedAt: new Date(0), restoreRequestedDays: 5, restoreCompletedAt: new Date(10), - restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)), + restoreWillExpireAt: new Date(10 + 5 * 24 * 60 * 60 * 1000), }; - async.series([ - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archiveCompleted, next), - next => putObjectVersion(s3, params, '', err => { - checkError(err, 'InvalidObjectState', 403); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - return done(); - }); + async.series( + [ + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archiveCompleted, next), + next => + putObjectVersion(s3, params, '', err => { + checkError(err, 'InvalidObjectState', 403); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + return done(); + } + ); }); - [ - 'non versioned', - 'versioned', - 'suspended' - ].forEach(versioning => { + ['non versioned', 'versioned', 'suspended'].forEach(versioning => { it(`should update restore metadata while keeping storage class (${versioning})`, done => { const params = { Bucket: bucketName, Key: objectName }; let objMDBefore; let objMDAfter; - async.series([ - next => { - if (versioning === 'versioned') { - return s3.putBucketVersioning({ - Bucket: bucketName, - VersioningConfiguration: { Status: 'Enabled' } - }, next); - } else if (versioning === 'suspended') { - return s3.putBucketVersioning({ - Bucket: bucketName, - VersioningConfiguration: { Status: 'Suspended' } - }, next); - } - return next(); - }, - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, next), - next => putObjectVersion(s3, params, '', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => s3.listObjects({ Bucket: bucketName }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.Contents.length, 1); - assert.strictEqual(res.Contents[0].StorageClass, 'location-dmf-v1'); - return next(); - }), - next => s3.headObject(params, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.StorageClass, 'location-dmf-v1'); - return next(); - }), - next => s3.getObject(params, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.StorageClass, 'location-dmf-v1'); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - // storage class must stay as the cold location - assert.deepStrictEqual(objMDAfter['x-amz-storage-class'], 'location-dmf-v1'); - - /// Make sure object data location is set back to its bucket data location. 
- assert.deepStrictEqual(objMDAfter.dataStoreName, 'us-east-1'); - - assert.deepStrictEqual(objMDAfter.archive.archiveInfo, objMDBefore.archive.archiveInfo); - assert.deepStrictEqual(objMDAfter.archive.restoreRequestedAt, - objMDBefore.archive.restoreRequestedAt); - assert.deepStrictEqual(objMDAfter.archive.restoreRequestedDays, - objMDBefore.archive.restoreRequestedDays); - assert.deepStrictEqual(objMDAfter['x-amz-restore']['ongoing-request'], false); - - assert(objMDAfter.archive.restoreCompletedAt); - assert(objMDAfter.archive.restoreWillExpireAt); - assert(objMDAfter['x-amz-restore']['expiry-date']); - return done(); - }); + async.series( + [ + next => { + if (versioning === 'versioned') { + return s3.putBucketVersioning( + { + Bucket: bucketName, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ); + } else if (versioning === 'suspended') { + return s3.putBucketVersioning( + { + Bucket: bucketName, + VersioningConfiguration: { Status: 'Suspended' }, + }, + next + ); + } + return next(); + }, + next => s3.putObject(params, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDBefore = objMD; + return next(err); + }), + next => metadata.listObject(bucketName, mdListingParams, log, next), + next => putObjectVersion(s3, params, '', next), + next => + getMetadata(bucketName, objectName, undefined, (err, objMD) => { + objMDAfter = objMD; + return next(err); + }), + next => + s3.listObjects({ Bucket: bucketName }, (err, res) => { + assert.ifError(err); + assert.strictEqual(res.Contents.length, 1); + assert.strictEqual(res.Contents[0].StorageClass, 'location-dmf-v1'); + return next(); + }), + next => + s3.headObject(params, (err, res) => { + assert.ifError(err); + assert.strictEqual(res.StorageClass, 'location-dmf-v1'); + return next(); + }), + next => + s3.getObject(params, (err, res) => { + assert.ifError(err); + assert.strictEqual(res.StorageClass, 'location-dmf-v1'); + return next(); + }), + ], + err => { + assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + // storage class must stay as the cold location + assert.deepStrictEqual(objMDAfter['x-amz-storage-class'], 'location-dmf-v1'); + + /// Make sure object data location is set back to its bucket data location. 
+ assert.deepStrictEqual(objMDAfter.dataStoreName, 'us-east-1'); + + assert.deepStrictEqual(objMDAfter.archive.archiveInfo, objMDBefore.archive.archiveInfo); + assert.deepStrictEqual( + objMDAfter.archive.restoreRequestedAt, + objMDBefore.archive.restoreRequestedAt + ); + assert.deepStrictEqual( + objMDAfter.archive.restoreRequestedDays, + objMDBefore.archive.restoreRequestedDays + ); + assert.deepStrictEqual(objMDAfter['x-amz-restore']['ongoing-request'], false); + + assert(objMDAfter.archive.restoreCompletedAt); + assert(objMDAfter.archive.restoreWillExpireAt); + assert(objMDAfter['x-amz-restore']['expiry-date']); + return done(); + } + ); }); }); it('should "copy" all but non data-related metadata (data encryption, data size...)', done => { const params = { Bucket: bucketNameMD, - Key: objectName + Key: objectName, }; const putParams = { ...params, Metadata: { 'custom-user-md': 'custom-md', }, - WebsiteRedirectLocation: 'http://custom-redirect' + WebsiteRedirectLocation: 'http://custom-redirect', }; const aclParams = { ...params, @@ -810,102 +1003,107 @@ describe('PUT object with x-scal-s3-version-id header', () => { const tagParams = { ...params, Tagging: { - TagSet: [{ - Key: 'tag1', - Value: 'value1' - }, { - Key: 'tag2', - Value: 'value2' - }] - } + TagSet: [ + { + Key: 'tag1', + Value: 'value1', + }, + { + Key: 'tag2', + Value: 'value2', + }, + ], + }, }; const legalHoldParams = { ...params, LegalHold: { - Status: 'ON' - }, + Status: 'ON', + }, }; const acl = { - 'Canned': '', - 'FULL_CONTROL': [ + Canned: '', + FULL_CONTROL: [ // canonicalID of user Bart '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be', ], - 'WRITE_ACP': [], - 'READ': [], - 'READ_ACP': [], + WRITE_ACP: [], + READ: [], + READ_ACP: [], }; const tags = { tag1: 'value1', tag2: 'value2' }; const replicationInfo = { - 'status': 'COMPLETED', - 'backends': [ - { - 'site': 'azure-normal', - 'status': 'COMPLETED', - 'dataStoreVersionId': '', - }, - ], - 'content': [ - 'DATA', - 'METADATA', + status: 'COMPLETED', + backends: [ + { + site: 'azure-normal', + status: 'COMPLETED', + dataStoreVersionId: '', + }, ], - 'destination': 'arn:aws:s3:::versioned', - 'storageClass': 'azure-normal', - 'role': 'arn:aws:iam::root:role/s3-replication-role', - 'storageType': 'azure', - 'dataStoreVersionId': '', - 'isNFS': null, + content: ['DATA', 'METADATA'], + destination: 'arn:aws:s3:::versioned', + storageClass: 'azure-normal', + role: 'arn:aws:iam::root:role/s3-replication-role', + storageType: 'azure', + dataStoreVersionId: '', + isNFS: null, }; - async.series([ - next => s3.putObject(putParams, next), - next => s3.putObjectAcl(aclParams, next), - next => s3.putObjectTagging(tagParams, next), - next => s3.putObjectLegalHold(legalHoldParams, next), - next => getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => { - if (err) { - return next(err); - } - /* eslint-disable no-param-reassign */ - objMD.dataStoreName = 'location-dmf-v1'; - objMD.archive = archive; - objMD.replicationInfo = replicationInfo; - // data related - objMD['content-length'] = 99; - objMD['content-type'] = 'testtype'; - objMD['content-md5'] = 'testmd5'; - objMD['content-encoding'] = 'testencoding'; - objMD['x-amz-server-side-encryption'] = 'aws:kms'; - /* eslint-enable no-param-reassign */ - return metadata.putObjectMD(bucketNameMD, objectName, objMD, undefined, log, next); - }), - next => putObjectVersion(s3, params, '', next), - next => getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => { - if (err) { - return next(err); - } 
- assert.deepStrictEqual(objMD.acl, acl); - assert.deepStrictEqual(objMD.tags, tags); - assert.deepStrictEqual(objMD.replicationInfo, replicationInfo); - assert.deepStrictEqual(objMD.legalHold, true); - assert.strictEqual(objMD['x-amz-meta-custom-user-md'], 'custom-md'); - assert.strictEqual(objMD['x-amz-website-redirect-location'], 'http://custom-redirect'); - // make sure data related metadatas ar not the same before and after - assert.notStrictEqual(objMD['x-amz-server-side-encryption'], 'aws:kms'); - assert.notStrictEqual(objMD['content-length'], 99); - assert.notStrictEqual(objMD['content-encoding'], 'testencoding'); - assert.notStrictEqual(objMD['content-type'], 'testtype'); - // make sure we keep the same etag and add the new restored - // data's etag inside x-amz-restore - assert.strictEqual(objMD['content-md5'], 'testmd5'); - assert.strictEqual(typeof objMD['x-amz-restore']['content-md5'], 'string'); - return next(); - }), - // removing legal hold to be able to clean the bucket after the test - next => { - legalHoldParams.LegalHold.Status = 'OFF'; - return s3.putObjectLegalHold(legalHoldParams, next); - }, - ], done); + async.series( + [ + next => s3.putObject(putParams, next), + next => s3.putObjectAcl(aclParams, next), + next => s3.putObjectTagging(tagParams, next), + next => s3.putObjectLegalHold(legalHoldParams, next), + next => + getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => { + if (err) { + return next(err); + } + /* eslint-disable no-param-reassign */ + objMD.dataStoreName = 'location-dmf-v1'; + objMD.archive = archive; + objMD.replicationInfo = replicationInfo; + // data related + objMD['content-length'] = 99; + objMD['content-type'] = 'testtype'; + objMD['content-md5'] = 'testmd5'; + objMD['content-encoding'] = 'testencoding'; + objMD['x-amz-server-side-encryption'] = 'aws:kms'; + /* eslint-enable no-param-reassign */ + return metadata.putObjectMD(bucketNameMD, objectName, objMD, undefined, log, next); + }), + next => putObjectVersion(s3, params, '', next), + next => + getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => { + if (err) { + return next(err); + } + assert.deepStrictEqual(objMD.acl, acl); + assert.deepStrictEqual(objMD.tags, tags); + assert.deepStrictEqual(objMD.replicationInfo, replicationInfo); + assert.deepStrictEqual(objMD.legalHold, true); + assert.strictEqual(objMD['x-amz-meta-custom-user-md'], 'custom-md'); + assert.strictEqual(objMD['x-amz-website-redirect-location'], 'http://custom-redirect'); + // make sure data related metadatas ar not the same before and after + assert.notStrictEqual(objMD['x-amz-server-side-encryption'], 'aws:kms'); + assert.notStrictEqual(objMD['content-length'], 99); + assert.notStrictEqual(objMD['content-encoding'], 'testencoding'); + assert.notStrictEqual(objMD['content-type'], 'testtype'); + // make sure we keep the same etag and add the new restored + // data's etag inside x-amz-restore + assert.strictEqual(objMD['content-md5'], 'testmd5'); + assert.strictEqual(typeof objMD['x-amz-restore']['content-md5'], 'string'); + return next(); + }), + // removing legal hold to be able to clean the bucket after the test + next => { + legalHoldParams.LegalHold.Status = 'OFF'; + return s3.putObjectLegalHold(legalHoldParams, next); + }, + ], + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/rangeTest.js b/tests/functional/aws-node-sdk/test/object/rangeTest.js index c0cd642c54..17dbc94de8 100644 --- a/tests/functional/aws-node-sdk/test/object/rangeTest.js +++ 
b/tests/functional/aws-node-sdk/test/object/rangeTest.js @@ -25,8 +25,7 @@ function getOuterRange(range, bytes) { arr[1] = Number.parseInt(bytes, 10) - 1; } else { arr[0] = arr[0] === '' ? 0 : Number.parseInt(arr[0], 10); - arr[1] = arr[1] === '' || Number.parseInt(arr[1], 10) >= bytes ? - Number.parseInt(bytes, 10) - 1 : arr[1]; + arr[1] = arr[1] === '' || Number.parseInt(arr[1], 10) >= bytes ? Number.parseInt(bytes, 10) - 1 : arr[1]; } return { begin: arr[0], @@ -37,55 +36,67 @@ function getOuterRange(range, bytes) { // Get the ranged object from a bucket. Write the response body to a file, then // use getRangeExec to check that all the bytes are in the correct location. function checkRanges(range, bytes) { - return s3.getObject({ - Bucket: bucket, - Key: key, - Range: `bytes=${range}`, - }).promise() - .then(res => { - const { begin, end } = getOuterRange(range, bytes); - const total = (end - begin) + 1; - // If the range header is '-' (i.e., it is invalid), content range - // should be undefined - const contentRange = range === '-' ? undefined : - `bytes ${begin}-${end}/${bytes}`; + return s3 + .getObject({ + Bucket: bucket, + Key: key, + Range: `bytes=${range}`, + }) + .promise() + .then(res => { + const { begin, end } = getOuterRange(range, bytes); + const total = end - begin + 1; + // If the range header is '-' (i.e., it is invalid), content range + // should be undefined + const contentRange = range === '-' ? undefined : `bytes ${begin}-${end}/${bytes}`; - assert.deepStrictEqual(res.ContentLength, total); - assert.deepStrictEqual(res.ContentRange, contentRange); - assert.deepStrictEqual(res.ContentType, 'application/octet-stream'); - assert.deepStrictEqual(res.Metadata, {}); + assert.deepStrictEqual(res.ContentLength, total); + assert.deepStrictEqual(res.ContentRange, contentRange); + assert.deepStrictEqual(res.ContentType, 'application/octet-stream'); + assert.deepStrictEqual(res.Metadata, {}); - // Write a file using the buffer so getRangeExec can then check bytes. - // If the getRangeExec program fails, then the range is incorrect. - return writeFileAsync(`hashedFile.${bytes}.${range}`, res.Body) - .then(() => execFileAsync('./getRangeExec', ['--check', '--size', total, - '--offset', begin, `hashedFile.${bytes}.${range}`])); - }); + // Write a file using the buffer so getRangeExec can then check bytes. + // If the getRangeExec program fails, then the range is incorrect. 
+ return writeFileAsync(`hashedFile.${bytes}.${range}`, res.Body).then(() => + execFileAsync('./getRangeExec', [ + '--check', + '--size', + total, + '--offset', + begin, + `hashedFile.${bytes}.${range}`, + ]) + ); + }); } // Create 5MB parts and upload them as parts of a MPU async function uploadParts(bytes, uploadId) { const name = `hashedFile.${bytes}`; - return Promise.all([1, 2].map(async part => { - try { - await execFileAsync('dd', [ - `if=${name}`, - `of=${name}.mpuPart${part}`, - 'bs=5242880', - `skip=${part - 1}`, - 'count=1', - ]); - await s3.uploadPart({ - Bucket: bucket, - Key: key, - PartNumber: part, - UploadId: uploadId, - Body: createReadStream(`${name}.mpuPart${part}`), - }).promise(); - } catch (error) { - throw new Error(`Error uploading part ${part}: ${error.message}`); - } - })); + return Promise.all( + [1, 2].map(async part => { + try { + await execFileAsync('dd', [ + `if=${name}`, + `of=${name}.mpuPart${part}`, + 'bs=5242880', + `skip=${part - 1}`, + 'count=1', + ]); + await s3 + .uploadPart({ + Bucket: bucket, + Key: key, + PartNumber: part, + UploadId: uploadId, + Body: createReadStream(`${name}.mpuPart${part}`), + }) + .promise(); + } catch (error) { + throw new Error(`Error uploading part ${part}: ${error.message}`); + } + }) + ); } // Create a hashed file of size bytes @@ -95,8 +106,7 @@ function createHashedFile(bytes) { } describeSkipIfCeph('aws-node-sdk range tests', () => { - before(() => execFileAsync('gcc', ['-o', 'getRangeExec', - 'lib/utility/getRange.c'])); + before(() => execFileAsync('gcc', ['-o', 'getRangeExec', 'lib/utility/getRange.c'])); after(() => execAsync('rm getRangeExec')); describe('aws-node-sdk range test for object put by MPU', () => @@ -107,64 +117,82 @@ describeSkipIfCeph('aws-node-sdk range tests', () => { let uploadId; beforeEach(() => - s3.createBucket({ Bucket: bucket }).promise() - .then(() => s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - }).promise()) - .then(res => { - uploadId = res.UploadId; - }) - .then(() => createHashedFile(fileSize)) - .then(() => uploadParts(fileSize, uploadId)) - .then(res => s3.completeMultipartUpload({ - Bucket: bucket, - Key: key, - UploadId: uploadId, - MultipartUpload: { - Parts: [ - { - ETag: res[0].ETag, - PartNumber: 1, - }, - { - ETag: res[1].ETag, - PartNumber: 2, - }, - ], - }, - }).promise()) + s3 + .createBucket({ Bucket: bucket }) + .promise() + .then(() => + s3 + .createMultipartUpload({ + Bucket: bucket, + Key: key, + }) + .promise() + ) + .then(res => { + uploadId = res.UploadId; + }) + .then(() => createHashedFile(fileSize)) + .then(() => uploadParts(fileSize, uploadId)) + .then(res => + s3 + .completeMultipartUpload({ + Bucket: bucket, + Key: key, + UploadId: uploadId, + MultipartUpload: { + Parts: [ + { + ETag: res[0].ETag, + PartNumber: 1, + }, + { + ETag: res[1].ETag, + PartNumber: 2, + }, + ], + }, + }) + .promise() + ) ); - afterEach(() => bucketUtil.empty(bucket) - .then(() => s3.abortMultipartUpload({ - Bucket: bucket, - Key: key, - UploadId: uploadId, - }).promise()) - .catch(err => new Promise((resolve, reject) => { - if (err.code !== 'NoSuchUpload') { - reject(err); - } - resolve(); - })) - .then(() => bucketUtil.deleteOne(bucket)) - .then(() => execAsync(`rm hashedFile.${fileSize}*`)) + afterEach(() => + bucketUtil + .empty(bucket) + .then(() => + s3 + .abortMultipartUpload({ + Bucket: bucket, + Key: key, + UploadId: uploadId, + }) + .promise() + ) + .catch( + err => + new Promise((resolve, reject) => { + if (err.code !== 'NoSuchUpload') { + reject(err); + } + 
resolve(); + }) + ) + .then(() => bucketUtil.deleteOne(bucket)) + .then(() => execAsync(`rm hashedFile.${fileSize}*`)) ); - it('should get a range from the first part of an object', () => - checkRanges('0-9', fileSize)); + it('should get a range from the first part of an object', () => checkRanges('0-9', fileSize)); - it('should get a range from the second part of an object', () => - checkRanges('5242880-5242889', fileSize)); + it('should get a range from the second part of an object', () => checkRanges('5242880-5242889', fileSize)); - it('should get a range that spans both parts of an object', () => - checkRanges('5242875-5242884', fileSize)); + it('should get a range that spans both parts of an object', () => checkRanges('5242875-5242884', fileSize)); - it('should get a range from the second part of an object and ' + - 'include the end if the range requested goes beyond the ' + - 'actual object end', () => - checkRanges('10485750-10485790', fileSize)); + it( + 'should get a range from the second part of an object and ' + + 'include the end if the range requested goes beyond the ' + + 'actual object end', + () => checkRanges('10485750-10485790', fileSize) + ); })); describe('aws-node-sdk range test of regular object put (non-MPU)', () => @@ -174,18 +202,27 @@ describeSkipIfCeph('aws-node-sdk range tests', () => { const fileSize = 2000; beforeEach(() => - s3.createBucket({ Bucket: bucket }).promise() - .then(() => createHashedFile(fileSize)) - .then(() => s3.putObject({ - Bucket: bucket, - Key: key, - Body: createReadStream(`hashedFile.${fileSize}`), - }).promise())); + s3 + .createBucket({ Bucket: bucket }) + .promise() + .then(() => createHashedFile(fileSize)) + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: key, + Body: createReadStream(`hashedFile.${fileSize}`), + }) + .promise() + ) + ); afterEach(() => - bucketUtil.empty(bucket) - .then(() => bucketUtil.deleteOne(bucket)) - .then(() => execAsync(`rm hashedFile.${fileSize}*`))); + bucketUtil + .empty(bucket) + .then(() => bucketUtil.deleteOne(bucket)) + .then(() => execAsync(`rm hashedFile.${fileSize}*`)) + ); const putRangeTests = [ '-', // Test for invalid range @@ -218,9 +255,9 @@ describeSkipIfCeph('aws-node-sdk range tests', () => { ]; putRangeTests.forEach(range => { - it(`should get a range of ${range} bytes using a ${fileSize} ` + - 'byte sized object', () => - checkRanges(range, fileSize)); + it(`should get a range of ${range} bytes using a ${fileSize} ` + 'byte sized object', () => + checkRanges(range, fileSize) + ); }); })); @@ -231,26 +268,37 @@ describeSkipIfCeph('aws-node-sdk range tests', () => { const fileSize = 2900; beforeEach(() => - s3.createBucket({ Bucket: bucket }).promise() - .then(() => createHashedFile(fileSize)) - .then(() => s3.putObject({ - Bucket: bucket, - Key: key, - Body: createReadStream(`hashedFile.${fileSize}`), - }).promise())); + s3 + .createBucket({ Bucket: bucket }) + .promise() + .then(() => createHashedFile(fileSize)) + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: key, + Body: createReadStream(`hashedFile.${fileSize}`), + }) + .promise() + ) + ); afterEach(() => - bucketUtil.empty(bucket) - .then(() => bucketUtil.deleteOne(bucket)) - .then(() => execAsync(`rm hashedFile.${fileSize}*`))); + bucketUtil + .empty(bucket) + .then(() => bucketUtil.deleteOne(bucket)) + .then(() => execAsync(`rm hashedFile.${fileSize}*`)) + ); - it('should get the final 90 bytes of a 2890 byte object for a ' + - 'byte range of 2800-', () => - checkRanges('2800-', fileSize)); + it('should get the 
final 90 bytes of a 2890 byte object for a ' + 'byte range of 2800-', () => + checkRanges('2800-', fileSize) + ); - it('should get the final 90 bytes of a 2890 byte object for a ' + - 'byte range of 2800-Number.MAX_SAFE_INTEGER', () => - checkRanges(`2800-${Number.MAX_SAFE_INTEGER}`, fileSize)); + it( + 'should get the final 90 bytes of a 2890 byte object for a ' + + 'byte range of 2800-Number.MAX_SAFE_INTEGER', + () => checkRanges(`2800-${Number.MAX_SAFE_INTEGER}`, fileSize) + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/websiteGet.js b/tests/functional/aws-node-sdk/test/object/websiteGet.js index 63054f3396..cb7e149f59 100644 --- a/tests/functional/aws-node-sdk/test/object/websiteGet.js +++ b/tests/functional/aws-node-sdk/test/object/websiteGet.js @@ -14,12 +14,11 @@ const config = getConfig('default', { signatureVersion: 'v4' }); const s3 = new S3(config); const transport = conf.https ? 'https' : 'http'; -const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : - 'bucketwebsitetester'; +const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : 'bucketwebsitetester'; const port = process.env.AWS_ON_AIR ? 80 : 8000; -const hostname = process.env.S3_END_TO_END ? - `${bucket}.s3-website-us-east-1.scality.com` : - `${bucket}.s3-website-us-east-1.amazonaws.com`; +const hostname = process.env.S3_END_TO_END + ? `${bucket}.s3-website-us-east-1.scality.com` + : `${bucket}.s3-website-us-east-1.amazonaws.com`; const endpoint = `${transport}://${hostname}:${port}`; const redirectEndpoint = `${transport}://www.google.com`; @@ -30,27 +29,33 @@ const redirectEndpoint = `${transport}://www.google.com`; function putBucketWebsiteAndPutObjectRedirect(redirect, condition, key, done) { const webConfig = new WebsiteConfigTester('index.html'); webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { if (err) { done(err); } - return s3.putObject({ Bucket: bucket, - Key: key, - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/redirect.html')), - ContentType: 'text/html' }, done); + return s3.putObject( + { + Bucket: bucket, + Key: key, + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/redirect.html')), + ContentType: 'text/html', + }, + done + ); }); } describe('User visits bucket website endpoint', () => { it('should return 404 when no such bucket', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: '404-no-such-bucket', - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: '404-no-such-bucket', + }, + done + ); }); describe('with existing bucket', () => { @@ -59,122 +64,138 @@ describe('User visits bucket website endpoint', () => { afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); it('should return 404 when no website configuration', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: '404-no-such-website-configuration', - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: '404-no-such-website-configuration', + }, + done + ); }); describe('with existing configuration', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - 
assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, Key: 'index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html' }, + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + }, err => { assert.strictEqual(err, null); done(); - }); + } + ); }); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, - err => done(err)); - }); - - it('should return 405 when user requests method other than get ' + - 'or head', done => { - makeRequest({ - hostname, - port, - method: 'POST', - }, (err, res) => { - assert.strictEqual(err, null, - `Err with request ${err}`); - assert.strictEqual(res.statusCode, 405); - assert(res.body.indexOf('405 ' + - 'Method Not Allowed') > -1); - return done(); - }); + s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, err => done(err)); + }); + + it('should return 405 when user requests method other than get ' + 'or head', done => { + makeRequest( + { + hostname, + port, + method: 'POST', + }, + (err, res) => { + assert.strictEqual(err, null, `Err with request ${err}`); + assert.strictEqual(res.statusCode, 405); + assert(res.body.indexOf('405 ' + 'Method Not Allowed') > -1); + return done(); + } + ); }); it('should serve indexDocument if no key requested', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'index-user', - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'index-user', + }, + done + ); }); it('should serve indexDocument if key requested', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/index.html`, - responseType: 'index-user', - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/index.html`, + responseType: 'index-user', + }, + done + ); }); }); describe('with path in request with/without key', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, - Key: 'pathprefix/index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html' }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'pathprefix/index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + }, + done + ); }); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: - 'pathprefix/index.html' }, - done); + s3.deleteObject({ Bucket: bucket, Key: 'pathprefix/index.html' }, done); }); - it('should serve indexDocument if path request without key', - done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/pathprefix/`, - responseType: 'index-user', - }, done); + it('should serve indexDocument if path request without key', done => { + WebsiteConfigTester.checkHTML( + { + method: 
'GET', + url: `${endpoint}/pathprefix/`, + responseType: 'index-user', + }, + done + ); }); - it('should serve indexDocument if path request with key', - done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/pathprefix/index.html`, - responseType: 'index-user', - }, done); + it('should serve indexDocument if path request with key', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/pathprefix/index.html`, + responseType: 'index-user', + }, + done + ); }); }); describe('with private key', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, - Key: 'index.html', - ACL: 'private', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html' }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'index.html', + ACL: 'private', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + }, + done + ); }); }); @@ -183,27 +204,32 @@ describe('User visits bucket website endpoint', () => { }); it('should return 403 if key is private', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: '403-access-denied', - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: '403-access-denied', + }, + done + ); }); }); describe('with nonexisting index document key', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); it('should return 403 if nonexisting index document key', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: '403-access-denied', - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: '403-access-denied', + }, + done + ); }); }); @@ -212,80 +238,91 @@ describe('User visits bucket website endpoint', () => { const redirectAllTo = { HostName: 'www.google.com', }; - const webConfig = new WebsiteConfigTester(null, null, - redirectAllTo); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + const webConfig = new WebsiteConfigTester(null, null, redirectAllTo); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); it(`should redirect to ${redirectEndpoint}`, done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'redirect', - redirectUrl: `${redirectEndpoint}/`, - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'redirect', + redirectUrl: `${redirectEndpoint}/`, + }, + done + ); }); it(`should redirect to ${redirectEndpoint}/about`, done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/about`, - responseType: 'redirect', - redirectUrl: `${redirectEndpoint}/about`, - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/about`, + responseType: 'redirect', + redirectUrl: `${redirectEndpoint}/about`, + }, + done + ); }); }); - 
describe.skip('redirect all requests to https://www.google.com ' + - 'since https protocol set in website config', () => { - // Note: these tests will all redirect to https even if - // conf does not have https since protocol in website config - // specifies https - beforeEach(done => { - const redirectAllTo = { - HostName: 'www.google.com', - Protocol: 'https', - }; - const webConfig = new WebsiteConfigTester(null, null, - redirectAllTo); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); - }); + describe.skip( + 'redirect all requests to https://www.google.com ' + 'since https protocol set in website config', + () => { + // Note: these tests will all redirect to https even if + // conf does not have https since protocol in website config + // specifies https + beforeEach(done => { + const redirectAllTo = { + HostName: 'www.google.com', + Protocol: 'https', + }; + const webConfig = new WebsiteConfigTester(null, null, redirectAllTo); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); + }); - it('should redirect to https://google.com/', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'redirect', - redirectUrl: 'https://www.google.com/', - }, done); - }); + it('should redirect to https://google.com/', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'redirect', + redirectUrl: 'https://www.google.com/', + }, + done + ); + }); - it('should redirect to https://google.com/about', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/about`, - responseType: 'redirect', - redirectUrl: 'https://www.google.com/about', - }, done); - }); - }); + it('should redirect to https://google.com/about', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/about`, + responseType: 'redirect', + redirectUrl: 'https://www.google.com/about', + }, + done + ); + }); + } + ); describe('with custom error document', () => { beforeEach(done => { - const webConfig = new WebsiteConfigTester('index.html', - 'error.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, - Key: 'error.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/error.html')), - ContentType: 'text/html' }, done); + const webConfig = new WebsiteConfigTester('index.html', 'error.html'); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'error.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/error.html')), + ContentType: 'text/html', + }, + done + ); }); }); @@ -293,55 +330,62 @@ describe('User visits bucket website endpoint', () => { s3.deleteObject({ Bucket: bucket, Key: 'error.html' }, done); }); - it('should serve custom error document if an error occurred', - done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'error-user', - }, done); - }); - - it('should serve custom error document with redirect', - done => { - s3.putObject({ Bucket: bucket, - Key: 'error.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/error.html')), - ContentType: 'text/html', - WebsiteRedirectLocation: 'https://scality.com/test', - 
}, err => { - assert.ifError(err); - WebsiteConfigTester.checkHTML({ + it('should serve custom error document if an error occurred', done => { + WebsiteConfigTester.checkHTML( + { method: 'GET', url: endpoint, - responseType: 'redirect-error', - redirectUrl: 'https://scality.com/test', - expectedHeaders: { - 'x-amz-error-code': 'AccessDenied', - 'x-amz-error-message': 'Access Denied', - }, - }, done); - }); + responseType: 'error-user', + }, + done + ); + }); + + it('should serve custom error document with redirect', done => { + s3.putObject( + { + Bucket: bucket, + Key: 'error.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/error.html')), + ContentType: 'text/html', + WebsiteRedirectLocation: 'https://scality.com/test', + }, + err => { + assert.ifError(err); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'redirect-error', + redirectUrl: 'https://scality.com/test', + expectedHeaders: { + 'x-amz-error-code': 'AccessDenied', + 'x-amz-error-message': 'Access Denied', + }, + }, + done + ); + } + ); }); }); describe('unfound custom error document', () => { beforeEach(done => { - const webConfig = new WebsiteConfigTester('index.html', - 'error.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + const webConfig = new WebsiteConfigTester('index.html', 'error.html'); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); - it('should serve s3 error file if unfound custom error document ' + - 'and an error occurred', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: '403-retrieve-error-document', - }, done); + it('should serve s3 error file if unfound custom error document ' + 'and an error occurred', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: '403-retrieve-error-document', + }, + done + ); }); }); @@ -355,18 +399,19 @@ describe('User visits bucket website endpoint', () => { HostName: 'www.google.com', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); - it(`should redirect to ${redirectEndpoint} if error 403` + - ' occured', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'redirect', - redirectUrl: `${redirectEndpoint}/`, - }, done); + it(`should redirect to ${redirectEndpoint} if error 403` + ' occured', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'redirect', + redirectUrl: `${redirectEndpoint}/`, + }, + done + ); }); }); @@ -380,23 +425,23 @@ describe('User visits bucket website endpoint', () => { HostName: 'www.google.com', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); - it(`should redirect to ${redirectEndpoint}/about/ if ` + - 'key prefix is equal to "about"', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/about/`, - responseType: 'redirect', - redirectUrl: `${redirectEndpoint}/about/`, - }, done); + it(`should redirect to ${redirectEndpoint}/about/ if ` + 'key prefix is equal to "about"', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: 
`${endpoint}/about/`, + responseType: 'redirect', + redirectUrl: `${redirectEndpoint}/about/`, + }, + done + ); }); }); - describe.skip('redirect to hostname with prefix and error condition', - () => { + describe.skip('redirect to hostname with prefix and error condition', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); const condition = { @@ -407,19 +452,23 @@ describe('User visits bucket website endpoint', () => { HostName: 'www.google.com', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); - it(`should redirect to ${redirectEndpoint} if ` + - 'key prefix is equal to "about" AND error code 403', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/about/`, - responseType: 'redirect', - redirectUrl: `${redirectEndpoint}/about/`, - }, done); - }); + it( + `should redirect to ${redirectEndpoint} if ` + 'key prefix is equal to "about" AND error code 403', + done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/about/`, + responseType: 'redirect', + redirectUrl: `${redirectEndpoint}/about/`, + }, + done + ); + } + ); }); describe.skip('redirect with multiple redirect rules', () => { @@ -436,22 +485,23 @@ describe('User visits bucket website endpoint', () => { }; webConfig.addRoutingRule(redirectOne, conditions); webConfig.addRoutingRule(redirectTwo, conditions); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); it('should redirect to the first one', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/about/`, - responseType: 'redirect', - redirectUrl: `${redirectEndpoint}/about/`, - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/about/`, + responseType: 'redirect', + redirectUrl: `${redirectEndpoint}/about/`, + }, + done + ); }); }); - describe.skip('redirect with protocol', - () => { + describe.skip('redirect with protocol', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); const condition = { @@ -462,18 +512,19 @@ describe('User visits bucket website endpoint', () => { HostName: 'www.google.com', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); - it('should redirect to https://www.google.com/about if ' + - 'https protocols', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/about/`, - responseType: 'redirect', - redirectUrl: 'https://www.google.com/about/', - }, done); + it('should redirect to https://www.google.com/about if ' + 'https protocols', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/about/`, + responseType: 'redirect', + redirectUrl: 'https://www.google.com/about/', + }, + done + ); }); }); @@ -485,23 +536,23 @@ describe('User visits bucket website endpoint', () => { const redirect = { ReplaceKeyWith: 'redirect.html', }; - putBucketWebsiteAndPutObjectRedirect(redirect, condition, - 'redirect.html', done); + putBucketWebsiteAndPutObjectRedirect(redirect, condition, 'redirect.html', done); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'redirect.html' 
}, - err => done(err)); + s3.deleteObject({ Bucket: bucket, Key: 'redirect.html' }, err => done(err)); }); - it('should serve redirect file if error 403 error occured', - done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'redirect-user', - redirectUrl: `${endpoint}/redirect.html`, - }, done); + it('should serve redirect file if error 403 error occured', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'redirect-user', + redirectUrl: `${endpoint}/redirect.html`, + }, + done + ); }); }); @@ -516,23 +567,23 @@ describe('User visits bucket website endpoint', () => { ReplaceKeyPrefixWith: 'about/', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); - it(`should redirect to ${redirectEndpoint}/about/ if ` + - 'ReplaceKeyPrefixWith equals "about/"', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'redirect', - redirectUrl: `${redirectEndpoint}/about/`, - }, done); + it(`should redirect to ${redirectEndpoint}/about/ if ` + 'ReplaceKeyPrefixWith equals "about/"', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'redirect', + redirectUrl: `${redirectEndpoint}/about/`, + }, + done + ); }); }); - describe.skip('redirect requests with prefix /about to redirect/', - () => { + describe.skip('redirect requests with prefix /about to redirect/', () => { beforeEach(done => { const condition = { KeyPrefixEquals: 'about/', @@ -540,184 +591,209 @@ describe('User visits bucket website endpoint', () => { const redirect = { ReplaceKeyPrefixWith: 'redirect/', }; - putBucketWebsiteAndPutObjectRedirect(redirect, condition, - 'redirect/index.html', done); + putBucketWebsiteAndPutObjectRedirect(redirect, condition, 'redirect/index.html', done); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'redirect/index.html' }, - err => done(err)); + s3.deleteObject({ Bucket: bucket, Key: 'redirect/index.html' }, err => done(err)); }); - it('should serve redirect file if key prefix is equal to "about"', - done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/about/`, - responseType: 'redirect-user', - redirectUrl: `${endpoint}/redirect/`, - }, done); + it('should serve redirect file if key prefix is equal to "about"', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/about/`, + responseType: 'redirect-user', + redirectUrl: `${endpoint}/redirect/`, + }, + done + ); }); }); - describe.skip('redirect requests, with prefix /about and that return ' + - '403 error, to prefix redirect/', () => { - beforeEach(done => { - const condition = { - KeyPrefixEquals: 'about/', - HttpErrorCodeReturnedEquals: '403', - }; - const redirect = { - ReplaceKeyPrefixWith: 'redirect/', - }; - putBucketWebsiteAndPutObjectRedirect(redirect, condition, - 'redirect/index.html', done); - }); + describe.skip( + 'redirect requests, with prefix /about and that return ' + '403 error, to prefix redirect/', + () => { + beforeEach(done => { + const condition = { + KeyPrefixEquals: 'about/', + HttpErrorCodeReturnedEquals: '403', + }; + const redirect = { + ReplaceKeyPrefixWith: 'redirect/', + }; + putBucketWebsiteAndPutObjectRedirect(redirect, condition, 'redirect/index.html', done); + }); - afterEach(done => { - s3.deleteObject({ Bucket: 
bucket, Key: 'redirect/index.html' }, - err => done(err)); - }); + afterEach(done => { + s3.deleteObject({ Bucket: bucket, Key: 'redirect/index.html' }, err => done(err)); + }); - it('should serve redirect file if key prefix is equal to ' + - '"about" and error 403', - done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/about/`, - responseType: 'redirect-user', - redirectUrl: `${endpoint}/redirect/`, - }, done); - }); - }); + it('should serve redirect file if key prefix is equal to ' + '"about" and error 403', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/about/`, + responseType: 'redirect-user', + redirectUrl: `${endpoint}/redirect/`, + }, + done + ); + }); + } + ); describe('object redirect to /', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, Key: 'index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - Metadata: { - test: 'value', + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + Metadata: { + test: 'value', + }, + WebsiteRedirectLocation: '/', }, - WebsiteRedirectLocation: '/', - }, err => { assert.strictEqual(err, null); done(); - }); + } + ); }); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, - err => done(err)); + s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, err => done(err)); }); it('should redirect to /', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/index.html`, - responseType: 'redirect', - redirectUrl: '/', - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/index.html`, + responseType: 'redirect', + redirectUrl: '/', + }, + done + ); }); }); describe('with bucket policy', () => { beforeEach(done => { - const webConfig = new WebsiteConfigTester('index.html', - 'error.html'); - - async.waterfall([ - next => s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, next), - (data, next) => s3.putBucketPolicy({ Bucket: bucket, - Policy: JSON.stringify({ - Version: '2012-10-17', - Statement: [{ - Sid: 'PublicReadGetObject', - Effect: 'Allow', - Principal: '*', - Action: ['s3:GetObject'], - Resource: [ - `arn:aws:s3:::${bucket}/index.html`, - `arn:aws:s3:::${bucket}/error.html`, - `arn:aws:s3:::${bucket}/access.html`, - ], - }, - { - Sid: 'DenyUnrelatedObj', - Effect: 'Deny', - Principal: '*', - Action: ['s3:GetObject'], - Resource: [ - `arn:aws:s3:::${bucket}/unrelated_obj.html`, - ], - }], - }), - }, next), - (data, next) => s3.putObject({ - Bucket: bucket, Key: 'index.html', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - }, next), - (data, next) => s3.putObject({ - Bucket: bucket, Key: 'error.html', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/error.html')), - ContentType: 'text/html', - }, next), - - ], err => { - assert.ifError(err); - done(); - }); + const webConfig = new WebsiteConfigTester('index.html', 'error.html'); + + 
async.waterfall( + [ + next => s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, next), + (data, next) => + s3.putBucketPolicy( + { + Bucket: bucket, + Policy: JSON.stringify({ + Version: '2012-10-17', + Statement: [ + { + Sid: 'PublicReadGetObject', + Effect: 'Allow', + Principal: '*', + Action: ['s3:GetObject'], + Resource: [ + `arn:aws:s3:::${bucket}/index.html`, + `arn:aws:s3:::${bucket}/error.html`, + `arn:aws:s3:::${bucket}/access.html`, + ], + }, + { + Sid: 'DenyUnrelatedObj', + Effect: 'Deny', + Principal: '*', + Action: ['s3:GetObject'], + Resource: [`arn:aws:s3:::${bucket}/unrelated_obj.html`], + }, + ], + }), + }, + next + ), + (data, next) => + s3.putObject( + { + Bucket: bucket, + Key: 'index.html', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + }, + next + ), + (data, next) => + s3.putObject( + { + Bucket: bucket, + Key: 'error.html', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/error.html')), + ContentType: 'text/html', + }, + next + ), + ], + err => { + assert.ifError(err); + done(); + } + ); }); afterEach(done => { - async.waterfall([ - next => s3.deleteObject({ Bucket: bucket, - Key: 'index.html' }, next), - (data, next) => s3.deleteObject({ Bucket: bucket, - Key: 'error.html' }, next), - ], err => { - assert.ifError(err); - done(); - }); + async.waterfall( + [ + next => s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, next), + (data, next) => s3.deleteObject({ Bucket: bucket, Key: 'error.html' }, next), + ], + err => { + assert.ifError(err); + done(); + } + ); }); it('should serve indexDocument if no key requested', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'index-user', - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'index-user', + }, + done + ); }); - it('should serve custom error 403 with deny on unrelated object ' + - 'and no access to key', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/non_existing.html`, - responseType: 'error-user', - }, done); + it('should serve custom error 403 with deny on unrelated object ' + 'and no access to key', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/non_existing.html`, + responseType: 'error-user', + }, + done + ); }); - it('should serve custom error 404 with deny on unrelated object ' + - 'and access to key', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/access.html`, - responseType: 'error-user-404', - }, done); + it('should serve custom error 404 with deny on unrelated object ' + 'and access to key', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/access.html`, + responseType: 'error-user-404', + }, + done + ); }); }); @@ -731,34 +807,37 @@ describe('User visits bucket website endpoint', () => { ReplaceKeyWith: 'whatever.html', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, Key: 'index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - }, + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: 
bucket, + Key: 'index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + }, err => { assert.strictEqual(err, null); done(); - }); + } + ); }); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, - err => done(err)); + s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, err => done(err)); }); it('should not redirect if index key is not explicit', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'index-user', - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'index-user', + }, + done + ); }); }); @@ -767,72 +846,84 @@ describe('User visits bucket website endpoint', () => { const webConfig = new WebsiteConfigTester('index.html'); const object = { Bucket: bucket, - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), ContentType: 'text/html', }; - async.waterfall([ - next => s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, next), - (data, next) => s3.putBucketPolicy({ Bucket: bucket, - Policy: JSON.stringify({ - Version: '2012-10-17', - Statement: [{ - Sid: 'PublicReadGetObject', - Effect: 'Allow', - Principal: '*', - Action: ['s3:GetObject'], - Resource: [ - `arn:aws:s3:::${bucket}/original_key_file`, - `arn:aws:s3:::${bucket}/original_key_nofile`, - `arn:aws:s3:::${bucket}/file/*`, - `arn:aws:s3:::${bucket}/nofile/*`, - ], - }], - }), - }, next), - (data, next) => s3.putObject(Object.assign({}, object, - { Key: 'original_key_file/index.html' }), next), - (data, next) => s3.putObject(Object.assign({}, object, - { Key: 'file/index.html' }), next), // the redirect 302 - (data, next) => s3.putObject(Object.assign({}, object, - { Key: 'no_access_file/index.html' }), next), - ], err => { - assert.ifError(err); - done(); - }); + async.waterfall( + [ + next => s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, next), + (data, next) => + s3.putBucketPolicy( + { + Bucket: bucket, + Policy: JSON.stringify({ + Version: '2012-10-17', + Statement: [ + { + Sid: 'PublicReadGetObject', + Effect: 'Allow', + Principal: '*', + Action: ['s3:GetObject'], + Resource: [ + `arn:aws:s3:::${bucket}/original_key_file`, + `arn:aws:s3:::${bucket}/original_key_nofile`, + `arn:aws:s3:::${bucket}/file/*`, + `arn:aws:s3:::${bucket}/nofile/*`, + ], + }, + ], + }), + }, + next + ), + (data, next) => + s3.putObject(Object.assign({}, object, { Key: 'original_key_file/index.html' }), next), + (data, next) => s3.putObject(Object.assign({}, object, { Key: 'file/index.html' }), next), // the redirect 302 + (data, next) => + s3.putObject(Object.assign({}, object, { Key: 'no_access_file/index.html' }), next), + ], + err => { + assert.ifError(err); + done(); + } + ); }); afterEach(done => { - async.waterfall([ - next => s3.deleteObject({ Bucket: bucket, - Key: 'original_key_file/index.html' }, next), - (data, next) => s3.deleteObject({ Bucket: bucket, - Key: 'file/index.html' }, next), - (data, next) => s3.deleteObject({ Bucket: bucket, - Key: 'no_access_file/index.html' }, next), - ], err => { - assert.ifError(err); - done(); - }); + async.waterfall( + [ + next => s3.deleteObject({ Bucket: bucket, Key: 'original_key_file/index.html' }, next), + (data, next) => s3.deleteObject({ Bucket: bucket, Key: 'file/index.html' }, next), + (data, next) => s3.deleteObject({ Bucket: bucket, Key: 
'no_access_file/index.html' }, next), + ], + err => { + assert.ifError(err); + done(); + } + ); }); it('should redirect 302 with trailing / on folder with index', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/file`, - responseType: 'redirect-error-found', - redirectUrl: '/file/', - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/file`, + responseType: 'redirect-error-found', + redirectUrl: '/file/', + }, + done + ); }); - it('should return 404 on original key access without index', - done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/original_key_nofile`, - responseType: '404-not-found', - }, done); + it('should return 404 on original key access without index', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/original_key_nofile`, + responseType: '404-not-found', + }, + done + ); }); describe('should return 403', () => { @@ -851,12 +942,16 @@ describe('User visits bucket website endpoint', () => { }, ].forEach(test => it(test.it, done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/${test.key}`, - responseType: '403-access-denied', - }, done); - })); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/${test.key}`, + responseType: '403-access-denied', + }, + done + ); + }) + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/websiteGetWithACL.js b/tests/functional/aws-node-sdk/test/object/websiteGetWithACL.js index b257ac96a2..5d39346997 100644 --- a/tests/functional/aws-node-sdk/test/object/websiteGetWithACL.js +++ b/tests/functional/aws-node-sdk/test/object/websiteGetWithACL.js @@ -12,13 +12,11 @@ const s3 = new S3(config); // `127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com` const transport = conf.https ? 'https' : 'http'; -const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : - 'bucketwebsitetester'; -const hostname = process.env.S3_END_TO_END ? - `${bucket}.s3-website-us-east-1.scality.com` : - `${bucket}.s3-website-us-east-1.amazonaws.com`; -const endpoint = process.env.AWS_ON_AIR ? `${transport}://${hostname}` : - `${transport}://${hostname}:8000`; +const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : 'bucketwebsitetester'; +const hostname = process.env.S3_END_TO_END + ? `${bucket}.s3-website-us-east-1.scality.com` + : `${bucket}.s3-website-us-east-1.amazonaws.com`; +const endpoint = process.env.AWS_ON_AIR ? 
`${transport}://${hostname}` : `${transport}://${hostname}:8000`; const aclEquivalent = { public: ['public-read-write', 'public-read'], @@ -35,51 +33,44 @@ const aclTests = [ }, // CEPH: test_website_public_bucket_list_private_index_blockederrordoc { - it: 'should return 403 if public bucket - private index - public ' + - 'error documents', + it: 'should return 403 if public bucket - private index - public ' + 'error documents', bucketACL: 'public', objects: { index: 'private', error: 'private' }, html: '403-access-denied', }, { - it: 'should return index doc if private bucket - public index - ' + - 'public error documents', + it: 'should return index doc if private bucket - public index - ' + 'public error documents', bucketACL: 'private', objects: { index: 'public-read', error: 'private' }, html: 'index-user', }, { - it: 'should return index doc if public bucket - public index - ' + - 'private error documents', + it: 'should return index doc if public bucket - public index - ' + 'private error documents', bucketACL: 'public', objects: { index: 'public-read', error: 'private' }, html: 'index-user', }, { - it: 'should return index doc if private bucket - public index - ' + - 'public error documents', + it: 'should return index doc if private bucket - public index - ' + 'public error documents', bucketACL: 'private', objects: { index: 'public-read', error: 'public-read' }, html: 'index-user', }, { - it: 'should return index doc if public bucket - public index - ' + - 'public error documents', + it: 'should return index doc if public bucket - public index - ' + 'public error documents', bucketACL: 'public', objects: { index: 'public-read', error: 'public-read' }, html: 'index-user', }, { - it: 'should return error doc if private bucket - without index - ' + - 'public error documents', + it: 'should return error doc if private bucket - without index - ' + 'public error documents', bucketACL: 'private', objects: { error: 'public-read' }, html: 'error-user', }, { - it: 'should return 404 if public bucket - without index - ' + - 'public error documents', + it: 'should return 404 if public bucket - without index - ' + 'public error documents', bucketACL: 'public', objects: { error: 'public-read' }, html: 'error-user-404', @@ -87,8 +78,7 @@ const aclTests = [ // CEPH: test_website_private_bucket_list_empty_blockederrordoc { - it: 'should return 403 if private bucket - without index - ' + - 'private error documents', + it: 'should return 403 if private bucket - without index - ' + 'private error documents', bucketACL: 'private', objects: { error: 'private' }, html: '403-access-denied', @@ -96,8 +86,7 @@ const aclTests = [ // CEPH: test_website_public_bucket_list_empty_blockederrordoc { - it: 'should return 404 if public bucket - without index - ' + - 'private error documents', + it: 'should return 404 if public bucket - without index - ' + 'private error documents', bucketACL: 'public', objects: { error: 'private' }, html: '404-not-found', @@ -105,20 +94,17 @@ const aclTests = [ // CEPH: test_website_public_bucket_list_empty_missingerrordoc { - it: 'should return 404 if public bucket - without index - ' + - 'without error documents', + it: 'should return 404 if public bucket - without index - ' + 'without error documents', bucketACL: 'public', - objects: { }, + objects: {}, html: '404-not-found', }, { - it: 'should return 403 if private bucket - without index - ' + - 'without error documents', + it: 'should return 403 if private bucket - without index - ' + 'without error documents', bucketACL: 
'private', - objects: { }, + objects: {}, html: '403-access-denied', }, - ]; describe('User visits bucket website endpoint with ACL', () => { @@ -126,12 +112,10 @@ describe('User visits bucket website endpoint with ACL', () => { aclEquivalent[test.bucketACL].forEach(bucketACL => { describe(`with existing bucket with ${bucketACL} acl`, () => { beforeEach(done => { - WebsiteConfigTester.createPutBucketWebsite(s3, bucket, - bucketACL, test.objects, done); + WebsiteConfigTester.createPutBucketWebsite(s3, bucket, bucketACL, test.objects, done); }); afterEach(done => { - WebsiteConfigTester.deleteObjectsThenBucket(s3, bucket, - test.objects, err => { + WebsiteConfigTester.deleteObjectsThenBucket(s3, bucket, test.objects, err => { if (process.env.AWS_ON_AIR) { // Give some time for AWS to finish deleting // object and buckets before starting next test @@ -143,29 +127,38 @@ describe('User visits bucket website endpoint with ACL', () => { }); it(`${test.it} with no auth credentials sent`, done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - requestType: test.html, - }, done); + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + requestType: test.html, + }, + done + ); }); it(`${test.it} even with invalid auth credentials`, done => { - WebsiteConfigTester.checkHTML({ - auth: 'invalid credentials', - method: 'GET', - url: endpoint, - requestType: test.html, - }, done); + WebsiteConfigTester.checkHTML( + { + auth: 'invalid credentials', + method: 'GET', + url: endpoint, + requestType: test.html, + }, + done + ); }); it(`${test.it} even with valid auth credentials`, done => { - WebsiteConfigTester.checkHTML({ - auth: 'valid credentials', - method: 'GET', - url: endpoint, - requestType: test.html, - }, done); + WebsiteConfigTester.checkHTML( + { + auth: 'valid credentials', + method: 'GET', + url: endpoint, + requestType: test.html, + }, + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/websiteHead.js b/tests/functional/aws-node-sdk/test/object/websiteHead.js index d0f81d2198..2f8050cd67 100644 --- a/tests/functional/aws-node-sdk/test/object/websiteHead.js +++ b/tests/functional/aws-node-sdk/test/object/websiteHead.js @@ -17,19 +17,16 @@ const s3 = new S3(config); // `127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com` const transport = conf.https ? 'https' : 'http'; -const bucket = process.env.AWS_ON_AIR ? `awsbucketwebsitetester-${Date.now()}` : - 'bucketwebsitetester'; -const hostname = process.env.S3_END_TO_END ? - `${bucket}.s3-website-us-east-1.scality.com` : - `${bucket}.s3-website-us-east-1.amazonaws.com`; -const endpoint = process.env.AWS_ON_AIR ? `${transport}://${hostname}` : - `${transport}://${hostname}:8000`; -const redirectEndpoint = conf.https ? 'https://www.google.com/' : - 'http://www.google.com/'; +const bucket = process.env.AWS_ON_AIR ? `awsbucketwebsitetester-${Date.now()}` : 'bucketwebsitetester'; +const hostname = process.env.S3_END_TO_END + ? `${bucket}.s3-website-us-east-1.scality.com` + : `${bucket}.s3-website-us-east-1.amazonaws.com`; +const endpoint = process.env.AWS_ON_AIR ? `${transport}://${hostname}` : `${transport}://${hostname}:8000`; +const redirectEndpoint = conf.https ? 
'https://www.google.com/' : 'http://www.google.com/'; const indexDocETag = '"95a589c37a2df74b062fb4d5a6f64197"'; const indexExpectedHeaders = { - 'etag': indexDocETag, + etag: indexDocETag, 'x-amz-meta-test': 'value', }; @@ -76,7 +73,6 @@ const indexExpectedHeaders = { // KX/MgqE4dZCJ4d9eF59Wbg/kza40cWcoA= // x-amz-request-id: 0073330F58C7137C - describe('Head request on bucket website endpoint', () => { it('should return 404 when no such bucket', done => { const expectedHeaders = { @@ -85,8 +81,7 @@ describe('Head request on bucket website endpoint', () => { // so compatible with aws 'x-amz-error-message': 'The specified bucket does not exist.', }; - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 404, - expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 404, expectedHeaders, done); }); describe('with existing bucket', () => { @@ -97,106 +92,115 @@ describe('Head request on bucket website endpoint', () => { it('should return 404 when no website configuration', done => { const expectedHeaders = { 'x-amz-error-code': 'NoSuchWebsiteConfiguration', - 'x-amz-error-message': 'The specified bucket does not ' + - 'have a website configuration', + 'x-amz-error-message': 'The specified bucket does not ' + 'have a website configuration', }; - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 404, - expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 404, expectedHeaders, done); }); describe('with existing configuration', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, Key: 'index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - Metadata: { - test: 'value', + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + Metadata: { + test: 'value', + }, }, - }, err => { assert.strictEqual(err, null); done(); - }); + } + ); }); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, - err => done(err)); + s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, err => done(err)); }); - it('should return indexDocument headers if no key ' + - 'requested', done => { - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, - 200, indexExpectedHeaders, done); + it('should return indexDocument headers if no key ' + 'requested', done => { + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 200, indexExpectedHeaders, done); }); it('should return indexDocument headers if key requested', done => { - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/index.html`, 200, indexExpectedHeaders, done); + WebsiteConfigTester.makeHeadRequest( + undefined, + `${endpoint}/index.html`, + 200, + indexExpectedHeaders, + done + ); }); }); describe('with path prefix in request with/without key', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - 
s3.putObject({ Bucket: bucket, - Key: 'pathprefix/index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - Metadata: { - test: 'value', + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'pathprefix/index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + Metadata: { + test: 'value', + }, }, - }, done); + done + ); }); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: - 'pathprefix/index.html' }, - done); + s3.deleteObject({ Bucket: bucket, Key: 'pathprefix/index.html' }, done); }); - it('should serve indexDocument if path request without key', - done => { - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/pathprefix/`, 200, indexExpectedHeaders, done); + it('should serve indexDocument if path request without key', done => { + WebsiteConfigTester.makeHeadRequest( + undefined, + `${endpoint}/pathprefix/`, + 200, + indexExpectedHeaders, + done + ); }); - it('should serve indexDocument if path request with key', - done => { - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/pathprefix/index.html`, 200, - indexExpectedHeaders, done); + it('should serve indexDocument if path request with key', done => { + WebsiteConfigTester.makeHeadRequest( + undefined, + `${endpoint}/pathprefix/index.html`, + 200, + indexExpectedHeaders, + done + ); }); }); describe('with private key', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, - Key: 'index.html', - ACL: 'private', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html' }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'index.html', + ACL: 'private', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + }, + done + ); }); }); @@ -209,16 +213,14 @@ describe('Head request on bucket website endpoint', () => { 'x-amz-error-code': 'AccessDenied', 'x-amz-error-message': 'Access Denied', }; - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 403, - expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 403, expectedHeaders, done); }); }); describe('with nonexisting index document key', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); it('should return 403 if nonexisting index document key', done => { @@ -226,8 +228,7 @@ describe('Head request on bucket website endpoint', () => { 'x-amz-error-code': 'AccessDenied', 'x-amz-error-message': 'Access Denied', }; - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 403, - expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 403, expectedHeaders, done); }); }); @@ -236,76 +237,71 @@ describe('Head request on bucket website endpoint', 
() => { const redirectAllTo = { HostName: 'www.google.com', }; - const webConfig = new WebsiteConfigTester(null, null, - redirectAllTo); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + const webConfig = new WebsiteConfigTester(null, null, redirectAllTo); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); it(`should redirect to ${redirectEndpoint}`, done => { const expectedHeaders = { location: redirectEndpoint, }; - WebsiteConfigTester.makeHeadRequest(undefined, - endpoint, 301, expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 301, expectedHeaders, done); }); it(`should redirect to ${redirectEndpoint}about`, done => { const expectedHeaders = { location: `${redirectEndpoint}about/`, }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/about/`, 301, expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/about/`, 301, expectedHeaders, done); }); }); - describe('redirect all requests to https://www.google.com ' + - 'since https protocol set in website config', () => { - // Note: these tests will all redirect to https even if - // conf does not have https since protocol in website config - // specifies https - beforeEach(done => { - const redirectAllTo = { - HostName: 'www.google.com', - Protocol: 'https', - }; - const webConfig = new WebsiteConfigTester(null, null, - redirectAllTo); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); - }); + describe( + 'redirect all requests to https://www.google.com ' + 'since https protocol set in website config', + () => { + // Note: these tests will all redirect to https even if + // conf does not have https since protocol in website config + // specifies https + beforeEach(done => { + const redirectAllTo = { + HostName: 'www.google.com', + Protocol: 'https', + }; + const webConfig = new WebsiteConfigTester(null, null, redirectAllTo); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); + }); - it('should redirect to https://google.com', done => { - const expectedHeaders = { - location: 'https://www.google.com/', - }; - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, - 301, expectedHeaders, done); - }); + it('should redirect to https://google.com', done => { + const expectedHeaders = { + location: 'https://www.google.com/', + }; + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 301, expectedHeaders, done); + }); - it('should redirect to https://google.com/about', done => { - const expectedHeaders = { - location: 'https://www.google.com/about/', - }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/about/`, 301, expectedHeaders, done); - }); - }); + it('should redirect to https://google.com/about', done => { + const expectedHeaders = { + location: 'https://www.google.com/about/', + }; + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/about/`, 301, expectedHeaders, done); + }); + } + ); describe('with custom error document', () => { beforeEach(done => { - const webConfig = new WebsiteConfigTester('index.html', - 'error.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, - Key: 'error.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/error.html')), - ContentType: 'text/html' }, done); + const webConfig = new 
WebsiteConfigTester('index.html', 'error.html'); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'error.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/error.html')), + ContentType: 'text/html', + }, + done + ); }); }); @@ -313,14 +309,12 @@ describe('Head request on bucket website endpoint', () => { s3.deleteObject({ Bucket: bucket, Key: 'error.html' }, done); }); - it('should return regular error headers regardless of whether ' + - 'custom error document', done => { + it('should return regular error headers regardless of whether ' + 'custom error document', done => { const expectedHeaders = { 'x-amz-error-code': 'AccessDenied', 'x-amz-error-message': 'Access Denied', }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/madeup`, 403, expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/madeup`, 403, expectedHeaders, done); }); }); @@ -334,17 +328,14 @@ describe('Head request on bucket website endpoint', () => { HostName: 'www.google.com', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); - it(`should redirect to ${redirectEndpoint} if error 403` + - ' occured', done => { + it(`should redirect to ${redirectEndpoint} if error 403` + ' occured', done => { const expectedHeaders = { location: redirectEndpoint, }; - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 301, - expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 301, expectedHeaders, done); }); }); @@ -358,22 +349,18 @@ describe('Head request on bucket website endpoint', () => { HostName: 'www.google.com', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); - it(`should redirect to ${redirectEndpoint}about if ` + - 'key prefix is equal to "about"', done => { + it(`should redirect to ${redirectEndpoint}about if ` + 'key prefix is equal to "about"', done => { const expectedHeaders = { location: `${redirectEndpoint}about/`, }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/about/`, 301, expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/about/`, 301, expectedHeaders, done); }); }); - describe('redirect to hostname with prefix and error condition', - () => { + describe('redirect to hostname with prefix and error condition', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); const condition = { @@ -384,18 +371,18 @@ describe('Head request on bucket website endpoint', () => { HostName: 'www.google.com', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); - }); - - it(`should redirect to ${redirectEndpoint} if ` + - 'key prefix is equal to "about" AND error code 403', done => { - const expectedHeaders = { - location: `${redirectEndpoint}about/`, - }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/about/`, 301, expectedHeaders, done); - }); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); + }); + + it( + `should redirect to 
${redirectEndpoint} if ` + 'key prefix is equal to "about" AND error code 403', + done => { + const expectedHeaders = { + location: `${redirectEndpoint}about/`, + }; + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/about/`, 301, expectedHeaders, done); + } + ); }); describe('redirect with multiple redirect rules', () => { @@ -412,21 +399,18 @@ describe('Head request on bucket website endpoint', () => { }; webConfig.addRoutingRule(redirectOne, conditions); webConfig.addRoutingRule(redirectTwo, conditions); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); it('should redirect based on first rule', done => { const expectedHeaders = { location: `${redirectEndpoint}about/`, }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/about/`, 301, expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/about/`, 301, expectedHeaders, done); }); }); - describe('redirect with protocol', - () => { + describe('redirect with protocol', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); const condition = { @@ -437,17 +421,14 @@ describe('Head request on bucket website endpoint', () => { HostName: 'www.google.com', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); - it('should redirect to https://www.google.com/about if ' + - 'https protocol specified', done => { + it('should redirect to https://www.google.com/about if ' + 'https protocol specified', done => { const expectedHeaders = { location: 'https://www.google.com/about/', }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/about/`, 301, expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/about/`, 301, expectedHeaders, done); }); }); @@ -461,22 +442,18 @@ describe('Head request on bucket website endpoint', () => { ReplaceKeyWith: 'redirect.html', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'redirect.html' }, - err => done(err)); + s3.deleteObject({ Bucket: bucket, Key: 'redirect.html' }, err => done(err)); }); - it('should redirect to specified file if 403 error ' + - 'error occured', done => { + it('should redirect to specified file if 403 error ' + 'error occured', done => { const expectedHeaders = { location: `${endpoint}/redirect.html`, }; - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 301, - expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 301, expectedHeaders, done); }); }); @@ -491,22 +468,18 @@ describe('Head request on bucket website endpoint', () => { ReplaceKeyPrefixWith: 'about', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); - it(`should redirect to ${redirectEndpoint}about if ` + - 'ReplaceKeyPrefixWith equals "about"', done => { + it(`should redirect to ${redirectEndpoint}about if ` + 'ReplaceKeyPrefixWith equals "about"', done => { const expectedHeaders = { location: 
`${redirectEndpoint}about`, }; - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 301, - expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 301, expectedHeaders, done); }); }); - describe('redirect requests with prefix /about to redirect/', - () => { + describe('redirect requests with prefix /about to redirect/', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); const condition = { @@ -516,27 +489,22 @@ describe('Head request on bucket website endpoint', () => { ReplaceKeyPrefixWith: 'redirect/', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'redirect/index.html' }, - err => done(err)); + s3.deleteObject({ Bucket: bucket, Key: 'redirect/index.html' }, err => done(err)); }); - it('should redirect to "redirect/" object if key prefix is equal ' + - 'to "about/"', done => { + it('should redirect to "redirect/" object if key prefix is equal ' + 'to "about/"', done => { const expectedHeaders = { location: `${endpoint}/redirect/`, }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/about/`, 301, expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/about/`, 301, expectedHeaders, done); }); }); - describe('redirect requests, with both prefix and error code ' + - 'condition', () => { + describe('redirect requests, with both prefix and error code ' + 'condition', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); const condition = { @@ -547,113 +515,115 @@ describe('Head request on bucket website endpoint', () => { ReplaceKeyPrefixWith: 'redirect/', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, done); + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, done); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'redirect/index.html' }, - err => done(err)); - }); - - it('should redirect to "redirect" object if key prefix is equal ' + - 'to "about/" and there is a 403 error satisfying the ' + - 'condition in the redirect rule', - done => { - const expectedHeaders = { - location: `${endpoint}/redirect/`, - }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/about/`, 301, expectedHeaders, done); - }); + s3.deleteObject({ Bucket: bucket, Key: 'redirect/index.html' }, err => done(err)); + }); + + it( + 'should redirect to "redirect" object if key prefix is equal ' + + 'to "about/" and there is a 403 error satisfying the ' + + 'condition in the redirect rule', + done => { + const expectedHeaders = { + location: `${endpoint}/redirect/`, + }; + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/about/`, 301, expectedHeaders, done); + } + ); }); describe('object redirect to /', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, Key: 'index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - Metadata: { - test: 'value', + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: 
webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + Metadata: { + test: 'value', + }, + WebsiteRedirectLocation: '/', }, - WebsiteRedirectLocation: '/', - }, err => { assert.strictEqual(err, null); done(); - }); + } + ); }); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, - err => done(err)); + s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, err => done(err)); }); it('should redirect to /', done => { const expectedHeaders = { location: '/', }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/index.html`, 301, expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/index.html`, 301, expectedHeaders, done); }); }); describe('with bucket policy', () => { beforeEach(done => { const webConfig = new WebsiteConfigTester('index.html'); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putBucketPolicy({ Bucket: bucket, Policy: JSON.stringify( + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putBucketPolicy( { - Version: '2012-10-17', - Statement: [{ - Sid: 'PublicReadGetObject', - Effect: 'Allow', - Principal: '*', - Action: ['s3:GetObject'], - Resource: [ - `arn:aws:s3:::${bucket}/index.html`, - `arn:aws:s3:::${bucket}/access.html`, + Bucket: bucket, + Policy: JSON.stringify({ + Version: '2012-10-17', + Statement: [ + { + Sid: 'PublicReadGetObject', + Effect: 'Allow', + Principal: '*', + Action: ['s3:GetObject'], + Resource: [ + `arn:aws:s3:::${bucket}/index.html`, + `arn:aws:s3:::${bucket}/access.html`, + ], + }, ], - }], + }), + }, + err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'index.html', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + Metadata: { + test: 'value', + }, + }, + err => { + assert.strictEqual(err, null); + done(); + } + ); } - ) }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, Key: 'index.html', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - Metadata: { - test: 'value', - } }, - err => { - assert.strictEqual(err, null); - done(); - }); - }); + ); }); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, - err => done(err)); + s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, err => done(err)); }); - it('should return indexDocument headers if no key ' + - 'requested', done => { - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, - 200, indexExpectedHeaders, done); + it('should return indexDocument headers if no key ' + 'requested', done => { + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 200, indexExpectedHeaders, done); }); it('should serve error 403 with no access to key', done => { @@ -661,9 +631,13 @@ describe('Head request on bucket website endpoint', () => { 'x-amz-error-code': 'AccessDenied', 'x-amz-error-message': 'Access Denied', }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/non_existing.html`, 403, expectedHeaders, - done); + 
WebsiteConfigTester.makeHeadRequest( + undefined, + `${endpoint}/non_existing.html`, + 403, + expectedHeaders, + done + ); }); it('should serve error 404 with access to key', done => { @@ -671,9 +645,7 @@ describe('Head request on bucket website endpoint', () => { 'x-amz-error-code': 'NoSuchKey', 'x-amz-error-message': 'The specified key does not exist.', }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/access.html`, 404, expectedHeaders, - done); + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/access.html`, 404, expectedHeaders, done); }); }); @@ -687,34 +659,33 @@ describe('Head request on bucket website endpoint', () => { ReplaceKeyWith: 'whatever.html', }; webConfig.addRoutingRule(redirect, condition); - s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - assert.strictEqual(err, - null, `Found unexpected err ${err}`); - s3.putObject({ Bucket: bucket, Key: 'index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - Metadata: { - test: 'value', + s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, err => { + assert.strictEqual(err, null, `Found unexpected err ${err}`); + s3.putObject( + { + Bucket: bucket, + Key: 'index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + Metadata: { + test: 'value', + }, }, - }, err => { assert.strictEqual(err, null); done(); - }); + } + ); }); }); afterEach(done => { - s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, - err => done(err)); + s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, err => done(err)); }); it('should not redirect if index key is not explicit', done => { - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, - 200, indexExpectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, endpoint, 200, indexExpectedHeaders, done); }); }); @@ -723,75 +694,84 @@ describe('Head request on bucket website endpoint', () => { const webConfig = new WebsiteConfigTester('index.html'); const object = { Bucket: bucket, - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), ContentType: 'text/html', }; - async.waterfall([ - next => s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, next), - (data, next) => s3.putBucketPolicy({ Bucket: bucket, - Policy: JSON.stringify({ - Version: '2012-10-17', - Statement: [{ - Sid: 'PublicReadGetObject', - Effect: 'Allow', - Principal: '*', - Action: ['s3:GetObject'], - Resource: [ - `arn:aws:s3:::${bucket}/original_key_file`, - `arn:aws:s3:::${bucket}/original_key_nofile`, - `arn:aws:s3:::${bucket}/file/*`, - `arn:aws:s3:::${bucket}/nofile/*`, - ], - }], - }), - }, next), - (data, next) => s3.putObject(Object.assign({}, object, - { Key: 'original_key_file/index.html' }), next), - (data, next) => s3.putObject(Object.assign({}, object, - { Key: 'file/index.html' }), next), // the redirect 302 - (data, next) => s3.putObject(Object.assign({}, object, - { Key: 'no_access_file/index.html' }), next), - ], err => { - assert.ifError(err); - done(); - }); + async.waterfall( + [ + next => s3.putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }, next), + (data, next) => + s3.putBucketPolicy( + { + Bucket: bucket, + Policy: JSON.stringify({ + Version: '2012-10-17', + Statement: [ + { + Sid: 'PublicReadGetObject', + Effect: 
'Allow', + Principal: '*', + Action: ['s3:GetObject'], + Resource: [ + `arn:aws:s3:::${bucket}/original_key_file`, + `arn:aws:s3:::${bucket}/original_key_nofile`, + `arn:aws:s3:::${bucket}/file/*`, + `arn:aws:s3:::${bucket}/nofile/*`, + ], + }, + ], + }), + }, + next + ), + (data, next) => + s3.putObject(Object.assign({}, object, { Key: 'original_key_file/index.html' }), next), + (data, next) => s3.putObject(Object.assign({}, object, { Key: 'file/index.html' }), next), // the redirect 302 + (data, next) => + s3.putObject(Object.assign({}, object, { Key: 'no_access_file/index.html' }), next), + ], + err => { + assert.ifError(err); + done(); + } + ); }); afterEach(done => { - async.waterfall([ - next => s3.deleteObject({ Bucket: bucket, - Key: 'original_key_file/index.html' }, next), - (data, next) => s3.deleteObject({ Bucket: bucket, - Key: 'file/index.html' }, next), - (data, next) => s3.deleteObject({ Bucket: bucket, - Key: 'no_access_file/index.html' }, next), - ], err => { - assert.ifError(err); - done(); - }); + async.waterfall( + [ + next => s3.deleteObject({ Bucket: bucket, Key: 'original_key_file/index.html' }, next), + (data, next) => s3.deleteObject({ Bucket: bucket, Key: 'file/index.html' }, next), + (data, next) => s3.deleteObject({ Bucket: bucket, Key: 'no_access_file/index.html' }, next), + ], + err => { + assert.ifError(err); + done(); + } + ); }); it('should redirect 302 with trailing / on folder with index', done => { const expectedHeaders = { - 'location': '/file/', + location: '/file/', 'x-amz-error-code': 'Found', 'x-amz-error-message': 'Resource Found', }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/file`, 302, expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest(undefined, `${endpoint}/file`, 302, expectedHeaders, done); }); - it('should return 404 on original key access without index', - done => { + it('should return 404 on original key access without index', done => { const expectedHeaders = { 'x-amz-error-code': 'NoSuchKey', 'x-amz-error-message': 'The specified key does not exist.', }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/original_key_nofile`, 404, - expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest( + undefined, + `${endpoint}/original_key_nofile`, + 404, + expectedHeaders, + done + ); }); describe('should return 403', () => { @@ -814,10 +794,15 @@ describe('Head request on bucket website endpoint', () => { 'x-amz-error-code': 'AccessDenied', 'x-amz-error-message': 'Access Denied', }; - WebsiteConfigTester.makeHeadRequest(undefined, - `${endpoint}/${test.key}`, 403, - expectedHeaders, done); - })); + WebsiteConfigTester.makeHeadRequest( + undefined, + `${endpoint}/${test.key}`, + 403, + expectedHeaders, + done + ); + }) + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/websiteHeadWithACL.js b/tests/functional/aws-node-sdk/test/object/websiteHeadWithACL.js index 1defd8730d..706e214690 100644 --- a/tests/functional/aws-node-sdk/test/object/websiteHeadWithACL.js +++ b/tests/functional/aws-node-sdk/test/object/websiteHeadWithACL.js @@ -12,13 +12,11 @@ const s3 = new S3(config); // `127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com` const transport = conf.https ? 'https' : 'http'; -const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : - 'bucketwebsitetester'; -const hostname = process.env.S3_END_TO_END ? - `${bucket}.s3-website-us-east-1.scality.com` : - `${bucket}.s3-website-us-east-1.amazonaws.com`; -const endpoint = process.env.AWS_ON_AIR ? 
`${transport}://${hostname}` : - `${transport}://${hostname}:8000`; +const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : 'bucketwebsitetester'; +const hostname = process.env.S3_END_TO_END + ? `${bucket}.s3-website-us-east-1.scality.com` + : `${bucket}.s3-website-us-east-1.amazonaws.com`; +const endpoint = process.env.AWS_ON_AIR ? `${transport}://${hostname}` : `${transport}://${hostname}:8000`; const aclEquivalent = { public: ['public-read-write', 'public-read'], @@ -56,84 +54,73 @@ const aclTests = [ result: 'accessDenied', }, { - it: 'should return 403 if public bucket - private index - public ' + - 'error documents', + it: 'should return 403 if public bucket - private index - public ' + 'error documents', bucketACL: 'public', objects: { index: 'private', error: 'private' }, result: 'accessDenied', }, { - it: 'should return 200 if private bucket - public index - ' + - 'public error documents', + it: 'should return 200 if private bucket - public index - ' + 'public error documents', bucketACL: 'private', objects: { index: 'public-read', error: 'private' }, result: 'index', }, { - it: 'should return 200 if public bucket - public index - ' + - 'private error documents', + it: 'should return 200 if public bucket - public index - ' + 'private error documents', bucketACL: 'public', objects: { index: 'public-read', error: 'private' }, result: 'index', }, { - it: 'should return 200 if private bucket - public index - ' + - 'public error documents', + it: 'should return 200 if private bucket - public index - ' + 'public error documents', bucketACL: 'private', objects: { index: 'public-read', error: 'public-read' }, result: 'index', }, { - it: 'should return 200 if public bucket - public index - ' + - 'public error documents', + it: 'should return 200 if public bucket - public index - ' + 'public error documents', bucketACL: 'public', objects: { index: 'public-read', error: 'public-read' }, result: 'index', }, { - it: 'should return 403 AccessDenied if private bucket - ' + - 'without index - public error documents', + it: 'should return 403 AccessDenied if private bucket - ' + 'without index - public error documents', bucketACL: 'private', objects: { error: 'public-read' }, result: 'accessDenied', }, { - it: 'should return 404 if public bucket - without index - ' + - 'public error documents', + it: 'should return 404 if public bucket - without index - ' + 'public error documents', bucketACL: 'public', objects: { error: 'public-read' }, result: 'noSuchKey', }, { - it: 'should return 403 if private bucket - without index - ' + - 'private error documents', + it: 'should return 403 if private bucket - without index - ' + 'private error documents', bucketACL: 'private', objects: { error: 'private' }, result: 'accessDenied', }, { - it: 'should return 404 if public bucket - without index - ' + - 'private error documents', + it: 'should return 404 if public bucket - without index - ' + 'private error documents', bucketACL: 'public', objects: { error: 'private' }, result: 'noSuchKey', }, { - it: 'should return 404 if public bucket - without index - ' + - 'without error documents', + it: 'should return 404 if public bucket - without index - ' + 'without error documents', bucketACL: 'public', - objects: { }, + objects: {}, result: 'noSuchKey', }, { - it: 'should return 403 if private bucket - without index - ' + - 'without error documents', + it: 'should return 403 if private bucket - without index - ' + 'without error documents', bucketACL: 'private', - objects: { }, + objects: {}, result: 
'accessDenied', }, ]; @@ -143,33 +130,43 @@ describe('Head request on bucket website endpoint with ACL', () => { aclEquivalent[test.bucketACL].forEach(bucketACL => { describe(`with existing bucket with ${bucketACL} acl`, () => { beforeEach(done => { - WebsiteConfigTester.createPutBucketWebsite(s3, bucket, - bucketACL, test.objects, done); + WebsiteConfigTester.createPutBucketWebsite(s3, bucket, bucketACL, test.objects, done); }); afterEach(done => { - WebsiteConfigTester.deleteObjectsThenBucket(s3, bucket, - test.objects, done); + WebsiteConfigTester.deleteObjectsThenBucket(s3, bucket, test.objects, done); }); it(`${test.it} with no auth credentials sent`, done => { const result = test.result; - WebsiteConfigTester.makeHeadRequest(undefined, endpoint, + WebsiteConfigTester.makeHeadRequest( + undefined, + endpoint, headersACL[result].status, - headersACL[result].expectedHeaders, done); + headersACL[result].expectedHeaders, + done + ); }); it(`${test.it} even with invalid auth credentials`, done => { const result = test.result; - WebsiteConfigTester.makeHeadRequest('invalid credentials', - endpoint, headersACL[result].status, - headersACL[result].expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest( + 'invalid credentials', + endpoint, + headersACL[result].status, + headersACL[result].expectedHeaders, + done + ); }); it(`${test.it} even with valid auth credentials`, done => { const result = test.result; - WebsiteConfigTester.makeHeadRequest('valid credentials', - endpoint, headersACL[result].status, - headersACL[result].expectedHeaders, done); + WebsiteConfigTester.makeHeadRequest( + 'valid credentials', + endpoint, + headersACL[result].status, + headersACL[result].expectedHeaders, + done + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/websiteRuleMixing.js b/tests/functional/aws-node-sdk/test/object/websiteRuleMixing.js index ca61c25a76..1db11268a4 100644 --- a/tests/functional/aws-node-sdk/test/object/websiteRuleMixing.js +++ b/tests/functional/aws-node-sdk/test/object/websiteRuleMixing.js @@ -15,326 +15,439 @@ const s3 = bucketUtil.s3; // `127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com` const transport = conf.https ? 'https' : 'http'; -const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : - 'bucketwebsitetester'; -const hostname = process.env.S3_END_TO_END ? - `${bucket}.s3-website-us-east-1.scality.com` : - `${bucket}.s3-website-us-east-1.amazonaws.com`; -const endpoint = process.env.AWS_ON_AIR ? `${transport}://${hostname}` : - `${transport}://${hostname}:8000`; -const redirectEndpoint = conf.https ? 
'https://www.google.com/' : - 'http://www.google.com/'; - -describe('User visits bucket website endpoint and requests resource ' + -'that has x-amz-website-redirect-location header ::', () => { - beforeEach(done => s3.createBucket({ Bucket: bucket }, done)); - - afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); - - describe('when x-amz-website-redirect-location: /redirect.html', () => { - beforeEach(() => { - const webConfig = new WebsiteConfigTester('index.html'); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, - Key: 'index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - WebsiteRedirectLocation: '/redirect.html' }).promise()) - .then(() => s3.putObject({ Bucket: bucket, - Key: 'redirect.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/redirect.html')), - ContentType: 'text/html' }).promise()); - }); - - afterEach(() => bucketUtil.empty(bucket)); - - it('should serve redirect file on GET request', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'redirect', - redirectUrl: '/redirect.html', - }, done); - }); - - it('should redirect to redirect file on HEAD request', done => { - WebsiteConfigTester.checkHTML({ - method: 'HEAD', - url: endpoint, - responseType: 'redirect', - redirectUrl: '/redirect.html', - }, done); - }); - }); - - describe('when x-amz-website-redirect-location: https://www.google.com', +const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : 'bucketwebsitetester'; +const hostname = process.env.S3_END_TO_END + ? `${bucket}.s3-website-us-east-1.scality.com` + : `${bucket}.s3-website-us-east-1.amazonaws.com`; +const endpoint = process.env.AWS_ON_AIR ? `${transport}://${hostname}` : `${transport}://${hostname}:8000`; +const redirectEndpoint = conf.https ? 
'https://www.google.com/' : 'http://www.google.com/'; + +describe( + 'User visits bucket website endpoint and requests resource ' + 'that has x-amz-website-redirect-location header ::', () => { - beforeEach(() => { - const webConfig = new WebsiteConfigTester('index.html'); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, - Key: 'index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - WebsiteRedirectLocation: 'https://www.google.com' }).promise()); - }); + beforeEach(done => s3.createBucket({ Bucket: bucket }, done)); + + afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); + + describe('when x-amz-website-redirect-location: /redirect.html', () => { + beforeEach(() => { + const webConfig = new WebsiteConfigTester('index.html'); + return s3 + .putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }) + .promise() + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: 'index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + WebsiteRedirectLocation: '/redirect.html', + }) + .promise() + ) + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: 'redirect.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/redirect.html')), + ContentType: 'text/html', + }) + .promise() + ); + }); - afterEach(() => bucketUtil.empty(bucket)); + afterEach(() => bucketUtil.empty(bucket)); + + it('should serve redirect file on GET request', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'redirect', + redirectUrl: '/redirect.html', + }, + done + ); + }); - it('should redirect to https://www.google.com', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'redirect', - redirectUrl: 'https://www.google.com', - }, done); + it('should redirect to redirect file on HEAD request', done => { + WebsiteConfigTester.checkHTML( + { + method: 'HEAD', + url: endpoint, + responseType: 'redirect', + redirectUrl: '/redirect.html', + }, + done + ); + }); }); - it('should redirect to https://www.google.com on HEAD request', - done => { - WebsiteConfigTester.checkHTML({ - method: 'HEAD', - url: endpoint, - responseType: 'redirect', - redirectUrl: 'https://www.google.com', - }, done); + describe('when x-amz-website-redirect-location: https://www.google.com', () => { + beforeEach(() => { + const webConfig = new WebsiteConfigTester('index.html'); + return s3 + .putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }) + .promise() + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: 'index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + WebsiteRedirectLocation: 'https://www.google.com', + }) + .promise() + ); }); - }); - - describe('when key with header is private', () => { - beforeEach(() => { - const webConfig = new WebsiteConfigTester('index.html'); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, - Key: 'index.html', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - WebsiteRedirectLocation: 'https://www.google.com' }).promise()); - }); - afterEach(() => bucketUtil.empty(bucket)); + 
afterEach(() => bucketUtil.empty(bucket)); + + it('should redirect to https://www.google.com', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'redirect', + redirectUrl: 'https://www.google.com', + }, + done + ); + }); - it('should return 403 instead of x-amz-website-redirect-location ' + - 'header location', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: '403-access-denied', - }, done); + it('should redirect to https://www.google.com on HEAD request', done => { + WebsiteConfigTester.checkHTML( + { + method: 'HEAD', + url: endpoint, + responseType: 'redirect', + redirectUrl: 'https://www.google.com', + }, + done + ); + }); }); - it('should return 403 instead of x-amz-website-redirect-location ' + - 'header location on HEAD request', done => { - WebsiteConfigTester.checkHTML({ - method: 'HEAD', - url: endpoint, - responseType: '403-access-denied', - }, done); - }); - }); - - describe('when key with header is private' + - 'and website config has error condition routing rule', () => { - beforeEach(() => { - const webConfig = new WebsiteConfigTester('index.html'); - const condition = { - HttpErrorCodeReturnedEquals: '403', - }; - const redirect = { - HostName: 'www.google.com', - }; - webConfig.addRoutingRule(redirect, condition); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, - Key: 'index.html', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - WebsiteRedirectLocation: '/redirect.html' }).promise()) - .then(() => s3.putObject({ Bucket: bucket, - Key: 'redirect.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/redirect.html')), - ContentType: 'text/html' }).promise()); - }); + describe('when key with header is private', () => { + beforeEach(() => { + const webConfig = new WebsiteConfigTester('index.html'); + return s3 + .putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }) + .promise() + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: 'index.html', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + WebsiteRedirectLocation: 'https://www.google.com', + }) + .promise() + ); + }); - afterEach(() => bucketUtil.empty(bucket)); - - it(`should redirect to ${redirectEndpoint} since error 403 ` + - 'occurred instead of x-amz-website-redirect-location header ' + - 'location on GET request', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'redirect', - redirectUrl: redirectEndpoint, - }, done); - }); + afterEach(() => bucketUtil.empty(bucket)); + + it('should return 403 instead of x-amz-website-redirect-location ' + 'header location', done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: '403-access-denied', + }, + done + ); + }); - it(`should redirect to ${redirectEndpoint} since error 403 ` + - 'occurred instead of x-amz-website-redirect-location header ' + - 'location on HEAD request', - done => { - WebsiteConfigTester.checkHTML({ - method: 'HEAD', - url: endpoint, - responseType: 'redirect', - redirectUrl: redirectEndpoint, - }, done); - }); - }); - - describe(`with redirect all requests to ${redirectEndpoint}`, () => { - beforeEach(() => { - const redirectAllTo = { - HostName: 'www.google.com', - }; - const webConfig = new WebsiteConfigTester(null, 
null, - redirectAllTo); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, - Key: 'index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - WebsiteRedirectLocation: '/redirect.html' }).promise()); + it( + 'should return 403 instead of x-amz-website-redirect-location ' + 'header location on HEAD request', + done => { + WebsiteConfigTester.checkHTML( + { + method: 'HEAD', + url: endpoint, + responseType: '403-access-denied', + }, + done + ); + } + ); }); - afterEach(() => bucketUtil.empty(bucket)); - - it(`should redirect to ${redirectEndpoint} instead of ` + - 'x-amz-website-redirect-location header location on GET request', - done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: endpoint, - responseType: 'redirect', - redirectUrl: redirectEndpoint, - }, done); - }); + describe('when key with header is private' + 'and website config has error condition routing rule', () => { + beforeEach(() => { + const webConfig = new WebsiteConfigTester('index.html'); + const condition = { + HttpErrorCodeReturnedEquals: '403', + }; + const redirect = { + HostName: 'www.google.com', + }; + webConfig.addRoutingRule(redirect, condition); + return s3 + .putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }) + .promise() + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: 'index.html', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + WebsiteRedirectLocation: '/redirect.html', + }) + .promise() + ) + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: 'redirect.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/redirect.html')), + ContentType: 'text/html', + }) + .promise() + ); + }); - it(`should redirect to ${redirectEndpoint} instead of ` + - 'x-amz-website-redirect-location header location on HEAD request', - done => { - WebsiteConfigTester.checkHTML({ - method: 'HEAD', - url: endpoint, - responseType: 'redirect', - redirectUrl: redirectEndpoint, - }, done); + afterEach(() => bucketUtil.empty(bucket)); + + it( + `should redirect to ${redirectEndpoint} since error 403 ` + + 'occurred instead of x-amz-website-redirect-location header ' + + 'location on GET request', + done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'redirect', + redirectUrl: redirectEndpoint, + }, + done + ); + } + ); + + it( + `should redirect to ${redirectEndpoint} since error 403 ` + + 'occurred instead of x-amz-website-redirect-location header ' + + 'location on HEAD request', + done => { + WebsiteConfigTester.checkHTML( + { + method: 'HEAD', + url: endpoint, + responseType: 'redirect', + redirectUrl: redirectEndpoint, + }, + done + ); + } + ); }); - }); - describe('with routing rule redirect to hostname with prefix condition', - () => { - beforeEach(() => { - const webConfig = new WebsiteConfigTester('index.html'); - const condition = { - KeyPrefixEquals: 'about/', - }; - const redirect = { - HostName: 'www.google.com', - }; - webConfig.addRoutingRule(redirect, condition); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, - Key: 'about/index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - WebsiteRedirectLocation: 
'/redirect.html' }).promise()); - }); + describe(`with redirect all requests to ${redirectEndpoint}`, () => { + beforeEach(() => { + const redirectAllTo = { + HostName: 'www.google.com', + }; + const webConfig = new WebsiteConfigTester(null, null, redirectAllTo); + return s3 + .putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }) + .promise() + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: 'index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + WebsiteRedirectLocation: '/redirect.html', + }) + .promise() + ); + }); - afterEach(() => bucketUtil.empty(bucket)); - - it(`should redirect GET request to ${redirectEndpoint}about/ ` + - 'instead of about/ key x-amz-website-redirect-location ' + - 'header location', done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/about/`, - responseType: 'redirect', - redirectUrl: `${redirectEndpoint}about/`, - }, done); + afterEach(() => bucketUtil.empty(bucket)); + + it( + `should redirect to ${redirectEndpoint} instead of ` + + 'x-amz-website-redirect-location header location on GET request', + done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: endpoint, + responseType: 'redirect', + redirectUrl: redirectEndpoint, + }, + done + ); + } + ); + + it( + `should redirect to ${redirectEndpoint} instead of ` + + 'x-amz-website-redirect-location header location on HEAD request', + done => { + WebsiteConfigTester.checkHTML( + { + method: 'HEAD', + url: endpoint, + responseType: 'redirect', + redirectUrl: redirectEndpoint, + }, + done + ); + } + ); }); - it(`should redirect HEAD request to ${redirectEndpoint}about ` + - 'instead of about/ key x-amz-website-redirect-location ' + - 'header location', done => { - WebsiteConfigTester.checkHTML({ - method: 'HEAD', - url: `${endpoint}/about/`, - responseType: 'redirect', - redirectUrl: `${redirectEndpoint}about/`, - }, done); - }); - }); - - describe('with routing rule replaceKeyWith', () => { - beforeEach(() => { - const webConfig = new WebsiteConfigTester('index.html'); - const condition = { - KeyPrefixEquals: 'index.html', - }; - const redirect = { - ReplaceKeyWith: 'redirect.html', - }; - webConfig.addRoutingRule(redirect, condition); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, - Key: 'index.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/index.html')), - ContentType: 'text/html', - WebsiteRedirectLocation: 'https://www.google.com' }).promise()) - .then(() => s3.putObject({ Bucket: bucket, - Key: 'redirect.html', - ACL: 'public-read', - Body: fs.readFileSync(path.join(__dirname, - '/websiteFiles/redirect.html')), - ContentType: 'text/html' }).promise()); - }); + describe('with routing rule redirect to hostname with prefix condition', () => { + beforeEach(() => { + const webConfig = new WebsiteConfigTester('index.html'); + const condition = { + KeyPrefixEquals: 'about/', + }; + const redirect = { + HostName: 'www.google.com', + }; + webConfig.addRoutingRule(redirect, condition); + return s3 + .putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }) + .promise() + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: 'about/index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + WebsiteRedirectLocation: '/redirect.html', + }) + 
.promise() + ); + }); - afterEach(() => bucketUtil.empty(bucket)); - - it('should replace key instead of redirecting to key ' + - 'x-amz-website-redirect-location header location on GET request', - done => { - WebsiteConfigTester.checkHTML({ - method: 'GET', - url: `${endpoint}/index.html`, - responseType: 'redirect-user', - redirectUrl: `${endpoint}/redirect.html`, - }, done); + afterEach(() => bucketUtil.empty(bucket)); + + it( + `should redirect GET request to ${redirectEndpoint}about/ ` + + 'instead of about/ key x-amz-website-redirect-location ' + + 'header location', + done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/about/`, + responseType: 'redirect', + redirectUrl: `${redirectEndpoint}about/`, + }, + done + ); + } + ); + + it( + `should redirect HEAD request to ${redirectEndpoint}about ` + + 'instead of about/ key x-amz-website-redirect-location ' + + 'header location', + done => { + WebsiteConfigTester.checkHTML( + { + method: 'HEAD', + url: `${endpoint}/about/`, + responseType: 'redirect', + redirectUrl: `${redirectEndpoint}about/`, + }, + done + ); + } + ); }); - it('should replace key instead of redirecting to key ' + - 'x-amz-website-redirect-location header location on HEAD request', - done => { - WebsiteConfigTester.checkHTML({ - method: 'HEAD', - url: `${endpoint}/index.html`, - responseType: 'redirect-user', - redirectUrl: `${endpoint}/redirect.html`, - }, done); + describe('with routing rule replaceKeyWith', () => { + beforeEach(() => { + const webConfig = new WebsiteConfigTester('index.html'); + const condition = { + KeyPrefixEquals: 'index.html', + }; + const redirect = { + ReplaceKeyWith: 'redirect.html', + }; + webConfig.addRoutingRule(redirect, condition); + return s3 + .putBucketWebsite({ Bucket: bucket, WebsiteConfiguration: webConfig }) + .promise() + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: 'index.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), + ContentType: 'text/html', + WebsiteRedirectLocation: 'https://www.google.com', + }) + .promise() + ) + .then(() => + s3 + .putObject({ + Bucket: bucket, + Key: 'redirect.html', + ACL: 'public-read', + Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/redirect.html')), + ContentType: 'text/html', + }) + .promise() + ); }); - }); -}); + + afterEach(() => bucketUtil.empty(bucket)); + + it( + 'should replace key instead of redirecting to key ' + + 'x-amz-website-redirect-location header location on GET request', + done => { + WebsiteConfigTester.checkHTML( + { + method: 'GET', + url: `${endpoint}/index.html`, + responseType: 'redirect-user', + redirectUrl: `${endpoint}/redirect.html`, + }, + done + ); + } + ); + + it( + 'should replace key instead of redirecting to key ' + + 'x-amz-website-redirect-location header location on HEAD request', + done => { + WebsiteConfigTester.checkHTML( + { + method: 'HEAD', + url: `${endpoint}/index.html`, + responseType: 'redirect-user', + redirectUrl: `${endpoint}/redirect.html`, + }, + done + ); + } + ); + }); + } +); diff --git a/tests/functional/aws-node-sdk/test/service/get.js b/tests/functional/aws-node-sdk/test/service/get.js index f168f9148c..4a1652b3fd 100644 --- a/tests/functional/aws-node-sdk/test/service/get.js +++ b/tests/functional/aws-node-sdk/test/service/get.js @@ -8,9 +8,7 @@ const getConfig = require('../support/config'); const withV4 = require('../support/withV4'); const svcSchema = require('../../schema/service'); -const describeFn = process.env.AWS_ON_AIR 
- ? describe.skip - : describe; +const describeFn = process.env.AWS_ON_AIR ? describe.skip : describe; describeFn('GET Service - AWS.S3.listBuckets', function getService() { this.timeout(600000); @@ -54,10 +52,11 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() { }; }); - it('should return 403 and InvalidAccessKeyId ' + - 'if accessKeyId is invalid', done => { - const invalidAccess = getConfig('default', - Object.assign({}, + it('should return 403 and InvalidAccessKeyId ' + 'if accessKeyId is invalid', done => { + const invalidAccess = getConfig( + 'default', + Object.assign( + {}, { credentials: null, accessKeyId: 'wrong', @@ -72,8 +71,7 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() { testFn(invalidAccess, expectedCode, expectedStatus, done); }); - it('should return 403 and SignatureDoesNotMatch ' + - 'if credential is polluted', done => { + it('should return 403 and SignatureDoesNotMatch ' + 'if credential is polluted', done => { const pollutedConfig = getConfig('default', sigCfg); pollutedConfig.credentials.secretAccessKey = 'wrong'; @@ -88,71 +86,80 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() { let bucketUtil; let s3; const bucketsNumber = 1001; - process.stdout - .write(`testing listing with ${bucketsNumber} buckets\n`); - const createdBuckets = Array.from(Array(bucketsNumber).keys()) - .map(i => `getservicebuckets-${i}`); + process.stdout.write(`testing listing with ${bucketsNumber} buckets\n`); + const createdBuckets = Array.from(Array(bucketsNumber).keys()).map(i => `getservicebuckets-${i}`); before(done => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; s3.config.update({ maxRetries: 0 }); s3.config.update({ httpOptions: { timeout: 0 } }); - async.eachLimit(createdBuckets, 10, (bucketName, moveOn) => { - s3.createBucket({ Bucket: bucketName }, err => { - if (bucketName.endsWith('000')) { - // log to keep ci alive - process.stdout - .write(`creating bucket: ${bucketName}\n`); + async.eachLimit( + createdBuckets, + 10, + (bucketName, moveOn) => { + s3.createBucket({ Bucket: bucketName }, err => { + if (bucketName.endsWith('000')) { + // log to keep ci alive + process.stdout.write(`creating bucket: ${bucketName}\n`); + } + moveOn(err); + }); + }, + err => { + if (err) { + process.stdout.write(`err creating buckets: ${err}`); } - moveOn(err); - }); - }, - err => { - if (err) { - process.stdout.write(`err creating buckets: ${err}`); + done(err); } - done(err); - }); + ); }); after(done => { - async.eachLimit(createdBuckets, 10, (bucketName, moveOn) => { - s3.deleteBucket({ Bucket: bucketName }, err => { - if (bucketName.endsWith('000')) { - // log to keep ci alive - process.stdout - .write(`deleting bucket: ${bucketName}\n`); + async.eachLimit( + createdBuckets, + 10, + (bucketName, moveOn) => { + s3.deleteBucket({ Bucket: bucketName }, err => { + if (bucketName.endsWith('000')) { + // log to keep ci alive + process.stdout.write(`deleting bucket: ${bucketName}\n`); + } + moveOn(err); + }); + }, + err => { + if (err) { + process.stdout.write(`err deleting buckets: ${err}`); } - moveOn(err); - }); - }, - err => { - if (err) { - process.stdout.write(`err deleting buckets: ${err}`); + done(err); } - done(err); - }); + ); }); it('should list buckets concurrently', done => { - async.times(20, (n, next) => { - s3.listBuckets((err, result) => { - assert.equal(result.Buckets.length, - createdBuckets.length, - 'Created buckets are missing in response'); - next(err); - }); - }, - err => { - 
assert.ifError(err, `error listing buckets: ${err}`); - done(); - }); + async.times( + 20, + (n, next) => { + s3.listBuckets((err, result) => { + assert.equal( + result.Buckets.length, + createdBuckets.length, + 'Created buckets are missing in response' + ); + next(err); + }); + }, + err => { + assert.ifError(err, `error listing buckets: ${err}`); + done(); + } + ); }); it('should list buckets', done => { - s3 - .listBuckets().promise() + s3.listBuckets() + .promise() .then(data => { const isValidResponse = tv4.validate(data, svcSchema); if (!isValidResponse) { @@ -163,12 +170,9 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() { return data; }) .then(data => { - const buckets = data.Buckets.filter(bucket => - createdBuckets.indexOf(bucket.Name) > -1 - ); + const buckets = data.Buckets.filter(bucket => createdBuckets.indexOf(bucket.Name) > -1); - assert.equal(buckets.length, createdBuckets.length, - 'Created buckets are missing in response'); + assert.equal(buckets.length, createdBuckets.length, 'Created buckets are missing in response'); return buckets; }) @@ -176,14 +180,12 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() { // Sort createdBuckets in alphabetical order createdBuckets.sort(); - const isCorrectOrder = buckets - .reduce( - (prev, bucket, idx) => - prev && bucket.Name === createdBuckets[idx] - , true); + const isCorrectOrder = buckets.reduce( + (prev, bucket, idx) => prev && bucket.Name === createdBuckets[idx], + true + ); - assert.ok(isCorrectOrder, - 'Not returning created buckets by alphabetically'); + assert.ok(isCorrectOrder, 'Not returning created buckets by alphabetically'); done(); }) .catch(done); @@ -201,14 +203,12 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() { it('should not return other accounts bucket list', done => { anotherS3 - .listBuckets().promise() + .listBuckets() + .promise() .then(data => { - const hasSameBuckets = data.Buckets - .filter(filterFn) - .length; + const hasSameBuckets = data.Buckets.filter(filterFn).length; - assert.strictEqual(hasSameBuckets, 0, - 'It has other buddies bucket'); + assert.strictEqual(hasSameBuckets, 0, 'It has other buddies bucket'); done(); }) .catch(done); diff --git a/tests/functional/aws-node-sdk/test/support/awsConfig.js b/tests/functional/aws-node-sdk/test/support/awsConfig.js index 86149e7cc5..d62b480a24 100644 --- a/tests/functional/aws-node-sdk/test/support/awsConfig.js +++ b/tests/functional/aws-node-sdk/test/support/awsConfig.js @@ -18,14 +18,19 @@ function getAwsCredentials(profile, credFile) { } function getRealAwsConfig(location) { - const { awsEndpoint, gcpEndpoint, credentialsProfile, - credentials: locCredentials, bucketName, mpuBucketName, pathStyle } = - config.locationConstraints[location].details; + const { + awsEndpoint, + gcpEndpoint, + credentialsProfile, + credentials: locCredentials, + bucketName, + mpuBucketName, + pathStyle, + } = config.locationConstraints[location].details; const useHTTPS = config.locationConstraints[location].details.https; const proto = useHTTPS ? 'https' : 'http'; const params = { - endpoint: gcpEndpoint ? - `${proto}://${gcpEndpoint}` : `${proto}://${awsEndpoint}`, + endpoint: gcpEndpoint ? 
`${proto}://${gcpEndpoint}` : `${proto}://${awsEndpoint}`, signatureVersion: 'v4', }; if (config.locationConstraints[location].type === 'gcp') { @@ -42,8 +47,7 @@ function getRealAwsConfig(location) { }; } if (credentialsProfile) { - const credentials = getAwsCredentials(credentialsProfile, - '/.aws/credentials'); + const credentials = getAwsCredentials(credentialsProfile, '/.aws/credentials'); params.credentials = credentials; return params; } diff --git a/tests/functional/aws-node-sdk/test/support/config.js b/tests/functional/aws-node-sdk/test/support/config.js index ae4df173a2..964c267e2b 100644 --- a/tests/functional/aws-node-sdk/test/support/config.js +++ b/tests/functional/aws-node-sdk/test/support/config.js @@ -37,9 +37,7 @@ function _getMemCredentials(profile) { function _getMemConfig(profile, config) { const credentials = _getMemCredentials(profile); - const memConfig = Object.assign({} - , DEFAULT_GLOBAL_OPTIONS, DEFAULT_MEM_OPTIONS - , { credentials }, config); + const memConfig = Object.assign({}, DEFAULT_GLOBAL_OPTIONS, DEFAULT_MEM_OPTIONS, { credentials }, config); if (process.env.IP) { memConfig.endpoint = `${transport}://${process.env.IP}:8000`; @@ -51,16 +49,13 @@ function _getMemConfig(profile, config) { function _getAwsConfig(profile, config) { const credentials = getAwsCredentials(profile, '/.aws/scality'); - const awsConfig = Object.assign({} - , DEFAULT_GLOBAL_OPTIONS, DEFAULT_AWS_OPTIONS - , { credentials }, config); + const awsConfig = Object.assign({}, DEFAULT_GLOBAL_OPTIONS, DEFAULT_AWS_OPTIONS, { credentials }, config); return awsConfig; } function getConfig(profile = 'default', config = {}) { - const fn = process.env.AWS_ON_AIR && process.env.AWS_ON_AIR === 'true' - ? _getAwsConfig : _getMemConfig; + const fn = process.env.AWS_ON_AIR && process.env.AWS_ON_AIR === 'true' ? 
_getAwsConfig : _getMemConfig; return fn.apply(this, [profile, config]); } diff --git a/tests/functional/aws-node-sdk/test/support/objectConfigs.js b/tests/functional/aws-node-sdk/test/support/objectConfigs.js index ed2e9cb0bf..cf4324dbdf 100644 --- a/tests/functional/aws-node-sdk/test/support/objectConfigs.js +++ b/tests/functional/aws-node-sdk/test/support/objectConfigs.js @@ -10,9 +10,8 @@ const canonicalObjectConfig = { invalidPartNumbers: [-1, 0, maximumAllowedPartCount + 1], signature: 'for canonical object', meta: { - computeTotalSize: (partNumbers, bodySize) => partNumbers.reduce((total, current) => - total + bodySize + current + 1 - , 0), + computeTotalSize: (partNumbers, bodySize) => + partNumbers.reduce((total, current) => total + bodySize + current + 1, 0), objectIsEmpty: false, }, }; @@ -32,9 +31,6 @@ const emptyObjectConfig = { }, }; -const objectConfigs = [ - canonicalObjectConfig, - emptyObjectConfig, -]; +const objectConfigs = [canonicalObjectConfig, emptyObjectConfig]; module.exports = objectConfigs; diff --git a/tests/functional/aws-node-sdk/test/support/withV4.js b/tests/functional/aws-node-sdk/test/support/withV4.js index e875ed68f2..6b9b543cd1 100644 --- a/tests/functional/aws-node-sdk/test/support/withV4.js +++ b/tests/functional/aws-node-sdk/test/support/withV4.js @@ -12,11 +12,13 @@ function withV4(testFn) { config = {}; } - describe(`With ${version} signature`, (cfg => - function tcWrap() { - testFn.call(this, cfg); - } - )(config)); + describe( + `With ${version} signature`, + (cfg => + function tcWrap() { + testFn.call(this, cfg); + })(config) + ); }); } diff --git a/tests/functional/aws-node-sdk/test/utils/init.js b/tests/functional/aws-node-sdk/test/utils/init.js index e700c9d79e..0a3cafd5ca 100644 --- a/tests/functional/aws-node-sdk/test/utils/init.js +++ b/tests/functional/aws-node-sdk/test/utils/init.js @@ -4,8 +4,7 @@ const metadata = require('../../../../../lib/metadata/wrapper'); const { config } = require('../../../../../lib/Config'); const { DummyRequestLogger } = require('../../../../unit/helpers'); const log = new DummyRequestLogger(); -const nonVersionedObjId = - versionIdUtils.getInfVid(config.replicationGroupId); +const nonVersionedObjId = versionIdUtils.getInfVid(config.replicationGroupId); function decodeVersionId(versionId) { let decodedVersionId; @@ -22,21 +21,20 @@ function decodeVersionId(versionId) { let metadataInit = false; function initMetadata(done) { - if (metadataInit === true) { - return done(); - } - return metadata.setup(err => { - if (err) { - return done(err); - } - metadataInit = true; - return done(); - }); + if (metadataInit === true) { + return done(); + } + return metadata.setup(err => { + if (err) { + return done(err); + } + metadataInit = true; + return done(); + }); } function getMetadata(bucketName, objectName, versionId, cb) { - return metadata.getObjectMD(bucketName, objectName, { versionId: decodeVersionId(versionId) }, - log, cb); + return metadata.getObjectMD(bucketName, objectName, { versionId: decodeVersionId(versionId) }, log, cb); } /** @@ -51,13 +49,19 @@ function getMetadata(bucketName, objectName, versionId, cb) { function fakeMetadataTransition(bucketName, objectName, versionId, cb) { return getMetadata(bucketName, objectName, versionId, (err, objMD) => { if (err) { - return cb(err); - } + return cb(err); + } /* eslint-disable no-param-reassign */ objMD['x-amz-scal-transition-in-progress'] = true; /* eslint-enable no-param-reassign */ - return metadata.putObjectMD(bucketName, objectName, objMD, { versionId: 
decodeVersionId(versionId) }, - log, err => cb(err)); + return metadata.putObjectMD( + bucketName, + objectName, + objMD, + { versionId: decodeVersionId(versionId) }, + log, + err => cb(err) + ); }); } @@ -74,21 +78,27 @@ function fakeMetadataTransition(bucketName, objectName, versionId, cb) { function fakeMetadataArchive(bucketName, objectName, versionId, archive, cb) { return getMetadata(bucketName, objectName, versionId, (err, objMD) => { if (err) { - return cb(err); - } + return cb(err); + } /* eslint-disable no-param-reassign */ objMD['x-amz-storage-class'] = 'location-dmf-v1'; objMD.dataStoreName = 'location-dmf-v1'; objMD.archive = archive; /* eslint-enable no-param-reassign */ - return metadata.putObjectMD(bucketName, objectName, objMD, { versionId: decodeVersionId(versionId) }, - log, err => cb(err)); + return metadata.putObjectMD( + bucketName, + objectName, + objMD, + { versionId: decodeVersionId(versionId) }, + log, + err => cb(err) + ); }); } module.exports = { - initMetadata, - getMetadata, - fakeMetadataArchive, + initMetadata, + getMetadata, + fakeMetadataArchive, fakeMetadataTransition, }; diff --git a/tests/functional/aws-node-sdk/test/versioning/bucketDelete.js b/tests/functional/aws-node-sdk/test/versioning/bucketDelete.js index 791e4d5b77..f43d1c3472 100644 --- a/tests/functional/aws-node-sdk/test/versioning/bucketDelete.js +++ b/tests/functional/aws-node-sdk/test/versioning/bucketDelete.js @@ -9,7 +9,6 @@ const { removeAllVersions } = require('../../lib/utility/versioning-util.js'); const bucketName = `versioning-bucket-${Date.now()}`; const key = 'anObject'; - function checkError(err, code) { assert.notEqual(err, null, 'Expected failure but got success'); assert.strictEqual(err.code, code); @@ -26,16 +25,22 @@ describe('aws-node-sdk test delete bucket', () => { // setup test beforeEach(done => { - async.waterfall([ - next => s3.createBucket({ Bucket: bucketName }, - err => next(err)), - next => s3.putBucketVersioning({ - Bucket: bucketName, - VersioningConfiguration: { - Status: 'Enabled', - }, - }, err => next(err)), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucketName }, err => next(err)), + next => + s3.putBucketVersioning( + { + Bucket: bucketName, + VersioningConfiguration: { + Status: 'Enabled', + }, + }, + err => next(err) + ), + ], + done + ); }); // empty and delete bucket after testing if bucket exists @@ -50,16 +55,14 @@ describe('aws-node-sdk test delete bucket', () => { }); }); - it('should be able to delete empty bucket with version enabled', - done => { + it('should be able to delete empty bucket with version enabled', done => { s3.deleteBucket({ Bucket: bucketName }, err => { checkNoError(err); return done(); }); }); - it('should return error 409 BucketNotEmpty if trying to delete bucket' + - ' containing delete marker', done => { + it('should return error 409 BucketNotEmpty if trying to delete bucket' + ' containing delete marker', done => { s3.deleteObject({ Bucket: bucketName, Key: key }, err => { if (err) { return done(err); @@ -71,22 +74,26 @@ describe('aws-node-sdk test delete bucket', () => { }); }); - it('should return error 409 BucketNotEmpty if trying to delete bucket' + - ' containing version and delete marker', done => { - async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: key }, - err => next(err)), - next => s3.deleteObject({ Bucket: bucketName, Key: key }, - err => next(err)), - next => s3.deleteBucket({ Bucket: bucketName }, err => { - checkError(err, 'BucketNotEmpty'); - return next(); - 
}), - ], done); - }); + it( + 'should return error 409 BucketNotEmpty if trying to delete bucket' + + ' containing version and delete marker', + done => { + async.waterfall( + [ + next => s3.putObject({ Bucket: bucketName, Key: key }, err => next(err)), + next => s3.deleteObject({ Bucket: bucketName, Key: key }, err => next(err)), + next => + s3.deleteBucket({ Bucket: bucketName }, err => { + checkError(err, 'BucketNotEmpty'); + return next(); + }), + ], + done + ); + } + ); - it('should return error 404 NoSuchBucket if the bucket name is invalid', - done => { + it('should return error 404 NoSuchBucket if the bucket name is invalid', done => { s3.deleteBucket({ Bucket: 'bucketA' }, err => { checkError(err, 'NoSuchBucket'); return done(); diff --git a/tests/functional/aws-node-sdk/test/versioning/legacyNullVersionCompat.js b/tests/functional/aws-node-sdk/test/versioning/legacyNullVersionCompat.js index ce77ac50fe..4fd76f4d25 100644 --- a/tests/functional/aws-node-sdk/test/versioning/legacyNullVersionCompat.js +++ b/tests/functional/aws-node-sdk/test/versioning/legacyNullVersionCompat.js @@ -3,10 +3,7 @@ const async = require('async'); const BucketUtility = require('../../lib/utility/bucket-util'); -const { - removeAllVersions, - versioningEnabled, -} = require('../../lib/utility/versioning-util.js'); +const { removeAllVersions, versioningEnabled } = require('../../lib/utility/versioning-util.js'); // This series of tests can only be enabled on an environment that has // two Cloudserver instances, with one of them in null version @@ -16,8 +13,9 @@ const { // combination of Cloudserver requests to bucketd and the behavior of // bucketd based on those requests. -const describeSkipIfNotExplicitlyEnabled = - process.env.ENABLE_LEGACY_NULL_VERSION_COMPAT_TESTS ? describe : describe.skip; +const describeSkipIfNotExplicitlyEnabled = process.env.ENABLE_LEGACY_NULL_VERSION_COMPAT_TESTS + ? describe + : describe.skip; describeSkipIfNotExplicitlyEnabled('legacy null version compatibility tests', () => { const bucketUtilCompat = new BucketUtility('default', { @@ -33,25 +31,46 @@ describeSkipIfNotExplicitlyEnabled('legacy null version compatibility tests', () // master and no "isNull2" metadata attribute), by using the // Cloudserver endpoint that is configured with null version // compatibility mode enabled. 
- beforeEach(done => async.series([ - next => s3Compat.createBucket({ - Bucket: bucket, - }, next), - next => s3Compat.putObject({ - Bucket: bucket, - Key: 'obj', - Body: 'nullbody', - }, next), - next => s3Compat.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, next), - next => s3Compat.putObject({ - Bucket: bucket, - Key: 'obj', - Body: 'versionedbody', - }, next), - ], done)); + beforeEach(done => + async.series( + [ + next => + s3Compat.createBucket( + { + Bucket: bucket, + }, + next + ), + next => + s3Compat.putObject( + { + Bucket: bucket, + Key: 'obj', + Body: 'nullbody', + }, + next + ), + next => + s3Compat.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + next + ), + next => + s3Compat.putObject( + { + Bucket: bucket, + Key: 'obj', + Body: 'versionedbody', + }, + next + ), + ], + done + ) + ); afterEach(done => { removeAllVersions({ Bucket: bucket }, err => { @@ -63,37 +82,56 @@ describeSkipIfNotExplicitlyEnabled('legacy null version compatibility tests', () }); it('updating ACL of legacy null version with non-compat cloudserver', done => { - async.series([ - next => s3.putObjectAcl({ - Bucket: bucket, - Key: 'obj', - VersionId: 'null', - ACL: 'public-read', - }, next), - next => s3.getObjectAcl({ - Bucket: bucket, - Key: 'obj', - VersionId: 'null', - }, (err, acl) => { - assert.ifError(err); - // check that we fetched the updated null version - assert.strictEqual(acl.Grants.length, 2); - next(); - }), - next => s3.deleteObject({ - Bucket: bucket, - Key: 'obj', - VersionId: 'null', - }, next), - next => s3.listObjectVersions({ - Bucket: bucket, - }, (err, listing) => { - assert.ifError(err); - // check that the null version has been correctly deleted - assert(listing.Versions.every(version => version.VersionId !== 'null')); - next(); - }), - ], done); + async.series( + [ + next => + s3.putObjectAcl( + { + Bucket: bucket, + Key: 'obj', + VersionId: 'null', + ACL: 'public-read', + }, + next + ), + next => + s3.getObjectAcl( + { + Bucket: bucket, + Key: 'obj', + VersionId: 'null', + }, + (err, acl) => { + assert.ifError(err); + // check that we fetched the updated null version + assert.strictEqual(acl.Grants.length, 2); + next(); + } + ), + next => + s3.deleteObject( + { + Bucket: bucket, + Key: 'obj', + VersionId: 'null', + }, + next + ), + next => + s3.listObjectVersions( + { + Bucket: bucket, + }, + (err, listing) => { + assert.ifError(err); + // check that the null version has been correctly deleted + assert(listing.Versions.every(version => version.VersionId !== 'null')); + next(); + } + ), + ], + done + ); }); it('updating tags of legacy null version with non-compat cloudserver', done => { @@ -103,54 +141,81 @@ describeSkipIfNotExplicitlyEnabled('legacy null version compatibility tests', () Value: 'newtagvalue', }, ]; - async.series([ - next => s3.putObjectTagging({ - Bucket: bucket, - Key: 'obj', - VersionId: 'null', - Tagging: { - TagSet: tagSet, - }, - }, next), - next => s3.getObjectTagging({ - Bucket: bucket, - Key: 'obj', - VersionId: 'null', - }, (err, tagging) => { - assert.ifError(err); - assert.deepStrictEqual(tagging.TagSet, tagSet); - next(); - }), - next => s3.deleteObjectTagging({ - Bucket: bucket, - Key: 'obj', - VersionId: 'null', - }, err => { - assert.ifError(err); - next(); - }), - next => s3.getObjectTagging({ - Bucket: bucket, - Key: 'obj', - VersionId: 'null', - }, (err, tagging) => { - assert.ifError(err); - assert.deepStrictEqual(tagging.TagSet, []); - next(); - }), 
- next => s3.deleteObject({ - Bucket: bucket, - Key: 'obj', - VersionId: 'null', - }, next), - next => s3.listObjectVersions({ - Bucket: bucket, - }, (err, listing) => { - assert.ifError(err); - // check that the null version has been correctly deleted - assert(listing.Versions.every(version => version.VersionId !== 'null')); - next(); - }), - ], done); + async.series( + [ + next => + s3.putObjectTagging( + { + Bucket: bucket, + Key: 'obj', + VersionId: 'null', + Tagging: { + TagSet: tagSet, + }, + }, + next + ), + next => + s3.getObjectTagging( + { + Bucket: bucket, + Key: 'obj', + VersionId: 'null', + }, + (err, tagging) => { + assert.ifError(err); + assert.deepStrictEqual(tagging.TagSet, tagSet); + next(); + } + ), + next => + s3.deleteObjectTagging( + { + Bucket: bucket, + Key: 'obj', + VersionId: 'null', + }, + err => { + assert.ifError(err); + next(); + } + ), + next => + s3.getObjectTagging( + { + Bucket: bucket, + Key: 'obj', + VersionId: 'null', + }, + (err, tagging) => { + assert.ifError(err); + assert.deepStrictEqual(tagging.TagSet, []); + next(); + } + ), + next => + s3.deleteObject( + { + Bucket: bucket, + Key: 'obj', + VersionId: 'null', + }, + next + ), + next => + s3.listObjectVersions( + { + Bucket: bucket, + }, + (err, listing) => { + assert.ifError(err); + // check that the null version has been correctly deleted + assert(listing.Versions.every(version => version.VersionId !== 'null')); + next(); + } + ), + ], + done + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/listObjectMasterVersions.js b/tests/functional/aws-node-sdk/test/versioning/listObjectMasterVersions.js index 6d4877ccda..c4badb1816 100644 --- a/tests/functional/aws-node-sdk/test/versioning/listObjectMasterVersions.js +++ b/tests/functional/aws-node-sdk/test/versioning/listObjectMasterVersions.js @@ -9,22 +9,13 @@ const { removeAllVersions } = require('../../lib/utility/versioning-util'); const bucket = `versioning-bucket-${Date.now()}`; const itSkipIfE2E = process.env.S3_END_TO_END ? 
it.skip : it; - function _assertResultElements(entry) { - const elements = [ - 'LastModified', - 'ETag', - 'Size', - 'Owner', - 'StorageClass', - ]; + const elements = ['LastModified', 'ETag', 'Size', 'Owner', 'StorageClass']; elements.forEach(elem => { - assert.notStrictEqual(entry[elem], undefined, - `Expected ${elem} in result but did not find it`); + assert.notStrictEqual(entry[elem], undefined, `Expected ${elem} in result but did not find it`); if (elem === 'Owner') { assert(entry.Owner.ID, 'Expected Owner ID but did not find it'); - assert(entry.Owner.DisplayName, - 'Expected Owner DisplayName but did not find it'); + assert(entry.Owner.DisplayName, 'Expected Owner DisplayName but did not find it'); } }); } @@ -48,8 +39,7 @@ describe('listObject - Delimiter master', function testSuite() { return done(err); } return s3.deleteBucket({ Bucket: bucket }, err => { - assert.strictEqual(err, null, - `Error deleting bucket: ${err}`); + assert.strictEqual(err, null, `Error deleting bucket: ${err}`); return done(); }); }); @@ -82,52 +72,62 @@ describe('listObject - Delimiter master', function testSuite() { ]; it('put objects inside bucket', done => { - async.eachSeries(objects, (obj, next) => { - async.waterfall([ - next => { - if (!versioning && obj.isNull !== true) { - const params = { - Bucket: bucket, - VersioningConfiguration: { - Status: 'Enabled', - }, - }; - versioning = true; - return s3.putBucketVersioning(params, err => - next(err)); - } else if (versioning && obj.isNull === true) { - const params = { - Bucket: bucket, - VersioningConfiguration: { - Status: 'Suspended', - }, - }; - versioning = false; - return s3.putBucketVersioning(params, err => - next(err)); - } - return next(); - }, - next => { - if (obj.value === null) { - return s3.deleteObject({ - Bucket: bucket, - Key: obj.name, - }, function test(err) { - const headers = this.httpResponse.headers; - assert.strictEqual( - headers['x-amz-delete-marker'], 'true'); - return next(err); - }); - } - return s3.putObject({ - Bucket: bucket, - Key: obj.name, - Body: obj.value, - }, err => next(err)); - }, - ], err => next(err)); - }, err => done(err)); + async.eachSeries( + objects, + (obj, next) => { + async.waterfall( + [ + next => { + if (!versioning && obj.isNull !== true) { + const params = { + Bucket: bucket, + VersioningConfiguration: { + Status: 'Enabled', + }, + }; + versioning = true; + return s3.putBucketVersioning(params, err => next(err)); + } else if (versioning && obj.isNull === true) { + const params = { + Bucket: bucket, + VersioningConfiguration: { + Status: 'Suspended', + }, + }; + versioning = false; + return s3.putBucketVersioning(params, err => next(err)); + } + return next(); + }, + next => { + if (obj.value === null) { + return s3.deleteObject( + { + Bucket: bucket, + Key: obj.name, + }, + function test(err) { + const headers = this.httpResponse.headers; + assert.strictEqual(headers['x-amz-delete-marker'], 'true'); + return next(err); + } + ); + } + return s3.putObject( + { + Bucket: bucket, + Key: obj.name, + Body: obj.value, + }, + err => next(err) + ); + }, + ], + err => next(err) + ); + }, + err => done(err) + ); }); [ @@ -176,11 +176,7 @@ describe('listObject - Delimiter master', function testSuite() { { name: 'with maxKeys', params: { MaxKeys: 3 }, - expectedResult: [ - 'Pâtisserie=中文-español-English', - 'notes/spring/1.txt', - 'notes/spring/march/1.txt', - ], + expectedResult: ['Pâtisserie=中文-español-English', 'notes/spring/1.txt', 'notes/spring/march/1.txt'], commonPrefix: [], isTruncated: true, 
nextMarker: undefined, @@ -206,9 +202,7 @@ describe('listObject - Delimiter master', function testSuite() { { name: 'with delimiter', params: { Delimiter: '/' }, - expectedResult: [ - 'Pâtisserie=中文-español-English', - ], + expectedResult: ['Pâtisserie=中文-español-English'], commonPrefix: ['notes/'], isTruncated: false, nextMarker: undefined, @@ -243,15 +237,8 @@ describe('listObject - Delimiter master', function testSuite() { { name: 'delimiter and prefix (related to #147)', params: { Delimiter: '/', Prefix: 'notes/' }, - expectedResult: [ - 'notes/year.txt', - 'notes/yore.rs', - ], - commonPrefix: [ - 'notes/spring/', - 'notes/summer/', - 'notes/zaphod/', - ], + expectedResult: ['notes/year.txt', 'notes/yore.rs'], + commonPrefix: ['notes/spring/', 'notes/summer/', 'notes/zaphod/'], isTruncated: false, nextMarker: undefined, }, @@ -336,30 +323,25 @@ describe('listObject - Delimiter master', function testSuite() { const runTest = test.skipe2e ? itSkipIfE2E : it; runTest(test.name, done => { const expectedResult = test.expectedResult; - s3.listObjects(Object.assign({ Bucket: bucket }, test.params), - (err, res) => { - if (err) { - return done(err); + s3.listObjects(Object.assign({ Bucket: bucket }, test.params), (err, res) => { + if (err) { + return done(err); + } + res.Contents.forEach(result => { + if (!expectedResult.find(key => key === result.Key)) { + throw new Error('listing fail, ' + `unexpected key ${result.Key}`); + } + _assertResultElements(result); + }); + res.CommonPrefixes.forEach(cp => { + if (!test.commonPrefix.find(item => item === cp.Prefix)) { + throw new Error('listing fail, ' + `unexpected prefix ${cp.Prefix}`); } - res.Contents.forEach(result => { - if (!expectedResult - .find(key => key === result.Key)) { - throw new Error('listing fail, ' + - `unexpected key ${result.Key}`); - } - _assertResultElements(result); - }); - res.CommonPrefixes.forEach(cp => { - if (!test.commonPrefix - .find(item => item === cp.Prefix)) { - throw new Error('listing fail, ' + - `unexpected prefix ${cp.Prefix}`); - } - }); - assert.strictEqual(res.IsTruncated, test.isTruncated); - assert.strictEqual(res.NextMarker, test.nextMarker); - return done(); }); + assert.strictEqual(res.IsTruncated, test.isTruncated); + assert.strictEqual(res.NextMarker, test.nextMarker); + return done(); + }); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/listObjectVersions.js b/tests/functional/aws-node-sdk/test/versioning/listObjectVersions.js index 9785b02579..41801bd199 100644 --- a/tests/functional/aws-node-sdk/test/versioning/listObjectVersions.js +++ b/tests/functional/aws-node-sdk/test/versioning/listObjectVersions.js @@ -8,28 +8,16 @@ const { removeAllVersions } = require('../../lib/utility/versioning-util'); const bucket = `versioning-bucket-${Date.now()}`; -const resultElements = [ - 'VersionId', - 'IsLatest', - 'LastModified', - 'Owner', -]; -const versionResultElements = [ - 'ETag', - 'Size', - 'StorageClass', -]; +const resultElements = ['VersionId', 'IsLatest', 'LastModified', 'Owner']; +const versionResultElements = ['ETag', 'Size', 'StorageClass']; function _assertResultElements(entry, type) { - const elements = type === 'DeleteMarker' ? resultElements : - resultElements.concat(versionResultElements); + const elements = type === 'DeleteMarker' ? 
resultElements : resultElements.concat(versionResultElements); elements.forEach(elem => { - assert.notStrictEqual(entry[elem], undefined, - `Expected ${elem} in result but did not find it`); + assert.notStrictEqual(entry[elem], undefined, `Expected ${elem} in result but did not find it`); if (elem === 'Owner') { assert(entry.Owner.ID, 'Expected Owner ID but did not find it'); - assert(entry.Owner.DisplayName, - 'Expected Owner DisplayName but did not find it'); + assert(entry.Owner.DisplayName, 'Expected Owner DisplayName but did not find it'); } }); } @@ -53,8 +41,7 @@ describe('listObject - Delimiter version', function testSuite() { return done(err); } return s3.deleteBucket({ Bucket: bucket }, err => { - assert.strictEqual(err, null, - `Error deleting bucket: ${err}`); + assert.strictEqual(err, null, `Error deleting bucket: ${err}`); return done(); }); }); @@ -87,62 +74,71 @@ describe('listObject - Delimiter version', function testSuite() { ]; it('put objects inside bucket', done => { - async.eachSeries(objects, (obj, next) => { - async.waterfall([ - next => { - if (!versioning && obj.isNull !== true) { - const params = { - Bucket: bucket, - VersioningConfiguration: { - Status: 'Enabled', - }, - }; - versioning = true; - return s3.putBucketVersioning(params, - err => next(err)); - } else if (versioning && obj.isNull === true) { - const params = { - Bucket: bucket, - VersioningConfiguration: { - Status: 'Suspended', - }, - }; - versioning = false; - return s3.putBucketVersioning(params, - err => next(err)); - } - return next(); - }, - next => { - if (obj.value === null) { - return s3.deleteObject({ - Bucket: bucket, - Key: obj.name, - }, function test(err) { - const headers = this.httpResponse.headers; - assert.strictEqual( - headers['x-amz-delete-marker'], - 'true'); - // eslint-disable-next-line no-param-reassign - obj.versionId = headers['x-amz-version-id']; - return next(err); - }); - } - return s3.putObject({ - Bucket: bucket, - Key: obj.name, - Body: obj.value, - }, (err, res) => { - if (err) { - return next(err); - } - // eslint-disable-next-line no-param-reassign - obj.versionId = res.VersionId || 'null'; - return next(); - }); - }, - ], err => next(err)); - }, err => done(err)); + async.eachSeries( + objects, + (obj, next) => { + async.waterfall( + [ + next => { + if (!versioning && obj.isNull !== true) { + const params = { + Bucket: bucket, + VersioningConfiguration: { + Status: 'Enabled', + }, + }; + versioning = true; + return s3.putBucketVersioning(params, err => next(err)); + } else if (versioning && obj.isNull === true) { + const params = { + Bucket: bucket, + VersioningConfiguration: { + Status: 'Suspended', + }, + }; + versioning = false; + return s3.putBucketVersioning(params, err => next(err)); + } + return next(); + }, + next => { + if (obj.value === null) { + return s3.deleteObject( + { + Bucket: bucket, + Key: obj.name, + }, + function test(err) { + const headers = this.httpResponse.headers; + assert.strictEqual(headers['x-amz-delete-marker'], 'true'); + // eslint-disable-next-line no-param-reassign + obj.versionId = headers['x-amz-version-id']; + return next(err); + } + ); + } + return s3.putObject( + { + Bucket: bucket, + Key: obj.name, + Body: obj.value, + }, + (err, res) => { + if (err) { + return next(err); + } + // eslint-disable-next-line no-param-reassign + obj.versionId = res.VersionId || 'null'; + return next(); + } + ); + }, + ], + err => next(err) + ); + }, + err => done(err) + ); }); [ @@ -193,11 +189,7 @@ describe('listObject - Delimiter version', 
function testSuite() { { name: 'with maxKeys', params: { MaxKeys: 3 }, - expectedResult: [ - objects[4], - objects[5], - objects[8], - ], + expectedResult: [objects[4], objects[5], objects[8]], commonPrefix: [], isTruncated: true, nextKeyMarker: objects[8].name, @@ -224,8 +216,7 @@ describe('listObject - Delimiter version', function testSuite() { { name: 'with long delimiter', params: { Delimiter: 'notes/summer' }, - expectedResult: objects.filter(obj => - obj.name.indexOf('notes/summer') < 0), + expectedResult: objects.filter(obj => obj.name.indexOf('notes/summer') < 0), commonPrefix: ['notes/summer'], isTruncated: false, nextKeyMarker: undefined, @@ -247,15 +238,8 @@ describe('listObject - Delimiter version', function testSuite() { { name: 'delimiter and prefix (related to #147)', params: { Delimiter: '/', Prefix: 'notes/' }, - expectedResult: [ - objects[1], - objects[2], - ], - commonPrefix: [ - 'notes/spring/', - 'notes/summer/', - 'notes/zaphod/', - ], + expectedResult: [objects[1], objects[2]], + commonPrefix: ['notes/spring/', 'notes/summer/', 'notes/zaphod/'], isTruncated: false, nextKeyMarker: undefined, nextVersionIdMarker: undefined, @@ -346,62 +330,52 @@ describe('listObject - Delimiter version', function testSuite() { ].forEach(test => { it(test.name, done => { const expectedResult = test.expectedResult; - s3.listObjectVersions( - Object.assign({ Bucket: bucket }, test.params), - (err, res) => { - if (err) { - return done(err); - } - res.Versions.forEach(result => { - const item = expectedResult.find(obj => { - if (obj.name === result.Key && - obj.versionId === result.VersionId && - obj.value !== null) { - return true; - } - return false; - }); - if (!item) { - throw new Error('listing fail, ' + - `unexpected key ${result.Key} ` + - `with version ${result.VersionId}`); - } - _assertResultElements(result, 'Version'); - }); - res.DeleteMarkers.forEach(result => { - const item = expectedResult.find(obj => { - if (obj.name === result.Key && - obj.versionId === result.VersionId && - obj.value === null) { - return true; - } - return false; - }); - if (!item) { - throw new Error('listing fail, ' + - `unexpected key ${result.Key} ` + - `with version ${result.VersionId}`); + s3.listObjectVersions(Object.assign({ Bucket: bucket }, test.params), (err, res) => { + if (err) { + return done(err); + } + res.Versions.forEach(result => { + const item = expectedResult.find(obj => { + if (obj.name === result.Key && obj.versionId === result.VersionId && obj.value !== null) { + return true; } - _assertResultElements(result, 'DeleteMarker'); + return false; }); - res.CommonPrefixes.forEach(cp => { - if (!test.commonPrefix.find( - item => item === cp.Prefix)) { - throw new Error('listing fail, ' + - `unexpected prefix ${cp.Prefix}`); + if (!item) { + throw new Error( + 'listing fail, ' + `unexpected key ${result.Key} ` + `with version ${result.VersionId}` + ); + } + _assertResultElements(result, 'Version'); + }); + res.DeleteMarkers.forEach(result => { + const item = expectedResult.find(obj => { + if (obj.name === result.Key && obj.versionId === result.VersionId && obj.value === null) { + return true; } + return false; }); - assert.strictEqual(res.IsTruncated, test.isTruncated); - assert.strictEqual(res.NextKeyMarker, - test.nextKeyMarker); - if (!test.nextVersionIdMarker) { - // eslint-disable-next-line no-param-reassign - test.nextVersionIdMarker = {}; + if (!item) { + throw new Error( + 'listing fail, ' + `unexpected key ${result.Key} ` + `with version ${result.VersionId}` + ); + } + 
_assertResultElements(result, 'DeleteMarker'); + }); + res.CommonPrefixes.forEach(cp => { + if (!test.commonPrefix.find(item => item === cp.Prefix)) { + throw new Error('listing fail, ' + `unexpected prefix ${cp.Prefix}`); } - assert.strictEqual(res.NextVersionIdMarker, - test.nextVersionIdMarker.versionId); - return done(); }); + assert.strictEqual(res.IsTruncated, test.isTruncated); + assert.strictEqual(res.NextKeyMarker, test.nextKeyMarker); + if (!test.nextVersionIdMarker) { + // eslint-disable-next-line no-param-reassign + test.nextVersionIdMarker = {}; + } + assert.strictEqual(res.NextVersionIdMarker, test.nextVersionIdMarker.versionId); + return done(); + }); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/multiObjectDelete.js b/tests/functional/aws-node-sdk/test/versioning/multiObjectDelete.js index 6014e5dd51..4e7b241121 100644 --- a/tests/functional/aws-node-sdk/test/versioning/multiObjectDelete.js +++ b/tests/functional/aws-node-sdk/test/versioning/multiObjectDelete.js @@ -9,13 +9,12 @@ const bucketName = `multi-object-delete-${Date.now()}`; const key = 'key'; // formats differ for AWS and S3, use respective sample ids to obtain // correct error response in tests -const nonExistingId = process.env.AWS_ON_AIR ? - 'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' : - '3939393939393939393936493939393939393939756e6437'; +const nonExistingId = process.env.AWS_ON_AIR + ? 'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' + : '3939393939393939393936493939393939393939756e6437'; function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } function sortList(list) { @@ -30,7 +29,6 @@ function sortList(list) { }); } - describe('Multi-Object Versioning Delete Success', function success() { this.timeout(360000); @@ -40,42 +38,56 @@ describe('Multi-Object Versioning Delete Success', function success() { let objectsRes; beforeEach(done => { - async.waterfall([ - next => s3.createBucket({ Bucket: bucketName }, - err => next(err)), - next => s3.putBucketVersioning({ - Bucket: bucketName, - VersioningConfiguration: { - Status: 'Enabled', - }, - }, err => next(err)), - next => { - const objects = []; - for (let i = 1; i < 1001; i++) { - objects.push(`${key}${i}`); - } - async.mapLimit(objects, 20, (key, next) => { - s3.putObject({ - Bucket: bucketName, - Key: key, - Body: 'somebody', - }, (err, res) => { - if (err) { - return next(err); - } - // eslint-disable-next-line no-param-reassign - res.Key = key; - return next(null, res); - }); - }, (err, results) => { - if (err) { - return next(err); + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucketName }, err => next(err)), + next => + s3.putBucketVersioning( + { + Bucket: bucketName, + VersioningConfiguration: { + Status: 'Enabled', + }, + }, + err => next(err) + ), + next => { + const objects = []; + for (let i = 1; i < 1001; i++) { + objects.push(`${key}${i}`); } - objectsRes = results; - return next(); - }); - }, - ], err => done(err)); + async.mapLimit( + objects, + 20, + (key, next) => { + s3.putObject( + { + Bucket: bucketName, + Key: key, + Body: 'somebody', + }, + (err, res) => { + if (err) { + return next(err); + } + // eslint-disable-next-line no-param-reassign + res.Key = key; + return next(null, res); + } + ); + }, + (err, results) => { + if (err) { + return next(err); + } + objectsRes = results; + return next(); + } + ); + }, + ], + err => done(err) + ); }); afterEach(done => { @@ -84,128 +96,145 @@ 
describe('Multi-Object Versioning Delete Success', function success() { return done(err); } return s3.deleteBucket({ Bucket: bucketName }, err => { - assert.strictEqual(err, null, - `Error deleting bucket: ${err}`); + assert.strictEqual(err, null, `Error deleting bucket: ${err}`); return done(); }); }); }); it('should batch delete 1000 objects quietly', () => { - const objects = objectsRes.slice(0, 1000).map(obj => - ({ Key: obj.Key, VersionId: obj.VersionId })); - return s3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - Quiet: true, - }, - }).promise().then(res => { - assert.strictEqual(res.Deleted.length, 0); - assert.strictEqual(res.Errors.length, 0); - }).catch(err => { - checkNoError(err); - }); + const objects = objectsRes.slice(0, 1000).map(obj => ({ Key: obj.Key, VersionId: obj.VersionId })); + return s3 + .deleteObjects({ + Bucket: bucketName, + Delete: { + Objects: objects, + Quiet: true, + }, + }) + .promise() + .then(res => { + assert.strictEqual(res.Deleted.length, 0); + assert.strictEqual(res.Errors.length, 0); + }) + .catch(err => { + checkNoError(err); + }); }); it('should batch delete 1000 objects', () => { - const objects = objectsRes.slice(0, 1000).map(obj => - ({ Key: obj.Key, VersionId: obj.VersionId })); - return s3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - Quiet: false, - }, - }).promise().then(res => { - assert.strictEqual(res.Deleted.length, 1000); - // order of returned objects not sorted - assert.deepStrictEqual(sortList(res.Deleted), - sortList(objects)); - assert.strictEqual(res.Errors.length, 0); - }).catch(err => { - checkNoError(err); - }); + const objects = objectsRes.slice(0, 1000).map(obj => ({ Key: obj.Key, VersionId: obj.VersionId })); + return s3 + .deleteObjects({ + Bucket: bucketName, + Delete: { + Objects: objects, + Quiet: false, + }, + }) + .promise() + .then(res => { + assert.strictEqual(res.Deleted.length, 1000); + // order of returned objects not sorted + assert.deepStrictEqual(sortList(res.Deleted), sortList(objects)); + assert.strictEqual(res.Errors.length, 0); + }) + .catch(err => { + checkNoError(err); + }); }); - it('should return NoSuchVersion in errors if one versionId is ' + - 'invalid', () => { - const objects = objectsRes.slice(0, 1000).map(obj => - ({ Key: obj.Key, VersionId: obj.VersionId })); + it('should return NoSuchVersion in errors if one versionId is ' + 'invalid', () => { + const objects = objectsRes.slice(0, 1000).map(obj => ({ Key: obj.Key, VersionId: obj.VersionId })); objects[0].VersionId = 'invalid-version-id'; - return s3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - }, - }).promise().then(res => { - assert.strictEqual(res.Deleted.length, 999); - assert.strictEqual(res.Errors.length, 1); - assert.strictEqual(res.Errors[0].Code, 'NoSuchVersion'); - }) - .catch(err => { - checkNoError(err); - }); + return s3 + .deleteObjects({ + Bucket: bucketName, + Delete: { + Objects: objects, + }, + }) + .promise() + .then(res => { + assert.strictEqual(res.Deleted.length, 999); + assert.strictEqual(res.Errors.length, 1); + assert.strictEqual(res.Errors[0].Code, 'NoSuchVersion'); + }) + .catch(err => { + checkNoError(err); + }); }); - it('should not send back any error if a versionId does not exist ' + - 'and should not create a new delete marker', () => { - const objects = objectsRes.slice(0, 1000).map(obj => - ({ Key: obj.Key, VersionId: obj.VersionId })); - objects[0].VersionId = nonExistingId; - return s3.deleteObjects({ - Bucket: bucketName, - Delete: 
{ - Objects: objects, - }, - }).promise().then(res => { - assert.strictEqual(res.Deleted.length, 1000); - assert.strictEqual(res.Errors.length, 0); - const foundVersionId = res.Deleted.find(entry => - entry.VersionId === nonExistingId); - assert(foundVersionId); - assert.strictEqual(foundVersionId.DeleteMarker, undefined); - }) - .catch(err => { - checkNoError(err); - }); - }); + it( + 'should not send back any error if a versionId does not exist ' + + 'and should not create a new delete marker', + () => { + const objects = objectsRes.slice(0, 1000).map(obj => ({ Key: obj.Key, VersionId: obj.VersionId })); + objects[0].VersionId = nonExistingId; + return s3 + .deleteObjects({ + Bucket: bucketName, + Delete: { + Objects: objects, + }, + }) + .promise() + .then(res => { + assert.strictEqual(res.Deleted.length, 1000); + assert.strictEqual(res.Errors.length, 0); + const foundVersionId = res.Deleted.find(entry => entry.VersionId === nonExistingId); + assert(foundVersionId); + assert.strictEqual(foundVersionId.DeleteMarker, undefined); + }) + .catch(err => { + checkNoError(err); + }); + } + ); it('should not crash when deleting a null versionId that does not exist', () => { const objects = [{ Key: objectsRes[0].Key, VersionId: 'null' }]; - return s3.deleteObjects({ - Bucket: bucketName, - Delete: { - Objects: objects, - }, - }).promise().then(res => { - assert.deepStrictEqual(res.Deleted, [{ Key: objectsRes[0].Key, VersionId: 'null' }]); - assert.strictEqual(res.Errors.length, 0); - }) - .catch(err => { - checkNoError(err); - }); + return s3 + .deleteObjects({ + Bucket: bucketName, + Delete: { + Objects: objects, + }, + }) + .promise() + .then(res => { + assert.deepStrictEqual(res.Deleted, [{ Key: objectsRes[0].Key, VersionId: 'null' }]); + assert.strictEqual(res.Errors.length, 0); + }) + .catch(err => { + checkNoError(err); + }); }); }); }); -describe('Multi-Object Versioning Delete - deleting delete marker', -() => { +describe('Multi-Object Versioning Delete - deleting delete marker', () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); const s3 = bucketUtil.s3; beforeEach(done => { - async.waterfall([ - next => s3.createBucket({ Bucket: bucketName }, - err => next(err)), - next => s3.putBucketVersioning({ - Bucket: bucketName, - VersioningConfiguration: { - Status: 'Enabled', - }, - }, err => next(err)), - ], done); + async.waterfall( + [ + next => s3.createBucket({ Bucket: bucketName }, err => next(err)), + next => + s3.putBucketVersioning( + { + Bucket: bucketName, + VersioningConfiguration: { + Status: 'Enabled', + }, + }, + err => next(err) + ), + ], + done + ); }); afterEach(done => { removeAllVersions({ Bucket: bucketName }, err => { @@ -213,113 +242,126 @@ describe('Multi-Object Versioning Delete - deleting delete marker', return done(err); } return s3.deleteBucket({ Bucket: bucketName }, err => { - assert.strictEqual(err, null, - `Error deleting bucket: ${err}`); + assert.strictEqual(err, null, `Error deleting bucket: ${err}`); return done(); }); }); }); - it('should send back VersionId and DeleteMarkerVersionId both equal ' + - 'to deleteVersionId', done => { - async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: key }, - err => next(err)), - next => s3.deleteObject({ Bucket: bucketName, - Key: key }, (err, data) => { - const deleteVersionId = data.VersionId; - next(err, deleteVersionId); - }), - (deleteVersionId, next) => s3.deleteObjects({ Bucket: - bucketName, - Delete: { - Objects: [ + it('should send back VersionId and 
DeleteMarkerVersionId both equal ' + 'to deleteVersionId', done => { + async.waterfall( + [ + next => s3.putObject({ Bucket: bucketName, Key: key }, err => next(err)), + next => + s3.deleteObject({ Bucket: bucketName, Key: key }, (err, data) => { + const deleteVersionId = data.VersionId; + next(err, deleteVersionId); + }), + (deleteVersionId, next) => + s3.deleteObjects( { - Key: key, - VersionId: deleteVersionId, + Bucket: bucketName, + Delete: { + Objects: [ + { + Key: key, + VersionId: deleteVersionId, + }, + ], + }, }, - ], - } }, (err, data) => { - assert.strictEqual(data.Deleted[0].DeleteMarker, true); - assert.strictEqual(data.Deleted[0].VersionId, - deleteVersionId); - assert.strictEqual(data.Deleted[0].DeleteMarkerVersionId, - deleteVersionId); - next(err); - }), - ], err => done(err)); + (err, data) => { + assert.strictEqual(data.Deleted[0].DeleteMarker, true); + assert.strictEqual(data.Deleted[0].VersionId, deleteVersionId); + assert.strictEqual(data.Deleted[0].DeleteMarkerVersionId, deleteVersionId); + next(err); + } + ), + ], + err => done(err) + ); }); - it('should send back a DeleteMarkerVersionId matching the versionId ' + - 'stored for the object if trying to delete an object that does not exist', - done => { - s3.deleteObjects({ Bucket: bucketName, - Delete: { - Objects: [ - { - Key: key, + it( + 'should send back a DeleteMarkerVersionId matching the versionId ' + + 'stored for the object if trying to delete an object that does not exist', + done => { + s3.deleteObjects( + { + Bucket: bucketName, + Delete: { + Objects: [ + { + Key: key, + }, + ], }, - ], - } }, (err, data) => { - if (err) { - return done(err); - } - const versionIdFromDeleteObjects = - data.Deleted[0].DeleteMarkerVersionId; - assert.strictEqual(data.Deleted[0].DeleteMarker, true); - return s3.listObjectVersions({ Bucket: bucketName }, - (err, data) => { - if (err) { - return done(err); - } - const versionIdFromListObjectVersions = - data.DeleteMarkers[0].VersionId; - assert.strictEqual(versionIdFromDeleteObjects, - versionIdFromListObjectVersions); - return done(); - }); - }); - }); - - it('should send back a DeleteMarkerVersionId matching the versionId ' + - 'stored for the object if object exists but no version was specified', - done => { - async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: key }, - (err, data) => { - const versionId = data.VersionId; - next(err, versionId); - }), - (versionId, next) => s3.deleteObjects({ Bucket: - bucketName, - Delete: { - Objects: [ - { - Key: key, - }, - ], - } }, (err, data) => { - if (err) { - return next(err); - } - assert.strictEqual(data.Deleted[0].DeleteMarker, true); - const deleteVersionId = data.Deleted[0]. 
- DeleteMarkerVersionId; - assert.notEqual(deleteVersionId, versionId); - return next(err, deleteVersionId, versionId); - }), - (deleteVersionId, versionId, next) => s3.listObjectVersions( - { Bucket: bucketName }, (err, data) => { - if (err) { - return next(err); + }, + (err, data) => { + if (err) { + return done(err); + } + const versionIdFromDeleteObjects = data.Deleted[0].DeleteMarkerVersionId; + assert.strictEqual(data.Deleted[0].DeleteMarker, true); + return s3.listObjectVersions({ Bucket: bucketName }, (err, data) => { + if (err) { + return done(err); + } + const versionIdFromListObjectVersions = data.DeleteMarkers[0].VersionId; + assert.strictEqual(versionIdFromDeleteObjects, versionIdFromListObjectVersions); + return done(); + }); } - assert.strictEqual(deleteVersionId, - data.DeleteMarkers[0].VersionId); - assert.strictEqual(versionId, - data.Versions[0].VersionId); - return next(); - }), - ], err => done(err)); - }); + ); + } + ); + + it( + 'should send back a DeleteMarkerVersionId matching the versionId ' + + 'stored for the object if object exists but no version was specified', + done => { + async.waterfall( + [ + next => + s3.putObject({ Bucket: bucketName, Key: key }, (err, data) => { + const versionId = data.VersionId; + next(err, versionId); + }), + (versionId, next) => + s3.deleteObjects( + { + Bucket: bucketName, + Delete: { + Objects: [ + { + Key: key, + }, + ], + }, + }, + (err, data) => { + if (err) { + return next(err); + } + assert.strictEqual(data.Deleted[0].DeleteMarker, true); + const deleteVersionId = data.Deleted[0].DeleteMarkerVersionId; + assert.notEqual(deleteVersionId, versionId); + return next(err, deleteVersionId, versionId); + } + ), + (deleteVersionId, versionId, next) => + s3.listObjectVersions({ Bucket: bucketName }, (err, data) => { + if (err) { + return next(err); + } + assert.strictEqual(deleteVersionId, data.DeleteMarkers[0].VersionId); + assert.strictEqual(versionId, data.Versions[0].VersionId); + return next(); + }), + ], + err => done(err) + ); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/objectACL.js b/tests/functional/aws-node-sdk/test/versioning/objectACL.js index 7976d852e1..2c095b0493 100644 --- a/tests/functional/aws-node-sdk/test/versioning/objectACL.js +++ b/tests/functional/aws-node-sdk/test/versioning/objectACL.js @@ -17,9 +17,9 @@ const key = '/'; const invalidId = 'invalidIdWithMoreThan40BytesAndThatIsNotLongEnoughYet'; // formats differ for AWS and S3, use respective sample ids to obtain // correct error response in tests -const nonExistingId = process.env.AWS_ON_AIR ? - 'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' : - '3939393939393939393936493939393939393939756e6437'; +const nonExistingId = process.env.AWS_ON_AIR + ? 
'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' + : '3939393939393939393936493939393939393939756e6437'; class _Utils { constructor(s3) { @@ -34,19 +34,24 @@ class _Utils { // exposed data object for put/get acl methods _wrapDataObject(method, params, callback) { let request; - async.waterfall([ - next => { - request = this.s3[method](params, next); - }, - (data, next) => { - const responseHeaders = request.response - .httpResponse.headers; - const dataObj = Object.assign({ - VersionId: responseHeaders['x-amz-version-id'], - }, data); - return next(null, dataObj); - }, - ], callback); + async.waterfall( + [ + next => { + request = this.s3[method](params, next); + }, + (data, next) => { + const responseHeaders = request.response.httpResponse.headers; + const dataObj = Object.assign( + { + VersionId: responseHeaders['x-amz-version-id'], + }, + data + ); + return next(null, dataObj); + }, + ], + callback + ); } getObjectAcl(params, callback) { @@ -69,27 +74,29 @@ class _Utils { this.putObjectAcl(params, (err, data) => { if (expected.error) { assert.strictEqual(expected.error.code, err.code); - assert.strictEqual(expected.error.statusCode, - err.statusCode); + assert.strictEqual(expected.error.statusCode, err.statusCode); } else { - _Utils.assertNoError(err, - `putting object acl with version id: ${versionId}`); - assert.strictEqual(data.VersionId, expected.versionId, + _Utils.assertNoError(err, `putting object acl with version id: ${versionId}`); + assert.strictEqual( + data.VersionId, + expected.versionId, `expected version id '${expected.versionId}' in ` + - `putacl res headers, got '${data.VersionId}' instead`); + `putacl res headers, got '${data.VersionId}' instead` + ); } delete params.ACL; this.getObjectAcl(params, (err, data) => { if (expected.error) { assert.strictEqual(expected.error.code, err.code); - assert.strictEqual(expected.error.statusCode, - err.statusCode); + assert.strictEqual(expected.error.statusCode, err.statusCode); } else { - _Utils.assertNoError(err, - `getting object acl with version id: ${versionId}`); - assert.strictEqual(data.VersionId, expected.versionId, + _Utils.assertNoError(err, `getting object acl with version id: ${versionId}`); + assert.strictEqual( + data.VersionId, + expected.versionId, `expected version id '${expected.versionId}' in ` + - `getacl res headers, got '${data.VersionId}'`); + `getacl res headers, got '${data.VersionId}'` + ); assert.strictEqual(data.Grants.length, 2); } cb(); @@ -101,58 +108,58 @@ class _Utils { function _testBehaviorVersioningEnabledOrSuspended(utils, versionIds) { const s3 = utils.s3; - it('should return 405 MethodNotAllowed putting acl without ' + - 'version id if latest version is a delete marker', done => { - const aclParams = { - Bucket: bucket, - Key: key, - ACL: 'public-read-write', - }; - s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { - assert.strictEqual(err, null, - `Unexpected err deleting object: ${err}`); - assert.strictEqual(data.DeleteMarker, true); - assert(data.VersionId); - utils.putObjectAcl(aclParams, err => { - assert(err); - assert.strictEqual(err.code, 'MethodNotAllowed'); - assert.strictEqual(err.statusCode, 405); - done(); + it( + 'should return 405 MethodNotAllowed putting acl without ' + 'version id if latest version is a delete marker', + done => { + const aclParams = { + Bucket: bucket, + Key: key, + ACL: 'public-read-write', + }; + s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { + assert.strictEqual(err, null, `Unexpected err deleting object: ${err}`); + 
assert.strictEqual(data.DeleteMarker, true); + assert(data.VersionId); + utils.putObjectAcl(aclParams, err => { + assert(err); + assert.strictEqual(err.code, 'MethodNotAllowed'); + assert.strictEqual(err.statusCode, 405); + done(); + }); }); - }); - }); - - it('should return 405 MethodNotAllowed putting acl with ' + - 'version id if version specified is a delete marker', done => { - const aclParams = { - Bucket: bucket, - Key: key, - ACL: 'public-read-write', - }; - s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { - assert.strictEqual(err, null, - `Unexpected err deleting object: ${err}`); - assert.strictEqual(data.DeleteMarker, true); - assert(data.VersionId); - aclParams.VersionId = data.VersionId; - utils.putObjectAcl(aclParams, err => { - assert(err); - assert.strictEqual(err.code, 'MethodNotAllowed'); - assert.strictEqual(err.statusCode, 405); - done(); + } + ); + + it( + 'should return 405 MethodNotAllowed putting acl with ' + 'version id if version specified is a delete marker', + done => { + const aclParams = { + Bucket: bucket, + Key: key, + ACL: 'public-read-write', + }; + s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { + assert.strictEqual(err, null, `Unexpected err deleting object: ${err}`); + assert.strictEqual(data.DeleteMarker, true); + assert(data.VersionId); + aclParams.VersionId = data.VersionId; + utils.putObjectAcl(aclParams, err => { + assert(err); + assert.strictEqual(err.code, 'MethodNotAllowed'); + assert.strictEqual(err.statusCode, 405); + done(); + }); }); - }); - }); + } + ); - it('should return 404 NoSuchKey getting acl without ' + - 'version id if latest version is a delete marker', done => { + it('should return 404 NoSuchKey getting acl without ' + 'version id if latest version is a delete marker', done => { const aclParams = { Bucket: bucket, Key: key, }; s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { - assert.strictEqual(err, null, - `Unexpected err deleting object: ${err}`); + assert.strictEqual(err, null, `Unexpected err deleting object: ${err}`); assert.strictEqual(data.DeleteMarker, true); assert(data.VersionId); utils.getObjectAcl(aclParams, err => { @@ -164,48 +171,54 @@ function _testBehaviorVersioningEnabledOrSuspended(utils, versionIds) { }); }); - it('should return 405 MethodNotAllowed getting acl with ' + - 'version id if version specified is a delete marker', done => { - const latestVersion = versionIds[versionIds.length - 1]; - const aclParams = { - Bucket: bucket, - Key: key, - VersionId: latestVersion, - }; - s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { - assert.strictEqual(err, null, - `Unexpected err deleting object: ${err}`); - assert.strictEqual(data.DeleteMarker, true); - assert(data.VersionId); - aclParams.VersionId = data.VersionId; - utils.getObjectAcl(aclParams, err => { - assert(err); - assert.strictEqual(err.code, 'MethodNotAllowed'); - assert.strictEqual(err.statusCode, 405); - done(); + it( + 'should return 405 MethodNotAllowed getting acl with ' + 'version id if version specified is a delete marker', + done => { + const latestVersion = versionIds[versionIds.length - 1]; + const aclParams = { + Bucket: bucket, + Key: key, + VersionId: latestVersion, + }; + s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { + assert.strictEqual(err, null, `Unexpected err deleting object: ${err}`); + assert.strictEqual(data.DeleteMarker, true); + assert(data.VersionId); + aclParams.VersionId = data.VersionId; + utils.getObjectAcl(aclParams, err => { + assert(err); + 
assert.strictEqual(err.code, 'MethodNotAllowed'); + assert.strictEqual(err.statusCode, 405); + done(); + }); }); - }); - }); - - it('non-version specific put and get ACL should target latest ' + - 'version AND return version ID in response headers', done => { - const latestVersion = versionIds[versionIds.length - 1]; - const expectedRes = { versionId: latestVersion }; - utils.putAndGetAcl('public-read', undefined, expectedRes, done); - }); + } + ); + + it( + 'non-version specific put and get ACL should target latest ' + + 'version AND return version ID in response headers', + done => { + const latestVersion = versionIds[versionIds.length - 1]; + const expectedRes = { versionId: latestVersion }; + utils.putAndGetAcl('public-read', undefined, expectedRes, done); + } + ); - it('version specific put and get ACL should return version ID ' + - 'in response headers', done => { + it('version specific put and get ACL should return version ID ' + 'in response headers', done => { const firstVersion = versionIds[0]; const expectedRes = { versionId: firstVersion }; utils.putAndGetAcl('public-read', firstVersion, expectedRes, done); }); - it('version specific put and get ACL (version id = "null") ' + - 'should return version ID ("null") in response headers', done => { - const expectedRes = { versionId: 'null' }; - utils.putAndGetAcl('public-read', 'null', expectedRes, done); - }); + it( + 'version specific put and get ACL (version id = "null") ' + + 'should return version ID ("null") in response headers', + done => { + const expectedRes = { versionId: 'null' }; + utils.putAndGetAcl('public-read', 'null', expectedRes, done); + } + ); } describe('versioned put and get object acl ::', () => { @@ -233,44 +246,46 @@ describe('versioned put and get object acl ::', () => { s3.putObject({ Bucket: bucket, Key: key }, done); }); - it('should not return version id for non-version specific ' + - 'put and get ACL', done => { + it('should not return version id for non-version specific ' + 'put and get ACL', done => { const expectedRes = { versionId: undefined }; utils.putAndGetAcl('public-read', undefined, expectedRes, done); }); - it('should not return version id for version specific ' + - 'put and get ACL (version id = "null")', done => { + it('should not return version id for version specific ' + 'put and get ACL (version id = "null")', done => { const expectedRes = { versionId: undefined }; utils.putAndGetAcl('public-read', 'null', expectedRes, done); }); - it('should return NoSuchVersion if attempting to put or get acl ' + - 'for non-existing version', done => { + it('should return NoSuchVersion if attempting to put or get acl ' + 'for non-existing version', done => { const error = { code: 'NoSuchVersion', statusCode: 404 }; utils.putAndGetAcl('private', nonExistingId, { error }, done); }); - it('should return InvalidArgument if attempting to put/get acl ' + - 'for invalid hex string', done => { + it('should return InvalidArgument if attempting to put/get acl ' + 'for invalid hex string', done => { const error = { code: 'InvalidArgument', statusCode: 400 }; utils.putAndGetAcl('private', invalidId, { error }, done); }); }); - describe('on a version-enabled bucket with non-versioned object :: ', - () => { + describe('on a version-enabled bucket with non-versioned object :: ', () => { const versionIds = []; beforeEach(done => { const params = { Bucket: bucket, Key: key }; - async.waterfall([ - callback => s3.putObject(params, err => callback(err)), - callback => s3.putBucketVersioning({ - Bucket: bucket, - 
VersioningConfiguration: versioningEnabled, - }, err => callback(err)), - ], done); + async.waterfall( + [ + callback => s3.putObject(params, err => callback(err)), + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + err => callback(err) + ), + ], + done + ); }); afterEach(done => { @@ -280,72 +295,93 @@ describe('versioned put and get object acl ::', () => { }); describe('before putting new versions :: ', () => { - it('non-version specific put and get ACL should now ' + - 'return version ID ("null") in response headers', done => { - const expectedRes = { versionId: 'null' }; - utils.putAndGetAcl('public-read', undefined, expectedRes, - done); - }); + it( + 'non-version specific put and get ACL should now ' + + 'return version ID ("null") in response headers', + done => { + const expectedRes = { versionId: 'null' }; + utils.putAndGetAcl('public-read', undefined, expectedRes, done); + } + ); }); describe('after putting new versions :: ', () => { beforeEach(done => { const params = { Bucket: bucket, Key: key }; - async.timesSeries(counter, (i, next) => - s3.putObject(params, (err, data) => { - _Utils.assertNoError(err, `putting version #${i}`); - versionIds.push(data.VersionId); - next(err); - }), done); + async.timesSeries( + counter, + (i, next) => + s3.putObject(params, (err, data) => { + _Utils.assertNoError(err, `putting version #${i}`); + versionIds.push(data.VersionId); + next(err); + }), + done + ); }); _testBehaviorVersioningEnabledOrSuspended(utils, versionIds); }); }); - describe('on a version-enabled bucket - version non-specified :: ', - () => { + describe('on a version-enabled bucket - version non-specified :: ', () => { let versionId; beforeEach(done => { const params = { Bucket: bucket, Key: key }; - async.waterfall([ - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, err => callback(err)), - callback => s3.putObject(params, (err, data) => { - if (err) { - return callback(err); - } - versionId = data.VersionId; - return callback(); - }), - ], done); + async.waterfall( + [ + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + err => callback(err) + ), + callback => + s3.putObject(params, (err, data) => { + if (err) { + return callback(err); + } + versionId = data.VersionId; + return callback(); + }), + ], + done + ); }); - it('should not create version putting ACL on a' + - 'version-enabled bucket where no version id is specified', - done => { - const params = { Bucket: bucket, Key: key, ACL: 'public-read' }; - utils.putObjectAcl(params, () => { - checkOneVersion(s3, bucket, versionId, done); - }); - }); + it( + 'should not create version putting ACL on a' + + 'version-enabled bucket where no version id is specified', + done => { + const params = { Bucket: bucket, Key: key, ACL: 'public-read' }; + utils.putObjectAcl(params, () => { + checkOneVersion(s3, bucket, versionId, done); + }); + } + ); }); - describe('on version-suspended bucket with non-versioned object :: ', - () => { + describe('on version-suspended bucket with non-versioned object :: ', () => { const versionIds = []; beforeEach(done => { const params = { Bucket: bucket, Key: key }; - async.waterfall([ - callback => s3.putObject(params, err => callback(err)), - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, err => callback(err)), - ], done); + async.waterfall( + [ + callback => 
s3.putObject(params, err => callback(err)), + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + err => callback(err) + ), + ], + done + ); }); afterEach(done => { @@ -355,34 +391,51 @@ describe('versioned put and get object acl ::', () => { }); describe('before putting new versions :: ', () => { - it('non-version specific put and get ACL should still ' + - 'return version ID ("null") in response headers', done => { - const expectedRes = { versionId: 'null' }; - utils.putAndGetAcl('public-read', undefined, expectedRes, - done); - }); + it( + 'non-version specific put and get ACL should still ' + + 'return version ID ("null") in response headers', + done => { + const expectedRes = { versionId: 'null' }; + utils.putAndGetAcl('public-read', undefined, expectedRes, done); + } + ); }); describe('after putting new versions :: ', () => { beforeEach(done => { const params = { Bucket: bucket, Key: key }; - async.waterfall([ - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, err => callback(err)), - callback => async.timesSeries(counter, (i, next) => - s3.putObject(params, (err, data) => { - _Utils.assertNoError(err, - `putting version #${i}`); - versionIds.push(data.VersionId); - next(err); - }), err => callback(err)), - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, err => callback(err)), - ], done); + async.waterfall( + [ + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + err => callback(err) + ), + callback => + async.timesSeries( + counter, + (i, next) => + s3.putObject(params, (err, data) => { + _Utils.assertNoError(err, `putting version #${i}`); + versionIds.push(data.VersionId); + next(err); + }), + err => callback(err) + ), + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + err => callback(err) + ), + ], + done + ); }); _testBehaviorVersioningEnabledOrSuspended(utils, versionIds); diff --git a/tests/functional/aws-node-sdk/test/versioning/objectCopy.js b/tests/functional/aws-node-sdk/test/versioning/objectCopy.js index 752778afac..ac985affd7 100644 --- a/tests/functional/aws-node-sdk/test/versioning/objectCopy.js +++ b/tests/functional/aws-node-sdk/test/versioning/objectCopy.js @@ -47,8 +47,7 @@ const otherAccountBucketUtility = new BucketUtility('lisa', {}); const otherAccountS3 = otherAccountBucketUtility.s3; function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); } function checkError(err, code) { @@ -63,10 +62,9 @@ function dateFromNow(diff) { } function dateConvert(d) { - return (new Date(d)).toISOString(); + return new Date(d).toISOString(); } - describe('Object Version Copy', () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); @@ -87,79 +85,97 @@ describe('Object Version Copy', () => { }); } - beforeEach(() => bucketUtil.createOne(sourceBucketName) - .then(() => bucketUtil.createOne(destBucketName)) - .then(() => s3.putBucketVersioning({ - Bucket: sourceBucketName, - VersioningConfiguration: { Status: 'Enabled' }, - }).promise()) - .then(() => s3.putObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - Body: content, - Metadata: originalMetadata, - CacheControl: originalCacheControl, - ContentDisposition: 
originalContentDisposition, - ContentEncoding: originalContentEncoding, - Expires: originalExpires, - Tagging: originalTagging, - }).promise()).then(res => { - etag = res.ETag; - versionId = res.VersionId; - copySource = `${sourceBucketName}/${sourceObjName}` + - `?versionId=${versionId}`; - etagTrim = etag.substring(1, etag.length - 1); - copySourceVersionId = res.VersionId; - return s3.headObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - }).promise(); - }).then(res => { - lastModified = res.LastModified; - }).then(() => s3.putObject({ Bucket: sourceBucketName, - Key: sourceObjName, - Body: secondContent }).promise()) + beforeEach(() => + bucketUtil + .createOne(sourceBucketName) + .then(() => bucketUtil.createOne(destBucketName)) + .then(() => + s3 + .putBucketVersioning({ + Bucket: sourceBucketName, + VersioningConfiguration: { Status: 'Enabled' }, + }) + .promise() + ) + .then(() => + s3 + .putObject({ + Bucket: sourceBucketName, + Key: sourceObjName, + Body: content, + Metadata: originalMetadata, + CacheControl: originalCacheControl, + ContentDisposition: originalContentDisposition, + ContentEncoding: originalContentEncoding, + Expires: originalExpires, + Tagging: originalTagging, + }) + .promise() + ) + .then(res => { + etag = res.ETag; + versionId = res.VersionId; + copySource = `${sourceBucketName}/${sourceObjName}` + `?versionId=${versionId}`; + etagTrim = etag.substring(1, etag.length - 1); + copySourceVersionId = res.VersionId; + return s3 + .headObject({ + Bucket: sourceBucketName, + Key: sourceObjName, + }) + .promise(); + }) + .then(res => { + lastModified = res.LastModified; + }) + .then(() => + s3.putObject({ Bucket: sourceBucketName, Key: sourceObjName, Body: secondContent }).promise() + ) ); - afterEach(done => async.parallel([ - next => emptyAndDeleteBucket(sourceBucketName, next), - next => emptyAndDeleteBucket(destBucketName, next), - ], done)); + afterEach(done => + async.parallel( + [ + next => emptyAndDeleteBucket(sourceBucketName, next), + next => emptyAndDeleteBucket(destBucketName, next), + ], + done + ) + ); function requestCopy(fields, cb) { - s3.copyObject(Object.assign({ - Bucket: destBucketName, - Key: destObjName, - CopySource: copySource, - }, fields), cb); + s3.copyObject( + Object.assign( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: copySource, + }, + fields + ), + cb + ); } - function successCopyCheck(error, response, copyVersionMetadata, - destBucketName, destObjName, done) { + function successCopyCheck(error, response, copyVersionMetadata, destBucketName, destObjName, done) { checkNoError(error); - assert.strictEqual(response.CopySourceVersionId, - copySourceVersionId); - assert.notStrictEqual(response.CopySourceVersionId, - response.VersionId); + assert.strictEqual(response.CopySourceVersionId, copySourceVersionId); + assert.notStrictEqual(response.CopySourceVersionId, response.VersionId); const destinationVersionId = response.VersionId; assert.strictEqual(response.ETag, etag); - const copyLastModified = new Date(response.LastModified) - .toGMTString(); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { + const copyLastModified = new Date(response.LastModified).toGMTString(); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { checkNoError(err); assert.strictEqual(res.VersionId, destinationVersionId); assert.strictEqual(res.Body.toString(), content); assert.deepStrictEqual(res.Metadata, copyVersionMetadata); - assert.strictEqual(res.LastModified.toGMTString(), - 
copyLastModified); + assert.strictEqual(res.LastModified.toGMTString(), copyLastModified); done(); }); } function checkSuccessTagging(key, value, cb) { - s3.getObjectTagging({ Bucket: destBucketName, Key: destObjName }, - (err, data) => { + s3.getObjectTagging({ Bucket: destBucketName, Key: destObjName }, (err, data) => { checkNoError(err); assert.strictEqual(data.TagSet[0].Key, key); assert.strictEqual(data.TagSet[0].Value, value); @@ -167,40 +183,52 @@ describe('Object Version Copy', () => { }); } - it('should copy an object from a source bucket to a different ' + - 'destination bucket and copy the tag set if no tagging directive' + - 'header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource }, - err => { + it( + 'should copy an object from a source bucket to a different ' + + 'destination bucket and copy the tag set if no tagging directive' + + 'header provided', + done => { + s3.copyObject({ Bucket: destBucketName, Key: destObjName, CopySource: copySource }, err => { checkNoError(err); checkSuccessTagging(originalTagKey, originalTagValue, done); }); - }); + } + ); - it('should copy an object from a source bucket to a different ' + - 'destination bucket and copy the tag set if COPY tagging ' + - 'directive header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - TaggingDirective: 'COPY' }, - err => { - checkNoError(err); - checkSuccessTagging(originalTagKey, originalTagValue, done); - }); - }); + it( + 'should copy an object from a source bucket to a different ' + + 'destination bucket and copy the tag set if COPY tagging ' + + 'directive header provided', + done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: copySource, TaggingDirective: 'COPY' }, + err => { + checkNoError(err); + checkSuccessTagging(originalTagKey, originalTagValue, done); + } + ); + } + ); - it('should copy an object from a source to the same destination ' + - 'updating tag if REPLACE tagging directive header provided', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - TaggingDirective: 'REPLACE', Tagging: newTagging }, - err => { - checkNoError(err); - checkSuccessTagging(newTagKey, newTagValue, done); - }); - }); + it( + 'should copy an object from a source to the same destination ' + + 'updating tag if REPLACE tagging directive header provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: copySource, + TaggingDirective: 'REPLACE', + Tagging: newTagging, + }, + err => { + checkNoError(err); + checkSuccessTagging(newTagKey, newTagValue, done); + } + ); + } + ); describe('Copy object with versioning updating tag set', () => { taggingTests.forEach(taggingTest => { @@ -208,27 +236,27 @@ describe('Object Version Copy', () => { const key = encodeURIComponent(taggingTest.tag.key); const value = encodeURIComponent(taggingTest.tag.value); const tagging = `${key}=${value}`; - const params = { Bucket: destBucketName, Key: destObjName, + const params = { + Bucket: destBucketName, + Key: destObjName, CopySource: copySource, - TaggingDirective: 'REPLACE', Tagging: tagging }; + TaggingDirective: 'REPLACE', + Tagging: tagging, + }; s3.copyObject(params, err => { if (taggingTest.error) { checkError(err, taggingTest.error); return done(); } - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - return checkSuccessTagging(taggingTest.tag.key, - 
taggingTest.tag.value, done); + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + return checkSuccessTagging(taggingTest.tag.key, taggingTest.tag.value, done); }); }); }); }); - it('should return InvalidArgument for a request with versionId query', - done => { - const params = { Bucket: destBucketName, Key: destObjName, - CopySource: copySource }; + it('should return InvalidArgument for a request with versionId query', done => { + const params = { Bucket: destBucketName, Key: destObjName, CopySource: copySource }; const query = { versionId: 'testVersionId' }; customS3Request(s3.copyObject, params, { query }, err => { assert(err, 'Expected error but did not find one'); @@ -238,10 +266,8 @@ describe('Object Version Copy', () => { }); }); - it('should return InvalidArgument for a request with empty string ' + - 'versionId query', done => { - const params = { Bucket: destBucketName, Key: destObjName, - CopySource: copySource }; + it('should return InvalidArgument for a request with empty string ' + 'versionId query', done => { + const params = { Bucket: destBucketName, Key: destObjName, CopySource: copySource }; const query = { versionId: '' }; customS3Request(s3.copyObject, params, { query }, err => { assert(err, 'Expected error but did not find one'); @@ -251,606 +277,653 @@ describe('Object Version Copy', () => { }); }); - it('should copy a version from a source bucket to a different ' + - 'destination bucket and copy the metadata if no metadata directve' + - 'header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource }, - (err, res) => - successCopyCheck(err, res, originalMetadata, - destBucketName, destObjName, done) + it( + 'should copy a version from a source bucket to a different ' + + 'destination bucket and copy the metadata if no metadata directve' + + 'header provided', + done => { + s3.copyObject({ Bucket: destBucketName, Key: destObjName, CopySource: copySource }, (err, res) => + successCopyCheck(err, res, originalMetadata, destBucketName, destObjName, done) ); - }); - - it('should also copy additional headers (CacheControl, ' + - 'ContentDisposition, ContentEncoding, Expires) when copying an ' + - 'object from a source bucket to a different destination bucket', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource }, - err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, Key: destObjName }, - (err, res) => { - if (err) { - done(err); - } - assert.strictEqual(res.CacheControl, - originalCacheControl); - assert.strictEqual(res.ContentDisposition, - originalContentDisposition); - // Should remove V4 streaming value 'aws-chunked' - // to be compatible with AWS behavior - assert.strictEqual(res.ContentEncoding, - 'base64,' - ); - assert.strictEqual(res.Expires.toGMTString(), - originalExpires.toGMTString()); - done(); - }); - }); - }); + } + ); - it('should copy an object from a source bucket to a different ' + - 'key in the same bucket', + it( + 'should also copy additional headers (CacheControl, ' + + 'ContentDisposition, ContentEncoding, Expires) when copying an ' + + 'object from a source bucket to a different destination bucket', done => { - s3.copyObject({ Bucket: sourceBucketName, Key: destObjName, - CopySource: copySource }, - (err, res) => - successCopyCheck(err, res, originalMetadata, - sourceBucketName, destObjName, done) - ); - }); + s3.copyObject({ Bucket: destBucketName, Key: destObjName, CopySource: copySource }, err => 
{ + checkNoError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + if (err) { + done(err); + } + assert.strictEqual(res.CacheControl, originalCacheControl); + assert.strictEqual(res.ContentDisposition, originalContentDisposition); + // Should remove V4 streaming value 'aws-chunked' + // to be compatible with AWS behavior + assert.strictEqual(res.ContentEncoding, 'base64,'); + assert.strictEqual(res.Expires.toGMTString(), originalExpires.toGMTString()); + done(); + }); + }); + } + ); - it('should copy an object from a source to the same destination ' + - '(update metadata)', done => { - s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName, - CopySource: copySource, - MetadataDirective: 'REPLACE', - Metadata: newMetadata }, - (err, res) => - successCopyCheck(err, res, newMetadata, - sourceBucketName, sourceObjName, done) - ); + it('should copy an object from a source bucket to a different ' + 'key in the same bucket', done => { + s3.copyObject({ Bucket: sourceBucketName, Key: destObjName, CopySource: copySource }, (err, res) => + successCopyCheck(err, res, originalMetadata, sourceBucketName, destObjName, done) + ); }); - it('should copy an object and replace the metadata if replace ' + - 'included as metadata directive header', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - MetadataDirective: 'REPLACE', - Metadata: newMetadata, - }, - (err, res) => - successCopyCheck(err, res, newMetadata, - destBucketName, destObjName, done) - ); + it('should copy an object from a source to the same destination ' + '(update metadata)', done => { + s3.copyObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + CopySource: copySource, + MetadataDirective: 'REPLACE', + Metadata: newMetadata, + }, + (err, res) => successCopyCheck(err, res, newMetadata, sourceBucketName, sourceObjName, done) + ); }); - it('should copy an object and replace ContentType if replace ' + - 'included as a metadata directive header, and new ContentType is ' + - 'provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - MetadataDirective: 'REPLACE', - ContentType: 'image', - }, () => { - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - if (err) { - return done(err); + it( + 'should copy an object and replace the metadata if replace ' + 'included as metadata directive header', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: copySource, + MetadataDirective: 'REPLACE', + Metadata: newMetadata, + }, + (err, res) => successCopyCheck(err, res, newMetadata, destBucketName, destObjName, done) + ); + } + ); + + it( + 'should copy an object and replace ContentType if replace ' + + 'included as a metadata directive header, and new ContentType is ' + + 'provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: copySource, + MetadataDirective: 'REPLACE', + ContentType: 'image', + }, + () => { + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.ContentType, 'image'); + return done(); + }); } - assert.strictEqual(res.ContentType, 'image'); - return done(); - }); - }); - }); + ); + } + ); - it('should copy an object and keep ContentType if replace ' + - 'included as a metadata directive header, but no new ContentType ' + - 'is provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: 
destObjName, - CopySource: copySource, MetadataDirective: 'REPLACE', - }, () => { - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - if (err) { - return done(err); + it( + 'should copy an object and keep ContentType if replace ' + + 'included as a metadata directive header, but no new ContentType ' + + 'is provided', + done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: copySource, MetadataDirective: 'REPLACE' }, + () => { + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.ContentType, 'application/octet-stream'); + return done(); + }); } - assert.strictEqual(res.ContentType, - 'application/octet-stream'); - return done(); - }); - }); - }); + ); + } + ); - it('should also replace additional headers if replace ' + - 'included as metadata directive header and new headers are ' + - 'specified', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - MetadataDirective: 'REPLACE', - CacheControl: newCacheControl, - ContentDisposition: newContentDisposition, - ContentEncoding: newContentEncoding, - Expires: newExpires, - }, err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - if (err) { - done(err); + it( + 'should also replace additional headers if replace ' + + 'included as metadata directive header and new headers are ' + + 'specified', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: copySource, + MetadataDirective: 'REPLACE', + CacheControl: newCacheControl, + ContentDisposition: newContentDisposition, + ContentEncoding: newContentEncoding, + Expires: newExpires, + }, + err => { + checkNoError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + if (err) { + done(err); + } + assert.strictEqual(res.CacheControl, newCacheControl); + assert.strictEqual(res.ContentDisposition, newContentDisposition); + // Should remove V4 streaming value 'aws-chunked' + // to be compatible with AWS behavior + assert.strictEqual(res.ContentEncoding, 'gzip,'); + assert.strictEqual(res.Expires.toGMTString(), newExpires.toGMTString()); + done(); + }); } - assert.strictEqual(res.CacheControl, newCacheControl); - assert.strictEqual(res.ContentDisposition, - newContentDisposition); - // Should remove V4 streaming value 'aws-chunked' - // to be compatible with AWS behavior - assert.strictEqual(res.ContentEncoding, 'gzip,'); - assert.strictEqual(res.Expires.toGMTString(), - newExpires.toGMTString()); - done(); - }); - }); - }); + ); + } + ); - it('should copy an object and the metadata if copy ' + - 'included as metadata directive header (and ignore any new ' + - 'metadata sent with copy request)', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - MetadataDirective: 'COPY', - Metadata: newMetadata, - }, - err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - assert.deepStrictEqual(res.Metadata, originalMetadata); - done(); - }); - }); - }); + it( + 'should copy an object and the metadata if copy ' + + 'included as metadata directive header (and ignore any new ' + + 'metadata sent with copy request)', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: copySource, + MetadataDirective: 'COPY', + Metadata: newMetadata, + }, + err => { + checkNoError(err); + 
s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + assert.deepStrictEqual(res.Metadata, originalMetadata); + done(); + }); + } + ); + } + ); - it('should copy an object and its additional headers if copy ' + - 'included as metadata directive header (and ignore any new ' + - 'headers sent with copy request)', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - MetadataDirective: 'COPY', - Metadata: newMetadata, - CacheControl: newCacheControl, - ContentDisposition: newContentDisposition, - ContentEncoding: newContentEncoding, - Expires: newExpires, - }, err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, Key: destObjName }, - (err, res) => { - if (err) { - done(err); - } - assert.strictEqual(res.CacheControl, - originalCacheControl); - assert.strictEqual(res.ContentDisposition, - originalContentDisposition); - assert.strictEqual(res.ContentEncoding, - 'base64,'); - assert.strictEqual(res.Expires.toGMTString(), - originalExpires.toGMTString()); - done(); - }); - }); - }); + it( + 'should copy an object and its additional headers if copy ' + + 'included as metadata directive header (and ignore any new ' + + 'headers sent with copy request)', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: copySource, + MetadataDirective: 'COPY', + Metadata: newMetadata, + CacheControl: newCacheControl, + ContentDisposition: newContentDisposition, + ContentEncoding: newContentEncoding, + Expires: newExpires, + }, + err => { + checkNoError(err); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + if (err) { + done(err); + } + assert.strictEqual(res.CacheControl, originalCacheControl); + assert.strictEqual(res.ContentDisposition, originalContentDisposition); + assert.strictEqual(res.ContentEncoding, 'base64,'); + assert.strictEqual(res.Expires.toGMTString(), originalExpires.toGMTString()); + done(); + }); + } + ); + } + ); it('should copy a 0 byte object to different destination', done => { const emptyFileETag = '"d41d8cd98f00b204e9800998ecf8427e"'; - s3.putObject({ Bucket: sourceBucketName, Key: sourceObjName, - Body: '', Metadata: originalMetadata }, (err, res) => { - checkNoError(err); - copySource = `${sourceBucketName}/${sourceObjName}` + - `?versionId=${res.VersionId}`; - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - }, - (err, res) => { + s3.putObject( + { Bucket: sourceBucketName, Key: sourceObjName, Body: '', Metadata: originalMetadata }, + (err, res) => { + checkNoError(err); + copySource = `${sourceBucketName}/${sourceObjName}` + `?versionId=${res.VersionId}`; + s3.copyObject({ Bucket: destBucketName, Key: destObjName, CopySource: copySource }, (err, res) => { checkNoError(err); assert.strictEqual(res.ETag, emptyFileETag); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - assert.deepStrictEqual(res.Metadata, - originalMetadata); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + assert.deepStrictEqual(res.Metadata, originalMetadata); assert.strictEqual(res.ETag, emptyFileETag); done(); }); }); - }); + } + ); }); // TODO: remove (or update to use different location constraint) in CLDSRV-639 if (constants.validStorageClasses.includes('REDUCED_REDUNDANCY')) { it('should copy a 0 byte object to same destination', done => { const emptyFileETag = '"d41d8cd98f00b204e9800998ecf8427e"'; - s3.putObject({ - Bucket: sourceBucketName, Key: sourceObjName, - 
Body: '' - }, (err, putRes) => { - checkNoError(err); - copySource = `${sourceBucketName}/${sourceObjName}` + - `?versionId=${putRes.VersionId}`; - s3.copyObject({ - Bucket: sourceBucketName, Key: sourceObjName, - CopySource: copySource, - StorageClass: 'REDUCED_REDUNDANCY', - }, (err, copyRes) => { + s3.putObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + Body: '', + }, + (err, putRes) => { checkNoError(err); - assert.notEqual(copyRes.VersionId, putRes.VersionId); - assert.strictEqual(copyRes.ETag, emptyFileETag); - s3.getObject({ - Bucket: sourceBucketName, - Key: sourceObjName - }, (err, res) => { - assert.deepStrictEqual(res.Metadata, - {}); - assert.deepStrictEqual(res.StorageClass, - 'REDUCED_REDUNDANCY'); - assert.strictEqual(res.ETag, emptyFileETag); - done(); - }); - }); - }); + copySource = `${sourceBucketName}/${sourceObjName}` + `?versionId=${putRes.VersionId}`; + s3.copyObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + CopySource: copySource, + StorageClass: 'REDUCED_REDUNDANCY', + }, + (err, copyRes) => { + checkNoError(err); + assert.notEqual(copyRes.VersionId, putRes.VersionId); + assert.strictEqual(copyRes.ETag, emptyFileETag); + s3.getObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + }, + (err, res) => { + assert.deepStrictEqual(res.Metadata, {}); + assert.deepStrictEqual(res.StorageClass, 'REDUCED_REDUNDANCY'); + assert.strictEqual(res.ETag, emptyFileETag); + done(); + } + ); + } + ); + } + ); }); - it('should copy an object to a different destination and change ' + - 'the storage class if storage class header provided', done => { - s3.copyObject({ - Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - StorageClass: 'REDUCED_REDUNDANCY', - }, err => { - checkNoError(err); - s3.getObject({ - Bucket: destBucketName, - Key: destObjName - }, (err, res) => { - assert.strictEqual(res.StorageClass, - 'REDUCED_REDUNDANCY'); - done(); - }); - }); - }); + it( + 'should copy an object to a different destination and change ' + + 'the storage class if storage class header provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: copySource, + StorageClass: 'REDUCED_REDUNDANCY', + }, + err => { + checkNoError(err); + s3.getObject( + { + Bucket: destBucketName, + Key: destObjName, + }, + (err, res) => { + assert.strictEqual(res.StorageClass, 'REDUCED_REDUNDANCY'); + done(); + } + ); + } + ); + } + ); - it('should copy an object to the same destination and change the ' + - 'storage class if the storage class header provided', done => { - s3.copyObject({ - Bucket: sourceBucketName, Key: sourceObjName, - CopySource: copySource, - StorageClass: 'REDUCED_REDUNDANCY', - }, err => { - checkNoError(err); - s3.getObject({ - Bucket: sourceBucketName, - Key: sourceObjName - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.StorageClass, - 'REDUCED_REDUNDANCY'); - done(); - }); - }); - }); + it( + 'should copy an object to the same destination and change the ' + + 'storage class if the storage class header provided', + done => { + s3.copyObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + CopySource: copySource, + StorageClass: 'REDUCED_REDUNDANCY', + }, + err => { + checkNoError(err); + s3.getObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, + }, + (err, res) => { + checkNoError(err); + assert.strictEqual(res.StorageClass, 'REDUCED_REDUNDANCY'); + done(); + } + ); + } + ); + } + ); } - it('should copy an object to a new bucket and overwrite an already ' + 
- 'existing object in the destination bucket', done => { - s3.putObject({ Bucket: destBucketName, Key: destObjName, - Body: 'overwrite me', Metadata: originalMetadata }, - err => { - checkNoError(err); - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + it( + 'should copy an object to a new bucket and overwrite an already ' + + 'existing object in the destination bucket', + done => { + s3.putObject( + { Bucket: destBucketName, Key: destObjName, Body: 'overwrite me', Metadata: originalMetadata }, + err => { + checkNoError(err); + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: copySource, + MetadataDirective: 'REPLACE', + Metadata: newMetadata, + }, + (err, res) => { + checkNoError(err); + assert.strictEqual(res.ETag, etag); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + assert.deepStrictEqual(res.Metadata, newMetadata); + assert.strictEqual(res.ETag, etag); + assert.strictEqual(res.Body.toString(), content); + done(); + }); + } + ); + } + ); + } + ); + + // skipping test as object level encryption is not implemented yet + it.skip( + 'should copy an object and change the server side encryption' + + 'option if server side encryption header provided', + done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, CopySource: copySource, - MetadataDirective: 'REPLACE', - Metadata: newMetadata, - }, (err, res) => { + ServerSideEncryption: 'AES256', + }, + err => { checkNoError(err); - assert.strictEqual(res.ETag, etag); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - assert.deepStrictEqual(res.Metadata, - newMetadata); - assert.strictEqual(res.ETag, etag); - assert.strictEqual(res.Body.toString(), content); + s3.getObject({ Bucket: destBucketName, Key: destObjName }, (err, res) => { + assert.strictEqual(res.ServerSideEncryption, 'AES256'); done(); }); - }); + } + ); + } + ); + + it( + 'should return Not Implemented error for obj. encryption using ' + 'customer-provided encryption keys', + done => { + const params = { + Bucket: destBucketName, + Key: 'key', + CopySource: copySource, + SSECustomerAlgorithm: 'AES256', + }; + s3.copyObject(params, err => { + assert.strictEqual(err.code, 'NotImplemented'); + done(); }); - }); + } + ); - // skipping test as object level encryption is not implemented yet - it.skip('should copy an object and change the server side encryption' + - 'option if server side encryption header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - ServerSideEncryption: 'AES256', - }, + it('should copy an object and set the acl on the new object', done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: copySource, ACL: 'authenticated-read' }, err => { checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - assert.strictEqual(res.ServerSideEncryption, - 'AES256'); - done(); - }); - }); - }); - - it('should return Not Implemented error for obj. 
encryption using ' + - 'customer-provided encryption keys', done => { - const params = { Bucket: destBucketName, Key: 'key', - CopySource: copySource, - SSECustomerAlgorithm: 'AES256' }; - s3.copyObject(params, err => { - assert.strictEqual(err.code, 'NotImplemented'); - done(); - }); - }); - - it('should copy an object and set the acl on the new object', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - ACL: 'authenticated-read', - }, - err => { - checkNoError(err); - s3.getObjectAcl({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { + s3.getObjectAcl({ Bucket: destBucketName, Key: destObjName }, (err, res) => { // With authenticated-read ACL, there are two // grants: // (1) FULL_CONTROL to the object owner // (2) READ to the authenticated-read assert.strictEqual(res.Grants.length, 2); - assert.strictEqual(res.Grants[0].Permission, - 'FULL_CONTROL'); - assert.strictEqual(res.Grants[1].Permission, - 'READ'); - assert.strictEqual(res.Grants[1].Grantee.URI, - 'http://acs.amazonaws.com/groups/' + - 'global/AuthenticatedUsers'); + assert.strictEqual(res.Grants[0].Permission, 'FULL_CONTROL'); + assert.strictEqual(res.Grants[1].Permission, 'READ'); + assert.strictEqual( + res.Grants[1].Grantee.URI, + 'http://acs.amazonaws.com/groups/' + 'global/AuthenticatedUsers' + ); done(); }); - }); + } + ); }); - it('should copy an object and default the acl on the new object ' + - 'to private even if the copied object had a ' + - 'different acl', done => { - s3.putObjectAcl({ Bucket: sourceBucketName, Key: sourceObjName, - ACL: 'authenticated-read' }, () => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - }, - () => { - s3.getObjectAcl({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { + it( + 'should copy an object and default the acl on the new object ' + + 'to private even if the copied object had a ' + + 'different acl', + done => { + s3.putObjectAcl({ Bucket: sourceBucketName, Key: sourceObjName, ACL: 'authenticated-read' }, () => { + s3.copyObject({ Bucket: destBucketName, Key: destObjName, CopySource: copySource }, () => { + s3.getObjectAcl({ Bucket: destBucketName, Key: destObjName }, (err, res) => { // With private ACL, there is only one grant // of FULL_CONTROL to the object owner assert.strictEqual(res.Grants.length, 1); - assert.strictEqual(res.Grants[0].Permission, - 'FULL_CONTROL'); + assert.strictEqual(res.Grants[0].Permission, 'FULL_CONTROL'); done(); }); }); - }); - }); + }); + } + ); - it('should copy a version to same object name to restore ' + - 'version of object', done => { - s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName, - CopySource: copySource }, - (err, res) => - successCopyCheck(err, res, originalMetadata, - sourceBucketName, sourceObjName, done) + it('should copy a version to same object name to restore ' + 'version of object', done => { + s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName, CopySource: copySource }, (err, res) => + successCopyCheck(err, res, originalMetadata, sourceBucketName, sourceObjName, done) ); }); - it('should return an error if attempt to copy from nonexistent bucket', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `nobucket453234/${sourceObjName}`, - }, + it('should return an error if attempt to copy from nonexistent bucket', done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: `nobucket453234/${sourceObjName}` }, err => { checkError(err, 
'NoSuchBucket'); done(); - }); - }); + } + ); + }); - it('should return an error if use invalid redirect location', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + it('should return an error if use invalid redirect location', done => { + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, CopySource: copySource, WebsiteRedirectLocation: 'google.com', }, err => { checkError(err, 'InvalidRedirectLocation'); done(); - }); - }); - + } + ); + }); - it('should return an error if attempt to copy to nonexistent bucket', - done => { - s3.copyObject({ Bucket: 'nobucket453234', Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, + it('should return an error if attempt to copy to nonexistent bucket', done => { + s3.copyObject( + { Bucket: 'nobucket453234', Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}` }, err => { checkError(err, 'NoSuchBucket'); done(); - }); - }); + } + ); + }); - it('should return an error if attempt to copy nonexistent object', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/nokey`, - }, + it('should return an error if attempt to copy nonexistent object', done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/nokey` }, err => { checkError(err, 'NoSuchKey'); done(); - }); - }); - - it('should return NoSuchKey if attempt to copy version with ' + - 'delete marker', done => { - s3.deleteObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - }, (err, data) => { - if (err) { - done(err); } - assert.strictEqual(data.DeleteMarker, true); - s3.copyObject({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, - }, - err => { - checkError(err, 'NoSuchKey'); - done(); - }); - }); + ); }); - it('should return InvalidRequest if attempt to copy specific ' + - 'version that is a delete marker', done => { - s3.deleteObject({ - Bucket: sourceBucketName, - Key: sourceObjName, - }, (err, data) => { - if (err) { - done(err); - } - assert.strictEqual(data.DeleteMarker, true); - const deleteMarkerId = data.VersionId; - s3.copyObject({ - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}` + - `?versionId=${deleteMarkerId}`, + it('should return NoSuchKey if attempt to copy version with ' + 'delete marker', done => { + s3.deleteObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, }, - err => { - checkError(err, 'InvalidRequest'); - done(); - }); - }); + (err, data) => { + if (err) { + done(err); + } + assert.strictEqual(data.DeleteMarker, true); + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, + }, + err => { + checkError(err, 'NoSuchKey'); + done(); + } + ); + } + ); }); - it('should return an error if send invalid metadata directive header', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: copySource, - MetadataDirective: 'copyHalf', + it('should return InvalidRequest if attempt to copy specific ' + 'version that is a delete marker', done => { + s3.deleteObject( + { + Bucket: sourceBucketName, + Key: sourceObjName, }, + (err, data) => { + if (err) { + done(err); + } + assert.strictEqual(data.DeleteMarker, true); + const deleteMarkerId = data.VersionId; + s3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}` + 
`?versionId=${deleteMarkerId}`, + }, + err => { + checkError(err, 'InvalidRequest'); + done(); + } + ); + } + ); + }); + + it('should return an error if send invalid metadata directive header', done => { + s3.copyObject( + { Bucket: destBucketName, Key: destObjName, CopySource: copySource, MetadataDirective: 'copyHalf' }, err => { checkError(err, 'InvalidArgument'); done(); - }); - }); + } + ); + }); describe('copying by another account', () => { const otherAccountBucket = 'otheraccountbucket42342342342'; const otherAccountKey = 'key'; - beforeEach(() => otherAccountBucketUtility - .createOne(otherAccountBucket) - ); + beforeEach(() => otherAccountBucketUtility.createOne(otherAccountBucket)); - afterEach(() => otherAccountBucketUtility.empty(otherAccountBucket) - .then(() => otherAccountBucketUtility - .deleteOne(otherAccountBucket)) + afterEach(() => + otherAccountBucketUtility + .empty(otherAccountBucket) + .then(() => otherAccountBucketUtility.deleteOne(otherAccountBucket)) ); - it('should not allow an account without read persmission on the ' + - 'source object to copy the object', done => { - otherAccountS3.copyObject({ Bucket: otherAccountBucket, - Key: otherAccountKey, - CopySource: copySource, - }, - err => { - checkError(err, 'AccessDenied'); - done(); - }); - }); - - it('should not allow an account without write persmission on the ' + - 'destination bucket to copy the object', done => { - otherAccountS3.putObject({ Bucket: otherAccountBucket, - Key: otherAccountKey, Body: '' }, () => { - otherAccountS3.copyObject({ Bucket: destBucketName, - Key: destObjName, - CopySource: `${otherAccountBucket}/${otherAccountKey}`, - }, + it( + 'should not allow an account without read persmission on the ' + 'source object to copy the object', + done => { + otherAccountS3.copyObject( + { Bucket: otherAccountBucket, Key: otherAccountKey, CopySource: copySource }, err => { checkError(err, 'AccessDenied'); done(); - }); - }); - }); + } + ); + } + ); - it('should allow an account with read permission on the ' + - 'source object and write permission on the destination ' + - 'bucket to copy the object', done => { - s3.putObjectAcl({ Bucket: sourceBucketName, - Key: sourceObjName, ACL: 'public-read', VersionId: - versionId }, () => { - otherAccountS3.copyObject({ Bucket: otherAccountBucket, - Key: otherAccountKey, - CopySource: copySource, - }, - err => { - checkNoError(err); - done(); - }); - }); - }); + it( + 'should not allow an account without write persmission on the ' + + 'destination bucket to copy the object', + done => { + otherAccountS3.putObject({ Bucket: otherAccountBucket, Key: otherAccountKey, Body: '' }, () => { + otherAccountS3.copyObject( + { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${otherAccountBucket}/${otherAccountKey}`, + }, + err => { + checkError(err, 'AccessDenied'); + done(); + } + ); + }); + } + ); + + it( + 'should allow an account with read permission on the ' + + 'source object and write permission on the destination ' + + 'bucket to copy the object', + done => { + s3.putObjectAcl( + { Bucket: sourceBucketName, Key: sourceObjName, ACL: 'public-read', VersionId: versionId }, + () => { + otherAccountS3.copyObject( + { Bucket: otherAccountBucket, Key: otherAccountKey, CopySource: copySource }, + err => { + checkNoError(err); + done(); + } + ); + } + ); + } + ); }); - it('If-Match: returns no error when ETag match, with double quotes ' + - 'around ETag', - done => { - requestCopy({ CopySourceIfMatch: etag }, err => { - checkNoError(err); - done(); - }); + 
it('If-Match: returns no error when ETag match, with double quotes ' + 'around ETag', done => { + requestCopy({ CopySourceIfMatch: etag }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when one of ETags match, with double ' + - 'quotes around ETag', - done => { - requestCopy({ CopySourceIfMatch: - `non-matching,${etag}` }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when one of ETags match, with double ' + 'quotes around ETag', done => { + requestCopy({ CopySourceIfMatch: `non-matching,${etag}` }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when ETag match, without double ' + - 'quotes around ETag', - done => { - requestCopy({ CopySourceIfMatch: etagTrim }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when ETag match, without double ' + 'quotes around ETag', done => { + requestCopy({ CopySourceIfMatch: etagTrim }, err => { + checkNoError(err); + done(); }); + }); - it('If-Match: returns no error when one of ETags match, without ' + - 'double quotes around ETag', - done => { - requestCopy({ CopySourceIfMatch: - `non-matching,${etagTrim}` }, err => { - checkNoError(err); - done(); - }); + it('If-Match: returns no error when one of ETags match, without ' + 'double quotes around ETag', done => { + requestCopy({ CopySourceIfMatch: `non-matching,${etagTrim}` }, err => { + checkNoError(err); + done(); }); + }); it('If-Match: returns no error when ETag match with *', done => { requestCopy({ CopySourceIfMatch: '*' }, err => { @@ -859,13 +932,12 @@ describe('Object Version Copy', () => { }); }); - it('If-Match: returns PreconditionFailed when ETag does not match', - done => { - requestCopy({ CopySourceIfMatch: 'non-matching ETag' }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it('If-Match: returns PreconditionFailed when ETag does not match', done => { + requestCopy({ CopySourceIfMatch: 'non-matching ETag' }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); it('If-None-Match: returns no error when ETag does not match', done => { requestCopy({ CopySourceIfNoneMatch: 'non-matching' }, err => { @@ -874,286 +946,320 @@ describe('Object Version Copy', () => { }); }); - it('If-None-Match: returns no error when all ETags do not match', - done => { - requestCopy({ + it('If-None-Match: returns no error when all ETags do not match', done => { + requestCopy( + { CopySourceIfNoneMatch: 'non-matching,non-matching-either', - }, err => { + }, + err => { checkNoError(err); done(); - }); - }); + } + ); + }); - it('If-None-Match: returns NotModified when ETag match, with double ' + - 'quotes around ETag', - done => { - requestCopy({ CopySourceIfNoneMatch: etag }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it('If-None-Match: returns NotModified when ETag match, with double ' + 'quotes around ETag', done => { + requestCopy({ CopySourceIfNoneMatch: etag }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); - it('If-None-Match: returns NotModified when one of ETags match, with ' + - 'double quotes around ETag', - done => { - requestCopy({ + it('If-None-Match: returns NotModified when one of ETags match, with ' + 'double quotes around ETag', done => { + requestCopy( + { CopySourceIfNoneMatch: `non-matching,${etag}`, - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); - }); - - it('If-None-Match: returns NotModified when ETag match, without ' + - 'double quotes around ETag', 
- done => { - requestCopy({ CopySourceIfNoneMatch: etagTrim }, err => { + }, + err => { checkError(err, 'PreconditionFailed'); done(); - }); - }); + } + ); + }); - it('If-None-Match: returns NotModified when one of ETags match, ' + - 'without double quotes around ETag', - done => { - requestCopy({ - CopySourceIfNoneMatch: `non-matching,${etagTrim}`, - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it('If-None-Match: returns NotModified when ETag match, without ' + 'double quotes around ETag', done => { + requestCopy({ CopySourceIfNoneMatch: etagTrim }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); - it('If-Modified-Since: returns no error if Last modified date is ' + - 'greater', + it( + 'If-None-Match: returns NotModified when one of ETags match, ' + 'without double quotes around ETag', done => { - requestCopy({ CopySourceIfModifiedSince: dateFromNow(-1) }, + requestCopy( + { + CopySourceIfNoneMatch: `non-matching,${etagTrim}`, + }, err => { - checkNoError(err); + checkError(err, 'PreconditionFailed'); done(); - }); + } + ); + } + ); + + it('If-Modified-Since: returns no error if Last modified date is ' + 'greater', done => { + requestCopy({ CopySourceIfModifiedSince: dateFromNow(-1) }, err => { + checkNoError(err); + done(); }); + }); // Skipping this test, because real AWS does not provide error as // expected - it.skip('If-Modified-Since: returns NotModified if Last modified ' + - 'date is lesser', - done => { - requestCopy({ CopySourceIfModifiedSince: dateFromNow(1) }, - err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it.skip('If-Modified-Since: returns NotModified if Last modified ' + 'date is lesser', done => { + requestCopy({ CopySourceIfModifiedSince: dateFromNow(1) }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); - it('If-Modified-Since: returns NotModified if Last modified ' + - 'date is equal', - done => { - requestCopy({ CopySourceIfModifiedSince: - dateConvert(lastModified) }, - err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it('If-Modified-Since: returns NotModified if Last modified ' + 'date is equal', done => { + requestCopy({ CopySourceIfModifiedSince: dateConvert(lastModified) }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); - it('If-Unmodified-Since: returns no error when lastModified date is ' + - 'greater', - done => { - requestCopy({ CopySourceIfUnmodifiedSince: dateFromNow(1) }, - err => { - checkNoError(err); - done(); - }); + it('If-Unmodified-Since: returns no error when lastModified date is ' + 'greater', done => { + requestCopy({ CopySourceIfUnmodifiedSince: dateFromNow(1) }, err => { + checkNoError(err); + done(); }); + }); - it('If-Unmodified-Since: returns no error when lastModified ' + - 'date is equal', - done => { - requestCopy({ CopySourceIfUnmodifiedSince: - dateConvert(lastModified) }, - err => { - checkNoError(err); - done(); - }); + it('If-Unmodified-Since: returns no error when lastModified ' + 'date is equal', done => { + requestCopy({ CopySourceIfUnmodifiedSince: dateConvert(lastModified) }, err => { + checkNoError(err); + done(); }); + }); - it('If-Unmodified-Since: returns PreconditionFailed when ' + - 'lastModified date is lesser', - done => { - requestCopy({ CopySourceIfUnmodifiedSince: dateFromNow(-1) }, - err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + it('If-Unmodified-Since: returns PreconditionFailed when ' + 'lastModified date is lesser', done => { + requestCopy({ 
CopySourceIfUnmodifiedSince: dateFromNow(-1) }, err => { + checkError(err, 'PreconditionFailed'); + done(); }); + }); - it('If-Match & If-Unmodified-Since: returns no error when match Etag ' + - 'and lastModified is greater', + it( + 'If-Match & If-Unmodified-Since: returns no error when match Etag ' + 'and lastModified is greater', done => { - requestCopy({ + requestCopy( + { + CopySourceIfMatch: etagTrim, + CopySourceIfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); + } + ); + + it('If-Match match & If-Unmodified-Since match', done => { + requestCopy( + { CopySourceIfMatch: etagTrim, - CopySourceIfUnmodifiedSince: dateFromNow(-1), - }, err => { + CopySourceIfUnmodifiedSince: dateFromNow(1), + }, + err => { checkNoError(err); done(); - }); - }); - - it('If-Match match & If-Unmodified-Since match', done => { - requestCopy({ - CopySourceIfMatch: etagTrim, - CopySourceIfUnmodifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + } + ); }); it('If-Match not match & If-Unmodified-Since not match', done => { - requestCopy({ - CopySourceIfMatch: 'non-matching', - CopySourceIfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfMatch: 'non-matching', + CopySourceIfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-Match not match & If-Unmodified-Since match', done => { - requestCopy({ - CopySourceIfMatch: 'non-matching', - CopySourceIfUnmodifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfMatch: 'non-matching', + CopySourceIfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); // Skipping this test, because real AWS does not provide error as // expected it.skip('If-Match match & If-Modified-Since not match', done => { - requestCopy({ - CopySourceIfMatch: etagTrim, - CopySourceIfModifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + requestCopy( + { + CopySourceIfMatch: etagTrim, + CopySourceIfModifiedSince: dateFromNow(1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-Match match & If-Modified-Since match', done => { - requestCopy({ - CopySourceIfMatch: etagTrim, - CopySourceIfModifiedSince: dateFromNow(-1), - }, err => { - checkNoError(err); - done(); - }); + requestCopy( + { + CopySourceIfMatch: etagTrim, + CopySourceIfModifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-Match not match & If-Modified-Since not match', done => { - requestCopy({ - CopySourceIfMatch: 'non-matching', - CopySourceIfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfMatch: 'non-matching', + CopySourceIfModifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-Match not match & If-Modified-Since match', done => { - requestCopy({ - CopySourceIfMatch: 'non-matching', - CopySourceIfModifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfMatch: 'non-matching', + CopySourceIfModifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); - it('If-None-Match & If-Modified-Since: returns NotModified when 
Etag ' + - 'does not match and lastModified is greater', + it( + 'If-None-Match & If-Modified-Since: returns NotModified when Etag ' + + 'does not match and lastModified is greater', done => { - requestCopy({ + requestCopy( + { + CopySourceIfNoneMatch: etagTrim, + CopySourceIfModifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); + } + ); + + it('If-None-Match not match & If-Modified-Since not match', done => { + requestCopy( + { CopySourceIfNoneMatch: etagTrim, - CopySourceIfModifiedSince: dateFromNow(-1), - }, err => { + CopySourceIfModifiedSince: dateFromNow(1), + }, + err => { checkError(err, 'PreconditionFailed'); done(); - }); - }); - - it('If-None-Match not match & If-Modified-Since not match', done => { - requestCopy({ - CopySourceIfNoneMatch: etagTrim, - CopySourceIfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + } + ); }); it('If-None-Match match & If-Modified-Since match', done => { - requestCopy({ - CopySourceIfNoneMatch: 'non-matching', - CopySourceIfModifiedSince: dateFromNow(-1), - }, err => { - checkNoError(err); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: 'non-matching', + CopySourceIfModifiedSince: dateFromNow(-1), + }, + err => { + checkNoError(err); + done(); + } + ); }); // Skipping this test, because real AWS does not provide error as // expected it.skip('If-None-Match match & If-Modified-Since not match', done => { - requestCopy({ - CopySourceIfNoneMatch: 'non-matching', - CopySourceIfModifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: 'non-matching', + CopySourceIfModifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-None-Match match & If-Unmodified-Since match', done => { - requestCopy({ - CopySourceIfNoneMatch: 'non-matching', - CopySourceIfUnmodifiedSince: dateFromNow(1), - }, err => { - checkNoError(err); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: 'non-matching', + CopySourceIfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkNoError(err); + done(); + } + ); }); it('If-None-Match match & If-Unmodified-Since not match', done => { - requestCopy({ - CopySourceIfNoneMatch: 'non-matching', - CopySourceIfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: 'non-matching', + CopySourceIfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-None-Match not match & If-Unmodified-Since match', done => { - requestCopy({ - CopySourceIfNoneMatch: etagTrim, - CopySourceIfUnmodifiedSince: dateFromNow(1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: etagTrim, + CopySourceIfUnmodifiedSince: dateFromNow(1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); it('If-None-Match not match & If-Unmodified-Since not match', done => { - requestCopy({ - CopySourceIfNoneMatch: etagTrim, - CopySourceIfUnmodifiedSince: dateFromNow(-1), - }, err => { - checkError(err, 'PreconditionFailed'); - done(); - }); + requestCopy( + { + CopySourceIfNoneMatch: etagTrim, + CopySourceIfUnmodifiedSince: dateFromNow(-1), + }, + err => { + checkError(err, 'PreconditionFailed'); + done(); + } + ); }); }); }); diff --git 
a/tests/functional/aws-node-sdk/test/versioning/objectDelete.js b/tests/functional/aws-node-sdk/test/versioning/objectDelete.js index a23f697eba..304da0b06b 100644 --- a/tests/functional/aws-node-sdk/test/versioning/objectDelete.js +++ b/tests/functional/aws-node-sdk/test/versioning/objectDelete.js @@ -4,19 +4,15 @@ const async = require('async'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); -const { - versioningSuspended, - versioningEnabled, - removeAllVersions, -} = require('../../lib/utility/versioning-util.js'); +const { versioningSuspended, versioningEnabled, removeAllVersions } = require('../../lib/utility/versioning-util.js'); const bucket = `versioning-bucket-${Date.now()}`; const key = 'anObject'; // formats differ for AWS and S3, use respective sample ids to obtain // correct error response in tests -const nonExistingId = process.env.AWS_ON_AIR ? - 'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' : - '3939393939393939393936493939393939393939756e6437'; +const nonExistingId = process.env.AWS_ON_AIR + ? 'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' + : '3939393939393939393936493939393939393939756e6437'; function _assertNoError(err, desc) { assert.strictEqual(err, null, `Unexpected err ${desc || ''}: ${err}`); @@ -33,11 +29,14 @@ describe('delete marker creation in bucket with null version', () => { if (err) { return done(err); } // put null object - return s3.putObject({ - Bucket: bucket, - Key: key, - Body: nullVersionBody, - }, done); + return s3.putObject( + { + Bucket: bucket, + Key: key, + Body: nullVersionBody, + }, + done + ); }); }); @@ -47,78 +46,85 @@ describe('delete marker creation in bucket with null version', () => { return done(err); } return s3.deleteBucket({ Bucket: bucket }, err => { - assert.strictEqual(err, null, - `Error deleting bucket: ${err}`); + assert.strictEqual(err, null, `Error deleting bucket: ${err}`); return done(); }); }); }); it('should keep the null version if versioning enabled', done => { - async.waterfall([ - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, err => callback(err)), - callback => - s3.listObjectVersions({ Bucket: bucket }, (err, data) => { - _assertNoError(err, 'listing object versions'); - assert.strictEqual(data.Versions.length, 1); - assert.strictEqual(data.Versions[0].VersionId, - 'null'); - return callback(); - }), - callback => s3.deleteObject({ Bucket: bucket, Key: key }, - (err, data) => { - _assertNoError(err, 'creating delete marker'); - assert.strictEqual(data.DeleteMarker, true); - assert(data.VersionId); - return callback(null, data.VersionId); - }), - (deleteMarkerVerId, callback) => - s3.listObjectVersions({ Bucket: bucket }, (err, data) => { - _assertNoError(err, 'listing object versions'); - assert.strictEqual(data.Versions.length, 1); - assert.strictEqual(data.Versions[0].VersionId, - 'null'); - assert.strictEqual(data.DeleteMarkers[0].VersionId, - deleteMarkerVerId); - return callback(); - }), - ], done); - }); - - it('delete marker overwrites null version if versioning suspended', - done => { - async.waterfall([ - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, err => callback(err)), - callback => - s3.listObjectVersions({ Bucket: bucket }, (err, data) => { - _assertNoError(err, 'listing object versions'); - assert.strictEqual(data.Versions.length, 1); - assert.strictEqual(data.Versions[0].VersionId, - 'null'); - return callback(); - }), - callback => 
s3.deleteObject({ Bucket: bucket, Key: key }, - (err, data) => { - _assertNoError(err, 'creating delete marker'); - assert.strictEqual(data.DeleteMarker, true); - assert.strictEqual(data.VersionId, 'null'); - return callback(null, data.VersionId); - }), - (deleteMarkerVerId, callback) => - s3.listObjectVersions({ Bucket: bucket }, (err, data) => { - _assertNoError(err, 'listing object versions'); - assert.strictEqual(data.Versions.length, 0); - assert.strictEqual(data.DeleteMarkers[0].VersionId, - deleteMarkerVerId); - return callback(); - }), - ], done); + async.waterfall( + [ + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + err => callback(err) + ), + callback => + s3.listObjectVersions({ Bucket: bucket }, (err, data) => { + _assertNoError(err, 'listing object versions'); + assert.strictEqual(data.Versions.length, 1); + assert.strictEqual(data.Versions[0].VersionId, 'null'); + return callback(); + }), + callback => + s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { + _assertNoError(err, 'creating delete marker'); + assert.strictEqual(data.DeleteMarker, true); + assert(data.VersionId); + return callback(null, data.VersionId); + }), + (deleteMarkerVerId, callback) => + s3.listObjectVersions({ Bucket: bucket }, (err, data) => { + _assertNoError(err, 'listing object versions'); + assert.strictEqual(data.Versions.length, 1); + assert.strictEqual(data.Versions[0].VersionId, 'null'); + assert.strictEqual(data.DeleteMarkers[0].VersionId, deleteMarkerVerId); + return callback(); + }), + ], + done + ); + }); + + it('delete marker overwrites null version if versioning suspended', done => { + async.waterfall( + [ + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + err => callback(err) + ), + callback => + s3.listObjectVersions({ Bucket: bucket }, (err, data) => { + _assertNoError(err, 'listing object versions'); + assert.strictEqual(data.Versions.length, 1); + assert.strictEqual(data.Versions[0].VersionId, 'null'); + return callback(); + }), + callback => + s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { + _assertNoError(err, 'creating delete marker'); + assert.strictEqual(data.DeleteMarker, true); + assert.strictEqual(data.VersionId, 'null'); + return callback(null, data.VersionId); + }), + (deleteMarkerVerId, callback) => + s3.listObjectVersions({ Bucket: bucket }, (err, data) => { + _assertNoError(err, 'listing object versions'); + assert.strictEqual(data.Versions.length, 0); + assert.strictEqual(data.DeleteMarkers[0].VersionId, deleteMarkerVerId); + return callback(); + }), + ], + done + ); }); }); }); @@ -144,63 +150,72 @@ describe('aws-node-sdk test delete object', () => { return done(err); } return s3.deleteBucket({ Bucket: bucket }, err => { - assert.strictEqual(err, null, - `Error deleting bucket: ${err}`); + assert.strictEqual(err, null, `Error deleting bucket: ${err}`); return done(); }); }); }); - it('delete non existent object should not create a delete marker', - done => { - s3.deleteObject({ - Bucket: bucket, - Key: `${key}000`, - }, (err, res) => { - if (err) { - return done(err); + it('delete non existent object should not create a delete marker', done => { + s3.deleteObject( + { + Bucket: bucket, + Key: `${key}000`, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.DeleteMarker, undefined); + assert.strictEqual(res.VersionId, undefined); + return done(); } - 
assert.strictEqual(res.DeleteMarker, undefined); - assert.strictEqual(res.VersionId, undefined); - return done(); - }); + ); }); it('creating non-versioned object', done => { - s3.putObject({ - Bucket: bucket, - Key: key, - }, (err, res) => { - if (err) { - return done(err); + s3.putObject( + { + Bucket: bucket, + Key: key, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.equal(res.VersionId, undefined); + return done(); } - assert.equal(res.VersionId, undefined); - return done(); - }); + ); }); - it('delete in non-versioned bucket should not create delete marker', - done => { - s3.putObject({ - Bucket: bucket, - Key: key, - }, (err, res) => { - if (err) { - return done(err); - } - assert.equal(res.VersionId, undefined); - return s3.deleteObject({ + it('delete in non-versioned bucket should not create delete marker', done => { + s3.putObject( + { Bucket: bucket, - Key: `${key}2`, - }, (err, res) => { + Key: key, + }, + (err, res) => { if (err) { return done(err); } - assert.strictEqual(res.DeleteMarker, undefined); - assert.strictEqual(res.VersionId, undefined); - return done(); - }); - }); + assert.equal(res.VersionId, undefined); + return s3.deleteObject( + { + Bucket: bucket, + Key: `${key}2`, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.DeleteMarker, undefined); + assert.strictEqual(res.VersionId, undefined); + return done(); + } + ); + } + ); }); it('enable versioning', done => { @@ -213,202 +228,243 @@ describe('aws-node-sdk test delete object', () => { s3.putBucketVersioning(params, done); }); - it('should not send back error for non-existing key (specific version)', - done => { - s3.deleteObject({ + it('should not send back error for non-existing key (specific version)', done => { + s3.deleteObject( + { Bucket: bucket, Key: `${key}3`, VersionId: 'null', - }, err => { + }, + err => { if (err) { return done(err); } return done(); - }); - }); + } + ); + }); it('delete non existent object should create a delete marker', done => { - s3.deleteObject({ - Bucket: bucket, - Key: `${key}2`, - }, (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.DeleteMarker, true); - assert.notEqual(res.VersionId, undefined); - return s3.deleteObject({ + s3.deleteObject( + { Bucket: bucket, Key: `${key}2`, - }, (err, res2) => { + }, + (err, res) => { if (err) { return done(err); } - assert.strictEqual(res2.DeleteMarker, true); - assert.notEqual(res2.VersionId, res.VersionId); - return s3.deleteObject({ - Bucket: bucket, - Key: `${key}2`, - VersionId: res.VersionId, - }, err => { - if (err) { - return done(err); - } - return s3.deleteObject({ + assert.strictEqual(res.DeleteMarker, true); + assert.notEqual(res.VersionId, undefined); + return s3.deleteObject( + { Bucket: bucket, Key: `${key}2`, - VersionId: res2.VersionId, - }, err => done(err)); - }); - }); - }); + }, + (err, res2) => { + if (err) { + return done(err); + } + assert.strictEqual(res2.DeleteMarker, true); + assert.notEqual(res2.VersionId, res.VersionId); + return s3.deleteObject( + { + Bucket: bucket, + Key: `${key}2`, + VersionId: res.VersionId, + }, + err => { + if (err) { + return done(err); + } + return s3.deleteObject( + { + Bucket: bucket, + Key: `${key}2`, + VersionId: res2.VersionId, + }, + err => done(err) + ); + } + ); + } + ); + } + ); }); - it('delete non existent version should not create delete marker', - done => { - s3.deleteObject({ - Bucket: bucket, - Key: key, - VersionId: nonExistingId, - }, (err, res) => { - if (err) { - return done(err); - 
} - assert.strictEqual(res.VersionId, nonExistingId); - return s3.listObjectVersions({ Bucket: bucket }, (err, res) => { + it('delete non existent version should not create delete marker', done => { + s3.deleteObject( + { + Bucket: bucket, + Key: key, + VersionId: nonExistingId, + }, + (err, res) => { if (err) { return done(err); } - assert.strictEqual(res.DeleteMarkers.length, 0); - return done(); - }); - }); + assert.strictEqual(res.VersionId, nonExistingId); + return s3.listObjectVersions({ Bucket: bucket }, (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.DeleteMarkers.length, 0); + return done(); + }); + } + ); }); it('put a version to the object', done => { - s3.putObject({ - Bucket: bucket, - Key: key, - Body: 'test', - }, (err, res) => { - if (err) { - return done(err); + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: 'test', + }, + (err, res) => { + if (err) { + return done(err); + } + versionIds.push('null'); + versionIds.push(res.VersionId); + assert.notEqual(res.VersionId, undefined); + return done(); } - versionIds.push('null'); - versionIds.push(res.VersionId); - assert.notEqual(res.VersionId, undefined); - return done(); - }); + ); }); it('should create a delete marker', done => { - s3.deleteObject({ - Bucket: bucket, - Key: key, - }, (err, res) => { - if (err) { - return done(err); + s3.deleteObject( + { + Bucket: bucket, + Key: key, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.DeleteMarker, true); + assert.strictEqual( + versionIds.find(item => item === res.VersionId), + undefined + ); + versionIds.push(res.VersionId); + return done(); } - assert.strictEqual(res.DeleteMarker, true); - assert.strictEqual( - versionIds.find(item => item === res.VersionId), - undefined); - versionIds.push(res.VersionId); - return done(); - }); + ); }); it('should return 404 with a delete marker', done => { - s3.getObject({ - Bucket: bucket, - Key: key, - }, function test(err) { - if (!err) { - return done(new Error('should return 404')); + s3.getObject( + { + Bucket: bucket, + Key: key, + }, + function test(err) { + if (!err) { + return done(new Error('should return 404')); + } + const headers = this.httpResponse.headers; + assert.strictEqual(headers['x-amz-delete-marker'], 'true'); + return done(); } - const headers = this.httpResponse.headers; - assert.strictEqual(headers['x-amz-delete-marker'], 'true'); - return done(); - }); + ); }); it('should delete the null version', done => { const version = versionIds.shift(); - s3.deleteObject({ - Bucket: bucket, - Key: key, - VersionId: version, - }, (err, res) => { - if (err) { - return done(err); + s3.deleteObject( + { + Bucket: bucket, + Key: key, + VersionId: version, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.VersionId, version); + assert.equal(res.DeleteMarker, undefined); + return done(); } - assert.strictEqual(res.VersionId, version); - assert.equal(res.DeleteMarker, undefined); - return done(); - }); + ); }); it('should delete the versioned object', done => { const version = versionIds.shift(); - s3.deleteObject({ - Bucket: bucket, - Key: key, - VersionId: version, - }, (err, res) => { - if (err) { - return done(err); + s3.deleteObject( + { + Bucket: bucket, + Key: key, + VersionId: version, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.VersionId, version); + assert.equal(res.DeleteMarker, undefined); + return done(); } - assert.strictEqual(res.VersionId, version); - 
assert.equal(res.DeleteMarker, undefined); - return done(); - }); + ); }); it('should delete the delete-marker version', done => { const version = versionIds.shift(); - s3.deleteObject({ - Bucket: bucket, - Key: key, - VersionId: version, - }, function test(err, res) { - if (err) { - return done(err); + s3.deleteObject( + { + Bucket: bucket, + Key: key, + VersionId: version, + }, + function test(err, res) { + if (err) { + return done(err); + } + assert.strictEqual(res.VersionId, version); + assert.equal(res.DeleteMarker, true); + // deleting a delete marker should set the x-amz-delete-marker header + const headers = this.httpResponse.headers; + assert.strictEqual(headers['x-amz-delete-marker'], 'true'); + return done(); } - assert.strictEqual(res.VersionId, version); - assert.equal(res.DeleteMarker, true); - // deleting a delete marker should set the x-amz-delete-marker header - const headers = this.httpResponse.headers; - assert.strictEqual(headers['x-amz-delete-marker'], 'true'); - return done(); - }); + ); }); it('put a new version', done => { - s3.putObject({ - Bucket: bucket, - Key: key, - Body: 'test', - }, (err, res) => { - if (err) { - return done(err); + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: 'test', + }, + (err, res) => { + if (err) { + return done(err); + } + versionIds.push(res.VersionId); + assert.notEqual(res.VersionId, undefined); + return done(); } - versionIds.push(res.VersionId); - assert.notEqual(res.VersionId, undefined); - return done(); - }); + ); }); it('get the null version', done => { - s3.getObject({ - Bucket: bucket, - Key: key, - VersionId: 'null', - }, err => { - if (!err || err.code !== 'NoSuchVersion') { - return done(err || 'should send back an error'); + s3.getObject( + { + Bucket: bucket, + Key: key, + VersionId: 'null', + }, + err => { + if (!err || err.code !== 'NoSuchVersion') { + return done(err || 'should send back an error'); + } + return done(); } - return done(); - }); + ); }); it('suspending versioning', done => { @@ -422,45 +478,57 @@ describe('aws-node-sdk test delete object', () => { }); it('delete non existent object should create a delete marker', done => { - s3.deleteObject({ - Bucket: bucket, - Key: `${key}2`, - }, (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.DeleteMarker, true); - assert.notEqual(res.VersionId, undefined); - return s3.deleteObject({ + s3.deleteObject( + { Bucket: bucket, Key: `${key}2`, - }, (err, res2) => { + }, + (err, res) => { if (err) { return done(err); } - assert.strictEqual(res2.DeleteMarker, true); - assert.strictEqual(res2.VersionId, res.VersionId); - return s3.deleteObject({ - Bucket: bucket, - Key: `${key}2`, - VersionId: res.VersionId, - }, err => done(err)); - }); - }); + assert.strictEqual(res.DeleteMarker, true); + assert.notEqual(res.VersionId, undefined); + return s3.deleteObject( + { + Bucket: bucket, + Key: `${key}2`, + }, + (err, res2) => { + if (err) { + return done(err); + } + assert.strictEqual(res2.DeleteMarker, true); + assert.strictEqual(res2.VersionId, res.VersionId); + return s3.deleteObject( + { + Bucket: bucket, + Key: `${key}2`, + VersionId: res.VersionId, + }, + err => done(err) + ); + } + ); + } + ); }); it('should put a new delete marker', done => { - s3.deleteObject({ - Bucket: bucket, - Key: key, - }, (err, res) => { - if (err) { - return done(err); + s3.deleteObject( + { + Bucket: bucket, + Key: key, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.DeleteMarker, true); + 
assert.strictEqual(res.VersionId, 'null'); + return done(); } - assert.strictEqual(res.DeleteMarker, true); - assert.strictEqual(res.VersionId, 'null'); - return done(); - }); + ); }); it('enabling versioning', done => { @@ -474,35 +542,41 @@ describe('aws-node-sdk test delete object', () => { }); it('should get the null version', done => { - s3.getObject({ - Bucket: bucket, - Key: key, - VersionId: 'null', - }, function test(err) { - const headers = this.httpResponse.headers; - assert.strictEqual(headers['x-amz-delete-marker'], 'true'); - assert.strictEqual(headers['x-amz-version-id'], 'null'); - if (err && err.code !== 'MethodNotAllowed') { - return done(err); - } else if (err) { - return done(); + s3.getObject( + { + Bucket: bucket, + Key: key, + VersionId: 'null', + }, + function test(err) { + const headers = this.httpResponse.headers; + assert.strictEqual(headers['x-amz-delete-marker'], 'true'); + assert.strictEqual(headers['x-amz-version-id'], 'null'); + if (err && err.code !== 'MethodNotAllowed') { + return done(err); + } else if (err) { + return done(); + } + return done('should return an error'); } - return done('should return an error'); - }); + ); }); it('put a new version to store the null version', done => { - s3.putObject({ - Bucket: bucket, - Key: key, - Body: 'test', - }, (err, res) => { - if (err) { - return done(err); + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: 'test', + }, + (err, res) => { + if (err) { + return done(err); + } + versionIds.push(res.VersionId); + return done(); } - versionIds.push(res.VersionId); - return done(); - }); + ); }); it('suspending versioning', done => { @@ -516,17 +590,20 @@ describe('aws-node-sdk test delete object', () => { }); it('put null version', done => { - s3.putObject({ - Bucket: bucket, - Key: key, - Body: 'test-null-version', - }, (err, res) => { - if (err) { - return done(err); + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: 'test-null-version', + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.VersionId, undefined); + return done(); } - assert.strictEqual(res.VersionId, undefined); - return done(); - }); + ); }); it('enabling versioning', done => { @@ -540,144 +617,177 @@ describe('aws-node-sdk test delete object', () => { }); it('should get the null version', done => { - s3.getObject({ - Bucket: bucket, - Key: key, - }, (err, res) => { - if (err) { - return done(err); + s3.getObject( + { + Bucket: bucket, + Key: key, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.Body.toString(), 'test-null-version'); + return done(); } - assert.strictEqual(res.Body.toString(), 'test-null-version'); - return done(); - }); + ); }); it('should add a delete marker', done => { - s3.deleteObject({ - Bucket: bucket, - Key: key, - }, (err, res) => { - if (err) { - return done(err); + s3.deleteObject( + { + Bucket: bucket, + Key: key, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.DeleteMarker, true); + versionIds.push(res.VersionId); + return done(); } - assert.strictEqual(res.DeleteMarker, true); - versionIds.push(res.VersionId); - return done(); - }); + ); }); it('should get the null version', done => { - s3.getObject({ - Bucket: bucket, - Key: key, - VersionId: 'null', - }, (err, res) => { - if (err) { - return done(err); + s3.getObject( + { + Bucket: bucket, + Key: key, + VersionId: 'null', + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.Body.toString(), 'test-null-version'); + 
return done(); } - assert.strictEqual(res.Body.toString(), 'test-null-version'); - return done(); - }); + ); }); it('should add a delete marker', done => { - s3.deleteObject({ - Bucket: bucket, - Key: key, - }, (err, res) => { - if (err) { - return done(err); + s3.deleteObject( + { + Bucket: bucket, + Key: key, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.DeleteMarker, true); + assert.strictEqual( + versionIds.find(item => item === res.VersionId), + undefined + ); + versionIds.push(res.VersionId); + return done(); } - assert.strictEqual(res.DeleteMarker, true); - assert.strictEqual( - versionIds.find(item => item === res.VersionId), - undefined); - versionIds.push(res.VersionId); - return done(); - }); + ); }); it('should set the null version as master', done => { let version = versionIds.pop(); - s3.deleteObject({ - Bucket: bucket, - Key: key, - VersionId: version, - }, (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.VersionId, version); - assert.strictEqual(res.DeleteMarker, true); - version = versionIds.pop(); - return s3.deleteObject({ + s3.deleteObject( + { Bucket: bucket, Key: key, VersionId: version, - }, (err, res) => { + }, + (err, res) => { if (err) { return done(err); } assert.strictEqual(res.VersionId, version); assert.strictEqual(res.DeleteMarker, true); - return s3.getObject({ - Bucket: bucket, - Key: key, - }, (err, res) => { - if (err) { - return done(err); + version = versionIds.pop(); + return s3.deleteObject( + { + Bucket: bucket, + Key: key, + VersionId: version, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.VersionId, version); + assert.strictEqual(res.DeleteMarker, true); + return s3.getObject( + { + Bucket: bucket, + Key: key, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.Body.toString(), 'test-null-version'); + return done(); + } + ); } - assert.strictEqual(res.Body.toString(), - 'test-null-version'); - return done(); - }); - }); - }); + ); + } + ); }); it('should delete null version', done => { - s3.deleteObject({ - Bucket: bucket, - Key: key, - VersionId: 'null', - }, (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.VersionId, 'null'); - return s3.getObject({ + s3.deleteObject( + { Bucket: bucket, Key: key, - }, (err, res) => { + VersionId: 'null', + }, + (err, res) => { if (err) { return done(err); } - assert.strictEqual(res.VersionId, - versionIds[versionIds.length - 1]); - return done(); - }); - }); + assert.strictEqual(res.VersionId, 'null'); + return s3.getObject( + { + Bucket: bucket, + Key: key, + }, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res.VersionId, versionIds[versionIds.length - 1]); + return done(); + } + ); + } + ); }); it('should be able to delete the bucket', done => { - async.eachSeries(versionIds, (id, next) => { - s3.deleteObject({ - Bucket: bucket, - Key: key, - VersionId: id, - }, (err, res) => { + async.eachSeries( + versionIds, + (id, next) => { + s3.deleteObject( + { + Bucket: bucket, + Key: key, + VersionId: id, + }, + (err, res) => { + if (err) { + return next(err); + } + assert.strictEqual(res.VersionId, id); + return next(); + } + ); + }, + err => { if (err) { - return next(err); + return done(err); } - assert.strictEqual(res.VersionId, id); - return next(); - }); - }, err => { - if (err) { - return done(err); + return s3.deleteBucket({ Bucket: bucket }, err => done(err)); } - return s3.deleteBucket({ Bucket: bucket }, err => 
done(err)); - }); + ); }); }); }); @@ -701,25 +811,27 @@ describe('aws-node-sdk test concurrent version-specific deletes with null', () = return done(err); } return s3.deleteBucket({ Bucket: bucket }, err => { - assert.strictEqual(err, null, - `Error deleting bucket: ${err}`); + assert.strictEqual(err, null, `Error deleting bucket: ${err}`); return done(); }); }); }); it('creating non-versioned object', done => { - s3.putObject({ - Bucket: bucket, - Key: key, - Body: 'null-body', - }, (err, res) => { - if (err) { - return done(err); + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: 'null-body', + }, + (err, res) => { + if (err) { + return done(err); + } + assert.equal(res.VersionId, undefined); + return done(); } - assert.equal(res.VersionId, undefined); - return done(); - }); + ); }); it('enable versioning', done => { @@ -733,11 +845,19 @@ describe('aws-node-sdk test concurrent version-specific deletes with null', () = }); it('put 5 new versions to the object', done => { - async.times(5, (i, putDone) => s3.putObject({ - Bucket: bucket, - Key: key, - Body: `test-body-${i}`, - }, putDone), done); + async.times( + 5, + (i, putDone) => + s3.putObject( + { + Bucket: bucket, + Key: key, + Body: `test-body-${i}`, + }, + putDone + ), + done + ); }); it('list versions and batch-delete all except null version', done => { @@ -748,15 +868,18 @@ describe('aws-node-sdk test concurrent version-specific deletes with null', () = assert.strictEqual(res.DeleteMarkers.length, 0); assert.strictEqual(res.Versions.length, 6); assert.strictEqual(res.Versions[5].VersionId, 'null'); - return s3.deleteObjects({ - Bucket: bucket, - Delete: { - Objects: res.Versions.slice(0, 5).map(item => ({ - Key: item.Key, - VersionId: item.VersionId, - })), + return s3.deleteObjects( + { + Bucket: bucket, + Delete: { + Objects: res.Versions.slice(0, 5).map(item => ({ + Key: item.Key, + VersionId: item.VersionId, + })), + }, }, - }, done); + done + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/objectDeleteTagging.js b/tests/functional/aws-node-sdk/test/versioning/objectDeleteTagging.js index e63cee30b9..25f03c54cb 100644 --- a/tests/functional/aws-node-sdk/test/versioning/objectDeleteTagging.js +++ b/tests/functional/aws-node-sdk/test/versioning/objectDeleteTagging.js @@ -10,10 +10,7 @@ const objectName = 'testtaggingobject'; const invalidId = 'invalidIdWithMoreThan40BytesAndThatIsNotLongEnoughYet'; -const { - removeAllVersions, - versioningEnabled, -} = require('../../lib/utility/versioning-util'); +const { removeAllVersions, versioningEnabled } = require('../../lib/utility/versioning-util'); function _checkError(err, code, statusCode) { assert(err, 'Expected error but found none'); @@ -21,7 +18,6 @@ function _checkError(err, code, statusCode) { assert.strictEqual(err.statusCode, statusCode); } - describe('Delete object tagging with versioning', () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); @@ -37,139 +33,212 @@ describe('Delete object tagging with versioning', () => { }); it('should be able to delete tag set with versioning', done => { - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - (err, data) => next(err, data.VersionId)), - (versionId, next) => s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - Tagging: { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }] }, 
- }, err => next(err, versionId)), - (versionId, next) => s3.deleteObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - }, (err, data) => next(err, data, versionId)), - ], (err, data, versionId) => { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(data.VersionId, versionId); - done(); - }); + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.putObject({ Bucket: bucketName, Key: objectName }, (err, data) => next(err, data.VersionId)), + (versionId, next) => + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionId, + Tagging: { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + ], + }, + }, + err => next(err, versionId) + ), + (versionId, next) => + s3.deleteObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionId, + }, + (err, data) => next(err, data, versionId) + ), + ], + (err, data, versionId) => { + assert.ifError(err, `Found unexpected err ${err}`); + assert.strictEqual(data.VersionId, versionId); + done(); + } + ); }); - it('should not create version deleting object tags on a ' + - ' version-enabled bucket where no version id is specified ', done => { - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - (err, data) => next(err, data.VersionId)), - (versionId, next) => s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - Tagging: { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }] }, - }, err => next(err, versionId)), - (versionId, next) => s3.deleteObjectTagging({ - Bucket: bucketName, - Key: objectName, - }, err => next(err, versionId)), - (versionId, next) => - checkOneVersion(s3, bucketName, versionId, next), - ], done); - }); + it( + 'should not create version deleting object tags on a ' + + ' version-enabled bucket where no version id is specified ', + done => { + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.putObject({ Bucket: bucketName, Key: objectName }, (err, data) => + next(err, data.VersionId) + ), + (versionId, next) => + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionId, + Tagging: { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + ], + }, + }, + err => next(err, versionId) + ), + (versionId, next) => + s3.deleteObjectTagging( + { + Bucket: bucketName, + Key: objectName, + }, + err => next(err, versionId) + ), + (versionId, next) => checkOneVersion(s3, bucketName, versionId, next), + ], + done + ); + } + ); - it('should be able to delete tag set with a version of id "null"', - done => { - async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.deleteObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: 'null', - }, (err, data) => next(err, data)), - ], (err, data) => { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(data.VersionId, 'null'); - done(); - }); + it('should be able to delete tag set with a version of id "null"', done => { + async.waterfall( + [ 
+ next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.deleteObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: 'null', + }, + (err, data) => next(err, data) + ), + ], + (err, data) => { + assert.ifError(err, `Found unexpected err ${err}`); + assert.strictEqual(data.VersionId, 'null'); + done(); + } + ); }); - it('should return InvalidArgument deleting tag set with a non ' + - 'existing version id', done => { - async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.deleteObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: invalidId, - }, (err, data) => next(err, data)), - ], err => { - _checkError(err, 'InvalidArgument', 400); - done(); - }); + it('should return InvalidArgument deleting tag set with a non ' + 'existing version id', done => { + async.waterfall( + [ + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.deleteObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: invalidId, + }, + (err, data) => next(err, data) + ), + ], + err => { + _checkError(err, 'InvalidArgument', 400); + done(); + } + ); }); - it('should return 405 MethodNotAllowed deleting tag set without ' + - 'version id if version specified is a delete marker', done => { - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.deleteObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.deleteObjectTagging({ - Bucket: bucketName, - Key: objectName, - }, (err, data) => next(err, data)), - ], err => { - _checkError(err, 'MethodNotAllowed', 405); - done(); - }); - }); + it( + 'should return 405 MethodNotAllowed deleting tag set without ' + + 'version id if version specified is a delete marker', + done => { + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => s3.deleteObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.deleteObjectTagging( + { + Bucket: bucketName, + Key: objectName, + }, + (err, data) => next(err, data) + ), + ], + err => { + _checkError(err, 'MethodNotAllowed', 405); + done(); + } + ); + } + ); - it('should return 405 MethodNotAllowed deleting tag set with ' + - 'version id if version specified is a delete marker', done => { - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.deleteObject({ Bucket: bucketName, Key: objectName }, - (err, data) => next(err, data.VersionId)), - (versionId, next) => s3.deleteObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - }, (err, data) => 
next(err, data)), - ], err => { - _checkError(err, 'MethodNotAllowed', 405); - done(); - }); - }); + it( + 'should return 405 MethodNotAllowed deleting tag set with ' + + 'version id if version specified is a delete marker', + done => { + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.deleteObject({ Bucket: bucketName, Key: objectName }, (err, data) => + next(err, data.VersionId) + ), + (versionId, next) => + s3.deleteObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionId, + }, + (err, data) => next(err, data) + ), + ], + err => { + _checkError(err, 'MethodNotAllowed', 405); + done(); + } + ); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/objectGet.js b/tests/functional/aws-node-sdk/test/versioning/objectGet.js index 605e3b1ee7..9c397c32ad 100644 --- a/tests/functional/aws-node-sdk/test/versioning/objectGet.js +++ b/tests/functional/aws-node-sdk/test/versioning/objectGet.js @@ -4,31 +4,25 @@ const async = require('async'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); -const { - removeAllVersions, - versioningEnabled, - versioningSuspended, -} = require('../../lib/utility/versioning-util.js'); +const { removeAllVersions, versioningEnabled, versioningSuspended } = require('../../lib/utility/versioning-util.js'); const key = 'objectKey'; // formats differ for AWS and S3, use respective sample ids to obtain // correct error response in tests -const nonExistingId = process.env.AWS_ON_AIR ? - 'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' : - '3939393939393939393936493939393939393939756e6437'; +const nonExistingId = process.env.AWS_ON_AIR + ? 
'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' + : '3939393939393939393936493939393939393939756e6437'; function _assertNoError(err, desc) { assert.ifError(err, `Unexpected err ${desc}: ${err}`); } function _assertError(err, statusCode, code) { - assert.notEqual(err, null, - 'Expected failure but got success'); + assert.notEqual(err, null, 'Expected failure but got success'); assert.strictEqual(err.code, code); assert.strictEqual(err.statusCode, statusCode); } - describe('get behavior on versioning-enabled bucket', () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); @@ -39,10 +33,13 @@ describe('get behavior on versioning-enabled bucket', () => { bucket = `versioning-bucket-${Date.now()}`; s3.createBucket({ Bucket: bucket }, err => { _assertNoError(err, 'createBucket'); - return s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, done); + return s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + done + ); }); }); @@ -63,157 +60,190 @@ describe('get behavior on versioning-enabled bucket', () => { }); it('should be able to get the object version', function itF(done) { - s3.getObject({ - Bucket: bucket, - Key: key, - VersionId: this.test.versionId, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.ContentLength, 0); - done(); - }); + s3.getObject( + { + Bucket: bucket, + Key: key, + VersionId: this.test.versionId, + }, + (err, data) => { + assert.ifError(err); + assert.strictEqual(data.ContentLength, 0); + done(); + } + ); }); it('it should return NoSuchVersion if try to get a non-existing object version', done => { - s3.getObject({ - Bucket: bucket, - Key: key, - VersionId: nonExistingId, - }, - err => { - _assertError(err, 404, 'NoSuchVersion'); - done(); - }); + s3.getObject( + { + Bucket: bucket, + Key: key, + VersionId: nonExistingId, + }, + err => { + _assertError(err, 404, 'NoSuchVersion'); + done(); + } + ); }); it('it should return NoSuchVersion if try to get a non-existing null version', done => { - s3.getObject({ - Bucket: bucket, - Key: key, - VersionId: 'null', - }, - err => { - _assertError(err, 404, 'NoSuchVersion'); - done(); - }); - }); - - it('it should return NoSuchVersion if try to get a deleted noncurrent null version', done => { - async.series([ - next => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - next => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, next), - next => s3.putObject({ Bucket: bucket, Key: key }, next), - next => s3.deleteObject({ Bucket: bucket, Key: key, VersionId: 'null' }, next), - next => s3.getObject({ + s3.getObject( + { Bucket: bucket, Key: key, VersionId: 'null', - }, err => { + }, + err => { _assertError(err, 404, 'NoSuchVersion'); - next(); - }), - ], done); + done(); + } + ); + }); + + it('it should return NoSuchVersion if try to get a deleted noncurrent null version', done => { + async.series( + [ + next => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + next + ), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + next + ), + next => s3.putObject({ Bucket: bucket, Key: key }, next), + next => s3.deleteObject({ Bucket: bucket, Key: key, VersionId: 'null' }, next), + next => + 
s3.getObject( + { + Bucket: bucket, + Key: key, + VersionId: 'null', + }, + err => { + _assertError(err, 404, 'NoSuchVersion'); + next(); + } + ), + ], + done + ); }); }); describe('behavior when only version put is a delete marker', () => { beforeEach(function beforeEachF(done) { - s3.deleteObject({ Bucket: bucket, Key: key }, - (err, data) => { - _assertNoError(err, 'deleteObject'); - this.currentTest.deleteVersionId = data.VersionId; - done(err); - }); + s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { + _assertNoError(err, 'deleteObject'); + this.currentTest.deleteVersionId = data.VersionId; + done(err); + }); }); it('should not be able to get a delete marker', function itF(done) { - s3.getObject({ - Bucket: bucket, - Key: key, - VersionId: this.test.deleteVersionId, - }, function test1(err) { - _assertError(err, 405, 'MethodNotAllowed'); - const headers = this.httpResponse.headers; - assert.strictEqual(headers['x-amz-delete-marker'], 'true'); - done(); - }); + s3.getObject( + { + Bucket: bucket, + Key: key, + VersionId: this.test.deleteVersionId, + }, + function test1(err) { + _assertError(err, 405, 'MethodNotAllowed'); + const headers = this.httpResponse.headers; + assert.strictEqual(headers['x-amz-delete-marker'], 'true'); + done(); + } + ); }); - it('it should return NoSuchKey if try to get object whose ' + - 'latest version is a delete marker', done => { - s3.getObject({ - Bucket: bucket, - Key: key, - }, function test2(err) { - _assertError(err, 404, 'NoSuchKey'); - const headers = this.httpResponse.headers; - assert.strictEqual(headers['x-amz-delete-marker'], 'true'); - done(); - }); + it('it should return NoSuchKey if try to get object whose ' + 'latest version is a delete marker', done => { + s3.getObject( + { + Bucket: bucket, + Key: key, + }, + function test2(err) { + _assertError(err, 404, 'NoSuchKey'); + const headers = this.httpResponse.headers; + assert.strictEqual(headers['x-amz-delete-marker'], 'true'); + done(); + } + ); }); }); - describe('behavior when put version with content then put delete ' + - 'marker', () => { + describe('behavior when put version with content then put delete ' + 'marker', () => { beforeEach(function beforeEachF(done) { s3.putObject({ Bucket: bucket, Key: key }, (err, data) => { _assertNoError(err, 'putObject'); this.currentTest.versionId = data.VersionId; - s3.deleteObject({ Bucket: bucket, Key: key }, - (err, data) => { - _assertNoError(err, 'deleteObject'); - this.currentTest.deleteVersionId = data.VersionId; - done(err); - }); + s3.deleteObject({ Bucket: bucket, Key: key }, (err, data) => { + _assertNoError(err, 'deleteObject'); + this.currentTest.deleteVersionId = data.VersionId; + done(err); + }); }); }); it('should not be able to get a delete marker', function itF(done) { - s3.getObject({ - Bucket: bucket, - Key: key, - VersionId: this.test.deleteVersionId, - }, function test3(err) { - _assertError(err, 405, 'MethodNotAllowed'); - const headers = this.httpResponse.headers; - assert.strictEqual(headers['x-amz-delete-marker'], 'true'); - done(); - }); + s3.getObject( + { + Bucket: bucket, + Key: key, + VersionId: this.test.deleteVersionId, + }, + function test3(err) { + _assertError(err, 405, 'MethodNotAllowed'); + const headers = this.httpResponse.headers; + assert.strictEqual(headers['x-amz-delete-marker'], 'true'); + done(); + } + ); }); - it('should be able to get a version that was put prior to the ' + - 'delete marker', function itF(done) { - s3.getObject({ - Bucket: bucket, - Key: key, - VersionId: this.test.versionId 
}, - (err, data) => { - _assertNoError(err, 'getObject'); - assert.strictEqual(data.VersionId, this.test.versionId); - done(); - }); + it('should be able to get a version that was put prior to the ' + 'delete marker', function itF(done) { + s3.getObject( + { + Bucket: bucket, + Key: key, + VersionId: this.test.versionId, + }, + (err, data) => { + _assertNoError(err, 'getObject'); + assert.strictEqual(data.VersionId, this.test.versionId); + done(); + } + ); }); - it('should return NoSuchKey if get object without version and ' + - 'latest version is a delete marker', - done => { - s3.getObject({ - Bucket: bucket, - Key: key, - }, function test4(err) { - _assertError(err, 404, 'NoSuchKey'); - const headers = this.httpResponse.headers; - assert.strictEqual(headers['x-amz-delete-marker'], 'true'); - done(); - }); - }); + it( + 'should return NoSuchKey if get object without version and ' + 'latest version is a delete marker', + done => { + s3.getObject( + { + Bucket: bucket, + Key: key, + }, + function test4(err) { + _assertError(err, 404, 'NoSuchKey'); + const headers = this.httpResponse.headers; + assert.strictEqual(headers['x-amz-delete-marker'], 'true'); + done(); + } + ); + } + ); }); describe('x-amz-tagging-count with versioning', () => { @@ -245,9 +275,7 @@ describe('get behavior on versioning-enabled bucket', () => { }); }); - it('should not return "x-amz-tagging-count" if no tag ' + - 'associated with the object', - function itF(done) { + it('should not return "x-amz-tagging-count" if no tag ' + 'associated with the object', function itF(done) { params.VersionId = this.test.VersionId; s3.getObject(params, (err, data) => { if (err) { @@ -261,18 +289,20 @@ describe('get behavior on versioning-enabled bucket', () => { describe('tag associated with the object ', () => { beforeEach(done => s3.putObjectTagging(paramsTagging, done)); - it('should return "x-amz-tagging-count" header that provides ' + - 'the count of number of tags associated with the object', - function itF(done) { - params.VersionId = this.test.VersionId; - s3.getObject(params, (err, data) => { - if (err) { - return done(err); - } - assert.equal(data.TagCount, 1); - return done(); - }); - }); + it( + 'should return "x-amz-tagging-count" header that provides ' + + 'the count of number of tags associated with the object', + function itF(done) { + params.VersionId = this.test.VersionId; + s3.getObject(params, (err, data) => { + if (err) { + return done(err); + } + assert.equal(data.TagCount, 1); + return done(); + }); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/objectGetTagging.js b/tests/functional/aws-node-sdk/test/versioning/objectGetTagging.js index 7cd42350ac..80595ed491 100644 --- a/tests/functional/aws-node-sdk/test/versioning/objectGetTagging.js +++ b/tests/functional/aws-node-sdk/test/versioning/objectGetTagging.js @@ -4,10 +4,7 @@ const async = require('async'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); -const { - removeAllVersions, - versioningEnabled, -} = require('../../lib/utility/versioning-util'); +const { removeAllVersions, versioningEnabled } = require('../../lib/utility/versioning-util'); const bucketName = 'testtaggingbucket'; const objectName = 'testtaggingobject'; @@ -20,7 +17,6 @@ function _checkError(err, code, statusCode) { assert.strictEqual(err.statusCode, statusCode); } - describe('Get object tagging with versioning', () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); 
@@ -36,113 +32,166 @@ describe('Get object tagging with versioning', () => { }); it('should be able to get tag with versioning', done => { - const taggingConfig = { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }] }; - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - (err, data) => next(err, data.VersionId)), - (versionId, next) => s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - Tagging: taggingConfig, - }, err => next(err, versionId)), - (versionId, next) => s3.getObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - }, (err, data) => next(err, data, versionId)), - ], (err, data, versionId) => { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(data.VersionId, versionId); - assert.deepStrictEqual(data.TagSet, taggingConfig.TagSet); - done(); - }); + const taggingConfig = { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + ], + }; + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.putObject({ Bucket: bucketName, Key: objectName }, (err, data) => next(err, data.VersionId)), + (versionId, next) => + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionId, + Tagging: taggingConfig, + }, + err => next(err, versionId) + ), + (versionId, next) => + s3.getObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionId, + }, + (err, data) => next(err, data, versionId) + ), + ], + (err, data, versionId) => { + assert.ifError(err, `Found unexpected err ${err}`); + assert.strictEqual(data.VersionId, versionId); + assert.deepStrictEqual(data.TagSet, taggingConfig.TagSet); + done(); + } + ); }); it('should be able to get tag with a version of id "null"', done => { - async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.getObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: 'null', - }, (err, data) => next(err, data)), - ], (err, data) => { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(data.VersionId, 'null'); - done(); - }); + async.waterfall( + [ + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.getObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: 'null', + }, + (err, data) => next(err, data) + ), + ], + (err, data) => { + assert.ifError(err, `Found unexpected err ${err}`); + assert.strictEqual(data.VersionId, 'null'); + done(); + } + ); }); - it('should return InvalidArgument getting tag with a non existing ' + - 'version id', done => { - async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.getObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: invalidId, - }, (err, data) => next(err, data)), - ], err => { - _checkError(err, 'InvalidArgument', 
400); - done(); - }); + it('should return InvalidArgument getting tag with a non existing ' + 'version id', done => { + async.waterfall( + [ + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.getObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: invalidId, + }, + (err, data) => next(err, data) + ), + ], + err => { + _checkError(err, 'InvalidArgument', 400); + done(); + } + ); }); - it('should return 404 NoSuchKey getting tag without ' + - 'version id if version specified is a delete marker', done => { - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.deleteObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.getObjectTagging({ - Bucket: bucketName, - Key: objectName, - }, (err, data) => next(err, data)), - ], err => { - _checkError(err, 'NoSuchKey', 404); - done(); - }); - }); + it( + 'should return 404 NoSuchKey getting tag without ' + 'version id if version specified is a delete marker', + done => { + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => s3.deleteObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.getObjectTagging( + { + Bucket: bucketName, + Key: objectName, + }, + (err, data) => next(err, data) + ), + ], + err => { + _checkError(err, 'NoSuchKey', 404); + done(); + } + ); + } + ); - it('should return 405 MethodNotAllowed getting tag with ' + - 'version id if version specified is a delete marker', done => { - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.deleteObject({ Bucket: bucketName, Key: objectName }, - (err, data) => next(err, data.VersionId)), - (versionId, next) => s3.getObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - }, (err, data) => next(err, data)), - ], err => { - _checkError(err, 'MethodNotAllowed', 405); - done(); - }); - }); + it( + 'should return 405 MethodNotAllowed getting tag with ' + + 'version id if version specified is a delete marker', + done => { + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.deleteObject({ Bucket: bucketName, Key: objectName }, (err, data) => + next(err, data.VersionId) + ), + (versionId, next) => + s3.getObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionId, + }, + (err, data) => next(err, data) + ), + ], + err => { + _checkError(err, 'MethodNotAllowed', 405); + done(); + } + ); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/objectHead.js b/tests/functional/aws-node-sdk/test/versioning/objectHead.js index 2ff2af0934..e4da77c935 100644 --- a/tests/functional/aws-node-sdk/test/versioning/objectHead.js +++ 
b/tests/functional/aws-node-sdk/test/versioning/objectHead.js @@ -4,11 +4,7 @@ const async = require('async'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); -const { - removeAllVersions, - versioningEnabled, - versioningSuspended, -} = require('../../lib/utility/versioning-util.js'); +const { removeAllVersions, versioningEnabled, versioningSuspended } = require('../../lib/utility/versioning-util.js'); const data = ['foo1', 'foo2']; const counter = 100; @@ -19,7 +15,6 @@ function _assertNoError(err, desc) { assert.strictEqual(err, null, `Unexpected err ${desc}: ${err}`); } - // Same tests as objectPut versioning tests, but head object instead of get describe('put and head object with versioning', function testSuite() { this.timeout(600000); @@ -42,22 +37,23 @@ describe('put and head object with versioning', function testSuite() { }); }); - it('should put and head a non-versioned object without including ' + - 'version ids in response headers', done => { - const params = { Bucket: bucket, Key: key }; - s3.putObject(params, (err, data) => { - _assertNoError(err, 'putting object'); - assert.strictEqual(data.VersionId, undefined); - s3.headObject(params, (err, data) => { - _assertNoError(err, 'heading object'); + it( + 'should put and head a non-versioned object without including ' + 'version ids in response headers', + done => { + const params = { Bucket: bucket, Key: key }; + s3.putObject(params, (err, data) => { + _assertNoError(err, 'putting object'); assert.strictEqual(data.VersionId, undefined); - done(); + s3.headObject(params, (err, data) => { + _assertNoError(err, 'heading object'); + assert.strictEqual(data.VersionId, undefined); + done(); + }); }); - }); - }); + } + ); - it('version-specific head should still not return version id in ' + - 'response header', done => { + it('version-specific head should still not return version id in ' + 'response header', done => { const params = { Bucket: bucket, Key: key }; s3.putObject(params, (err, data) => { _assertNoError(err, 'putting object'); @@ -73,10 +69,13 @@ describe('put and head object with versioning', function testSuite() { describe('on a version-enabled bucket', () => { beforeEach(done => { - s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, done); + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + done + ); }); it('should create a new version for an object', done => { @@ -86,8 +85,7 @@ describe('put and head object with versioning', function testSuite() { params.VersionId = data.VersionId; s3.headObject(params, (err, data) => { _assertNoError(err, 'heading object'); - assert.strictEqual(params.VersionId, data.VersionId, - 'version ids are not equal'); + assert.strictEqual(params.VersionId, data.VersionId, 'version ids are not equal'); done(); }); }); @@ -98,17 +96,19 @@ describe('put and head object with versioning', function testSuite() { const eTags = []; beforeEach(done => { - s3.putObject({ Bucket: bucket, Key: key, Body: data[0] }, - (err, data) => { - if (err) { - done(err); - } - eTags.push(data.ETag); - s3.putBucketVersioning({ + s3.putObject({ Bucket: bucket, Key: key, Body: data[0] }, (err, data) => { + if (err) { + done(err); + } + eTags.push(data.ETag); + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: versioningEnabled, - }, done); - }); + }, + done + ); + }); }); afterEach(done => { @@ -117,12 +117,11 @@ describe('put and head object with 
versioning', function testSuite() { done(); }); - it('should head null version in versioning enabled bucket', - done => { + it('should head null version in versioning enabled bucket', done => { const paramsNull = { Bucket: bucket, - Key: '/', VersionId: - 'null', + Key: '/', + VersionId: 'null', }; s3.headObject(paramsNull, err => { _assertNoError(err, 'heading null version'); @@ -135,14 +134,11 @@ describe('put and head object with versioning', function testSuite() { s3.putObject(params, (err, data) => { const newVersion = data.VersionId; eTags.push(data.ETag); - s3.headObject({ Bucket: bucket, Key: key, - VersionId: newVersion }, (err, data) => { + s3.headObject({ Bucket: bucket, Key: key, VersionId: newVersion }, (err, data) => { assert.strictEqual(err, null); - assert.strictEqual(data.VersionId, newVersion, - 'version ids are not equal'); + assert.strictEqual(data.VersionId, newVersion, 'version ids are not equal'); assert.strictEqual(data.ETag, eTags[1]); - s3.headObject({ Bucket: bucket, Key: key, - VersionId: 'null' }, (err, data) => { + s3.headObject({ Bucket: bucket, Key: key, VersionId: 'null' }, (err, data) => { _assertNoError(err, 'heading null version'); assert.strictEqual(data.VersionId, 'null'); assert.strictEqual(data.ETag, eTags[0]); @@ -152,44 +148,50 @@ describe('put and head object with versioning', function testSuite() { }); }); - it('should create new versions but still keep nullVersionId', - done => { + it('should create new versions but still keep nullVersionId', done => { const versionIds = []; const params = { Bucket: bucket, Key: key }; const paramsNull = { Bucket: bucket, - Key: '/', VersionId: - 'null', + Key: '/', + VersionId: 'null', }; // create new versions - async.timesSeries(counter, (i, next) => s3.putObject(params, - (err, data) => { - versionIds.push(data.VersionId); - // head the 'null' version - s3.headObject(paramsNull, (err, nullVerData) => { - assert.strictEqual(err, null); - assert.strictEqual(nullVerData.ETag, eTags[0]); - assert.strictEqual(nullVerData.VersionId, 'null'); - next(err); - }); - }), done); + async.timesSeries( + counter, + (i, next) => + s3.putObject(params, (err, data) => { + versionIds.push(data.VersionId); + // head the 'null' version + s3.headObject(paramsNull, (err, nullVerData) => { + assert.strictEqual(err, null); + assert.strictEqual(nullVerData.ETag, eTags[0]); + assert.strictEqual(nullVerData.VersionId, 'null'); + next(err); + }); + }), + done + ); }); }); describe('on version-suspended bucket', () => { beforeEach(done => { - s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, done); + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + done + ); }); it('should not return version id for new object', done => { const params = { Bucket: bucket, Key: key, Body: 'foo' }; const paramsNull = { Bucket: bucket, - Key: '/', VersionId: - 'null', + Key: '/', + VersionId: 'null', }; s3.putObject(params, (err, data) => { const eTag = data.ETag; @@ -211,57 +213,63 @@ describe('put and head object with versioning', function testSuite() { const params2 = { Bucket: bucket, Key: key, Body: data[1] }; const paramsNull = { Bucket: bucket, - Key: '/', VersionId: - 'null', + Key: '/', + VersionId: 'null', }; const eTags = []; - async.waterfall([ - callback => s3.putObject(params1, (err, data) => { - _assertNoError(err, 'putting first object'); - assert.strictEqual(data.VersionId, undefined); - eTags.push(data.ETag); - callback(); - }), - callback => 
s3.headObject(params, (err, data) => { - _assertNoError(err, 'heading master version'); - assert.strictEqual(data.VersionId, 'null'); - assert.strictEqual(data.ETag, eTags[0], - 'wrong object data'); - callback(); - }), - callback => s3.putObject(params2, (err, data) => { - _assertNoError(err, 'putting second object'); - assert.strictEqual(data.VersionId, undefined); - eTags.push(data.ETag); - callback(); - }), - callback => s3.headObject(paramsNull, (err, data) => { - _assertNoError(err, 'heading null version'); - assert.strictEqual(data.VersionId, 'null'); - assert.strictEqual(data.ETag, eTags[1], - 'wrong object data'); - callback(); - }), - ], done); + async.waterfall( + [ + callback => + s3.putObject(params1, (err, data) => { + _assertNoError(err, 'putting first object'); + assert.strictEqual(data.VersionId, undefined); + eTags.push(data.ETag); + callback(); + }), + callback => + s3.headObject(params, (err, data) => { + _assertNoError(err, 'heading master version'); + assert.strictEqual(data.VersionId, 'null'); + assert.strictEqual(data.ETag, eTags[0], 'wrong object data'); + callback(); + }), + callback => + s3.putObject(params2, (err, data) => { + _assertNoError(err, 'putting second object'); + assert.strictEqual(data.VersionId, undefined); + eTags.push(data.ETag); + callback(); + }), + callback => + s3.headObject(paramsNull, (err, data) => { + _assertNoError(err, 'heading null version'); + assert.strictEqual(data.VersionId, 'null'); + assert.strictEqual(data.ETag, eTags[1], 'wrong object data'); + callback(); + }), + ], + done + ); }); }); - describe('on a version-suspended bucket with non-versioned object', - () => { + describe('on a version-suspended bucket with non-versioned object', () => { const eTags = []; beforeEach(done => { - s3.putObject({ Bucket: bucket, Key: key, Body: data[0] }, - (err, data) => { - if (err) { - done(err); - } - eTags.push(data.ETag); - s3.putBucketVersioning({ + s3.putObject({ Bucket: bucket, Key: key, Body: data[0] }, (err, data) => { + if (err) { + done(err); + } + eTags.push(data.ETag); + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: versioningSuspended, - }, done); - }); + }, + done + ); + }); }); afterEach(done => { @@ -270,12 +278,11 @@ describe('put and head object with versioning', function testSuite() { done(); }); - it('should head null version in versioning suspended bucket', - done => { + it('should head null version in versioning suspended bucket', done => { const paramsNull = { Bucket: bucket, - Key: '/', VersionId: - 'null', + Key: '/', + VersionId: 'null', }; s3.headObject(paramsNull, err => { _assertNoError(err, 'heading null version'); @@ -283,67 +290,82 @@ describe('put and head object with versioning', function testSuite() { }); }); - it('should update null version in versioning suspended bucket', - done => { + it('should update null version in versioning suspended bucket', done => { const params = { Bucket: bucket, Key: key }; const putParams = { Bucket: bucket, Key: '/', Body: data[1] }; const paramsNull = { Bucket: bucket, - Key: '/', VersionId: - 'null', + Key: '/', + VersionId: 'null', }; - async.waterfall([ - callback => s3.headObject(paramsNull, (err, data) => { - _assertNoError(err, 'heading null version'); - assert.strictEqual(data.VersionId, 'null'); - callback(); - }), - callback => s3.putObject(putParams, (err, data) => { - _assertNoError(err, 'putting object'); - assert.strictEqual(data.VersionId, undefined); - eTags.push(data.ETag); - callback(); - }), - callback => s3.headObject(paramsNull, 
(err, data) => { - _assertNoError(err, 'heading null version'); - assert.strictEqual(data.VersionId, 'null'); - assert.strictEqual(data.ETag, eTags[1], - 'wrong object data'); - callback(); - }), - callback => s3.headObject(params, (err, data) => { - _assertNoError(err, 'heading master version'); - assert.strictEqual(data.VersionId, 'null'); - assert.strictEqual(data.ETag, eTags[1], - 'wrong object data'); - callback(); - }), - ], done); + async.waterfall( + [ + callback => + s3.headObject(paramsNull, (err, data) => { + _assertNoError(err, 'heading null version'); + assert.strictEqual(data.VersionId, 'null'); + callback(); + }), + callback => + s3.putObject(putParams, (err, data) => { + _assertNoError(err, 'putting object'); + assert.strictEqual(data.VersionId, undefined); + eTags.push(data.ETag); + callback(); + }), + callback => + s3.headObject(paramsNull, (err, data) => { + _assertNoError(err, 'heading null version'); + assert.strictEqual(data.VersionId, 'null'); + assert.strictEqual(data.ETag, eTags[1], 'wrong object data'); + callback(); + }), + callback => + s3.headObject(params, (err, data) => { + _assertNoError(err, 'heading master version'); + assert.strictEqual(data.VersionId, 'null'); + assert.strictEqual(data.ETag, eTags[1], 'wrong object data'); + callback(); + }), + ], + done + ); }); }); - describe('on versioning suspended then enabled bucket w/ null version', - () => { + describe('on versioning suspended then enabled bucket w/ null version', () => { const eTags = []; beforeEach(done => { const params = { Bucket: bucket, Key: key, Body: data[0] }; - async.waterfall([ - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, err => callback(err)), - callback => s3.putObject(params, (err, data) => { - if (err) { - callback(err); - } - eTags.push(data.ETag); - callback(); - }), - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, callback), - ], done); + async.waterfall( + [ + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + err => callback(err) + ), + callback => + s3.putObject(params, (err, data) => { + if (err) { + callback(err); + } + eTags.push(data.ETag); + callback(); + }), + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + callback + ), + ], + done + ); }); afterEach(done => { @@ -352,33 +374,42 @@ describe('put and head object with versioning', function testSuite() { done(); }); - it('should preserve the null version when creating new versions', - done => { + it('should preserve the null version when creating new versions', done => { const params = { Bucket: bucket, Key: key }; const paramsNull = { Bucket: bucket, - Key: '/', VersionId: - 'null', + Key: '/', + VersionId: 'null', }; - async.waterfall([ - cb => s3.headObject(paramsNull, (err, nullVerData) => { - _assertNoError(err, 'heading null version'); - assert.strictEqual(nullVerData.ETag, eTags[0]); - assert.strictEqual(nullVerData.VersionId, 'null'); - cb(); - }), - cb => async.timesSeries(counter, (i, next) => - s3.putObject(params, (err, data) => { - _assertNoError(err, `putting object #${i}`); - assert.notEqual(data.VersionId, undefined); - next(); - }), err => cb(err)), - cb => s3.headObject(paramsNull, (err, nullVerData) => { - _assertNoError(err, 'heading null version'); - assert.strictEqual(nullVerData.ETag, eTags[0]); - cb(); - }), - ], done); + async.waterfall( + [ + cb => 
+ s3.headObject(paramsNull, (err, nullVerData) => { + _assertNoError(err, 'heading null version'); + assert.strictEqual(nullVerData.ETag, eTags[0]); + assert.strictEqual(nullVerData.VersionId, 'null'); + cb(); + }), + cb => + async.timesSeries( + counter, + (i, next) => + s3.putObject(params, (err, data) => { + _assertNoError(err, `putting object #${i}`); + assert.notEqual(data.VersionId, undefined); + next(); + }), + err => cb(err) + ), + cb => + s3.headObject(paramsNull, (err, nullVerData) => { + _assertNoError(err, 'heading null version'); + assert.strictEqual(nullVerData.ETag, eTags[0]); + cb(); + }), + ], + done + ); }); it('should create a bunch of objects and their versions', done => { @@ -386,21 +417,31 @@ describe('put and head object with versioning', function testSuite() { const keycount = 50; const versioncount = 20; const value = '{"foo":"bar"}'; - async.timesLimit(keycount, 10, (i, next1) => { - const key = `foo${i}`; - const params = { Bucket: bucket, Key: key, Body: value }; - async.timesLimit(versioncount, 10, (j, next2) => - s3.putObject(params, (err, data) => { - assert.strictEqual(err, null); - assert(data.VersionId, 'invalid versionId'); - vids.push({ Key: key, VersionId: data.VersionId }); - next2(); - }), next1); - }, err => { - assert.strictEqual(err, null); - assert.strictEqual(vids.length, keycount * versioncount); - done(); - }); + async.timesLimit( + keycount, + 10, + (i, next1) => { + const key = `foo${i}`; + const params = { Bucket: bucket, Key: key, Body: value }; + async.timesLimit( + versioncount, + 10, + (j, next2) => + s3.putObject(params, (err, data) => { + assert.strictEqual(err, null); + assert(data.VersionId, 'invalid versionId'); + vids.push({ Key: key, VersionId: data.VersionId }); + next2(); + }), + next1 + ); + }, + err => { + assert.strictEqual(err, null); + assert.strictEqual(vids.length, keycount * versioncount); + done(); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/objectPut.js b/tests/functional/aws-node-sdk/test/versioning/objectPut.js index 81d4cbfbca..cf6348b4ec 100644 --- a/tests/functional/aws-node-sdk/test/versioning/objectPut.js +++ b/tests/functional/aws-node-sdk/test/versioning/objectPut.js @@ -22,7 +22,6 @@ function _assertNoError(err, desc) { assert.strictEqual(err, null, `Unexpected err ${desc}: ${err}`); } - describe('put and get object with versioning', function testSuite() { this.timeout(600000); @@ -45,8 +44,7 @@ describe('put and get object with versioning', function testSuite() { }); }); - it('should return InvalidArgument for a request with versionId query', - done => { + it('should return InvalidArgument for a request with versionId query', done => { const params = { Bucket: bucket, Key: key }; const query = { versionId: 'testVersionId' }; customS3Request(s3.putObject, params, { query }, err => { @@ -57,8 +55,7 @@ describe('put and get object with versioning', function testSuite() { }); }); - it('should return InvalidArgument for a request with empty string ' + - 'versionId query', done => { + it('should return InvalidArgument for a request with empty string ' + 'versionId query', done => { const params = { Bucket: bucket, Key: key }; const query = { versionId: '' }; customS3Request(s3.putObject, params, { query }, err => { @@ -69,8 +66,7 @@ describe('put and get object with versioning', function testSuite() { }); }); - it('should put and get a non-versioned object without including ' + - 'version ids in response headers', done => { + it('should put and get a non-versioned object without 
including ' + 'version ids in response headers', done => { const params = { Bucket: bucket, Key: key }; s3.putObject(params, (err, data) => { _assertNoError(err, 'putting object'); @@ -83,8 +79,7 @@ describe('put and get object with versioning', function testSuite() { }); }); - it('version-specific get should still not return version id in ' + - 'response header', done => { + it('version-specific get should still not return version id in ' + 'response header', done => { const params = { Bucket: bucket, Key: key }; s3.putObject(params, (err, data) => { _assertNoError(err, 'putting object'); @@ -100,10 +95,13 @@ describe('put and get object with versioning', function testSuite() { describe('on a version-enabled bucket', () => { beforeEach(done => { - s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, done); + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + done + ); }); it('should create a new version for an object', done => { @@ -113,27 +111,22 @@ describe('put and get object with versioning', function testSuite() { params.VersionId = data.VersionId; s3.getObject(params, (err, data) => { _assertNoError(err, 'getting object'); - assert.strictEqual(params.VersionId, data.VersionId, - 'version ids are not equal'); + assert.strictEqual(params.VersionId, data.VersionId, 'version ids are not equal'); done(); }); }); }); - it('should create a new version with tag set for an object', - done => { + it('should create a new version with tag set for an object', done => { const tagKey = 'key1'; const tagValue = 'value1'; - const putParams = { Bucket: bucket, Key: key, - Tagging: `${tagKey}=${tagValue}` }; + const putParams = { Bucket: bucket, Key: key, Tagging: `${tagKey}=${tagValue}` }; s3.putObject(putParams, (err, data) => { _assertNoError(err, 'putting object'); - const getTagParams = { Bucket: bucket, Key: - key, VersionId: data.VersionId }; + const getTagParams = { Bucket: bucket, Key: key, VersionId: data.VersionId }; s3.getObjectTagging(getTagParams, (err, data) => { _assertNoError(err, 'getting object tagging'); - assert.strictEqual(getTagParams.VersionId, - data.VersionId, 'version ids are not equal'); + assert.strictEqual(getTagParams.VersionId, data.VersionId, 'version ids are not equal'); assert.strictEqual(data.TagSet[0].Key, tagKey); assert.strictEqual(data.TagSet[0].Value, tagValue); done(); @@ -142,22 +135,23 @@ describe('put and get object with versioning', function testSuite() { }); }); - describe('on a version-enabled bucket with non-versioned object', - () => { + describe('on a version-enabled bucket with non-versioned object', () => { const eTags = []; beforeEach(done => { - s3.putObject({ Bucket: bucket, Key: key, Body: data[0] }, - (err, data) => { - if (err) { - done(err); - } - eTags.push(data.ETag); - s3.putBucketVersioning({ + s3.putObject({ Bucket: bucket, Key: key, Body: data[0] }, (err, data) => { + if (err) { + done(err); + } + eTags.push(data.ETag); + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: versioningEnabled, - }, done); - }); + }, + done + ); + }); }); afterEach(done => { @@ -166,23 +160,22 @@ describe('put and get object with versioning', function testSuite() { done(); }); - it('should get null (latest) version in versioning enabled ' + - 'bucket when version id is not specified', - done => { - const paramsNull = { - Bucket: bucket, - Key: key, - }; - s3.getObject(paramsNull, (err, data) => { - _assertNoError(err, 'getting null version'); - 
assert.strictEqual(data.VersionId, 'null'); - done(); - }); - }); + it( + 'should get null (latest) version in versioning enabled ' + 'bucket when version id is not specified', + done => { + const paramsNull = { + Bucket: bucket, + Key: key, + }; + s3.getObject(paramsNull, (err, data) => { + _assertNoError(err, 'getting null version'); + assert.strictEqual(data.VersionId, 'null'); + done(); + }); + } + ); - it('should get null version in versioning enabled bucket ' + - 'when version id is specified', - done => { + it('should get null version in versioning enabled bucket ' + 'when version id is specified', done => { const paramsNull = { Bucket: bucket, Key: key, @@ -195,20 +188,16 @@ describe('put and get object with versioning', function testSuite() { }); }); - it('should keep null version and create a new version', - done => { + it('should keep null version and create a new version', done => { const params = { Bucket: bucket, Key: key, Body: data[1] }; s3.putObject(params, (err, data) => { const newVersion = data.VersionId; eTags.push(data.ETag); - s3.getObject({ Bucket: bucket, Key: key, - VersionId: newVersion }, (err, data) => { + s3.getObject({ Bucket: bucket, Key: key, VersionId: newVersion }, (err, data) => { assert.strictEqual(err, null); - assert.strictEqual(data.VersionId, newVersion, - 'version ids are not equal'); + assert.strictEqual(data.VersionId, newVersion, 'version ids are not equal'); assert.strictEqual(data.ETag, eTags[1]); - s3.getObject({ Bucket: bucket, Key: key, - VersionId: 'null' }, (err, data) => { + s3.getObject({ Bucket: bucket, Key: key, VersionId: 'null' }, (err, data) => { _assertNoError(err, 'getting null version'); assert.strictEqual(data.VersionId, 'null'); assert.strictEqual(data.ETag, eTags[0]); @@ -218,8 +207,7 @@ describe('put and get object with versioning', function testSuite() { }); }); - it('should create new versions but still keep the null version', - done => { + it('should create new versions but still keep the null version', done => { const versionIds = []; const params = { Bucket: bucket, Key: key }; const paramsNull = { @@ -228,58 +216,79 @@ describe('put and get object with versioning', function testSuite() { VersionId: 'null', }; // create new versions - async.timesSeries(counter, (i, next) => s3.putObject(params, - (err, data) => { - versionIds.push(data.VersionId); - // get the 'null' version - s3.getObject(paramsNull, (err, nullVerData) => { - assert.strictEqual(err, null); - assert.strictEqual(nullVerData.ETag, eTags[0]); - assert.strictEqual(nullVerData.VersionId, 'null'); - next(err); - }); - }), done); + async.timesSeries( + counter, + (i, next) => + s3.putObject(params, (err, data) => { + versionIds.push(data.VersionId); + // get the 'null' version + s3.getObject(paramsNull, (err, nullVerData) => { + assert.strictEqual(err, null); + assert.strictEqual(nullVerData.ETag, eTags[0]); + assert.strictEqual(nullVerData.VersionId, 'null'); + next(err); + }); + }), + done + ); }); // S3C-5139 - it('should not fail PUT on versioning-suspended bucket if nullVersionId refers ' + - 'to deleted null version', done => { - async.series([ - // create a new version on top of non-versioned object - next => s3.putObject({ Bucket: bucket, Key: key }, next), - // suspend versioning - next => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, next), - // delete existing non-versioned object - next => s3.deleteObject({ Bucket: bucket, Key: key, VersionId: 'null' }, next), - // put a new null version - next => 
s3.putObject({ Bucket: bucket, Key: key, Body: data[0] }, next), - // get the new null version - next => s3.getObject({ - Bucket: bucket, - Key: key, - VersionId: 'null', - }, (err, nullVerData) => { - assert.ifError(err); - assert.strictEqual(nullVerData.ETag, eTags[0]); - assert.strictEqual(nullVerData.VersionId, 'null'); - next(); - }), - ], err => { - assert.ifError(err); - done(); - }); - }); + it( + 'should not fail PUT on versioning-suspended bucket if nullVersionId refers ' + + 'to deleted null version', + done => { + async.series( + [ + // create a new version on top of non-versioned object + next => s3.putObject({ Bucket: bucket, Key: key }, next), + // suspend versioning + next => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + next + ), + // delete existing non-versioned object + next => s3.deleteObject({ Bucket: bucket, Key: key, VersionId: 'null' }, next), + // put a new null version + next => s3.putObject({ Bucket: bucket, Key: key, Body: data[0] }, next), + // get the new null version + next => + s3.getObject( + { + Bucket: bucket, + Key: key, + VersionId: 'null', + }, + (err, nullVerData) => { + assert.ifError(err); + assert.strictEqual(nullVerData.ETag, eTags[0]); + assert.strictEqual(nullVerData.VersionId, 'null'); + next(); + } + ), + ], + err => { + assert.ifError(err); + done(); + } + ); + } + ); }); describe('on version-suspended bucket', () => { beforeEach(done => { - s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, done); + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + done + ); }); it('should not return version id for new object', done => { @@ -313,107 +322,128 @@ describe('put and get object with versioning', function testSuite() { VersionId: 'null', }; const eTags = []; - async.waterfall([ - callback => s3.putObject(params1, (err, data) => { - _assertNoError(err, 'putting first object'); - assert.strictEqual(data.VersionId, undefined); - eTags.push(data.ETag); - callback(); - }), - callback => s3.getObject(params, (err, data) => { - _assertNoError(err, 'getting master version'); - assert.strictEqual(data.VersionId, 'null'); - assert.strictEqual(data.ETag, eTags[0], - 'wrong object data'); - callback(); - }), - callback => s3.putObject(params2, (err, data) => { - _assertNoError(err, 'putting second object'); - assert.strictEqual(data.VersionId, undefined); - eTags.push(data.ETag); - callback(); - }), - callback => s3.getObject(paramsNull, (err, data) => { - _assertNoError(err, 'getting null version'); - assert.strictEqual(data.VersionId, 'null'); - assert.strictEqual(data.ETag, eTags[1], - 'wrong object data'); - callback(); - }), - ], done); + async.waterfall( + [ + callback => + s3.putObject(params1, (err, data) => { + _assertNoError(err, 'putting first object'); + assert.strictEqual(data.VersionId, undefined); + eTags.push(data.ETag); + callback(); + }), + callback => + s3.getObject(params, (err, data) => { + _assertNoError(err, 'getting master version'); + assert.strictEqual(data.VersionId, 'null'); + assert.strictEqual(data.ETag, eTags[0], 'wrong object data'); + callback(); + }), + callback => + s3.putObject(params2, (err, data) => { + _assertNoError(err, 'putting second object'); + assert.strictEqual(data.VersionId, undefined); + eTags.push(data.ETag); + callback(); + }), + callback => + s3.getObject(paramsNull, (err, data) => { + _assertNoError(err, 'getting null version'); + 
assert.strictEqual(data.VersionId, 'null'); + assert.strictEqual(data.ETag, eTags[1], 'wrong object data'); + callback(); + }), + ], + done + ); }); // Jira issue: S3C-444 - it('put object after put object acl on null version which is ' + - 'latest version should not result in two null version with ' + - 'different version ids', done => { - async.waterfall([ - // create new null version (master version in metadata) - callback => s3.putObject({ Bucket: bucket, Key: key }, - err => callback(err)), - callback => checkOneVersion(s3, bucket, 'null', callback), - // note after put object acl in metadata will have null - // version (with same version ID) stored in both master and - // separate version due to using versionId= - // option in metadata PUT call - callback => s3.putObjectAcl({ - Bucket: bucket, - Key: key, - ACL: 'public-read-write', - VersionId: 'null', - }, err => callback(err)), - // before overwriting master version, put object should - // clean up latest null version (both master version and - // separate version in metadata) - callback => s3.putObject({ Bucket: bucket, Key: key }, - err => callback(err)), - // if clean-up did not occur, would see two null versions - // with different version IDs in version listing - callback => checkOneVersion(s3, bucket, 'null', callback), - ], done); - }); + it( + 'put object after put object acl on null version which is ' + + 'latest version should not result in two null version with ' + + 'different version ids', + done => { + async.waterfall( + [ + // create new null version (master version in metadata) + callback => s3.putObject({ Bucket: bucket, Key: key }, err => callback(err)), + callback => checkOneVersion(s3, bucket, 'null', callback), + // note after put object acl in metadata will have null + // version (with same version ID) stored in both master and + // separate version due to using versionId= + // option in metadata PUT call + callback => + s3.putObjectAcl( + { + Bucket: bucket, + Key: key, + ACL: 'public-read-write', + VersionId: 'null', + }, + err => callback(err) + ), + // before overwriting master version, put object should + // clean up latest null version (both master version and + // separate version in metadata) + callback => s3.putObject({ Bucket: bucket, Key: key }, err => callback(err)), + // if clean-up did not occur, would see two null versions + // with different version IDs in version listing + callback => checkOneVersion(s3, bucket, 'null', callback), + ], + done + ); + } + ); // Jira issue: S3C-444 - it('put object after creating dual null version another way ' + - 'should not result in two null version with different version ids', - done => { - async.waterfall([ - // create dual null version state another way - callback => - createDualNullVersion(s3, bucket, key, callback), - // versioning is left enabled after above step - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, err => callback(err)), - // before overwriting master version, put object should - // clean up latest null version (both master version and - // separate version in metadata) - callback => s3.putObject({ Bucket: bucket, Key: key }, - err => callback(err)), - // if clean-up did not occur, would see two null versions - // with different version IDs in version listing - callback => checkOneVersion(s3, bucket, 'null', callback), - ], done); - }); + it( + 'put object after creating dual null version another way ' + + 'should not result in two null version with different version ids', + done 
=> { + async.waterfall( + [ + // create dual null version state another way + callback => createDualNullVersion(s3, bucket, key, callback), + // versioning is left enabled after above step + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + err => callback(err) + ), + // before overwriting master version, put object should + // clean up latest null version (both master version and + // separate version in metadata) + callback => s3.putObject({ Bucket: bucket, Key: key }, err => callback(err)), + // if clean-up did not occur, would see two null versions + // with different version IDs in version listing + callback => checkOneVersion(s3, bucket, 'null', callback), + ], + done + ); + } + ); }); - describe('on a version-suspended bucket with non-versioned object', - () => { + describe('on a version-suspended bucket with non-versioned object', () => { const eTags = []; beforeEach(done => { - s3.putObject({ Bucket: bucket, Key: key, Body: data[0] }, - (err, data) => { - if (err) { - done(err); - } - eTags.push(data.ETag); - s3.putBucketVersioning({ + s3.putObject({ Bucket: bucket, Key: key, Body: data[0] }, (err, data) => { + if (err) { + done(err); + } + eTags.push(data.ETag); + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: versioningSuspended, - }, done); - }); + }, + done + ); + }); }); afterEach(done => { @@ -422,23 +452,22 @@ describe('put and get object with versioning', function testSuite() { done(); }); - it('should get null version (latest) in versioning ' + - 'suspended bucket without specifying version id', - done => { - const paramsNull = { - Bucket: bucket, - Key: key, - }; - s3.getObject(paramsNull, (err, data) => { - assert.strictEqual(data.VersionId, 'null'); - _assertNoError(err, 'getting null version'); - done(); - }); - }); + it( + 'should get null version (latest) in versioning ' + 'suspended bucket without specifying version id', + done => { + const paramsNull = { + Bucket: bucket, + Key: key, + }; + s3.getObject(paramsNull, (err, data) => { + assert.strictEqual(data.VersionId, 'null'); + _assertNoError(err, 'getting null version'); + done(); + }); + } + ); - it('should get null version in versioning suspended bucket ' + - 'specifying version id', - done => { + it('should get null version in versioning suspended bucket ' + 'specifying version id', done => { const paramsNull = { Bucket: bucket, Key: key, @@ -451,8 +480,7 @@ describe('put and get object with versioning', function testSuite() { }); }); - it('should update null version in versioning suspended bucket', - done => { + it('should update null version in versioning suspended bucket', done => { const params = { Bucket: bucket, Key: key }; const putParams = { Bucket: bucket, Key: key, Body: data[1] }; const paramsNull = { @@ -460,58 +488,74 @@ describe('put and get object with versioning', function testSuite() { Key: key, VersionId: 'null', }; - async.waterfall([ - callback => s3.getObject(paramsNull, (err, data) => { - _assertNoError(err, 'getting null version'); - assert.strictEqual(data.VersionId, 'null'); - callback(); - }), - callback => s3.putObject(putParams, (err, data) => { - _assertNoError(err, 'putting object'); - assert.strictEqual(data.VersionId, undefined); - eTags.push(data.ETag); - callback(); - }), - callback => s3.getObject(paramsNull, (err, data) => { - _assertNoError(err, 'getting null version'); - assert.strictEqual(data.VersionId, 'null'); - assert.strictEqual(data.ETag, eTags[1], - 'wrong object data'); - 
callback(); - }), - callback => s3.getObject(params, (err, data) => { - _assertNoError(err, 'getting master version'); - assert.strictEqual(data.VersionId, 'null'); - assert.strictEqual(data.ETag, eTags[1], - 'wrong object data'); - callback(); - }), - ], done); + async.waterfall( + [ + callback => + s3.getObject(paramsNull, (err, data) => { + _assertNoError(err, 'getting null version'); + assert.strictEqual(data.VersionId, 'null'); + callback(); + }), + callback => + s3.putObject(putParams, (err, data) => { + _assertNoError(err, 'putting object'); + assert.strictEqual(data.VersionId, undefined); + eTags.push(data.ETag); + callback(); + }), + callback => + s3.getObject(paramsNull, (err, data) => { + _assertNoError(err, 'getting null version'); + assert.strictEqual(data.VersionId, 'null'); + assert.strictEqual(data.ETag, eTags[1], 'wrong object data'); + callback(); + }), + callback => + s3.getObject(params, (err, data) => { + _assertNoError(err, 'getting master version'); + assert.strictEqual(data.VersionId, 'null'); + assert.strictEqual(data.ETag, eTags[1], 'wrong object data'); + callback(); + }), + ], + done + ); }); }); - describe('on versioning suspended then enabled bucket w/ null version', - () => { + describe('on versioning suspended then enabled bucket w/ null version', () => { const eTags = []; beforeEach(done => { const params = { Bucket: bucket, Key: key, Body: data[0] }; - async.waterfall([ - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningSuspended, - }, err => callback(err)), - callback => s3.putObject(params, (err, data) => { - if (err) { - callback(err); - } - eTags.push(data.ETag); - callback(); - }), - callback => s3.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningEnabled, - }, callback), - ], done); + async.waterfall( + [ + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningSuspended, + }, + err => callback(err) + ), + callback => + s3.putObject(params, (err, data) => { + if (err) { + callback(err); + } + eTags.push(data.ETag); + callback(); + }), + callback => + s3.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningEnabled, + }, + callback + ), + ], + done + ); }); afterEach(done => { @@ -520,33 +564,42 @@ describe('put and get object with versioning', function testSuite() { done(); }); - it('should preserve the null version when creating new versions', - done => { + it('should preserve the null version when creating new versions', done => { const params = { Bucket: bucket, Key: key }; const paramsNull = { Bucket: bucket, Key: key, VersionId: 'null', }; - async.waterfall([ - callback => s3.getObject(paramsNull, (err, nullVerData) => { - _assertNoError(err, 'getting null version'); - assert.strictEqual(nullVerData.ETag, eTags[0]); - assert.strictEqual(nullVerData.VersionId, 'null'); - callback(); - }), - callback => async.timesSeries(counter, (i, next) => - s3.putObject(params, (err, data) => { - _assertNoError(err, `putting object #${i}`); - assert.notEqual(data.VersionId, undefined); - next(); - }), err => callback(err)), - callback => s3.getObject(paramsNull, (err, nullVerData) => { - _assertNoError(err, 'getting null version'); - assert.strictEqual(nullVerData.ETag, eTags[0]); - callback(); - }), - ], done); + async.waterfall( + [ + callback => + s3.getObject(paramsNull, (err, nullVerData) => { + _assertNoError(err, 'getting null version'); + assert.strictEqual(nullVerData.ETag, eTags[0]); + 
assert.strictEqual(nullVerData.VersionId, 'null'); + callback(); + }), + callback => + async.timesSeries( + counter, + (i, next) => + s3.putObject(params, (err, data) => { + _assertNoError(err, `putting object #${i}`); + assert.notEqual(data.VersionId, undefined); + next(); + }), + err => callback(err) + ), + callback => + s3.getObject(paramsNull, (err, nullVerData) => { + _assertNoError(err, 'getting null version'); + assert.strictEqual(nullVerData.ETag, eTags[0]); + callback(); + }), + ], + done + ); }); it('should create a bunch of objects and their versions', done => { @@ -554,21 +607,31 @@ describe('put and get object with versioning', function testSuite() { const keycount = 50; const versioncount = 20; const value = '{"foo":"bar"}'; - async.timesLimit(keycount, 10, (i, next1) => { - const key = `foo${i}`; - const params = { Bucket: bucket, Key: key, Body: value }; - async.timesLimit(versioncount, 10, (j, next2) => - s3.putObject(params, (err, data) => { - assert.strictEqual(err, null); - assert(data.VersionId, 'invalid versionId'); - vids.push({ Key: key, VersionId: data.VersionId }); - next2(); - }), next1); - }, err => { - assert.strictEqual(err, null); - assert.strictEqual(vids.length, keycount * versioncount); - done(); - }); + async.timesLimit( + keycount, + 10, + (i, next1) => { + const key = `foo${i}`; + const params = { Bucket: bucket, Key: key, Body: value }; + async.timesLimit( + versioncount, + 10, + (j, next2) => + s3.putObject(params, (err, data) => { + assert.strictEqual(err, null); + assert(data.VersionId, 'invalid versionId'); + vids.push({ Key: key, VersionId: data.VersionId }); + next2(); + }), + next1 + ); + }, + err => { + assert.strictEqual(err, null); + assert.strictEqual(vids.length, keycount * versioncount); + done(); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/objectPutCopyPart.js b/tests/functional/aws-node-sdk/test/versioning/objectPutCopyPart.js index 241eab1ec9..b17ffa7266 100644 --- a/tests/functional/aws-node-sdk/test/versioning/objectPutCopyPart.js +++ b/tests/functional/aws-node-sdk/test/versioning/objectPutCopyPart.js @@ -4,11 +4,7 @@ const async = require('async'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); -const { - removeAllVersions, - versioningEnabled, - versioningSuspended, -} = require('../../lib/utility/versioning-util.js'); +const { removeAllVersions, versioningEnabled, versioningSuspended } = require('../../lib/utility/versioning-util.js'); let sourceBucket; let destBucket; @@ -20,7 +16,6 @@ function _assertNoError(err, desc) { assert.strictEqual(err, null, `Unexpected err ${desc}: ${err}`); } - describe('Object Part Copy with Versioning', () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); @@ -30,50 +25,62 @@ describe('Object Part Copy with Versioning', () => { beforeEach(done => { sourceBucket = `copypartsourcebucket-${Date.now()}`; destBucket = `copypartdestbucket-${Date.now()}`; - async.forEach([sourceBucket, destBucket], (bucket, cb) => { - s3.createBucket({ Bucket: bucket }, cb); - }, done); + async.forEach( + [sourceBucket, destBucket], + (bucket, cb) => { + s3.createBucket({ Bucket: bucket }, cb); + }, + done + ); }); afterEach(done => { - s3.abortMultipartUpload({ - Bucket: destBucket, - Key: destKey, - UploadId: uploadId, - }, err => { - if (err) { - return done(err); + s3.abortMultipartUpload( + { + Bucket: destBucket, + Key: destKey, + UploadId: uploadId, + }, + err => { + if (err) { + return 
done(err); + } + return async.each( + [sourceBucket, destBucket], + (bucket, cb) => { + removeAllVersions({ Bucket: bucket }, err => { + if (err) { + return cb(err); + } + return s3.deleteBucket({ Bucket: bucket }, cb); + }); + }, + done + ); } - return async.each([sourceBucket, destBucket], (bucket, cb) => { - removeAllVersions({ Bucket: bucket }, err => { - if (err) { - return cb(err); - } - return s3.deleteBucket({ Bucket: bucket }, cb); - }); - }, done); - }); + ); }); describe('on bucket without versioning', () => { const eTags = []; beforeEach(done => { - async.waterfall([ - next => s3.putObject({ Bucket: sourceBucket, Key: sourceKey, - Body: 'foobar' }, next), - (data, next) => { - eTags.push(data.ETag); - s3.createMultipartUpload({ Bucket: destBucket, - Key: destKey }, next); - }, - ], (err, data) => { - if (err) { - return done(err); + async.waterfall( + [ + next => s3.putObject({ Bucket: sourceBucket, Key: sourceKey, Body: 'foobar' }, next), + (data, next) => { + eTags.push(data.ETag); + s3.createMultipartUpload({ Bucket: destBucket, Key: destKey }, next); + }, + ], + (err, data) => { + if (err) { + return done(err); + } + uploadId = data.UploadId; + return done(); } - uploadId = data.UploadId; - return done(); - }); + ); }); afterEach(done => { @@ -81,55 +88,63 @@ describe('Object Part Copy with Versioning', () => { done(); }); - it('should not return a version id when put part by copying ' + - 'without specifying version id', done => { - s3.uploadPartCopy({ - Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}`, - Key: destKey, - PartNumber: 1, - UploadId: uploadId, - }, (err, data) => { - _assertNoError(err, 'uploading part copy w/o version id'); - assert.strictEqual(data.CopySourceVersionId, undefined); - assert.strictEqual(data.CopyPartResult.ETag, eTags[0]); - done(); - }); + it('should not return a version id when put part by copying ' + 'without specifying version id', done => { + s3.uploadPartCopy( + { + Bucket: destBucket, + CopySource: `${sourceBucket}/${sourceKey}`, + Key: destKey, + PartNumber: 1, + UploadId: uploadId, + }, + (err, data) => { + _assertNoError(err, 'uploading part copy w/o version id'); + assert.strictEqual(data.CopySourceVersionId, undefined); + assert.strictEqual(data.CopyPartResult.ETag, eTags[0]); + done(); + } + ); }); - it('should return NoSuchKey if copy source version id is invalid ' + - 'id', done => { - s3.uploadPartCopy({ - Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}?` + - `versionId=${invalidId}`, - Key: destKey, - PartNumber: 1, - UploadId: uploadId, - }, err => { - assert(err, `Expected err but got ${err}`); - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + it('should return NoSuchKey if copy source version id is invalid ' + 'id', done => { + s3.uploadPartCopy( + { + Bucket: destBucket, + CopySource: `${sourceBucket}/${sourceKey}?` + `versionId=${invalidId}`, + Key: destKey, + PartNumber: 1, + UploadId: uploadId, + }, + err => { + assert(err, `Expected err but got ${err}`); + assert.strictEqual(err.code, 'InvalidArgument'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); }); - it('should allow specific version "null" for copy source ' + - 'and return version id "null" in response headers', done => { - s3.uploadPartCopy({ - Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}?versionId=null`, - Key: destKey, - PartNumber: 1, - UploadId: uploadId, - }, (err, data) => { - _assertNoError(err, - 'using specific version "null" 
for copy source'); - assert.strictEqual(data.CopySourceVersionId, 'null'); - assert.strictEqual(data.ETag, eTags[0]); - done(); - }); - }); + it( + 'should allow specific version "null" for copy source ' + + 'and return version id "null" in response headers', + done => { + s3.uploadPartCopy( + { + Bucket: destBucket, + CopySource: `${sourceBucket}/${sourceKey}?versionId=null`, + Key: destKey, + PartNumber: 1, + UploadId: uploadId, + }, + (err, data) => { + _assertNoError(err, 'using specific version "null" for copy source'); + assert.strictEqual(data.CopySourceVersionId, 'null'); + assert.strictEqual(data.ETag, eTags[0]); + done(); + } + ); + } + ); }); describe('on bucket with versioning', () => { @@ -139,33 +154,45 @@ describe('Object Part Copy with Versioning', () => { beforeEach(done => { const params = { Bucket: sourceBucket, Key: sourceKey }; - async.waterfall([ - next => s3.putObject(params, next), - (data, next) => { - eTags.push(data.ETag); - versionIds.push('null'); - s3.putBucketVersioning({ - Bucket: sourceBucket, - VersioningConfiguration: versioningEnabled, - }, err => next(err)); - }, - next => async.timesSeries(counter, (i, cb) => - s3.putObject({ Bucket: sourceBucket, Key: sourceKey, - Body: `foo${i}` }, (err, data) => { - _assertNoError(err, `putting version #${i}`); + async.waterfall( + [ + next => s3.putObject(params, next), + (data, next) => { eTags.push(data.ETag); - versionIds.push(data.VersionId); - cb(err); - }), err => next(err)), - next => s3.createMultipartUpload({ Bucket: destBucket, - Key: destKey }, next), - ], (err, data) => { - if (err) { - return done(err); + versionIds.push('null'); + s3.putBucketVersioning( + { + Bucket: sourceBucket, + VersioningConfiguration: versioningEnabled, + }, + err => next(err) + ); + }, + next => + async.timesSeries( + counter, + (i, cb) => + s3.putObject( + { Bucket: sourceBucket, Key: sourceKey, Body: `foo${i}` }, + (err, data) => { + _assertNoError(err, `putting version #${i}`); + eTags.push(data.ETag); + versionIds.push(data.VersionId); + cb(err); + } + ), + err => next(err) + ), + next => s3.createMultipartUpload({ Bucket: destBucket, Key: destKey }, next), + ], + (err, data) => { + if (err) { + return done(err); + } + uploadId = data.UploadId; + return done(); } - uploadId = data.UploadId; - return done(); - }); + ); }); afterEach(done => { @@ -174,128 +201,154 @@ describe('Object Part Copy with Versioning', () => { done(); }); - it('copy part without specifying version should return data and ' + - 'version id of latest version', done => { - const lastVersion = versionIds[versionIds.length - 1]; - const lastETag = eTags[eTags.length - 1]; - s3.uploadPartCopy({ - Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}`, - Key: destKey, - PartNumber: 1, - UploadId: uploadId, - }, (err, data) => { - _assertNoError(err, 'uploading part copy w/o version id'); - assert.strictEqual(data.CopySourceVersionId, lastVersion); - assert.strictEqual(data.CopyPartResult.ETag, lastETag); - done(); - }); - }); - - it('copy part without specifying version should return NoSuchKey ' + - 'if latest version has a delete marker', done => { - s3.deleteObject({ Bucket: sourceBucket, Key: sourceKey }, - err => { - _assertNoError(err, 'deleting latest version'); - s3.uploadPartCopy({ + it( + 'copy part without specifying version should return data and ' + 'version id of latest version', + done => { + const lastVersion = versionIds[versionIds.length - 1]; + const lastETag = eTags[eTags.length - 1]; + s3.uploadPartCopy( + { Bucket: destBucket, 
CopySource: `${sourceBucket}/${sourceKey}`, Key: destKey, PartNumber: 1, UploadId: uploadId, - }, err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'NoSuchKey'); - assert.strictEqual(err.statusCode, 404); + }, + (err, data) => { + _assertNoError(err, 'uploading part copy w/o version id'); + assert.strictEqual(data.CopySourceVersionId, lastVersion); + assert.strictEqual(data.CopyPartResult.ETag, lastETag); done(); - }); + } + ); + } + ); + + it( + 'copy part without specifying version should return NoSuchKey ' + + 'if latest version has a delete marker', + done => { + s3.deleteObject({ Bucket: sourceBucket, Key: sourceKey }, err => { + _assertNoError(err, 'deleting latest version'); + s3.uploadPartCopy( + { + Bucket: destBucket, + CopySource: `${sourceBucket}/${sourceKey}`, + Key: destKey, + PartNumber: 1, + UploadId: uploadId, + }, + err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'NoSuchKey'); + assert.strictEqual(err.statusCode, 404); + done(); + } + ); }); - }); + } + ); + + it( + 'copy part with specific version id should return ' + 'InvalidRequest if that id is a delete marker', + done => { + async.waterfall( + [ + next => + s3.deleteObject( + { + Bucket: sourceBucket, + Key: sourceKey, + }, + err => next(err) + ), + next => s3.listObjectVersions({ Bucket: sourceBucket }, next), + (data, next) => { + const deleteMarkerId = data.DeleteMarkers[0].VersionId; + return s3.uploadPartCopy( + { + Bucket: destBucket, + CopySource: `${sourceBucket}/${sourceKey}` + `?versionId=${deleteMarkerId}`, + Key: destKey, + PartNumber: 1, + UploadId: uploadId, + }, + next + ); + }, + ], + err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'InvalidRequest'); + assert.strictEqual(err.statusCode, 400); + done(); + } + ); + } + ); - it('copy part with specific version id should return ' + - 'InvalidRequest if that id is a delete marker', done => { - async.waterfall([ - next => s3.deleteObject({ - Bucket: sourceBucket, - Key: sourceKey, - }, err => next(err)), - next => s3.listObjectVersions({ Bucket: sourceBucket }, - next), - (data, next) => { - const deleteMarkerId = data.DeleteMarkers[0].VersionId; - return s3.uploadPartCopy({ + it('copy part with specific version should return NoSuchVersion ' + 'if version does not exist', done => { + const versionId = versionIds[1]; + s3.deleteObject({ Bucket: sourceBucket, Key: sourceKey, VersionId: versionId }, (err, data) => { + _assertNoError(err, `deleting version ${versionId}`); + assert.strictEqual(data.VersionId, versionId); + s3.uploadPartCopy( + { Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}` + - `?versionId=${deleteMarkerId}`, + CopySource: `${sourceBucket}/${sourceKey}` + `?versionId=${versionId}`, Key: destKey, PartNumber: 1, UploadId: uploadId, - }, next); - }, - ], err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'InvalidRequest'); - assert.strictEqual(err.statusCode, 400); - done(); + }, + err => { + assert(err, 'Expected err but did not find one'); + assert.strictEqual(err.code, 'NoSuchVersion'); + assert.strictEqual(err.statusCode, 404); + done(); + } + ); }); }); - it('copy part with specific version should return NoSuchVersion ' + - 'if version does not exist', done => { + it('copy part with specific version should return copy source ' + 'version id if it exists', done => { const versionId = versionIds[1]; - s3.deleteObject({ Bucket: sourceBucket, Key: sourceKey, - 
VersionId: versionId }, (err, data) => { - _assertNoError(err, `deleting version ${versionId}`); - assert.strictEqual(data.VersionId, versionId); - s3.uploadPartCopy({ + s3.uploadPartCopy( + { Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}` + - `?versionId=${versionId}`, + CopySource: `${sourceBucket}/${sourceKey}` + `?versionId=${versionId}`, Key: destKey, PartNumber: 1, UploadId: uploadId, - }, err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'NoSuchVersion'); - assert.strictEqual(err.statusCode, 404); + }, + (err, data) => { + _assertNoError(err, 'copy part from specific version'); + assert.strictEqual(data.CopySourceVersionId, versionId); + assert.strictEqual(data.CopyPartResult.ETag, eTags[1]); done(); - }); - }); - }); - - it('copy part with specific version should return copy source ' + - 'version id if it exists', done => { - const versionId = versionIds[1]; - s3.uploadPartCopy({ - Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}` + - `?versionId=${versionId}`, - Key: destKey, - PartNumber: 1, - UploadId: uploadId, - }, (err, data) => { - _assertNoError(err, 'copy part from specific version'); - assert.strictEqual(data.CopySourceVersionId, versionId); - assert.strictEqual(data.CopyPartResult.ETag, eTags[1]); - done(); - }); + } + ); }); - it('copy part with specific version "null" should return copy ' + - 'source version id "null" if it exists', done => { - s3.uploadPartCopy({ - Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}?versionId=null`, - Key: destKey, - PartNumber: 1, - UploadId: uploadId, - }, (err, data) => { - _assertNoError(err, 'copy part from specific version'); - assert.strictEqual(data.CopySourceVersionId, 'null'); - assert.strictEqual(data.CopyPartResult.ETag, eTags[0]); - done(); - }); - }); + it( + 'copy part with specific version "null" should return copy ' + 'source version id "null" if it exists', + done => { + s3.uploadPartCopy( + { + Bucket: destBucket, + CopySource: `${sourceBucket}/${sourceKey}?versionId=null`, + Key: destKey, + PartNumber: 1, + UploadId: uploadId, + }, + (err, data) => { + _assertNoError(err, 'copy part from specific version'); + assert.strictEqual(data.CopySourceVersionId, 'null'); + assert.strictEqual(data.CopyPartResult.ETag, eTags[0]); + done(); + } + ); + } + ); }); describe('on bucket with versioning suspended', () => { @@ -305,39 +358,54 @@ describe('Object Part Copy with Versioning', () => { beforeEach(done => { const params = { Bucket: sourceBucket, Key: sourceKey }; - async.waterfall([ - next => s3.putObject(params, next), - (data, next) => { - eTags.push(data.ETag); - versionIds.push('null'); - s3.putBucketVersioning({ - Bucket: sourceBucket, - VersioningConfiguration: versioningEnabled, - }, err => next(err)); - }, - next => async.timesSeries(counter, (i, cb) => - s3.putObject({ Bucket: sourceBucket, Key: sourceKey, - Body: `foo${i}` }, (err, data) => { - _assertNoError(err, `putting version #${i}`); + async.waterfall( + [ + next => s3.putObject(params, next), + (data, next) => { eTags.push(data.ETag); - versionIds.push(data.VersionId); - cb(err); - }), err => next(err)), - next => { - s3.putBucketVersioning({ - Bucket: sourceBucket, - VersioningConfiguration: versioningSuspended, - }, err => next(err)); - }, - next => s3.createMultipartUpload({ Bucket: destBucket, - Key: destKey }, next), - ], (err, data) => { - if (err) { - return done(err); + versionIds.push('null'); + s3.putBucketVersioning( + { + Bucket: sourceBucket, + 
VersioningConfiguration: versioningEnabled, + }, + err => next(err) + ); + }, + next => + async.timesSeries( + counter, + (i, cb) => + s3.putObject( + { Bucket: sourceBucket, Key: sourceKey, Body: `foo${i}` }, + (err, data) => { + _assertNoError(err, `putting version #${i}`); + eTags.push(data.ETag); + versionIds.push(data.VersionId); + cb(err); + } + ), + err => next(err) + ), + next => { + s3.putBucketVersioning( + { + Bucket: sourceBucket, + VersioningConfiguration: versioningSuspended, + }, + err => next(err) + ); + }, + next => s3.createMultipartUpload({ Bucket: destBucket, Key: destKey }, next), + ], + (err, data) => { + if (err) { + return done(err); + } + uploadId = data.UploadId; + return done(); } - uploadId = data.UploadId; - return done(); - }); + ); }); afterEach(done => { @@ -346,57 +414,66 @@ describe('Object Part Copy with Versioning', () => { done(); }); - it('copy part without specifying version should still return ' + - 'version id of latest version', done => { + it('copy part without specifying version should still return ' + 'version id of latest version', done => { const lastVersion = versionIds[versionIds.length - 1]; const lastETag = eTags[eTags.length - 1]; - s3.uploadPartCopy({ - Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}`, - Key: destKey, - PartNumber: 1, - UploadId: uploadId, - }, (err, data) => { - _assertNoError(err, 'uploading part copy w/o version id'); - assert.strictEqual(data.CopySourceVersionId, lastVersion); - assert.strictEqual(data.CopyPartResult.ETag, lastETag); - done(); - }); + s3.uploadPartCopy( + { + Bucket: destBucket, + CopySource: `${sourceBucket}/${sourceKey}`, + Key: destKey, + PartNumber: 1, + UploadId: uploadId, + }, + (err, data) => { + _assertNoError(err, 'uploading part copy w/o version id'); + assert.strictEqual(data.CopySourceVersionId, lastVersion); + assert.strictEqual(data.CopyPartResult.ETag, lastETag); + done(); + } + ); }); - it('copy part with specific version should still return copy ' + - 'source version id if it exists', done => { + it('copy part with specific version should still return copy ' + 'source version id if it exists', done => { const versionId = versionIds[1]; - s3.uploadPartCopy({ - Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}` + - `?versionId=${versionId}`, - Key: destKey, - PartNumber: 1, - UploadId: uploadId, - }, (err, data) => { - _assertNoError(err, 'copy part from specific version'); - assert.strictEqual(data.CopySourceVersionId, versionId); - assert.strictEqual(data.CopyPartResult.ETag, eTags[1]); - done(); - }); + s3.uploadPartCopy( + { + Bucket: destBucket, + CopySource: `${sourceBucket}/${sourceKey}` + `?versionId=${versionId}`, + Key: destKey, + PartNumber: 1, + UploadId: uploadId, + }, + (err, data) => { + _assertNoError(err, 'copy part from specific version'); + assert.strictEqual(data.CopySourceVersionId, versionId); + assert.strictEqual(data.CopyPartResult.ETag, eTags[1]); + done(); + } + ); }); - it('copy part with specific version "null" should still return ' + - 'copy source version id "null" if it exists', done => { - s3.uploadPartCopy({ - Bucket: destBucket, - CopySource: `${sourceBucket}/${sourceKey}?versionId=null`, - Key: destKey, - PartNumber: 1, - UploadId: uploadId, - }, (err, data) => { - _assertNoError(err, 'copy part from specific version'); - assert.strictEqual(data.CopySourceVersionId, 'null'); - assert.strictEqual(data.CopyPartResult.ETag, eTags[0]); - done(); - }); - }); + it( + 'copy part with specific version "null" should still return ' 
+ + 'copy source version id "null" if it exists', + done => { + s3.uploadPartCopy( + { + Bucket: destBucket, + CopySource: `${sourceBucket}/${sourceKey}?versionId=null`, + Key: destKey, + PartNumber: 1, + UploadId: uploadId, + }, + (err, data) => { + _assertNoError(err, 'copy part from specific version'); + assert.strictEqual(data.CopySourceVersionId, 'null'); + assert.strictEqual(data.CopyPartResult.ETag, eTags[0]); + done(); + } + ); + } + ); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/objectPutTagging.js b/tests/functional/aws-node-sdk/test/versioning/objectPutTagging.js index eab3c9fe23..5efc1c7239 100644 --- a/tests/functional/aws-node-sdk/test/versioning/objectPutTagging.js +++ b/tests/functional/aws-node-sdk/test/versioning/objectPutTagging.js @@ -5,10 +5,7 @@ const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const { checkOneVersion } = require('../../lib/utility/versioning-util'); -const { - removeAllVersions, - versioningEnabled, -} = require('../../lib/utility/versioning-util'); +const { removeAllVersions, versioningEnabled } = require('../../lib/utility/versioning-util'); const bucketName = 'testtaggingbucket'; const objectName = 'testtaggingobject'; @@ -21,7 +18,6 @@ function _checkError(err, code, statusCode) { assert.strictEqual(err.statusCode, statusCode); } - describe('Put object tagging with versioning', () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); @@ -38,148 +34,226 @@ describe('Put object tagging with versioning', () => { }); it('should be able to put tag with versioning', done => { - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - (err, data) => next(err, data.VersionId)), - (versionId, next) => s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - Tagging: { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }] }, - }, (err, data) => next(err, data, versionId)), - ], (err, data, versionId) => { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(data.VersionId, versionId); - done(); - }); + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.putObject({ Bucket: bucketName, Key: objectName }, (err, data) => next(err, data.VersionId)), + (versionId, next) => + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionId, + Tagging: { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + ], + }, + }, + (err, data) => next(err, data, versionId) + ), + ], + (err, data, versionId) => { + assert.ifError(err, `Found unexpected err ${err}`); + assert.strictEqual(data.VersionId, versionId); + done(); + } + ); }); - it('should not create version putting object tags on a ' + - ' version-enabled bucket where no version id is specified ', done => { - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - (err, data) => next(err, data.VersionId)), - (versionId, next) => s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - Tagging: { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }] }, - }, err => next(err, versionId)), - (versionId, next) => 
- checkOneVersion(s3, bucketName, versionId, next), - ], done); - }); + it( + 'should not create version putting object tags on a ' + + ' version-enabled bucket where no version id is specified ', + done => { + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.putObject({ Bucket: bucketName, Key: objectName }, (err, data) => + next(err, data.VersionId) + ), + (versionId, next) => + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + Tagging: { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + ], + }, + }, + err => next(err, versionId) + ), + (versionId, next) => checkOneVersion(s3, bucketName, versionId, next), + ], + done + ); + } + ); it('should be able to put tag with a version of id "null"', done => { - async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: 'null', - Tagging: { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }] }, - }, (err, data) => next(err, data)), - ], (err, data) => { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(data.VersionId, 'null'); - done(); - }); + async.waterfall( + [ + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: 'null', + Tagging: { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + ], + }, + }, + (err, data) => next(err, data) + ), + ], + (err, data) => { + assert.ifError(err, `Found unexpected err ${err}`); + assert.strictEqual(data.VersionId, 'null'); + done(); + } + ); }); - it('should return InvalidArgument putting tag with a non existing ' + - 'version id', done => { - async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: invalidId, - Tagging: { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }] }, - }, (err, data) => next(err, data)), - ], err => { - _checkError(err, 'InvalidArgument', 400); - done(); - }); + it('should return InvalidArgument putting tag with a non existing ' + 'version id', done => { + async.waterfall( + [ + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: invalidId, + Tagging: { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + ], + }, + }, + (err, data) => next(err, data) + ), + ], + err => { + _checkError(err, 'InvalidArgument', 400); + done(); + } + ); }); - it('should return 405 MethodNotAllowed putting tag without ' + - 'version id if version specified is a delete marker', done => { - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => 
s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.deleteObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - Tagging: { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }] }, - }, (err, data) => next(err, data)), - ], err => { - _checkError(err, 'MethodNotAllowed', 405); - done(); - }); - }); + it( + 'should return 405 MethodNotAllowed putting tag without ' + + 'version id if version specified is a delete marker', + done => { + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => s3.deleteObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + Tagging: { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + ], + }, + }, + (err, data) => next(err, data) + ), + ], + err => { + _checkError(err, 'MethodNotAllowed', 405); + done(); + } + ); + } + ); - it('should return 405 MethodNotAllowed putting tag with ' + - 'version id if version specified is a delete marker', done => { - async.waterfall([ - next => s3.putBucketVersioning({ Bucket: bucketName, - VersioningConfiguration: versioningEnabled }, - err => next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - next => s3.deleteObject({ Bucket: bucketName, Key: objectName }, - (err, data) => next(err, data.VersionId)), - (versionId, next) => s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - Tagging: { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }] }, - }, (err, data) => next(err, data)), - ], err => { - _checkError(err, 'MethodNotAllowed', 405); - done(); - }); - }); + it( + 'should return 405 MethodNotAllowed putting tag with ' + + 'version id if version specified is a delete marker', + done => { + async.waterfall( + [ + next => + s3.putBucketVersioning( + { Bucket: bucketName, VersioningConfiguration: versioningEnabled }, + err => next(err) + ), + next => s3.putObject({ Bucket: bucketName, Key: objectName }, err => next(err)), + next => + s3.deleteObject({ Bucket: bucketName, Key: objectName }, (err, data) => + next(err, data.VersionId) + ), + (versionId, next) => + s3.putObjectTagging( + { + Bucket: bucketName, + Key: objectName, + VersionId: versionId, + Tagging: { + TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + ], + }, + }, + (err, data) => next(err, data) + ), + ], + err => { + _checkError(err, 'MethodNotAllowed', 405); + done(); + } + ); + } + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/replicationBucket.js b/tests/functional/aws-node-sdk/test/versioning/replicationBucket.js index 230fe0d5a1..72fd0c42f0 100644 --- a/tests/functional/aws-node-sdk/test/versioning/replicationBucket.js +++ b/tests/functional/aws-node-sdk/test/versioning/replicationBucket.js @@ -6,7 +6,6 @@ const BucketUtility = require('../../lib/utility/bucket-util'); const bucketName = `versioning-bucket-${Date.now()}`; - function checkError(err, code) { assert.notEqual(err, null, 'Expected failure but got success'); assert.strictEqual(err.code, code); @@ -17,13 +16,11 @@ function checkNoError(err) { } function testVersioning(s3, versioningStatus, replicationStatus, removeReplication, cb) { - const versioningParams = { Bucket: bucketName, - 
VersioningConfiguration: { Status: versioningStatus } }; + const versioningParams = { Bucket: bucketName, VersioningConfiguration: { Status: versioningStatus } }; const replicationParams = { Bucket: bucketName, ReplicationConfiguration: { - Role: 'arn:aws:iam::123456789012:role/examplerole,' + - 'arn:aws:iam::123456789012:role/examplerole', + Role: 'arn:aws:iam::123456789012:role/examplerole,' + 'arn:aws:iam::123456789012:role/examplerole', Rules: [ { Destination: { @@ -36,16 +33,19 @@ function testVersioning(s3, versioningStatus, replicationStatus, removeReplicati ], }, }; - async.waterfall([ - cb => s3.putBucketReplication(replicationParams, e => cb(e)), - cb => { - if (removeReplication) { - return s3.deleteBucketReplication({ Bucket: bucketName }, e => cb(e)); - } - return process.nextTick(() => cb()); - }, - cb => s3.putBucketVersioning(versioningParams, e => cb(e)), - ], cb); + async.waterfall( + [ + cb => s3.putBucketReplication(replicationParams, e => cb(e)), + cb => { + if (removeReplication) { + return s3.deleteBucketReplication({ Bucket: bucketName }, e => cb(e)); + } + return process.nextTick(() => cb()); + }, + cb => s3.putBucketVersioning(versioningParams, e => cb(e)), + ], + cb + ); } describe('Versioning on a replication source bucket', () => { @@ -54,29 +54,34 @@ describe('Versioning on a replication source bucket', () => { const s3 = bucketUtil.s3; beforeEach(done => { - async.waterfall([ - cb => s3.createBucket({ Bucket: bucketName }, e => cb(e)), - cb => s3.putBucketVersioning({ - Bucket: bucketName, - VersioningConfiguration: { - Status: 'Enabled', - }, - }, err => cb(err)), - ], done); + async.waterfall( + [ + cb => s3.createBucket({ Bucket: bucketName }, e => cb(e)), + cb => + s3.putBucketVersioning( + { + Bucket: bucketName, + VersioningConfiguration: { + Status: 'Enabled', + }, + }, + err => cb(err) + ), + ], + done + ); }); afterEach(done => s3.deleteBucket({ Bucket: bucketName }, done)); - it('should not be able to disable versioning if replication enabled', - done => { + it('should not be able to disable versioning if replication enabled', done => { testVersioning(s3, 'Suspended', 'Enabled', false, err => { checkError(err, 'InvalidBucketState'); done(); }); }); - it('should be able to suspend versioning if replication disabled', - done => { + it('should be able to suspend versioning if replication disabled', done => { testVersioning(s3, 'Suspended', 'Disabled', false, err => { checkNoError(err); done(); diff --git a/tests/functional/aws-node-sdk/test/versioning/versioningGeneral1.js b/tests/functional/aws-node-sdk/test/versioning/versioningGeneral1.js index 3c8ac77131..607310ccf5 100644 --- a/tests/functional/aws-node-sdk/test/versioning/versioningGeneral1.js +++ b/tests/functional/aws-node-sdk/test/versioning/versioningGeneral1.js @@ -22,7 +22,6 @@ function comp(v1, v2) { return 0; } - describe('aws-node-sdk test bucket versioning listing', function testSuite() { this.timeout(600000); let s3; @@ -53,54 +52,66 @@ describe('aws-node-sdk test bucket versioning listing', function testSuite() { const keycount = 20; const versioncount = 20; const value = '{"foo":"bar"}'; - async.timesLimit(keycount, 10, (i, next1) => { - const key = `foo${i}`; - masterVersions.push(key); - const params = { Bucket: bucket, Key: key, Body: value }; - async.timesLimit(versioncount, 10, (j, next2) => - s3.putObject(params, (err, data) => { - assert.strictEqual(err, null); - assert(data.VersionId, 'invalid versionId'); - allVersions.push({ Key: key, VersionId: data.VersionId }); - 
next2(); - }), next1); - }, err => { - assert.strictEqual(err, null); - assert.strictEqual(allVersions.length, keycount * versioncount); - done(); - }); + async.timesLimit( + keycount, + 10, + (i, next1) => { + const key = `foo${i}`; + masterVersions.push(key); + const params = { Bucket: bucket, Key: key, Body: value }; + async.timesLimit( + versioncount, + 10, + (j, next2) => + s3.putObject(params, (err, data) => { + assert.strictEqual(err, null); + assert(data.VersionId, 'invalid versionId'); + allVersions.push({ Key: key, VersionId: data.VersionId }); + next2(); + }), + next1 + ); + }, + err => { + assert.strictEqual(err, null); + assert.strictEqual(allVersions.length, keycount * versioncount); + done(); + } + ); }); it('should list all latest versions', done => { const params = { Bucket: bucket, MaxKeys: 1000, Delimiter: '/' }; s3.listObjects(params, (err, data) => { const keys = data.Contents.map(entry => entry.Key); - assert.deepStrictEqual(keys.sort(), masterVersions.sort(), - 'not same keys'); + assert.deepStrictEqual(keys.sort(), masterVersions.sort(), 'not same keys'); done(); }); }); it('should create some delete markers', done => { const keycount = 15; - async.times(keycount, (i, next) => { - const key = masterVersions[i]; - const params = { Bucket: bucket, Key: key }; - s3.deleteObject(params, (err, data) => { - assert.strictEqual(err, null); - assert(data.VersionId, 'invalid versionId'); - allVersions.push({ Key: key, VersionId: data.VersionId }); - next(); - }); - }, done); + async.times( + keycount, + (i, next) => { + const key = masterVersions[i]; + const params = { Bucket: bucket, Key: key }; + s3.deleteObject(params, (err, data) => { + assert.strictEqual(err, null); + assert(data.VersionId, 'invalid versionId'); + allVersions.push({ Key: key, VersionId: data.VersionId }); + next(); + }); + }, + done + ); }); it('should list all latest versions', done => { const params = { Bucket: bucket, MaxKeys: 1000, Delimiter: '/' }; s3.listObjects(params, (err, data) => { const keys = data.Contents.map(entry => entry.Key); - assert.deepStrictEqual(keys.sort(), masterVersions.sort().slice(15), - 'not same keys'); + assert.deepStrictEqual(keys.sort(), masterVersions.sort().slice(15), 'not same keys'); done(); }); }); @@ -108,22 +119,34 @@ describe('aws-node-sdk test bucket versioning listing', function testSuite() { it('should list all versions', done => { const versions = []; const params = { Bucket: bucket, MaxKeys: 15, Delimiter: '/' }; - async.retry(100, done => s3.listObjectVersions(params, (err, data) => { - data.Versions.forEach(version => versions.push({ - Key: version.Key, VersionId: version.VersionId })); - data.DeleteMarkers.forEach(version => versions.push({ - Key: version.Key, VersionId: version.VersionId })); - if (data.IsTruncated) { - params.KeyMarker = data.NextKeyMarker; - params.VersionIdMarker = data.NextVersionIdMarker; - return done('not done yet'); + async.retry( + 100, + done => + s3.listObjectVersions(params, (err, data) => { + data.Versions.forEach(version => + versions.push({ + Key: version.Key, + VersionId: version.VersionId, + }) + ); + data.DeleteMarkers.forEach(version => + versions.push({ + Key: version.Key, + VersionId: version.VersionId, + }) + ); + if (data.IsTruncated) { + params.KeyMarker = data.NextKeyMarker; + params.VersionIdMarker = data.NextVersionIdMarker; + return done('not done yet'); + } + return done(); + }), + () => { + assert.deepStrictEqual(versions.sort(comp), allVersions.sort(comp), 'not same versions'); + const params = { Bucket: 
bucket, Delete: { Objects: allVersions } }; + s3.deleteObjects(params, done); } - return done(); - }), () => { - assert.deepStrictEqual(versions.sort(comp), allVersions.sort(comp), - 'not same versions'); - const params = { Bucket: bucket, Delete: { Objects: allVersions } }; - s3.deleteObjects(params, done); - }); + ); }); }); diff --git a/tests/functional/aws-node-sdk/test/versioning/versioningGeneral2.js b/tests/functional/aws-node-sdk/test/versioning/versioningGeneral2.js index f38a2d2b71..4f72a0ce60 100644 --- a/tests/functional/aws-node-sdk/test/versioning/versioningGeneral2.js +++ b/tests/functional/aws-node-sdk/test/versioning/versioningGeneral2.js @@ -30,8 +30,7 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { s3.putBucketVersioning(params, error => { if (error) { assert.strictEqual(error.statusCode, 400); - assert.strictEqual( - error.code, 'IllegalVersioningConfigurationException'); + assert.strictEqual(error.code, 'IllegalVersioningConfigurationException'); done(); } else { done('accepted empty versioning configuration'); @@ -58,8 +57,7 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { s3.putBucketVersioning(params, error => { if (error) { assert.strictEqual(error.statusCode, 400); - assert.strictEqual( - error.code, 'IllegalVersioningConfigurationException'); + assert.strictEqual(error.code, 'IllegalVersioningConfigurationException'); done(); } else { done('accepted empty versioning configuration'); @@ -81,14 +79,13 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { Bucket: bucket, VersioningConfiguration: { MFADelete: 'fun', - Status: 'let\'s do it', + Status: "let's do it", }, }; s3.putBucketVersioning(params, error => { if (error) { assert.strictEqual(error.statusCode, 400); - assert.strictEqual( - error.code, 'IllegalVersioningConfigurationException'); + assert.strictEqual(error.code, 'IllegalVersioningConfigurationException'); done(); } else { done('accepted empty versioning configuration'); @@ -143,8 +140,7 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { versionIds.push(data.VersionId); s3.getObject(params, (err, data) => { assert.strictEqual(err, null); - assert.strictEqual(params.VersionId, data.VersionId, - 'version ids are not equal'); + assert.strictEqual(params.VersionId, data.VersionId, 'version ids are not equal'); // TODO compare the value of null version and the original // version when find out how to include value in the put params.VersionId = 'null'; @@ -158,21 +154,25 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { const paramsNull = { Bucket: bucket, Key: '/', VersionId: 'null' }; let nullVersionId; // create new versions - async.timesSeries(counter, (i, next) => s3.putObject(params, - (err, data) => { - versionIds.push(data.VersionId); - // get the 'null' version - s3.getObject(paramsNull, (err, data) => { - assert.strictEqual(err, null); - if (nullVersionId === undefined) { - nullVersionId = data.VersionId; - } - // what to expect: nullVersionId should be the same - assert(nullVersionId, 'nullVersionId should be valid'); - assert.strictEqual(nullVersionId, data.VersionId); - next(err); - }); - }), done); + async.timesSeries( + counter, + (i, next) => + s3.putObject(params, (err, data) => { + versionIds.push(data.VersionId); + // get the 'null' version + s3.getObject(paramsNull, (err, data) => { + assert.strictEqual(err, null); + if (nullVersionId === undefined) { + nullVersionId = data.VersionId; + } + // what to expect: 
nullVersionId should be the same + assert(nullVersionId, 'nullVersionId should be valid'); + assert.strictEqual(nullVersionId, data.VersionId); + next(err); + }); + }), + done + ); }); it('should accept valid versioning configuration', done => { @@ -200,30 +200,35 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { const paramsNull = { Bucket: bucket, Key: '/', VersionId: 'null' }; // let nullVersionId = undefined; // let newNullVersionId = undefined; - async.waterfall([ - callback => s3.getObject(paramsNull, err => { - assert.strictEqual(err, null); - // nullVersionId = data.VersionId; - callback(); - }), - callback => s3.putObject(params, err => { - assert.strictEqual(err, null); - versionIds.push('null'); - callback(); - }), - callback => s3.getObject(paramsNull, (err, data) => { - assert.strictEqual(err, null); - assert.strictEqual(data.VersionId, 'null', - 'version ids are equal'); - callback(); - }), - callback => s3.getObject(params, (err, data) => { - assert.strictEqual(err, null); - assert.strictEqual(data.VersionId, 'null', - 'version ids are not equal'); - callback(); - }), - ], done); + async.waterfall( + [ + callback => + s3.getObject(paramsNull, err => { + assert.strictEqual(err, null); + // nullVersionId = data.VersionId; + callback(); + }), + callback => + s3.putObject(params, err => { + assert.strictEqual(err, null); + versionIds.push('null'); + callback(); + }), + callback => + s3.getObject(paramsNull, (err, data) => { + assert.strictEqual(err, null); + assert.strictEqual(data.VersionId, 'null', 'version ids are equal'); + callback(); + }), + callback => + s3.getObject(params, (err, data) => { + assert.strictEqual(err, null); + assert.strictEqual(data.VersionId, 'null', 'version ids are not equal'); + callback(); + }), + ], + done + ); }); it('should enable versioning and preserve the null version', done => { @@ -236,27 +241,35 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { const params = { Bucket: bucket, Key: '/' }; const paramsNull = { Bucket: bucket, Key: '/', VersionId: 'null' }; let nullVersionId; - async.waterfall([ - callback => s3.getObject(paramsNull, (err, data) => { - assert.strictEqual(err, null); - nullVersionId = data.VersionId; - callback(); - }), - callback => s3.putBucketVersioning(paramsVersioning, - err => callback(err)), - callback => async.timesSeries(counter, (i, next) => - s3.putObject(params, (err, data) => { - assert.strictEqual(err, null); - versionIds.push(data.VersionId); - next(); - }), err => callback(err)), - callback => s3.getObject(paramsNull, (err, data) => { - assert.strictEqual(err, null); - assert.strictEqual(nullVersionId, data.VersionId, - 'version ids are not equal'); - callback(); - }), - ], done); + async.waterfall( + [ + callback => + s3.getObject(paramsNull, (err, data) => { + assert.strictEqual(err, null); + nullVersionId = data.VersionId; + callback(); + }), + callback => s3.putBucketVersioning(paramsVersioning, err => callback(err)), + callback => + async.timesSeries( + counter, + (i, next) => + s3.putObject(params, (err, data) => { + assert.strictEqual(err, null); + versionIds.push(data.VersionId); + next(); + }), + err => callback(err) + ), + callback => + s3.getObject(paramsNull, (err, data) => { + assert.strictEqual(err, null); + assert.strictEqual(nullVersionId, data.VersionId, 'version ids are not equal'); + callback(); + }), + ], + done + ); }); it('should create delete marker and keep the null version', done => { @@ -265,50 +278,54 @@ describe('aws-node-sdk test bucket 
versioning', function testSuite() { s3.getObject(paramsNull, (err, data) => { assert.strictEqual(err, null); const nullVersionId = data.VersionId; - async.timesSeries(counter, (i, next) => s3.deleteObject(params, - (err, data) => { - assert.strictEqual(err, null); - versionIds.push(data.VersionId); - s3.getObject(params, err => { - assert.strictEqual(err.code, 'NoSuchKey'); - next(); - }); - }), err => { + async.timesSeries( + counter, + (i, next) => + s3.deleteObject(params, (err, data) => { + assert.strictEqual(err, null); + versionIds.push(data.VersionId); + s3.getObject(params, err => { + assert.strictEqual(err.code, 'NoSuchKey'); + next(); + }); + }), + err => { assert.strictEqual(err, null); s3.getObject(paramsNull, (err, data) => { - assert.strictEqual(nullVersionId, data.VersionId, - 'version ids are not equal'); + assert.strictEqual(nullVersionId, data.VersionId, 'version ids are not equal'); done(); }); - }); + } + ); }); }); it('should delete latest version and get the next version', done => { versionIds.reverse(); const params = { Bucket: bucket, Key: '/' }; - async.timesSeries(versionIds.length, (i, next) => { - const versionId = versionIds[i]; - const nextVersionId = i < versionIds.length ? - versionIds[i + 1] : undefined; - const paramsVersion = - { Bucket: bucket, Key: '/', VersionId: versionId }; - s3.deleteObject(paramsVersion, err => { - assert.strictEqual(err, null); - s3.getObject(params, (err, data) => { - if (err) { - assert(err.code === 'NotFound' || - err.code === 'NoSuchKey', 'error'); - } else { - assert(data.VersionId, 'invalid versionId'); - if (nextVersionId !== 'null') { - assert.strictEqual(data.VersionId, nextVersionId); + async.timesSeries( + versionIds.length, + (i, next) => { + const versionId = versionIds[i]; + const nextVersionId = i < versionIds.length ? 
versionIds[i + 1] : undefined; + const paramsVersion = { Bucket: bucket, Key: '/', VersionId: versionId }; + s3.deleteObject(paramsVersion, err => { + assert.strictEqual(err, null); + s3.getObject(params, (err, data) => { + if (err) { + assert(err.code === 'NotFound' || err.code === 'NoSuchKey', 'error'); + } else { + assert(data.VersionId, 'invalid versionId'); + if (nextVersionId !== 'null') { + assert.strictEqual(data.VersionId, nextVersionId); + } } - } - next(); + next(); + }); }); - }); - }, done); + }, + done + ); }); it('should create a bunch of objects and their versions', done => { @@ -316,23 +333,33 @@ describe('aws-node-sdk test bucket versioning', function testSuite() { const keycount = 50; const versioncount = 20; const value = '{"foo":"bar"}'; - async.timesLimit(keycount, 10, (i, next1) => { - const key = `foo${i}`; - const params = { Bucket: bucket, Key: key, Body: value }; - async.timesLimit(versioncount, 10, (j, next2) => - s3.putObject(params, (err, data) => { - assert.strictEqual(err, null); - assert(data.VersionId, 'invalid versionId'); - vids.push({ Key: key, VersionId: data.VersionId }); - next2(); - }), next1); - }, err => { - assert.strictEqual(err, null); - assert.strictEqual(vids.length, keycount * versioncount); - const params = { Bucket: bucket, Delete: { Objects: vids } }; - // TODO use delete marker and check with the result - process.stdout.write('creating objects done, now deleting...'); - s3.deleteObjects(params, done); - }); + async.timesLimit( + keycount, + 10, + (i, next1) => { + const key = `foo${i}`; + const params = { Bucket: bucket, Key: key, Body: value }; + async.timesLimit( + versioncount, + 10, + (j, next2) => + s3.putObject(params, (err, data) => { + assert.strictEqual(err, null); + assert(data.VersionId, 'invalid versionId'); + vids.push({ Key: key, VersionId: data.VersionId }); + next2(); + }), + next1 + ); + }, + err => { + assert.strictEqual(err, null); + assert.strictEqual(vids.length, keycount * versioncount); + const params = { Bucket: bucket, Delete: { Objects: vids } }; + // TODO use delete marker and check with the result + process.stdout.write('creating objects done, now deleting...'); + s3.deleteObjects(params, done); + } + ); }); }); diff --git a/tests/functional/backbeat/bucketIndexing.js b/tests/functional/backbeat/bucketIndexing.js index 8f2c2571b6..91a021e5db 100644 --- a/tests/functional/backbeat/bucketIndexing.js +++ b/tests/functional/backbeat/bucketIndexing.js @@ -2,8 +2,7 @@ const assert = require('assert'); const async = require('async'); const { makeRequest } = require('../../functional/raw-node/utils/makeRequest'); -const BucketUtility = - require('../../functional/aws-node-sdk/lib/utility/bucket-util'); +const BucketUtility = require('../../functional/aws-node-sdk/lib/utility/bucket-util'); const { runIfMongo } = require('./utils'); const ipAddress = process.env.IP ? 
process.env.IP : '127.0.0.1'; @@ -16,46 +15,52 @@ const backbeatAuthCredentials = { const TEST_BUCKET = 'backbeatbucket'; function indexDeleteRequest(payload, bucket, cb) { - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: - `/_/backbeat/index/${bucket}`, - headers: {}, - jsonResponse: true, - requestBody: JSON.stringify(payload), - queryObj: { operation: 'delete' }, - }, cb); + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: `/_/backbeat/index/${bucket}`, + headers: {}, + jsonResponse: true, + requestBody: JSON.stringify(payload), + queryObj: { operation: 'delete' }, + }, + cb + ); } function indexPutRequest(payload, bucket, cb) { - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: - `/_/backbeat/index/${bucket}`, - headers: {}, - jsonResponse: true, - requestBody: JSON.stringify(payload), - queryObj: { operation: 'add' }, - }, cb); + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: `/_/backbeat/index/${bucket}`, + headers: {}, + jsonResponse: true, + requestBody: JSON.stringify(payload), + queryObj: { operation: 'add' }, + }, + cb + ); } function indexGetRequest(bucket, cb) { - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'GET', - path: - `/_/backbeat/index/${bucket}`, - headers: {}, - jsonResponse: true, - }, cb); + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'GET', + path: `/_/backbeat/index/${bucket}`, + headers: {}, + jsonResponse: true, + }, + cb + ); } const indexReqObject = [ @@ -79,9 +84,7 @@ const indexReqObject = [ const indexRespObject = [ { name: '_id_', - keys: [ - { key: '_id', order: 1 }, - ] + keys: [{ key: '_id', order: 1 }], }, { keys: [ @@ -105,10 +108,10 @@ runIfMongo('Indexing Routes', () => { let s3; before(done => { - bucketUtil = new BucketUtility( - 'default', { signatureVersion: 'v4' }); + bucketUtil = new BucketUtility('default', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - s3.createBucket({ Bucket: TEST_BUCKET }).promise() + s3.createBucket({ Bucket: TEST_BUCKET }) + .promise() .then(() => done()) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); @@ -123,19 +126,21 @@ runIfMongo('Indexing Routes', () => { // }); it('should reject non-authenticated requests', done => { - makeRequest({ - hostname: ipAddress, - port: 8000, - method: 'GET', - path: - '/_/backbeat/index/testbucket', - headers: {}, - jsonResponse: true, - }, err => { - assert(err); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); + makeRequest( + { + hostname: ipAddress, + port: 8000, + method: 'GET', + path: '/_/backbeat/index/testbucket', + headers: {}, + jsonResponse: true, + }, + err => { + assert(err); + assert.strictEqual(err.code, 'AccessDenied'); + done(); + } + ); }); it('should return error: invalid payload - empty', done => { @@ -163,60 +168,65 @@ runIfMongo('Indexing Routes', () => { }); it('should successfully add indexes', done => { - async.series([ - next => { - indexPutRequest(indexReqObject, TEST_BUCKET, err => { - assert.ifError(err); - next(); - }); - }, - next => { - indexGetRequest(TEST_BUCKET, (err, data) => { - assert.ifError(err); - const res = JSON.parse(data.body); - assert.deepStrictEqual(res.Indexes, indexRespObject); 
- next(); - }); - }, - ], done); + async.series( + [ + next => { + indexPutRequest(indexReqObject, TEST_BUCKET, err => { + assert.ifError(err); + next(); + }); + }, + next => { + indexGetRequest(TEST_BUCKET, (err, data) => { + assert.ifError(err); + const res = JSON.parse(data.body); + assert.deepStrictEqual(res.Indexes, indexRespObject); + next(); + }); + }, + ], + done + ); }); it('should successfully delete indexes', done => { - async.series([ - next => { - indexPutRequest(indexReqObject, TEST_BUCKET, err => { - assert.ifError(err); - next(); - }); - }, - next => { - indexGetRequest(TEST_BUCKET, (err, data) => { - assert.ifError(err); - const res = JSON.parse(data.body); - assert.deepStrictEqual(res.Indexes, indexRespObject); - next(); - }); - }, - next => { - indexDeleteRequest(indexReqObject, TEST_BUCKET, err => { - assert.ifError(err); - next(); - }); - }, - next => { - indexGetRequest(TEST_BUCKET, (err, data) => { - assert.ifError(err); - const res = JSON.parse(data.body); - assert.deepStrictEqual(res.Indexes, [ - { - name: '_id_', - keys: [{ key: '_id', order: 1 }], - } - ]); - next(); - }); - }, - ], done); + async.series( + [ + next => { + indexPutRequest(indexReqObject, TEST_BUCKET, err => { + assert.ifError(err); + next(); + }); + }, + next => { + indexGetRequest(TEST_BUCKET, (err, data) => { + assert.ifError(err); + const res = JSON.parse(data.body); + assert.deepStrictEqual(res.Indexes, indexRespObject); + next(); + }); + }, + next => { + indexDeleteRequest(indexReqObject, TEST_BUCKET, err => { + assert.ifError(err); + next(); + }); + }, + next => { + indexGetRequest(TEST_BUCKET, (err, data) => { + assert.ifError(err); + const res = JSON.parse(data.body); + assert.deepStrictEqual(res.Indexes, [ + { + name: '_id_', + keys: [{ key: '_id', order: 1 }], + }, + ]); + next(); + }); + }, + ], + done + ); }); }); - diff --git a/tests/functional/backbeat/excludedDataStoreName.js b/tests/functional/backbeat/excludedDataStoreName.js index 64a15ff696..1aa5c8e811 100644 --- a/tests/functional/backbeat/excludedDataStoreName.js +++ b/tests/functional/backbeat/excludedDataStoreName.js @@ -22,228 +22,245 @@ describe('excludedDataStoreName', () => { bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - return async.series([ - next => s3.createBucket({ Bucket: testBucket }, next), - next => s3.putBucketVersioning({ - Bucket: testBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => s3.putObject({ Bucket: testBucket, Key: 'key0' }, (err, data) => { - expectedVersions.push(data.VersionId); - return next(err); - }), - next => s3.putObject({ Bucket: testBucket, Key: 'key0' }, (err, data) => { - if (err) { - return next(err); - } - const versionId = data.VersionId; - return updateMetadata( - { bucket: testBucket, objectKey: 'key0', versionId, authCredentials: credentials }, - { dataStoreName: location2 }, - next); - }), - next => s3.putObject({ Bucket: testBucket, Key: 'key0' }, (err, data) => { - expectedVersions.push(data.VersionId); - return next(err); - }), - next => s3.putObject({ Bucket: testBucket, Key: 'key0' }, next), - next => s3.putObject({ Bucket: testBucket, Key: 'key1' }, (err, data) => { - if (err) { - return next(err); - } - const versionId = data.VersionId; - return updateMetadata( - { bucket: testBucket, objectKey: 'key1', versionId, authCredentials: credentials }, - { dataStoreName: location2 }, - next); - }), - next => s3.putObject({ Bucket: testBucket, Key: 'key2' }, next), - ], done); + return async.series( + [ + 
next => s3.createBucket({ Bucket: testBucket }, next), + next => + s3.putBucketVersioning( + { + Bucket: testBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => + s3.putObject({ Bucket: testBucket, Key: 'key0' }, (err, data) => { + expectedVersions.push(data.VersionId); + return next(err); + }), + next => + s3.putObject({ Bucket: testBucket, Key: 'key0' }, (err, data) => { + if (err) { + return next(err); + } + const versionId = data.VersionId; + return updateMetadata( + { bucket: testBucket, objectKey: 'key0', versionId, authCredentials: credentials }, + { dataStoreName: location2 }, + next + ); + }), + next => + s3.putObject({ Bucket: testBucket, Key: 'key0' }, (err, data) => { + expectedVersions.push(data.VersionId); + return next(err); + }), + next => s3.putObject({ Bucket: testBucket, Key: 'key0' }, next), + next => + s3.putObject({ Bucket: testBucket, Key: 'key1' }, (err, data) => { + if (err) { + return next(err); + } + const versionId = data.VersionId; + return updateMetadata( + { bucket: testBucket, objectKey: 'key1', versionId, authCredentials: credentials }, + { dataStoreName: location2 }, + next + ); + }), + next => s3.putObject({ Bucket: testBucket, Key: 'key2' }, next), + ], + done + ); }); - after(done => async.series([ - next => removeAllVersions({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: testBucket }, next), - ], done)); + after(done => + async.series( + [ + next => removeAllVersions({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: testBucket }, next), + ], + done + ) + ); it('should return error when listing current versions if excluded-data-store-name is not in config', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'excluded-data-store-name': 'idonotexist' }, - authCredentials: credentials, - }, err => { - assert(err, 'Expected error but found none'); - assert.strictEqual(err.code, 'InvalidLocationConstraint'); - assert.strictEqual(err.statusCode, 400); - assert.strictEqual(err.message, 'value of the location you are attempting to set' + - ' - idonotexist - is not listed in the locationConstraint config'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'excluded-data-store-name': 'idonotexist' }, + authCredentials: credentials, + }, + err => { + assert(err, 'Expected error but found none'); + assert.strictEqual(err.code, 'InvalidLocationConstraint'); + assert.strictEqual(err.statusCode, 400); + assert.strictEqual( + err.message, + 'value of the location you are attempting to set' + + ' - idonotexist - is not listed in the locationConstraint config' + ); + return done(); + } + ); }); it('should return error when listing non-current versions if excluded-data-store-name is not in config', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'excluded-data-store-name': 'idonotexist' }, - authCredentials: credentials, - }, err => { - assert(err, 'Expected error but found none'); - assert.strictEqual(err.code, 'InvalidLocationConstraint'); - assert.strictEqual(err.statusCode, 400); - assert.strictEqual(err.message, 'value of the location you are attempting to set' + - ' - idonotexist - is not listed in the locationConstraint config'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', 'excluded-data-store-name': 
'idonotexist' }, + authCredentials: credentials, + }, + err => { + assert(err, 'Expected error but found none'); + assert.strictEqual(err.code, 'InvalidLocationConstraint'); + assert.strictEqual(err.statusCode, 400); + assert.strictEqual( + err.message, + 'value of the location you are attempting to set' + + ' - idonotexist - is not listed in the locationConstraint config' + ); + return done(); + } + ); }); it('should exclude current versions stored in location2', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'excluded-data-store-name': location2 }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - - const contents = data.Contents; - assert.strictEqual(contents.length, 2); - - assert.strictEqual(contents[0].Key, 'key0'); - assert.strictEqual(contents[0].DataStoreName, location1); - assert.strictEqual(contents[1].Key, 'key2'); - assert.strictEqual(contents[1].DataStoreName, location1); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'excluded-data-store-name': location2 }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + + const contents = data.Contents; + assert.strictEqual(contents.length, 2); + + assert.strictEqual(contents[0].Key, 'key0'); + assert.strictEqual(contents[0].DataStoreName, location1); + assert.strictEqual(contents[1].Key, 'key2'); + assert.strictEqual(contents[1].DataStoreName, location1); + return done(); + } + ); }); it('should return trucated listing that excludes current versions stored in location2', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'excluded-data-store-name': location2, 'max-keys': '1' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.NextMarker, 'key0'); - assert.strictEqual(data.MaxKeys, 1); - - const contents = data.Contents; - assert.strictEqual(contents.length, 1); - - assert.strictEqual(contents[0].Key, 'key0'); - assert.strictEqual(contents[0].DataStoreName, location1); - - return makeBackbeatRequest({ + makeBackbeatRequest( + { method: 'GET', bucket: testBucket, - queryObj: { - 'list-type': 'current', - 'excluded-data-store-name': location2, - 'max-keys': '1', - 'marker': 'key0', - }, + queryObj: { 'list-type': 'current', 'excluded-data-store-name': location2, 'max-keys': '1' }, authCredentials: credentials, - }, (err, response) => { + }, + (err, response) => { assert.ifError(err); assert.strictEqual(response.statusCode, 200); const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.NextMarker, 'key0'); assert.strictEqual(data.MaxKeys, 1); const contents = data.Contents; assert.strictEqual(contents.length, 1); - 
assert.strictEqual(contents[0].Key, 'key2'); + assert.strictEqual(contents[0].Key, 'key0'); assert.strictEqual(contents[0].DataStoreName, location1); - return done(); - }); - }); + + return makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'current', + 'excluded-data-store-name': location2, + 'max-keys': '1', + marker: 'key0', + }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1); + + const contents = data.Contents; + assert.strictEqual(contents.length, 1); + + assert.strictEqual(contents[0].Key, 'key2'); + assert.strictEqual(contents[0].DataStoreName, location1); + return done(); + } + ); + } + ); }); it('should exclude non-current versions stored in location2', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'excluded-data-store-name': location2 }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert(!data.NextVersionIdMarker); - assert.strictEqual(data.MaxKeys, 1000); - - const contents = data.Contents; - assert.strictEqual(contents.length, 2); - - assert.strictEqual(contents[0].Key, 'key0'); - assert.strictEqual(contents[0].DataStoreName, location1); - assert.strictEqual(contents[0].VersionId, expectedVersions[1]); - assert.strictEqual(contents[1].Key, 'key0'); - assert.strictEqual(contents[1].DataStoreName, location1); - assert.strictEqual(contents[1].VersionId, expectedVersions[0]); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', 'excluded-data-store-name': location2 }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert(!data.NextVersionIdMarker); + assert.strictEqual(data.MaxKeys, 1000); + + const contents = data.Contents; + assert.strictEqual(contents.length, 2); + + assert.strictEqual(contents[0].Key, 'key0'); + assert.strictEqual(contents[0].DataStoreName, location1); + assert.strictEqual(contents[0].VersionId, expectedVersions[1]); + assert.strictEqual(contents[1].Key, 'key0'); + assert.strictEqual(contents[1].DataStoreName, location1); + assert.strictEqual(contents[1].VersionId, expectedVersions[0]); + return done(); + } + ); }); it('should return trucated listing that excludes non-current versions stored in location2', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'excluded-data-store-name': location2, 'max-keys': '1' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.NextKeyMarker, 'key0'); - assert.strictEqual(data.NextVersionIdMarker, expectedVersions[1]); - assert.strictEqual(data.MaxKeys, 1); - - const contents = data.Contents; - assert.strictEqual(contents.length, 1); - - 
assert.strictEqual(contents[0].Key, 'key0'); - assert.strictEqual(contents[0].DataStoreName, location1); - assert.strictEqual(contents[0].VersionId, expectedVersions[1]); - return makeBackbeatRequest({ + makeBackbeatRequest( + { method: 'GET', bucket: testBucket, - queryObj: { - 'list-type': 'noncurrent', - 'excluded-data-store-name': location2, - 'key-marker': 'key0', - 'version-id-marker': expectedVersions[1], - 'max-keys': '1', - }, + queryObj: { 'list-type': 'noncurrent', 'excluded-data-store-name': location2, 'max-keys': '1' }, authCredentials: credentials, - }, (err, response) => { + }, + (err, response) => { assert.ifError(err); assert.strictEqual(response.statusCode, 200); const data = JSON.parse(response.body); assert.strictEqual(data.IsTruncated, true); assert.strictEqual(data.NextKeyMarker, 'key0'); - assert.strictEqual(data.NextVersionIdMarker, expectedVersions[0]); + assert.strictEqual(data.NextVersionIdMarker, expectedVersions[1]); assert.strictEqual(data.MaxKeys, 1); const contents = data.Contents; @@ -251,33 +268,67 @@ describe('excludedDataStoreName', () => { assert.strictEqual(contents[0].Key, 'key0'); assert.strictEqual(contents[0].DataStoreName, location1); - assert.strictEqual(contents[0].VersionId, expectedVersions[0]); - return makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 'list-type': 'noncurrent', - 'excluded-data-store-name': location2, - 'key-marker': 'key0', - 'version-id-marker': expectedVersions[0], - 'max-keys': '1', + assert.strictEqual(contents[0].VersionId, expectedVersions[1]); + return makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'noncurrent', + 'excluded-data-store-name': location2, + 'key-marker': 'key0', + 'version-id-marker': expectedVersions[1], + 'max-keys': '1', + }, + authCredentials: credentials, }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert(!data.NextVersionIdMarker); - assert.strictEqual(data.MaxKeys, 1); - - const contents = data.Contents; - assert.strictEqual(contents.length, 0); - return done(); - }); - }); - }); + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.NextKeyMarker, 'key0'); + assert.strictEqual(data.NextVersionIdMarker, expectedVersions[0]); + assert.strictEqual(data.MaxKeys, 1); + + const contents = data.Contents; + assert.strictEqual(contents.length, 1); + + assert.strictEqual(contents[0].Key, 'key0'); + assert.strictEqual(contents[0].DataStoreName, location1); + assert.strictEqual(contents[0].VersionId, expectedVersions[0]); + return makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'noncurrent', + 'excluded-data-store-name': location2, + 'key-marker': 'key0', + 'version-id-marker': expectedVersions[0], + 'max-keys': '1', + }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert(!data.NextVersionIdMarker); + assert.strictEqual(data.MaxKeys, 1); + + const contents = data.Contents; + assert.strictEqual(contents.length, 0); + return 
done(); + } + ); + } + ); + } + ); }); }); diff --git a/tests/functional/backbeat/listDeleteMarker.js b/tests/functional/backbeat/listDeleteMarker.js index 6911afb985..539cb368e0 100644 --- a/tests/functional/backbeat/listDeleteMarker.js +++ b/tests/functional/backbeat/listDeleteMarker.js @@ -21,95 +21,118 @@ describe('listLifecycle with non-current delete marker', () => { bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - return async.series([ - next => s3.createBucket({ Bucket: testBucket }, next), - next => s3.putBucketVersioning({ - Bucket: testBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => s3.deleteObject({ Bucket: testBucket, Key: keyName }, (err, data) => { - if (err) { - return next(err); - } - expectedDMVersionId = data.VersionId; - return next(); - }), - next => s3.putObject({ Bucket: testBucket, Key: keyName }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); - }), - ], done); + return async.series( + [ + next => s3.createBucket({ Bucket: testBucket }, next), + next => + s3.putBucketVersioning( + { + Bucket: testBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => + s3.deleteObject({ Bucket: testBucket, Key: keyName }, (err, data) => { + if (err) { + return next(err); + } + expectedDMVersionId = data.VersionId; + return next(); + }), + next => + s3.putObject({ Bucket: testBucket, Key: keyName }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + ], + done + ); }); - after(done => async.series([ - next => removeAllVersions({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: testBucket }, next), - ], done)); + after(done => + async.series( + [ + next => removeAllVersions({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: testBucket }, next), + ], + done + ) + ); it('should return the current version', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.Contents.length, 1); - const key = data.Contents[0]; - assert.strictEqual(key.Key, keyName); - assert.strictEqual(key.VersionId, expectedVersionId); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.Contents.length, 1); + const key = data.Contents[0]; + assert.strictEqual(key.Key, keyName); + assert.strictEqual(key.VersionId, expectedVersionId); + return done(); + } + ); }); it('should return the non-current delete marker', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - 
- assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.Contents.length, 1); - const key = data.Contents[0]; - assert.strictEqual(key.Key, keyName); - assert.strictEqual(key.VersionId, expectedDMVersionId); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.Contents.length, 1); + const key = data.Contents[0]; + assert.strictEqual(key.Key, keyName); + assert.strictEqual(key.VersionId, expectedDMVersionId); + return done(); + } + ); }); it('should return no orphan delete marker', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.Contents.length, 0); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.Contents.length, 0); + return done(); + } + ); }); }); @@ -124,85 +147,107 @@ describe('listLifecycle with current delete marker version', () => { bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - return async.series([ - next => s3.createBucket({ Bucket: testBucket }, next), - next => s3.putBucketVersioning({ - Bucket: testBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => s3.putObject({ Bucket: testBucket, Key: keyName }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); - }), - next => s3.deleteObject({ Bucket: testBucket, Key: keyName }, next), - ], done); + return async.series( + [ + next => s3.createBucket({ Bucket: testBucket }, next), + next => + s3.putBucketVersioning( + { + Bucket: testBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => + s3.putObject({ Bucket: testBucket, Key: keyName }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + next => s3.deleteObject({ Bucket: testBucket, Key: keyName }, next), + ], + done + ); }); - after(done => async.series([ - next => removeAllVersions({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: testBucket }, next), - ], done)); + after(done => + async.series( + [ + next => removeAllVersions({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: testBucket }, next), + ], + done + ) + ); it('should return no current object if current version is a delete marker', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 
'list-type': 'current' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.Contents.length, 0); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.Contents.length, 0); + return done(); + } + ); }); it('should return the non-current version', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.Contents.length, 1); - const key = data.Contents[0]; - assert.strictEqual(key.Key, keyName); - assert.strictEqual(key.VersionId, expectedVersionId); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.Contents.length, 1); + const key = data.Contents[0]; + assert.strictEqual(key.Key, keyName); + assert.strictEqual(key.VersionId, expectedVersionId); + return done(); + } + ); }); it('should return no orphan delete marker', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.Contents.length, 0); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.Contents.length, 0); + return done(); + } + ); }); }); diff --git a/tests/functional/backbeat/listLifecycleCurrents.js b/tests/functional/backbeat/listLifecycleCurrents.js index d7dec5c4f2..d532157dc5 100644 --- a/tests/functional/backbeat/listLifecycleCurrents.js +++ b/tests/functional/backbeat/listLifecycleCurrents.js @@ -25,10 +25,12 @@ function checkContents(contents, expectedKeyVersions) { assert(d.Owner.ID); assert(d.StorageClass); assert.strictEqual(d.StorageClass, 
'STANDARD'); - assert.deepStrictEqual(d.TagSet, [{ - Key: 'mykey', - Value: 'myvalue', - }]); + assert.deepStrictEqual(d.TagSet, [ + { + Key: 'mykey', + Value: 'myvalue', + }, + ]); assert.strictEqual(d.IsLatest, true); assert.strictEqual(d.DataStoreName, 'us-east-1'); assert.strictEqual(d.ListType, 'current'); @@ -50,507 +52,651 @@ function checkContents(contents, expectedKeyVersions) { bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - return async.series([ - next => s3.createBucket({ Bucket: testBucket }, next), - next => s3.createBucket({ Bucket: emptyBucket }, next), - next => { - if (versioning !== 'Enabled') { - return process.nextTick(next); - } - return s3.putBucketVersioning({ - Bucket: testBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next); - }, - next => { - if (versioning !== 'Enabled') { - return process.nextTick(next); - } - return s3.putBucketVersioning({ - Bucket: emptyBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next); - }, - next => async.times(3, (n, cb) => { - const keyName = `oldkey${n}`; - s3.putObject({ Bucket: testBucket, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, - (err, data) => { - if (err) { - cb(err); + return async.series( + [ + next => s3.createBucket({ Bucket: testBucket }, next), + next => s3.createBucket({ Bucket: emptyBucket }, next), + next => { + if (versioning !== 'Enabled') { + return process.nextTick(next); } - expectedKeyVersions[keyName] = data.VersionId; - return cb(); - }); - }, next), - next => { - date = new Date(Date.now()).toISOString(); - return async.times(5, (n, cb) => { - const keyName = `key${n}`; - s3.putObject({ Bucket: testBucket, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, - (err, data) => { - if (err) { - cb(err); - } - expectedKeyVersions[keyName] = data.VersionId; - return cb(); - }); - }, next); - }, - ], done); + return s3.putBucketVersioning( + { + Bucket: testBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ); + }, + next => { + if (versioning !== 'Enabled') { + return process.nextTick(next); + } + return s3.putBucketVersioning( + { + Bucket: emptyBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ); + }, + next => + async.times( + 3, + (n, cb) => { + const keyName = `oldkey${n}`; + s3.putObject( + { Bucket: testBucket, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, + (err, data) => { + if (err) { + cb(err); + } + expectedKeyVersions[keyName] = data.VersionId; + return cb(); + } + ); + }, + next + ), + next => { + date = new Date(Date.now()).toISOString(); + return async.times( + 5, + (n, cb) => { + const keyName = `key${n}`; + s3.putObject( + { Bucket: testBucket, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, + (err, data) => { + if (err) { + cb(err); + } + expectedKeyVersions[keyName] = data.VersionId; + return cb(); + } + ); + }, + next + ); + }, + ], + done + ); }); - after(done => async.series([ - next => removeAllVersions({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: emptyBucket }, next), - ], done)); + after(done => + async.series( + [ + next => removeAllVersions({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: emptyBucket }, next), + ], + done + ) + ); it('should return empty list of current versions if bucket is empty', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: emptyBucket, - queryObj: { 
'list-type': 'current' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 0); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: emptyBucket, + queryObj: { 'list-type': 'current' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual( + data.MaxScannedLifecycleListingEntries, + config.maxScannedLifecycleListingEntries + ); + assert.strictEqual(data.Contents.length, 0); + return done(); + } + ); }); it('should return empty list of current versions if prefix does not apply', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'prefix': 'unknown' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 0); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', prefix: 'unknown' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual( + data.MaxScannedLifecycleListingEntries, + config.maxScannedLifecycleListingEntries + ); + assert.strictEqual(data.Contents.length, 0); + return done(); + } + ); }); it('should return empty list if max-keys is set to 0', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'max-keys': '0' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 0); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 0); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'max-keys': '0' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 0); + assert.strictEqual( + data.MaxScannedLifecycleListingEntries, + config.maxScannedLifecycleListingEntries + ); + 
assert.strictEqual(data.Contents.length, 0); + + return done(); + } + ); }); it('should return NoSuchBucket error if bucket does not exist', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: 'idonotexist', - queryObj: { 'list-type': 'current' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'NoSuchBucket'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: 'idonotexist', + queryObj: { 'list-type': 'current' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'NoSuchBucket'); + return done(); + } + ); }); it('should return InvalidArgument error if max-keys is invalid', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'max-keys': 'a' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'max-keys': 'a' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument error if max-scanned-lifecycle-listing-entries is invalid', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'max-scanned-lifecycle-listing-entries': 'a' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'max-scanned-lifecycle-listing-entries': 'a' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument error if max-scanned-lifecycle-listing-entries is set to 0', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'max-scanned-lifecycle-listing-entries': '0' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'max-scanned-lifecycle-listing-entries': '0' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument if max-scanned-lifecycle-listing-entries exceeds the default value', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'max-scanned-lifecycle-listing-entries': - (config.maxScannedLifecycleListingEntries + 1).toString() }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'current', + 'max-scanned-lifecycle-listing-entries': ( + config.maxScannedLifecycleListingEntries + 1 + ).toString(), + }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return all the current versions', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 
200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - - const contents = data.Contents; - assert.strictEqual(contents.length, 8); - checkContents(contents, expectedKeyVersions); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual( + data.MaxScannedLifecycleListingEntries, + config.maxScannedLifecycleListingEntries + ); + + const contents = data.Contents; + assert.strictEqual(contents.length, 8); + checkContents(contents, expectedKeyVersions); + + return done(); + } + ); }); it('should return all the current versions before max scanned entries value is reached', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'max-scanned-lifecycle-listing-entries': '5' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.NextMarker, 'key4'); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, 5); - - const contents = data.Contents; - assert.strictEqual(contents.length, 5); - checkContents(contents, expectedKeyVersions); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'max-scanned-lifecycle-listing-entries': '5' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.NextMarker, 'key4'); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, 5); + + const contents = data.Contents; + assert.strictEqual(contents.length, 5); + checkContents(contents, expectedKeyVersions); + + return done(); + } + ); }); it('should return all the current versions with prefix old', done => { const prefix = 'old'; - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', prefix }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', prefix }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual( + data.MaxScannedLifecycleListingEntries, + config.maxScannedLifecycleListingEntries + ); + assert.strictEqual(data.Prefix, prefix); + + const contents = data.Contents; + 
assert.strictEqual(contents.length, 3); + checkContents(contents, expectedKeyVersions); + + return done(); + } + ); + }); - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Prefix, prefix); + it('should return the current versions before a defined date', done => { + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'before-date': date }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual( + data.MaxScannedLifecycleListingEntries, + config.maxScannedLifecycleListingEntries + ); + assert.strictEqual(data.Contents.length, 3); + assert.strictEqual(data.BeforeDate, date); + + const contents = data.Contents; + checkContents(contents, expectedKeyVersions); + assert.strictEqual(contents[0].Key, 'oldkey0'); + assert.strictEqual(contents[1].Key, 'oldkey1'); + assert.strictEqual(contents[2].Key, 'oldkey2'); + return done(); + } + ); + }); - const contents = data.Contents; - assert.strictEqual(contents.length, 3); - checkContents(contents, expectedKeyVersions); + it('should truncate list of current versions before a defined date', done => { + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'before-date': date, 'max-keys': '1' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.NextMarker, 'oldkey0'); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual( + data.MaxScannedLifecycleListingEntries, + config.maxScannedLifecycleListingEntries + ); + assert.strictEqual(data.BeforeDate, date); + assert.strictEqual(data.Contents.length, 1); + + const contents = data.Contents; + checkContents(contents, expectedKeyVersions); + assert.strictEqual(contents[0].Key, 'oldkey0'); + return done(); + } + ); + }); - return done(); - }); + it('should return the next truncate list of current versions before a defined date', done => { + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'before-date': date, 'max-keys': '1', marker: 'oldkey0' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.Marker, 'oldkey0'); + assert.strictEqual(data.NextMarker, 'oldkey1'); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual( + data.MaxScannedLifecycleListingEntries, + config.maxScannedLifecycleListingEntries + ); + assert.strictEqual(data.Contents.length, 1); + + const contents = data.Contents; + checkContents(contents, expectedKeyVersions); + assert.strictEqual(contents[0].Key, 'oldkey1'); + assert.strictEqual(data.BeforeDate, date); + return done(); + } + ); }); - it('should return the current versions before a defined date', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 
'current', 'before-date': date }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); + it('should return the last truncate list of current versions before a defined date', done => { + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'before-date': date, 'max-keys': '1', marker: 'oldkey1' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual( + data.MaxScannedLifecycleListingEntries, + config.maxScannedLifecycleListingEntries + ); + assert.strictEqual(data.Marker, 'oldkey1'); + assert.strictEqual(data.BeforeDate, date); + + const contents = data.Contents; + assert.strictEqual(contents.length, 1); + checkContents(contents, expectedKeyVersions); + assert.strictEqual(contents[0].Key, 'oldkey2'); + return done(); + } + ); + }); + }); +}); - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 3); - assert.strictEqual(data.BeforeDate, date); +describe('listLifecycleCurrents with bucket versioning enabled and maxKeys', () => { + const testBucket = 'bucket-for-list-lifecycle-current-tests-truncated'; + let bucketUtil; + let s3; + const expectedKeyVersions = {}; - const contents = data.Contents; - checkContents(contents, expectedKeyVersions); - assert.strictEqual(contents[0].Key, 'oldkey0'); - assert.strictEqual(contents[1].Key, 'oldkey1'); - assert.strictEqual(contents[2].Key, 'oldkey2'); - return done(); - }); - }); + before(done => { + bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); + s3 = bucketUtil.s3; - it('should truncate list of current versions before a defined date', done => { - makeBackbeatRequest({ + return async.series( + [ + next => s3.createBucket({ Bucket: testBucket }, next), + next => + s3.putBucketVersioning( + { + Bucket: testBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => + async.times( + 3, + (n, cb) => { + const keyName = 'key0'; + s3.putObject( + { Bucket: testBucket, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, + (err, data) => { + if (err) { + cb(err); + } + expectedKeyVersions[keyName] = data.VersionId; + return cb(); + } + ); + }, + next + ), + next => + async.times( + 5, + (n, cb) => { + const keyName = 'key1'; + s3.putObject( + { Bucket: testBucket, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, + (err, data) => { + if (err) { + cb(err); + } + expectedKeyVersions[keyName] = data.VersionId; + return cb(); + } + ); + }, + next + ), + next => + async.times( + 3, + (n, cb) => { + const keyName = 'key2'; + s3.putObject( + { Bucket: testBucket, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, + (err, data) => { + if (err) { + cb(err); + } + expectedKeyVersions[keyName] = data.VersionId; + return cb(); + } + ); + }, + next + ), + ], + done + ); + }); + + after(done => + async.series( + [ + next => removeAllVersions({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: testBucket }, next), + ], + done + ) + ); + + it('should return truncated lists - part 1', done => { + 
makeBackbeatRequest( + { method: 'GET', bucket: testBucket, - queryObj: { 'list-type': 'current', 'before-date': date, 'max-keys': '1' }, + queryObj: { 'list-type': 'current', 'max-keys': '1' }, authCredentials: credentials, - }, (err, response) => { + }, + (err, response) => { assert.ifError(err); assert.strictEqual(response.statusCode, 200); const data = JSON.parse(response.body); assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.NextMarker, 'oldkey0'); + assert.strictEqual(data.NextMarker, 'key0'); assert.strictEqual(data.MaxKeys, 1); assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.BeforeDate, date); assert.strictEqual(data.Contents.length, 1); const contents = data.Contents; + assert.strictEqual(contents.length, 1); checkContents(contents, expectedKeyVersions); - assert.strictEqual(contents[0].Key, 'oldkey0'); + assert.strictEqual(contents[0].Key, 'key0'); return done(); - }); - }); + } + ); + }); - it('should return the next truncate list of current versions before a defined date', done => { - makeBackbeatRequest({ + it('should return truncated lists - part 2', done => { + makeBackbeatRequest( + { method: 'GET', bucket: testBucket, - queryObj: { 'list-type': 'current', 'before-date': date, 'max-keys': '1', 'marker': 'oldkey0' }, + queryObj: { + 'list-type': 'current', + 'max-keys': '1', + marker: 'key0', + }, authCredentials: credentials, - }, (err, response) => { + }, + (err, response) => { assert.ifError(err); assert.strictEqual(response.statusCode, 200); const data = JSON.parse(response.body); + assert.strictEqual(data.Marker, 'key0'); assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.Marker, 'oldkey0'); - assert.strictEqual(data.NextMarker, 'oldkey1'); + assert.strictEqual(data.NextMarker, 'key1'); assert.strictEqual(data.MaxKeys, 1); assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); assert.strictEqual(data.Contents.length, 1); const contents = data.Contents; + assert.strictEqual(contents.length, 1); checkContents(contents, expectedKeyVersions); - assert.strictEqual(contents[0].Key, 'oldkey1'); - assert.strictEqual(data.BeforeDate, date); + assert.strictEqual(contents[0].Key, 'key1'); return done(); - }); - }); + } + ); + }); - it('should return the last truncate list of current versions before a defined date', done => { - makeBackbeatRequest({ + it('should return truncated lists - part 3', done => { + makeBackbeatRequest( + { method: 'GET', bucket: testBucket, - queryObj: { 'list-type': 'current', 'before-date': date, 'max-keys': '1', 'marker': 'oldkey1' }, + queryObj: { + 'list-type': 'current', + 'max-keys': '1', + marker: 'key1', + }, authCredentials: credentials, - }, (err, response) => { + }, + (err, response) => { assert.ifError(err); assert.strictEqual(response.statusCode, 200); const data = JSON.parse(response.body); + assert(!data.NextMarker); assert.strictEqual(data.IsTruncated, false); + assert.strictEqual(data.Marker, 'key1'); assert.strictEqual(data.MaxKeys, 1); assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Marker, 'oldkey1'); - assert.strictEqual(data.BeforeDate, date); + assert.strictEqual(data.Contents.length, 1); const contents = data.Contents; assert.strictEqual(contents.length, 1); checkContents(contents, expectedKeyVersions); - assert.strictEqual(contents[0].Key, 'oldkey2'); + 
assert.strictEqual(contents[0].Key, 'key2'); return done(); - }); - }); - }); -}); - -describe('listLifecycleCurrents with bucket versioning enabled and maxKeys', () => { - const testBucket = 'bucket-for-list-lifecycle-current-tests-truncated'; - let bucketUtil; - let s3; - const expectedKeyVersions = {}; - - before(done => { - bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); - s3 = bucketUtil.s3; - - return async.series([ - next => s3.createBucket({ Bucket: testBucket }, next), - next => s3.putBucketVersioning({ - Bucket: testBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => async.times(3, (n, cb) => { - const keyName = 'key0'; - s3.putObject({ Bucket: testBucket, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, - (err, data) => { - if (err) { - cb(err); - } - expectedKeyVersions[keyName] = data.VersionId; - return cb(); - }); - }, next), - next => async.times(5, (n, cb) => { - const keyName = 'key1'; - s3.putObject({ Bucket: testBucket, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, - (err, data) => { - if (err) { - cb(err); - } - expectedKeyVersions[keyName] = data.VersionId; - return cb(); - }); - }, next), - next => async.times(3, (n, cb) => { - const keyName = 'key2'; - s3.putObject({ Bucket: testBucket, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, - (err, data) => { - if (err) { - cb(err); - } - expectedKeyVersions[keyName] = data.VersionId; - return cb(); - }); - }, next), - ], done); - }); - - after(done => async.series([ - next => removeAllVersions({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: testBucket }, next), - ], done)); - - it('should return truncated lists - part 1', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'max-keys': '1' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.NextMarker, 'key0'); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - assert.strictEqual(contents.length, 1); - checkContents(contents, expectedKeyVersions); - assert.strictEqual(contents[0].Key, 'key0'); - return done(); - }); - }); - - it('should return truncated lists - part 2', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 'list-type': 'current', - 'max-keys': '1', - 'marker': 'key0', - }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.Marker, 'key0'); - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.NextMarker, 'key1'); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - assert.strictEqual(contents.length, 1); - checkContents(contents, expectedKeyVersions); - assert.strictEqual(contents[0].Key, 'key1'); - return done(); - }); - }); - - it('should return truncated lists - part 3', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 
'list-type': 'current', - 'max-keys': '1', - 'marker': 'key1', - }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert(!data.NextMarker); - assert.strictEqual(data.IsTruncated, false); - assert.strictEqual(data.Marker, 'key1'); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - assert.strictEqual(contents.length, 1); - checkContents(contents, expectedKeyVersions); - assert.strictEqual(contents[0].Key, 'key2'); - return done(); - }); + } + ); }); }); - describe('listLifecycleCurrents with bucket versioning enabled and delete object', () => { const testBucket = 'bucket-for-list-lifecycle-current-tests-truncated'; const keyName0 = 'key0'; @@ -564,99 +710,129 @@ describe('listLifecycleCurrents with bucket versioning enabled and delete object bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - return async.series([ - next => s3.createBucket({ Bucket: testBucket }, next), - next => s3.putBucketVersioning({ - Bucket: testBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => { - s3.putObject({ Bucket: testBucket, Key: keyName0, Body: '123', Tagging: 'mykey=myvalue' }, - (err, data) => { - if (err) { - next(err); - } - expectedKeyVersions[keyName0] = data.VersionId; - return next(); - }); - }, - next => s3.putObject({ Bucket: testBucket, Key: keyName1, Body: '123', Tagging: 'mykey=myvalue' }, next), - next => s3.deleteObject({ Bucket: testBucket, Key: keyName1 }, next), - next => s3.putObject({ Bucket: testBucket, Key: keyName2, Body: '123', Tagging: 'mykey=myvalue' }, - (err, data) => { - if (err) { - next(err); - } - expectedKeyVersions[keyName2] = data.VersionId; - return next(); - }), - next => s3.putObject({ Bucket: testBucket, Key: keyName2, Body: '123', Tagging: 'mykey=myvalue' }, - (err, data) => { - if (err) { - return next(err); - } - return s3.deleteObject({ Bucket: testBucket, Key: keyName2, VersionId: data.VersionId }, next); - }), - ], done); + return async.series( + [ + next => s3.createBucket({ Bucket: testBucket }, next), + next => + s3.putBucketVersioning( + { + Bucket: testBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => { + s3.putObject( + { Bucket: testBucket, Key: keyName0, Body: '123', Tagging: 'mykey=myvalue' }, + (err, data) => { + if (err) { + next(err); + } + expectedKeyVersions[keyName0] = data.VersionId; + return next(); + } + ); + }, + next => + s3.putObject({ Bucket: testBucket, Key: keyName1, Body: '123', Tagging: 'mykey=myvalue' }, next), + next => s3.deleteObject({ Bucket: testBucket, Key: keyName1 }, next), + next => + s3.putObject( + { Bucket: testBucket, Key: keyName2, Body: '123', Tagging: 'mykey=myvalue' }, + (err, data) => { + if (err) { + next(err); + } + expectedKeyVersions[keyName2] = data.VersionId; + return next(); + } + ), + next => + s3.putObject( + { Bucket: testBucket, Key: keyName2, Body: '123', Tagging: 'mykey=myvalue' }, + (err, data) => { + if (err) { + return next(err); + } + return s3.deleteObject( + { Bucket: testBucket, Key: keyName2, VersionId: data.VersionId }, + next + ); + } + ), + ], + done + ); }); - after(done => async.series([ - next => removeAllVersions({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: testBucket 
}, next), - ], done)); + after(done => + async.series( + [ + next => removeAllVersions({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: testBucket }, next), + ], + done + ) + ); it('should return truncated lists - part 1', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current', 'max-keys': '1' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.NextMarker, keyName0); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - assert.strictEqual(contents.length, 1); - checkContents(contents, expectedKeyVersions); - assert.strictEqual(contents[0].Key, keyName0); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current', 'max-keys': '1' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.NextMarker, keyName0); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Contents.length, 1); + + const contents = data.Contents; + assert.strictEqual(contents.length, 1); + checkContents(contents, expectedKeyVersions); + assert.strictEqual(contents[0].Key, keyName0); + return done(); + } + ); }); it('should return truncated lists - part 2', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 'list-type': 'current', - 'max-keys': '1', - 'marker': keyName0, + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'current', + 'max-keys': '1', + marker: keyName0, + }, + authCredentials: credentials, }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert(!data.NextMarker); - assert.strictEqual(data.IsTruncated, false); - assert.strictEqual(data.Marker, keyName0); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - assert.strictEqual(contents.length, 1); - checkContents(contents, expectedKeyVersions); - assert.strictEqual(contents[0].Key, keyName2); - return done(); - }); + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert(!data.NextMarker); + assert.strictEqual(data.IsTruncated, false); + assert.strictEqual(data.Marker, keyName0); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Contents.length, 1); + + const contents = data.Contents; + assert.strictEqual(contents.length, 1); + checkContents(contents, expectedKeyVersions); + assert.strictEqual(contents[0].Key, keyName2); + return done(); + } + ); }); }); diff --git 
a/tests/functional/backbeat/listLifecycleNonCurrents.js b/tests/functional/backbeat/listLifecycleNonCurrents.js index 089f605171..28078c6f38 100644 --- a/tests/functional/backbeat/listLifecycleNonCurrents.js +++ b/tests/functional/backbeat/listLifecycleNonCurrents.js @@ -26,10 +26,12 @@ function checkContents(contents) { assert(d.VersionId); assert(d.staleDate); assert(!d.IsLatest); - assert.deepStrictEqual(d.TagSet, [{ - Key: 'mykey', - Value: 'myvalue', - }]); + assert.deepStrictEqual(d.TagSet, [ + { + Key: 'mykey', + Value: 'myvalue', + }, + ]); assert.strictEqual(d.DataStoreName, 'us-east-1'); assert.strictEqual(d.ListType, 'noncurrent'); assert.strictEqual(d.Size, 3); @@ -47,572 +49,708 @@ describe('listLifecycleNonCurrents', () => { bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - return async.series([ - next => s3.createBucket({ Bucket: testBucket }, next), - next => s3.createBucket({ Bucket: emptyBucket }, next), - next => s3.createBucket({ Bucket: nonVersionedBucket }, next), - next => s3.putBucketVersioning({ - Bucket: testBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => s3.putBucketVersioning({ - Bucket: emptyBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => async.timesSeries(3, (n, cb) => { - s3.putObject({ Bucket: testBucket, Key: 'key1', Body: '123', Tagging: 'mykey=myvalue' }, cb); - }, (err, res) => { - // Only the two first ones are kept, since the stale date of the last one (3rd) - // Will be the last-modified of the next one (4th) that is created after the "date". - // The array is reverse since, for a specific key, we expect the listing to be ordered - // by last-modified date in descending order due to the way version id is generated. - expectedKey1VersionIds = res.map(r => r.VersionId).slice(0, 2).reverse(); - return next(err); - }), - next => async.timesSeries(3, (n, cb) => { - s3.putObject({ Bucket: testBucket, Key: 'key2', Body: '123', Tagging: 'mykey=myvalue' }, cb); - }, (err, res) => { - // Only the two first ones are kept, since the stale date of the last one (3rd) - // Will be the last-modified of the next one (4th) that is created after the "date". - // The array is reverse since, for a specific key, we expect the listing to be ordered - // by last-modified date in descending order due to the way version id is generated. 
- expectedKey2VersionIds = res.map(r => r.VersionId).slice(0, 2).reverse(); - return next(err); - }), - next => { - date = new Date(Date.now()).toISOString(); - return async.times(5, (n, cb) => { - s3.putObject({ Bucket: testBucket, Key: 'key1', Body: '123', Tagging: 'mykey=myvalue' }, cb); - }, next); - }, - next => async.times(5, (n, cb) => { - s3.putObject({ Bucket: testBucket, Key: 'key2', Body: '123', Tagging: 'mykey=myvalue' }, cb); - }, next), - ], done); + return async.series( + [ + next => s3.createBucket({ Bucket: testBucket }, next), + next => s3.createBucket({ Bucket: emptyBucket }, next), + next => s3.createBucket({ Bucket: nonVersionedBucket }, next), + next => + s3.putBucketVersioning( + { + Bucket: testBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => + s3.putBucketVersioning( + { + Bucket: emptyBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => + async.timesSeries( + 3, + (n, cb) => { + s3.putObject( + { Bucket: testBucket, Key: 'key1', Body: '123', Tagging: 'mykey=myvalue' }, + cb + ); + }, + (err, res) => { + // Only the two first ones are kept, since the stale date of the last one (3rd) + // Will be the last-modified of the next one (4th) that is created after the "date". + // The array is reverse since, for a specific key, we expect the listing to be ordered + // by last-modified date in descending order due to the way version id is generated. + expectedKey1VersionIds = res + .map(r => r.VersionId) + .slice(0, 2) + .reverse(); + return next(err); + } + ), + next => + async.timesSeries( + 3, + (n, cb) => { + s3.putObject( + { Bucket: testBucket, Key: 'key2', Body: '123', Tagging: 'mykey=myvalue' }, + cb + ); + }, + (err, res) => { + // Only the two first ones are kept, since the stale date of the last one (3rd) + // Will be the last-modified of the next one (4th) that is created after the "date". + // The array is reverse since, for a specific key, we expect the listing to be ordered + // by last-modified date in descending order due to the way version id is generated. 
+ expectedKey2VersionIds = res + .map(r => r.VersionId) + .slice(0, 2) + .reverse(); + return next(err); + } + ), + next => { + date = new Date(Date.now()).toISOString(); + return async.times( + 5, + (n, cb) => { + s3.putObject( + { Bucket: testBucket, Key: 'key1', Body: '123', Tagging: 'mykey=myvalue' }, + cb + ); + }, + next + ); + }, + next => + async.times( + 5, + (n, cb) => { + s3.putObject( + { Bucket: testBucket, Key: 'key2', Body: '123', Tagging: 'mykey=myvalue' }, + cb + ); + }, + next + ), + ], + done + ); }); - after(done => async.series([ - next => removeAllVersions({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: emptyBucket }, next), - next => s3.deleteBucket({ Bucket: nonVersionedBucket }, next), - ], done)); + after(done => + async.series( + [ + next => removeAllVersions({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: emptyBucket }, next), + next => s3.deleteBucket({ Bucket: nonVersionedBucket }, next), + ], + done + ) + ); it('should return empty list of noncurrent versions if bucket is empty', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: emptyBucket, - queryObj: { 'list-type': 'noncurrent' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 0); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: emptyBucket, + queryObj: { 'list-type': 'noncurrent' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Contents.length, 0); + return done(); + } + ); }); it('should return empty list of noncurrent versions if prefix does not apply', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'prefix': 'unknown' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 0); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', prefix: 'unknown' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, 
config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Contents.length, 0); + return done(); + } + ); }); it('should return empty list if max-keys is set to 0', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'max-keys': '0' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 0); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 0); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', 'max-keys': '0' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 0); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Contents.length, 0); + + return done(); + } + ); }); it('should return error if bucket does not exist', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: 'idonotexist', - queryObj: { 'list-type': 'noncurrent' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'NoSuchBucket'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: 'idonotexist', + queryObj: { 'list-type': 'noncurrent' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'NoSuchBucket'); + return done(); + } + ); }); it('should return BadRequest error if list type is empty', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': '' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'BadRequest'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': '' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'BadRequest'); + return done(); + } + ); }); it('should return BadRequest error if list type is invalid', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'invalid' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'BadRequest'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'invalid' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'BadRequest'); + return done(); + } + ); }); it('should return InvalidArgument error if max-keys is invalid', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'max-keys': 'a' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', 'max-keys': 'a' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument error if 
max-scanned-lifecycle-listing-entries is invalid', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'max-scanned-lifecycle-listing-entries': 'a' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', 'max-scanned-lifecycle-listing-entries': 'a' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument error if max-scanned-lifecycle-listing-entries is set to 0', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'max-scanned-lifecycle-listing-entries': '0' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', 'max-scanned-lifecycle-listing-entries': '0' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument error if max-scanned-lifecycle-listing-entries is set to 2', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'max-scanned-lifecycle-listing-entries': '2' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', 'max-scanned-lifecycle-listing-entries': '2' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument if max-scanned-lifecycle-listing-entries exceeds the default value', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'max-scanned-lifecycle-listing-entries': - (config.maxScannedLifecycleListingEntries + 1).toString() }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'noncurrent', + 'max-scanned-lifecycle-listing-entries': (config.maxScannedLifecycleListingEntries + 1).toString(), + }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return error if bucket not versioned', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: nonVersionedBucket, - queryObj: { 'list-type': 'noncurrent' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidRequest'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: nonVersionedBucket, + queryObj: { 'list-type': 'noncurrent' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidRequest'); + return done(); + } + ); }); it('should return all the noncurrent versions', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 
200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - - const contents = data.Contents; - assert.strictEqual(contents.length, 14); - checkContents(contents); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + + const contents = data.Contents; + assert.strictEqual(contents.length, 14); + checkContents(contents); + + return done(); + } + ); }); it('should return all the noncurrent versions scanned before max scanned entries value is reached', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'max-scanned-lifecycle-listing-entries': '9' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.NextKeyMarker, 'key1'); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, 9); - - const contents = data.Contents; - // only return the first 7 entries as the master version is denoted by 2 entries. - assert.strictEqual(contents.length, 7); - assert(contents.every(c => c.Key === 'key1')); - checkContents(contents); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', 'max-scanned-lifecycle-listing-entries': '9' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.NextKeyMarker, 'key1'); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, 9); + + const contents = data.Contents; + // only return the first 7 entries as the master version is denoted by 2 entries. 
+ assert.strictEqual(contents.length, 7); + assert(contents.every(c => c.Key === 'key1')); + checkContents(contents); + + return done(); + } + ); }); it('should return all the noncurrent versions with prefix key1', done => { const prefix = 'key1'; - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', prefix }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Prefix, prefix); - - const contents = data.Contents; - assert.strictEqual(contents.length, 7); - assert(contents.every(d => d.Key === 'key1')); - checkContents(contents); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', prefix }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Prefix, prefix); + + const contents = data.Contents; + assert.strictEqual(contents.length, 7); + assert(contents.every(d => d.Key === 'key1')); + checkContents(contents); + + return done(); + } + ); }); it('should return all the noncurrent versions with prefix key1 before a defined date', done => { const prefix = 'key1'; - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', prefix, 'before-date': date }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Prefix, prefix); - - const contents = data.Contents; - assert.strictEqual(contents.length, 2); - assert(contents.every(d => d.Key === 'key1')); - - assert.deepStrictEqual(contents.map(v => v.VersionId), expectedKey1VersionIds); - - checkContents(contents); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', prefix, 'before-date': date }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Prefix, prefix); + + const contents = data.Contents; + assert.strictEqual(contents.length, 2); + assert(contents.every(d => d.Key === 'key1')); + + assert.deepStrictEqual( + contents.map(v => v.VersionId), + expectedKey1VersionIds + ); + + checkContents(contents); + + return done(); + } + ); }); 
it('should return the noncurrent version with prefix, before a defined date and after key-marker', done => { const prefix = 'key2'; - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 'list-type': 'noncurrent', - prefix, - 'before-date': date, - 'key-marker': 'key1', + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'noncurrent', + prefix, + 'before-date': date, + 'key-marker': 'key1', + }, + authCredentials: credentials, }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Prefix, prefix); - - const contents = data.Contents; - assert.strictEqual(contents.length, 2); - assert(contents.every(d => d.Key === 'key2')); - - assert.deepStrictEqual(contents.map(v => v.VersionId), expectedKey2VersionIds); - - checkContents(contents); - - return done(); - }); + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Prefix, prefix); + + const contents = data.Contents; + assert.strictEqual(contents.length, 2); + assert(contents.every(d => d.Key === 'key2')); + + assert.deepStrictEqual( + contents.map(v => v.VersionId), + expectedKey2VersionIds + ); + + checkContents(contents); + + return done(); + } + ); }); it('should return the noncurrent version with prefix, before a defined date, and after version-id-marker', done => { const prefix = 'key2'; - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 'list-type': 'noncurrent', - prefix, - 'before-date': date, - 'key-marker': 'key2', - 'version-id-marker': expectedKey2VersionIds[0], + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'noncurrent', + prefix, + 'before-date': date, + 'key-marker': 'key2', + 'version-id-marker': expectedKey2VersionIds[0], + }, + authCredentials: credentials, }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Prefix, prefix); - - const contents = data.Contents; - assert.strictEqual(contents.length, 1); - assert(contents.every(d => d.Key === 'key2')); - contents[0].Key = 'key2'; - contents[0].VersionId = expectedKey2VersionIds[1]; - - checkContents(contents); - - return done(); - }); + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, 
config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Prefix, prefix); + + const contents = data.Contents; + assert.strictEqual(contents.length, 1); + assert(contents.every(d => d.Key === 'key2')); + contents[0].Key = 'key2'; + contents[0].VersionId = expectedKey2VersionIds[1]; + + checkContents(contents); + + return done(); + } + ); }); it('should return the non current versions before a defined date', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'before-date': date }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.BeforeDate, date); - - const contents = data.Contents; - assert.strictEqual(contents.length, 4); - checkContents(contents); - - const key1Versions = contents.filter(c => c.Key === 'key1'); - assert.strictEqual(key1Versions.length, 2); - - const key2Versions = contents.filter(c => c.Key === 'key2'); - assert.strictEqual(key2Versions.length, 2); - - assert.deepStrictEqual(key1Versions.map(v => v.VersionId), expectedKey1VersionIds); - assert.deepStrictEqual(key2Versions.map(v => v.VersionId), expectedKey2VersionIds); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', 'before-date': date }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.BeforeDate, date); + + const contents = data.Contents; + assert.strictEqual(contents.length, 4); + checkContents(contents); + + const key1Versions = contents.filter(c => c.Key === 'key1'); + assert.strictEqual(key1Versions.length, 2); + + const key2Versions = contents.filter(c => c.Key === 'key2'); + assert.strictEqual(key2Versions.length, 2); + + assert.deepStrictEqual( + key1Versions.map(v => v.VersionId), + expectedKey1VersionIds + ); + assert.deepStrictEqual( + key2Versions.map(v => v.VersionId), + expectedKey2VersionIds + ); + + return done(); + } + ); }); it('should truncate list of non current versions before a defined date', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent', 'before-date': date, 'max-keys': '1' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.NextKeyMarker, 'key1'); - assert.strictEqual(data.NextVersionIdMarker, expectedKey1VersionIds[0]); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.BeforeDate, date); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - checkContents(contents); - 
assert.strictEqual(contents[0].Key, 'key1'); - assert.strictEqual(contents[0].VersionId, expectedKey1VersionIds[0]); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent', 'before-date': date, 'max-keys': '1' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.NextKeyMarker, 'key1'); + assert.strictEqual(data.NextVersionIdMarker, expectedKey1VersionIds[0]); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.BeforeDate, date); + assert.strictEqual(data.Contents.length, 1); + + const contents = data.Contents; + checkContents(contents); + assert.strictEqual(contents[0].Key, 'key1'); + assert.strictEqual(contents[0].VersionId, expectedKey1VersionIds[0]); + return done(); + } + ); }); it('should return the first following list of noncurrent versions before a defined date', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 'list-type': 'noncurrent', - 'before-date': date, - 'max-keys': '1', - 'key-marker': 'key1', - 'version-id-marker': expectedKey1VersionIds[0], + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'noncurrent', + 'before-date': date, + 'max-keys': '1', + 'key-marker': 'key1', + 'version-id-marker': expectedKey1VersionIds[0], + }, + authCredentials: credentials, }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.KeyMarker, 'key1'); - assert.strictEqual(data.VersionIdMarker, expectedKey1VersionIds[0]); - assert.strictEqual(data.NextKeyMarker, 'key1'); - assert.strictEqual(data.NextVersionIdMarker, expectedKey1VersionIds[1]); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.BeforeDate, date); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - checkContents(contents); - assert.strictEqual(contents[0].Key, 'key1'); - assert.strictEqual(contents[0].VersionId, expectedKey1VersionIds[1]); - return done(); - }); + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.KeyMarker, 'key1'); + assert.strictEqual(data.VersionIdMarker, expectedKey1VersionIds[0]); + assert.strictEqual(data.NextKeyMarker, 'key1'); + assert.strictEqual(data.NextVersionIdMarker, expectedKey1VersionIds[1]); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.BeforeDate, date); + assert.strictEqual(data.Contents.length, 1); + + const contents = data.Contents; + checkContents(contents); + assert.strictEqual(contents[0].Key, 'key1'); + assert.strictEqual(contents[0].VersionId, expectedKey1VersionIds[1]); + return done(); + } + ); }); it('should return the second following list of noncurrent versions before a defined date', done => { - 
makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 'list-type': 'noncurrent', - 'before-date': date, - 'max-keys': '1', - 'key-marker': 'key1', - 'version-id-marker': expectedKey1VersionIds[1], + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'noncurrent', + 'before-date': date, + 'max-keys': '1', + 'key-marker': 'key1', + 'version-id-marker': expectedKey1VersionIds[1], + }, + authCredentials: credentials, }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.KeyMarker, 'key1'); - assert.strictEqual(data.VersionIdMarker, expectedKey1VersionIds[1]); - assert.strictEqual(data.NextKeyMarker, 'key2'); - assert.strictEqual(data.NextVersionIdMarker, expectedKey2VersionIds[0]); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.BeforeDate, date); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - checkContents(contents); - assert.strictEqual(contents[0].Key, 'key2'); - assert.strictEqual(contents[0].VersionId, expectedKey2VersionIds[0]); - return done(); - }); + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.KeyMarker, 'key1'); + assert.strictEqual(data.VersionIdMarker, expectedKey1VersionIds[1]); + assert.strictEqual(data.NextKeyMarker, 'key2'); + assert.strictEqual(data.NextVersionIdMarker, expectedKey2VersionIds[0]); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.BeforeDate, date); + assert.strictEqual(data.Contents.length, 1); + + const contents = data.Contents; + checkContents(contents); + assert.strictEqual(contents[0].Key, 'key2'); + assert.strictEqual(contents[0].VersionId, expectedKey2VersionIds[0]); + return done(); + } + ); }); it('should return the last and third following list of noncurrent versions before a defined date', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 'list-type': 'noncurrent', - 'before-date': date, - 'max-keys': '1', - 'key-marker': 'key2', - 'version-id-marker': expectedKey2VersionIds[0], + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'noncurrent', + 'before-date': date, + 'max-keys': '1', + 'key-marker': 'key2', + 'version-id-marker': expectedKey2VersionIds[0], + }, + authCredentials: credentials, }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert.strictEqual(data.KeyMarker, 'key2'); - assert.strictEqual(data.VersionIdMarker, expectedKey2VersionIds[0]); - assert(!data.NextKeyMarker); - assert(!data.NextVersionIdMarker); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.BeforeDate, date); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - 
checkContents(contents); - assert.strictEqual(contents[0].Key, 'key2'); - assert.strictEqual(contents[0].VersionId, expectedKey2VersionIds[1]); - return done(); - }); + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert.strictEqual(data.KeyMarker, 'key2'); + assert.strictEqual(data.VersionIdMarker, expectedKey2VersionIds[0]); + assert(!data.NextKeyMarker); + assert(!data.NextVersionIdMarker); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.BeforeDate, date); + assert.strictEqual(data.Contents.length, 1); + + const contents = data.Contents; + checkContents(contents); + assert.strictEqual(contents[0].Key, 'key2'); + assert.strictEqual(contents[0].VersionId, expectedKey2VersionIds[1]); + return done(); + } + ); }); }); diff --git a/tests/functional/backbeat/listLifecycleOrphanDeleteMarkers.js b/tests/functional/backbeat/listLifecycleOrphanDeleteMarkers.js index 09c5e9e19a..5fea4a3810 100644 --- a/tests/functional/backbeat/listLifecycleOrphanDeleteMarkers.js +++ b/tests/functional/backbeat/listLifecycleOrphanDeleteMarkers.js @@ -32,26 +32,35 @@ function checkContents(contents) { } function createDeleteMarker(s3, bucketName, keyName, cb) { - return async.series([ - next => s3.putObject({ Bucket: bucketName, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, next), - next => s3.deleteObject({ Bucket: bucketName, Key: keyName }, next), - ], cb); + return async.series( + [ + next => s3.putObject({ Bucket: bucketName, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, next), + next => s3.deleteObject({ Bucket: bucketName, Key: keyName }, next), + ], + cb + ); } function createOrphanDeleteMarker(s3, bucketName, keyName, cb) { let versionId; - return async.series([ - next => s3.putObject({ Bucket: bucketName, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, - (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - next => s3.deleteObject({ Bucket: bucketName, Key: keyName }, next), - next => s3.deleteObject({ Bucket: bucketName, Key: keyName, VersionId: versionId }, next), - ], cb); + return async.series( + [ + next => + s3.putObject( + { Bucket: bucketName, Key: keyName, Body: '123', Tagging: 'mykey=myvalue' }, + (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + } + ), + next => s3.deleteObject({ Bucket: bucketName, Key: keyName }, next), + next => s3.deleteObject({ Bucket: bucketName, Key: keyName, VersionId: versionId }, next), + ], + cb + ); } describe('listLifecycleOrphanDeleteMarkers', () => { @@ -63,442 +72,523 @@ describe('listLifecycleOrphanDeleteMarkers', () => { bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - return async.series([ - next => s3.createBucket({ Bucket: testBucket }, next), - next => s3.createBucket({ Bucket: emptyBucket }, next), - next => s3.createBucket({ Bucket: nonVersionedBucket }, next), - next => s3.putBucketVersioning({ - Bucket: testBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => s3.putBucketVersioning({ - Bucket: emptyBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => async.times(3, (n, cb) => { - createOrphanDeleteMarker(s3, testBucket, `key${n}old`, cb); - }, next), - next => 
createDeleteMarker(s3, testBucket, 'no-orphan-delete-marker', next), - next => { - date = new Date(Date.now()).toISOString(); - return async.times(5, (n, cb) => { - createOrphanDeleteMarker(s3, testBucket, `key${n}`, cb); - }, next); - }, - ], done); + return async.series( + [ + next => s3.createBucket({ Bucket: testBucket }, next), + next => s3.createBucket({ Bucket: emptyBucket }, next), + next => s3.createBucket({ Bucket: nonVersionedBucket }, next), + next => + s3.putBucketVersioning( + { + Bucket: testBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => + s3.putBucketVersioning( + { + Bucket: emptyBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => + async.times( + 3, + (n, cb) => { + createOrphanDeleteMarker(s3, testBucket, `key${n}old`, cb); + }, + next + ), + next => createDeleteMarker(s3, testBucket, 'no-orphan-delete-marker', next), + next => { + date = new Date(Date.now()).toISOString(); + return async.times( + 5, + (n, cb) => { + createOrphanDeleteMarker(s3, testBucket, `key${n}`, cb); + }, + next + ); + }, + ], + done + ); }); - after(done => async.series([ - next => removeAllVersions({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: emptyBucket }, next), - next => s3.deleteBucket({ Bucket: nonVersionedBucket }, next), - ], done)); + after(done => + async.series( + [ + next => removeAllVersions({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: emptyBucket }, next), + next => s3.deleteBucket({ Bucket: nonVersionedBucket }, next), + ], + done + ) + ); it('should return empty list of orphan delete markers if bucket is empty', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: emptyBucket, - queryObj: { 'list-type': 'orphan' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 0); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: emptyBucket, + queryObj: { 'list-type': 'orphan' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Contents.length, 0); + + return done(); + } + ); }); it('should return empty list of orphan delete markers if prefix does not apply', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'prefix': 'unknown' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - 
assert.strictEqual(data.Contents.length, 0); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', prefix: 'unknown' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Contents.length, 0); + + return done(); + } + ); }); it('should return empty list if max-keys is set to 0', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'max-keys': '0' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 0); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 0); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', 'max-keys': '0' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 0); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Contents.length, 0); + + return done(); + } + ); }); it('should return InvalidArgument error if max-keys is invalid', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'max-keys': 'a' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', 'max-keys': 'a' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument error if max-scanned-lifecycle-listing-entries is invalid', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': 'a' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': 'a' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument error if max-scanned-lifecycle-listing-entries is set to 0', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': '0' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: 
testBucket, + queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': '0' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument error if max-scanned-lifecycle-listing-entries is set to 2', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': '2' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': '2' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return InvalidArgument if max-scanned-lifecycle-listing-entries exceeds the default value', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': - (config.maxScannedLifecycleListingEntries + 1).toString() }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'orphan', + 'max-scanned-lifecycle-listing-entries': (config.maxScannedLifecycleListingEntries + 1).toString(), + }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + return done(); + } + ); }); it('should return error if bucket does not exist', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: 'idonotexist', - queryObj: { 'list-type': 'orphan' }, - authCredentials: credentials, - }, err => { - assert.strictEqual(err.code, 'NoSuchBucket'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: 'idonotexist', + queryObj: { 'list-type': 'orphan' }, + authCredentials: credentials, + }, + err => { + assert.strictEqual(err.code, 'NoSuchBucket'); + return done(); + } + ); }); it('should return all the orphan delete markers', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - - const contents = data.Contents; - assert.strictEqual(contents.length, 8); - checkContents(contents); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + + const contents = data.Contents; + assert.strictEqual(contents.length, 8); + checkContents(contents); + + return done(); + } + ); }); it('should only return delete marker that passed the 
full keys evaluation to prevent false positives', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': '4' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, 4); - - // Depending on the metadata bucket key format, the orphan delete marker is denoted by 1 or 2 entries, - // which results in a difference in the number of keys scanned and, consequently, - // affects the value of the NextMarker and Contents. - const contents = data.Contents; - const nextMarker = data.NextMarker; - - if (process.env.DEFAULT_BUCKET_KEY_FORMAT === 'v1') { - // With v1 metadata bucket key format, master key is automaticaly deleted - // when the last version of an object is a delete marker - assert.strictEqual(nextMarker, 'key1'); - assert.strictEqual(contents.length, 3); - assert.strictEqual(contents[0].Key, 'key0'); - assert.strictEqual(contents[1].Key, 'key0old'); - assert.strictEqual(contents[2].Key, 'key1'); - } else { - assert.strictEqual(nextMarker, 'key0'); - assert.strictEqual(contents.length, 1); - assert.strictEqual(contents[0].Key, 'key0'); - } - checkContents(contents); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': '4' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, 4); + + // Depending on the metadata bucket key format, the orphan delete marker is denoted by 1 or 2 entries, + // which results in a difference in the number of keys scanned and, consequently, + // affects the value of the NextMarker and Contents. 
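The branch on DEFAULT_BUCKET_KEY_FORMAT below reflects how the two metadata key layouts represent the same orphan delete marker: as the comment above notes, it is denoted by 1 or 2 entries, i.e. under v0 it is reachable through a master key plus a version key, while under v1 the master key is dropped once the latest version is a delete marker, leaving only the version key. A rough, illustrative sketch of that accounting (the function and return values here are hypothetical, not the real metadata layout):

// Illustrative only: approximate how many listing entries a single orphan
// delete marker contributes under each assumed metadata key format.
function entriesScannedForOrphanDeleteMarker(keyFormat) {
    if (keyFormat === 'v1') {
        // assumed v1 layout: the master key is removed when the latest
        // version is a delete marker, so only the version key is scanned
        return 1;
    }
    // assumed v0 layout: master key plus version key are both scanned
    return 2;
}

console.log(entriesScannedForOrphanDeleteMarker('v0')); // 2
console.log(entriesScannedForOrphanDeleteMarker('v1')); // 1

Under a fixed max-scanned-lifecycle-listing-entries budget, a v1 bucket therefore progresses further through the key space before the listing is truncated, which is why NextMarker and Contents differ between the two branches of the assertions that follow.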
+ const contents = data.Contents; + const nextMarker = data.NextMarker; + + if (process.env.DEFAULT_BUCKET_KEY_FORMAT === 'v1') { + // With v1 metadata bucket key format, master key is automaticaly deleted + // when the last version of an object is a delete marker + assert.strictEqual(nextMarker, 'key1'); + assert.strictEqual(contents.length, 3); + assert.strictEqual(contents[0].Key, 'key0'); + assert.strictEqual(contents[1].Key, 'key0old'); + assert.strictEqual(contents[2].Key, 'key1'); + } else { + assert.strictEqual(nextMarker, 'key0'); + assert.strictEqual(contents.length, 1); + assert.strictEqual(contents[0].Key, 'key0'); + } + checkContents(contents); - return done(); - }); + return done(); + } + ); }); it('should return all the orphan delete markers before max scanned entries value is reached', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': '3' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, 3); - - // Depending on the metadata bucket key format, the orphan delete marker is denoted by 1 or 2 entries, - // which results in a difference in the number of keys scanned and, consequently, - // affects the value of the NextMarker and Contents. - const contents = data.Contents; - const nextMarker = data.NextMarker; - - if (process.env.DEFAULT_BUCKET_KEY_FORMAT === 'v1') { - // With v1 metadata bucket key format, master key is automaticaly deleted - // when the last version of an object is a delete marker - assert.strictEqual(nextMarker, 'key0old'); - assert.strictEqual(contents.length, 2); - assert.strictEqual(contents[0].Key, 'key0'); - assert.strictEqual(contents[1].Key, 'key0old'); - } else { - assert.strictEqual(nextMarker, 'key0'); - assert.strictEqual(contents.length, 1); - assert.strictEqual(contents[0].Key, 'key0'); - } - checkContents(contents); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', 'max-scanned-lifecycle-listing-entries': '3' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, 3); + + // Depending on the metadata bucket key format, the orphan delete marker is denoted by 1 or 2 entries, + // which results in a difference in the number of keys scanned and, consequently, + // affects the value of the NextMarker and Contents. 
+ const contents = data.Contents; + const nextMarker = data.NextMarker; + + if (process.env.DEFAULT_BUCKET_KEY_FORMAT === 'v1') { + // With v1 metadata bucket key format, master key is automaticaly deleted + // when the last version of an object is a delete marker + assert.strictEqual(nextMarker, 'key0old'); + assert.strictEqual(contents.length, 2); + assert.strictEqual(contents[0].Key, 'key0'); + assert.strictEqual(contents[1].Key, 'key0old'); + } else { + assert.strictEqual(nextMarker, 'key0'); + assert.strictEqual(contents.length, 1); + assert.strictEqual(contents[0].Key, 'key0'); + } + checkContents(contents); - return done(); - }); + return done(); + } + ); }); it('should return all the orphan delete markers with prefix key1', done => { const prefix = 'key1'; - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', prefix }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Prefix, prefix); - - const contents = data.Contents; - assert.strictEqual(contents.length, 2); - checkContents(contents); - assert.strictEqual(contents[0].Key, 'key1'); - assert.strictEqual(contents[1].Key, 'key1old'); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', prefix }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Prefix, prefix); + + const contents = data.Contents; + assert.strictEqual(contents.length, 2); + checkContents(contents); + assert.strictEqual(contents[0].Key, 'key1'); + assert.strictEqual(contents[1].Key, 'key1old'); + + return done(); + } + ); }); it('should return the orphan delete markers before a defined date', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 'list-type': 'orphan', - 'before-date': date, + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'orphan', + 'before-date': date, + }, + authCredentials: credentials, }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 3); - assert.strictEqual(data.BeforeDate, date); - - const contents = data.Contents; - checkContents(contents); - assert.strictEqual(contents[0].Key, 'key0old'); - assert.strictEqual(contents[1].Key, 'key1old'); - assert.strictEqual(contents[2].Key, 'key2old'); - - return done(); - }); + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = 
JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Contents.length, 3); + assert.strictEqual(data.BeforeDate, date); + + const contents = data.Contents; + checkContents(contents); + assert.strictEqual(contents[0].Key, 'key0old'); + assert.strictEqual(contents[1].Key, 'key1old'); + assert.strictEqual(contents[2].Key, 'key2old'); + + return done(); + } + ); }); it('should truncate list of orphan delete markers before a defined date', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { - 'list-type': 'orphan', - 'before-date': date, - 'max-keys': '1', + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { + 'list-type': 'orphan', + 'before-date': date, + 'max-keys': '1', + }, + authCredentials: credentials, }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.NextMarker, 'key0old'); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.BeforeDate, date); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - checkContents(contents); - assert.strictEqual(contents[0].Key, 'key0old'); - - return done(); - }); + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.NextMarker, 'key0old'); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.BeforeDate, date); + assert.strictEqual(data.Contents.length, 1); + + const contents = data.Contents; + checkContents(contents); + assert.strictEqual(contents[0].Key, 'key0old'); + + return done(); + } + ); }); it('should return the second truncate list of orphan delete markers before a defined date', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'before-date': date, 'max-keys': '1', 'marker': 'key0old' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.Marker, 'key0old'); - assert.strictEqual(data.NextMarker, 'key1old'); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Contents.length, 1); - - const contents = data.Contents; - checkContents(contents); - assert.strictEqual(contents[0].Key, 'key1old'); - assert.strictEqual(data.BeforeDate, date); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', 'before-date': date, 'max-keys': '1', marker: 'key0old' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + 
assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.Marker, 'key0old'); + assert.strictEqual(data.NextMarker, 'key1old'); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Contents.length, 1); + + const contents = data.Contents; + checkContents(contents); + assert.strictEqual(contents[0].Key, 'key1old'); + assert.strictEqual(data.BeforeDate, date); + + return done(); + } + ); }); it('should return the third truncate list of orphan delete markers before a defined date', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'before-date': date, 'max-keys': '1', 'marker': 'key1old' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, true); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Marker, 'key1old'); - assert.strictEqual(data.BeforeDate, date); - assert.strictEqual(data.NextMarker, 'key2old'); - - const contents = data.Contents; - assert.strictEqual(contents.length, 1); - checkContents(contents); - assert.strictEqual(contents[0].Key, 'key2old'); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', 'before-date': date, 'max-keys': '1', marker: 'key1old' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, true); + assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Marker, 'key1old'); + assert.strictEqual(data.BeforeDate, date); + assert.strictEqual(data.NextMarker, 'key2old'); + + const contents = data.Contents; + assert.strictEqual(contents.length, 1); + checkContents(contents); + assert.strictEqual(contents[0].Key, 'key2old'); + + return done(); + } + ); }); it('should return the fourth and last truncate list of orphan delete markers before a defined date', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'orphan', 'before-date': date, 'max-keys': '1', 'marker': 'key2old' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - - const data = JSON.parse(response.body); - assert.strictEqual(data.IsTruncated, false); - assert.strictEqual(data.MaxKeys, 1); - assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); - assert.strictEqual(data.Marker, 'key2old'); - assert.strictEqual(data.BeforeDate, date); - - const contents = data.Contents; - assert.strictEqual(contents.length, 0); - - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'orphan', 'before-date': date, 'max-keys': '1', marker: 'key2old' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + + const data = JSON.parse(response.body); + assert.strictEqual(data.IsTruncated, false); + 
assert.strictEqual(data.MaxKeys, 1); + assert.strictEqual(data.MaxScannedLifecycleListingEntries, config.maxScannedLifecycleListingEntries); + assert.strictEqual(data.Marker, 'key2old'); + assert.strictEqual(data.BeforeDate, date); + + const contents = data.Contents; + assert.strictEqual(contents.length, 0); + + return done(); + } + ); }); }); diff --git a/tests/functional/backbeat/listNullVersion.js b/tests/functional/backbeat/listNullVersion.js index e7494a061e..11bb31b5b9 100644 --- a/tests/functional/backbeat/listNullVersion.js +++ b/tests/functional/backbeat/listNullVersion.js @@ -20,86 +20,106 @@ describe('listLifecycle if null version', () => { bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - return async.series([ - next => s3.createBucket({ Bucket: testBucket }, next), - next => s3.putObject({ Bucket: testBucket, Key: 'key1', Body: '123' }, next), - next => s3.putObject({ Bucket: testBucket, Key: 'key2', Body: '123' }, next), - next => s3.putBucketVersioning({ - Bucket: testBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => s3.putObject({ Bucket: testBucket, Key: 'key1', Body: '123' }, (err, data) => { - if (err) { - return next(err); - } - // delete version to create a null current version for key1. - return s3.deleteObject({ Bucket: testBucket, Key: 'key1', VersionId: data.VersionId }, next); - }), - next => s3.putObject({ Bucket: testBucket, Key: 'key2', Body: '123' }, (err, data) => { - if (err) { - return next(err); - } - versionForKey2 = data.VersionId; - return next(); - }), - ], done); + return async.series( + [ + next => s3.createBucket({ Bucket: testBucket }, next), + next => s3.putObject({ Bucket: testBucket, Key: 'key1', Body: '123' }, next), + next => s3.putObject({ Bucket: testBucket, Key: 'key2', Body: '123' }, next), + next => + s3.putBucketVersioning( + { + Bucket: testBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => + s3.putObject({ Bucket: testBucket, Key: 'key1', Body: '123' }, (err, data) => { + if (err) { + return next(err); + } + // delete version to create a null current version for key1. 
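In this setup, key1 ends up with a null current version because it was first written while the bucket was still unversioned (so that copy carries the literal version id 'null'), and the version created after versioning was enabled is deleted again, which leaves the original null entry as the current version; key2 instead keeps its new, real version id, and its pre-versioning copy becomes noncurrent. A minimal standalone sketch of that sequence, assuming an aws-sdk v2 S3 client named s3 and an already existing bucket:

// Illustrative sketch only; mirrors the setup used by these tests.
const async = require('async');

function createNullCurrentVersion(s3, bucket, key, cb) {
    async.waterfall([
        // 1. put while the bucket is unversioned: this copy has version id 'null'
        next => s3.putObject({ Bucket: bucket, Key: key, Body: '123' }, err => next(err)),
        // 2. enable versioning on the bucket
        next => s3.putBucketVersioning({
            Bucket: bucket,
            VersioningConfiguration: { Status: 'Enabled' },
        }, err => next(err)),
        // 3. put a new, versioned copy of the same key...
        next => s3.putObject({ Bucket: bucket, Key: key, Body: '456' }, next),
        // 4. ...then delete that specific version, so the 'null' version
        //    becomes the current version again
        (data, next) => s3.deleteObject({ Bucket: bucket, Key: key, VersionId: data.VersionId }, err => next(err)),
    ], cb);
}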
+ return s3.deleteObject({ Bucket: testBucket, Key: 'key1', VersionId: data.VersionId }, next); + }), + next => + s3.putObject({ Bucket: testBucket, Key: 'key2', Body: '123' }, (err, data) => { + if (err) { + return next(err); + } + versionForKey2 = data.VersionId; + return next(); + }), + ], + done + ); }); - after(done => async.series([ - next => removeAllVersions({ Bucket: testBucket }, next), - next => s3.deleteBucket({ Bucket: testBucket }, next), - ], done)); + after(done => + async.series( + [ + next => removeAllVersions({ Bucket: testBucket }, next), + next => s3.deleteBucket({ Bucket: testBucket }, next), + ], + done + ) + ); it('should return the null noncurrent versions', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'noncurrent' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - - const contents = data.Contents; - assert.strictEqual(contents.length, 1); - assert.strictEqual(contents[0].Key, 'key2'); - assert.strictEqual(contents[0].VersionId, 'null'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'noncurrent' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + + const contents = data.Contents; + assert.strictEqual(contents.length, 1); + assert.strictEqual(contents[0].Key, 'key2'); + assert.strictEqual(contents[0].VersionId, 'null'); + return done(); + } + ); }); it('should return the null current versions', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: testBucket, - queryObj: { 'list-type': 'current' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - - const contents = data.Contents; - assert.strictEqual(contents.length, 2); - - const firstKey = contents[0]; - assert.strictEqual(firstKey.Key, 'key1'); - assert.strictEqual(firstKey.VersionId, 'null'); - - const secondKey = contents[1]; - assert.strictEqual(secondKey.Key, 'key2'); - assert.strictEqual(secondKey.VersionId, versionForKey2); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: testBucket, + queryObj: { 'list-type': 'current' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + + const contents = data.Contents; + assert.strictEqual(contents.length, 2); + + const firstKey = contents[0]; + assert.strictEqual(firstKey.Key, 'key1'); + assert.strictEqual(firstKey.VersionId, 'null'); + + const secondKey = contents[1]; + assert.strictEqual(secondKey.Key, 'key2'); + assert.strictEqual(secondKey.VersionId, versionForKey2); + return done(); + } + ); }); }); @@ -114,73 +134,96 @@ 
describe('listLifecycle with null current version after versioning suspended', ( bucketUtil = new BucketUtility('account1', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - return async.series([ - next => s3.createBucket({ Bucket: nullObjectBucket }, next), - next => s3.putBucketVersioning({ - Bucket: nullObjectBucket, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - next => s3.putObject({ Bucket: nullObjectBucket, Key: keyName }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); - }), - next => s3.putBucketVersioning({ - Bucket: nullObjectBucket, - VersioningConfiguration: { Status: 'Suspended' }, - }, next), - next => s3.putObject({ Bucket: nullObjectBucket, Key: keyName }, next), - ], done); + return async.series( + [ + next => s3.createBucket({ Bucket: nullObjectBucket }, next), + next => + s3.putBucketVersioning( + { + Bucket: nullObjectBucket, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + next => + s3.putObject({ Bucket: nullObjectBucket, Key: keyName }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + next => + s3.putBucketVersioning( + { + Bucket: nullObjectBucket, + VersioningConfiguration: { Status: 'Suspended' }, + }, + next + ), + next => s3.putObject({ Bucket: nullObjectBucket, Key: keyName }, next), + ], + done + ); }); - after(done => async.series([ - next => removeAllVersions({ Bucket: nullObjectBucket }, next), - next => s3.deleteBucket({ Bucket: nullObjectBucket }, next), - ], done)); + after(done => + async.series( + [ + next => removeAllVersions({ Bucket: nullObjectBucket }, next), + next => s3.deleteBucket({ Bucket: nullObjectBucket }, next), + ], + done + ) + ); it('should return list of current versions when bucket has a null current version', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: nullObjectBucket, - queryObj: { 'list-type': 'current' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.Contents.length, 1); - const key = data.Contents[0]; - assert.strictEqual(key.Key, keyName); - assert.strictEqual(key.VersionId, 'null'); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: nullObjectBucket, + queryObj: { 'list-type': 'current' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.Contents.length, 1); + const key = data.Contents[0]; + assert.strictEqual(key.Key, keyName); + assert.strictEqual(key.VersionId, 'null'); + return done(); + } + ); }); it('should return list of non-current versions when bucket has a null current version', done => { - makeBackbeatRequest({ - method: 'GET', - bucket: nullObjectBucket, - queryObj: { 'list-type': 'noncurrent' }, - authCredentials: credentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - const data = JSON.parse(response.body); - - assert.strictEqual(data.IsTruncated, false); - assert(!data.NextKeyMarker); - 
assert.strictEqual(data.MaxKeys, 1000); - assert.strictEqual(data.Contents.length, 1); - const key = data.Contents[0]; - assert.strictEqual(key.Key, keyName); - assert.strictEqual(key.VersionId, expectedVersionId); - return done(); - }); + makeBackbeatRequest( + { + method: 'GET', + bucket: nullObjectBucket, + queryObj: { 'list-type': 'noncurrent' }, + authCredentials: credentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + const data = JSON.parse(response.body); + + assert.strictEqual(data.IsTruncated, false); + assert(!data.NextKeyMarker); + assert.strictEqual(data.MaxKeys, 1000); + assert.strictEqual(data.Contents.length, 1); + const key = data.Contents[0]; + assert.strictEqual(key.Key, keyName); + assert.strictEqual(key.VersionId, expectedVersionId); + return done(); + } + ); }); }); diff --git a/tests/functional/backbeat/utils.js b/tests/functional/backbeat/utils.js index 9e460e6503..d9c349a48a 100644 --- a/tests/functional/backbeat/utils.js +++ b/tests/functional/backbeat/utils.js @@ -1,7 +1,9 @@ const { makeRequest } = require('../raw-node/utils/makeRequest'); const ipAddress = process.env.IP ? process.env.IP : '127.0.0.1'; -const { models: { ObjectMD } } = require('arsenal'); +const { + models: { ObjectMD }, +} = require('arsenal'); // NOTE: The routes "getMetadata" and "putMetadata" are utilized for modifying the metadata of an object. // This approach is preferred over directly updating the metadata in MongoDB, diff --git a/tests/functional/healthchecks/package.json b/tests/functional/healthchecks/package.json index 87380915e2..ebfbdb8c70 100644 --- a/tests/functional/healthchecks/package.json +++ b/tests/functional/healthchecks/package.json @@ -1,16 +1,15 @@ { - "name": "Test-healthcheck", - "version": "0.1.0", - "description": "Test-healthcheck", - "main": "tests.js", - "repository": "", - "keywords": [ - "test" - ], - "scripts": { - "test": "mocha -t 40000 test/ --exit", - "test-debug": "_mocha -t 40000 test/ --exit" - }, - "author": "" + "name": "Test-healthcheck", + "version": "0.1.0", + "description": "Test-healthcheck", + "main": "tests.js", + "repository": "", + "keywords": [ + "test" + ], + "scripts": { + "test": "mocha -t 40000 test/ --exit", + "test-debug": "_mocha -t 40000 test/ --exit" + }, + "author": "" } - diff --git a/tests/functional/healthchecks/test/checkRoutes.js b/tests/functional/healthchecks/test/checkRoutes.js index fbe0d90015..7ce9302cfe 100644 --- a/tests/functional/healthchecks/test/checkRoutes.js +++ b/tests/functional/healthchecks/test/checkRoutes.js @@ -110,8 +110,7 @@ describe('Healthcheck stats', () => { const totalReqs = 5; beforeEach(done => { redis.flushdb(() => { - async.timesSeries(totalReqs, - (n, next) => makeDummyS3Request(next), done); + async.timesSeries(totalReqs, (n, next) => makeDummyS3Request(next), done); }); }); @@ -124,11 +123,9 @@ describe('Healthcheck stats', () => { if (err) { return done(err); } - const expectedStatsRes = { 'requests': totalReqs, '500s': 0, - 'sampleDuration': 30 }; + const expectedStatsRes = { requests: totalReqs, '500s': 0, sampleDuration: 30 }; assert.deepStrictEqual(JSON.parse(res), expectedStatsRes); return done(); }); - }, 500) - ); + }, 500)); }); diff --git a/tests/functional/kmip/serverside_encryption.js b/tests/functional/kmip/serverside_encryption.js index 9ce02c6428..3d995023cd 100644 --- a/tests/functional/kmip/serverside_encryption.js +++ b/tests/functional/kmip/serverside_encryption.js @@ -8,7 +8,6 @@ const assert = require('assert'); 
const logger = { info: msg => process.stdout.write(`${msg}\n`) }; const async = require('async'); - function _createBucket(name, encrypt, done) { const { transport, ipAddress, accessKey, secretKey } = config; const verbose = false; @@ -91,8 +90,7 @@ function _putObject(bucketName, objectName, encrypt, cb) { s3.putObject(params, cb); } -function _copyObject(sourceBucket, sourceObject, targetBucket, targetObject, - encrypt, cb) { +function _copyObject(sourceBucket, sourceObject, targetBucket, targetObject, encrypt, cb) { const params = { Bucket: targetBucket, CopySource: `/${sourceBucket}/${sourceObject}`, @@ -134,58 +132,62 @@ describe('KMIP backed server-side encryption', () => { it('should create an encrypted bucket', done => { _createBucket(bucketName, true, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); done(); }); }); it('should create an encrypted bucket and upload an object', done => { - async.waterfall([ - next => _createBucket(bucketName, true, err => next(err)), - next => _putObject(bucketName, objectName, false, err => next(err)), - ], err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - done(); - }); + async.waterfall( + [ + next => _createBucket(bucketName, true, err => next(err)), + next => _putObject(bucketName, objectName, false, err => next(err)), + ], + err => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + done(); + } + ); }); it('should allow object PUT with SSE header in encrypted bucket', done => { - async.waterfall([ - next => _createBucket(bucketName, true, err => next(err)), - next => _putObject(bucketName, objectName, true, err => next(err)), - ], err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - done(); - }); + async.waterfall( + [ + next => _createBucket(bucketName, true, err => next(err)), + next => _putObject(bucketName, objectName, true, err => next(err)), + ], + err => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + done(); + } + ); }); it('should allow object copy with SSE header in encrypted bucket', done => { - async.waterfall([ - next => _createBucket(bucketName, false, err => next(err)), - next => _putObject(bucketName, objectName, false, err => next(err)), - next => _createBucket(`${bucketName}2`, true, err => next(err)), - next => _copyObject(bucketName, objectName, `${bucketName}2`, - `${objectName}2`, true, err => next(err)), - ], err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - done(); - }); + async.waterfall( + [ + next => _createBucket(bucketName, false, err => next(err)), + next => _putObject(bucketName, objectName, false, err => next(err)), + next => _createBucket(`${bucketName}2`, true, err => next(err)), + next => _copyObject(bucketName, objectName, `${bucketName}2`, `${objectName}2`, true, err => next(err)), + ], + err => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + done(); + } + ); }); - it('should allow creating mpu with SSE header ' + - 'in encrypted bucket', done => { - async.waterfall([ - next => _createBucket(bucketName, true, err => next(err)), - next => _initiateMultipartUpload(bucketName, objectName, - true, err => next(err)), - ], err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - 
done(); - }); + it('should allow creating mpu with SSE header ' + 'in encrypted bucket', done => { + async.waterfall( + [ + next => _createBucket(bucketName, true, err => next(err)), + next => _initiateMultipartUpload(bucketName, objectName, true, err => next(err)), + ], + err => { + assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); + done(); + } + ); }); }); diff --git a/tests/functional/metadata/MixedVersionFormat.js b/tests/functional/metadata/MixedVersionFormat.js index 61ad261f36..93a8e28067 100644 --- a/tests/functional/metadata/MixedVersionFormat.js +++ b/tests/functional/metadata/MixedVersionFormat.js @@ -9,8 +9,8 @@ const replicaSetHosts = 'localhost:27017,localhost:27018,localhost:27019'; const writeConcern = 'majority'; const replicaSet = 'rs0'; const readPreference = 'primary'; -const mongoUrl = `mongodb://${replicaSetHosts}/?w=${writeConcern}&` + - `replicaSet=${replicaSet}&readPreference=${readPreference}`; +const mongoUrl = + `mongodb://${replicaSetHosts}/?w=${writeConcern}&` + `replicaSet=${replicaSet}&readPreference=${readPreference}`; /** * These tests are intended to see if the vFormat of buckets is respected @@ -40,61 +40,74 @@ describe('Mongo backend mixed bucket format versions', () => { function updateBucketVFormat(bucketName, vFormat) { const db = mongoClient.db('metadata'); - return db.collection('__metastore') - .updateOne({ + return db.collection('__metastore').updateOne( + { _id: bucketName, - }, { + }, + { $set: { vFormat }, - }, {}); + }, + {} + ); } function getObject(bucketName, key, cb) { const db = mongoClient.db('metadata'); - return db.collection(bucketName) - .findOne({ - _id: key, - }, {}).then(doc => { - if (!doc) { - return cb(errors.NoSuchKey); - } - return cb(null, doc.value); - }).catch(err => cb(err)); + return db + .collection(bucketName) + .findOne( + { + _id: key, + }, + {} + ) + .then(doc => { + if (!doc) { + return cb(errors.NoSuchKey); + } + return cb(null, doc.value); + }) + .catch(err => cb(err)); } before(done => { - MongoClient.connect(mongoUrl, {}).then(client => { - mongoClient = client; - bucketUtil = new BucketUtility('default', sigCfg); - s3 = bucketUtil.s3; - return done(); - }).catch(err => done(err)); + MongoClient.connect(mongoUrl, {}) + .then(client => { + mongoClient = client; + bucketUtil = new BucketUtility('default', sigCfg); + s3 = bucketUtil.s3; + return done(); + }) + .catch(err => done(err)); }); beforeEach(() => { process.stdout.write('Creating buckets'); - return bucketUtil.createMany(['v0-bucket', 'v1-bucket']) - .then(async () => { - process.stdout.write('Updating bucket vFormat'); - await updateBucketVFormat('v0-bucket', 'v0'); - await updateBucketVFormat('v1-bucket', 'v1'); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return bucketUtil + .createMany(['v0-bucket', 'v1-bucket']) + .then(async () => { + process.stdout.write('Updating bucket vFormat'); + await updateBucketVFormat('v0-bucket', 'v0'); + await updateBucketVFormat('v1-bucket', 'v1'); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); afterEach(() => { process.stdout.write('Emptying buckets'); - return bucketUtil.emptyMany(['v0-bucket', 'v1-bucket']) - .then(() => { - process.stdout.write('Deleting buckets'); - return bucketUtil.deleteMany(['v0-bucket', 'v1-bucket']); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + return bucketUtil + .emptyMany(['v0-bucket', 'v1-bucket']) + .then(() => { + 
process.stdout.write('Deleting buckets'); + return bucketUtil.deleteMany(['v0-bucket', 'v1-bucket']); + }) + .catch(err => { + process.stdout.write('Error in afterEach'); + throw err; + }); }); after(async () => { @@ -105,87 +118,98 @@ describe('Mongo backend mixed bucket format versions', () => { it(`Should perform operations on non versioned bucket in ${vFormat} format`, done => { const paramsObj1 = { Bucket: `${vFormat}-bucket`, - Key: `${vFormat}-object-1` + Key: `${vFormat}-object-1`, }; const paramsObj2 = { Bucket: `${vFormat}-bucket`, - Key: `${vFormat}-object-2` + Key: `${vFormat}-object-2`, }; const masterKey = vFormat === 'v0' ? `${vFormat}-object-1` : `\x7fM${vFormat}-object-1`; - async.series([ - next => s3.putObject(paramsObj1, next), - next => s3.putObject(paramsObj2, next), - // check if data stored in the correct format - next => getObject(`${vFormat}-bucket`, masterKey, (err, doc) => { - assert.ifError(err); - assert.strictEqual(doc.key, `${vFormat}-object-1`); - return next(); - }), - // test if we can get object - next => s3.getObject(paramsObj1, next), - // test if we can list objects - next => s3.listObjects({ Bucket: `${vFormat}-bucket` }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Contents.length, 2); - const keys = data.Contents.map(obj => obj.Key); - assert(keys.includes(`${vFormat}-object-1`)); - assert(keys.includes(`${vFormat}-object-2`)); - return next(); - }) - ], done); + async.series( + [ + next => s3.putObject(paramsObj1, next), + next => s3.putObject(paramsObj2, next), + // check if data stored in the correct format + next => + getObject(`${vFormat}-bucket`, masterKey, (err, doc) => { + assert.ifError(err); + assert.strictEqual(doc.key, `${vFormat}-object-1`); + return next(); + }), + // test if we can get object + next => s3.getObject(paramsObj1, next), + // test if we can list objects + next => + s3.listObjects({ Bucket: `${vFormat}-bucket` }, (err, data) => { + assert.ifError(err); + assert.strictEqual(data.Contents.length, 2); + const keys = data.Contents.map(obj => obj.Key); + assert(keys.includes(`${vFormat}-object-1`)); + assert(keys.includes(`${vFormat}-object-2`)); + return next(); + }), + ], + done + ); }); it(`Should perform operations on versioned bucket in ${vFormat} format`, done => { const paramsObj1 = { Bucket: `${vFormat}-bucket`, - Key: `${vFormat}-object-1` + Key: `${vFormat}-object-1`, }; const paramsObj2 = { Bucket: `${vFormat}-bucket`, - Key: `${vFormat}-object-2` + Key: `${vFormat}-object-2`, }; const versioningParams = { Bucket: `${vFormat}-bucket`, VersioningConfiguration: { - Status: 'Enabled', - } + Status: 'Enabled', + }, }; const masterKey = vFormat === 'v0' ? 
`${vFormat}-object-1` : `\x7fM${vFormat}-object-1`; - async.series([ - next => s3.putBucketVersioning(versioningParams, next), - next => s3.putObject(paramsObj1, next), - next => s3.putObject(paramsObj1, next), - next => s3.putObject(paramsObj2, next), - // check if data stored in the correct version format - next => getObject(`${vFormat}-bucket`, masterKey, (err, doc) => { - assert.ifError(err); - assert.strictEqual(doc.key, `${vFormat}-object-1`); - return next(); - }), - // test if we can get object - next => s3.getObject(paramsObj1, next), - // test if we can list objects - next => s3.listObjects({ Bucket: `${vFormat}-bucket` }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Contents.length, 2); - const keys = data.Contents.map(obj => obj.Key); - assert(keys.includes(`${vFormat}-object-1`)); - assert(keys.includes(`${vFormat}-object-2`)); - return next(); - }), - // test if we can list object versions - next => s3.listObjectVersions({ Bucket: `${vFormat}-bucket` }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Versions.length, 3); - const versionPerObject = {}; - data.Versions.forEach(version => { - versionPerObject[version.Key] = (versionPerObject[version.Key] || 0) + 1; - }); - assert.strictEqual(versionPerObject[`${vFormat}-object-1`], 2); - assert.strictEqual(versionPerObject[`${vFormat}-object-2`], 1); - return next(); - }) - ], done); + async.series( + [ + next => s3.putBucketVersioning(versioningParams, next), + next => s3.putObject(paramsObj1, next), + next => s3.putObject(paramsObj1, next), + next => s3.putObject(paramsObj2, next), + // check if data stored in the correct version format + next => + getObject(`${vFormat}-bucket`, masterKey, (err, doc) => { + assert.ifError(err); + assert.strictEqual(doc.key, `${vFormat}-object-1`); + return next(); + }), + // test if we can get object + next => s3.getObject(paramsObj1, next), + // test if we can list objects + next => + s3.listObjects({ Bucket: `${vFormat}-bucket` }, (err, data) => { + assert.ifError(err); + assert.strictEqual(data.Contents.length, 2); + const keys = data.Contents.map(obj => obj.Key); + assert(keys.includes(`${vFormat}-object-1`)); + assert(keys.includes(`${vFormat}-object-2`)); + return next(); + }), + // test if we can list object versions + next => + s3.listObjectVersions({ Bucket: `${vFormat}-bucket` }, (err, data) => { + assert.ifError(err); + assert.strictEqual(data.Versions.length, 3); + const versionPerObject = {}; + data.Versions.forEach(version => { + versionPerObject[version.Key] = (versionPerObject[version.Key] || 0) + 1; + }); + assert.strictEqual(versionPerObject[`${vFormat}-object-1`], 2); + assert.strictEqual(versionPerObject[`${vFormat}-object-2`], 1); + return next(); + }), + ], + done + ); }); }); }); diff --git a/tests/functional/metadata/MongoClientInterface.js b/tests/functional/metadata/MongoClientInterface.js index a8ce1456a2..a93639422a 100644 --- a/tests/functional/metadata/MongoClientInterface.js +++ b/tests/functional/metadata/MongoClientInterface.js @@ -2,9 +2,7 @@ const assert = require('assert'); const async = require('async'); const MongoClient = require('mongodb').MongoClient; -const { - MongoClientInterface, -} = require('arsenal').storage.metadata.mongoclient; +const { MongoClientInterface } = require('arsenal').storage.metadata.mongoclient; const { errors } = require('arsenal'); const log = require('./utils/fakeLogger'); @@ -13,8 +11,8 @@ const replicaSetHosts = 'localhost:27017,localhost:27018,localhost:27019'; const writeConcern = 
'majority'; const replicaSet = 'rs0'; const readPreference = 'primary'; -const mongoUrl = `mongodb://${replicaSetHosts}/?w=${writeConcern}&` + - `replicaSet=${replicaSet}&readPreference=${readPreference}`; +const mongoUrl = + `mongodb://${replicaSetHosts}/?w=${writeConcern}&` + `replicaSet=${replicaSet}&readPreference=${readPreference}`; const VID_SEP = '\0'; const TEST_DB = 'test'; @@ -41,13 +39,14 @@ const objVal = { const updatedObjVal = { updated: true }; -const runIfMongo = - process.env.S3METADATA === 'mongodb' ? describe : describe.skip; +const runIfMongo = process.env.S3METADATA === 'mongodb' ? describe : describe.skip; function unescapeJSON(obj) { - return JSON.parse(JSON.stringify(obj). - replace(/\uFF04/g, '$'). - replace(/\uFF0E/g, '.')); + return JSON.parse( + JSON.stringify(obj) + .replace(/\uFF04/g, '$') + .replace(/\uFF0E/g, '.') + ); } function getCaseMethod(number) { @@ -72,25 +71,28 @@ runIfMongo('MongoClientInterface', () => { if (params && params.versionId) { objName = `${objName}${VID_SEP}${params.versionId}`; } - collection.findOne({ - _id: objName, - }, {}).then(doc => { - if (!doc) { - return cb(errors.NoSuchKey); - } - if (doc.value.tags) { - // eslint-disable-next-line - doc.value.tags = unescapeJSON(doc.value.tags); - } - return cb(null, doc.value); - }).catch(err => cb(err)); + collection + .findOne( + { + _id: objName, + }, + {} + ) + .then(doc => { + if (!doc) { + return cb(errors.NoSuchKey); + } + if (doc.value.tags) { + // eslint-disable-next-line + doc.value.tags = unescapeJSON(doc.value.tags); + } + return cb(null, doc.value); + }) + .catch(err => cb(err)); } function checkVersionAndMasterMatch(versionId, cb) { - async.parallel([ - next => getObject({}, next), - next => getObject({ versionId }, next), - ], (err, res) => { + async.parallel([next => getObject({}, next), next => getObject({ versionId }, next)], (err, res) => { if (err) { return cb(err); } @@ -104,41 +106,42 @@ runIfMongo('MongoClientInterface', () => { function putObject(caseNum, params, objectValue, cb) { const method = getCaseMethod(caseNum); const dupeObjVal = Object.assign({}, objectValue || objVal); - mongoClientInterface[method](collection, BUCKET_NAME, OBJECT_NAME, - dupeObjVal, params, log, (err, res) => { - if (err) { - return cb(err); - } - let parsedRes; - if (res) { - try { - parsedRes = JSON.parse(res); - } catch (error) { - return cb(error); - } + mongoClientInterface[method](collection, BUCKET_NAME, OBJECT_NAME, dupeObjVal, params, log, (err, res) => { + if (err) { + return cb(err); + } + let parsedRes; + if (res) { + try { + parsedRes = JSON.parse(res); + } catch (error) { + return cb(error); } - return cb(null, parsedRes); - }); + } + return cb(null, parsedRes); + }); } function checkNewPutObject(caseNum, params, cb) { const method = getCaseMethod(caseNum); const bucket = 'a'; const key = 'b'; - async.series([ - next => mongoClientInterface[method]( - collection, bucket, key, updatedObjVal, params, log, next), - next => { - collection.findOne({ _id: key }, (err, result) => { - if (err) { - return next(err); - } - assert.strictEqual(result._id, key); - assert(result.value.updated); - return next(); - }); - }, - ], cb); + async.series( + [ + next => mongoClientInterface[method](collection, bucket, key, updatedObjVal, params, log, next), + next => { + collection.findOne({ _id: key }, (err, result) => { + if (err) { + return next(err); + } + assert.strictEqual(result._id, key); + assert(result.value.updated); + return next(); + }); + }, + ], + cb + ); } before(done => { @@ 
-180,11 +183,13 @@ runIfMongo('MongoClientInterface', () => { describe('::putObjectVerCase1', () => { it('should put new metadata and update master', done => { - async.waterfall([ - next => putObject(1, {}, null, - (err, res) => next(err, res.versionId)), - (id, next) => checkVersionAndMasterMatch(id, next), - ], done); + async.waterfall( + [ + next => putObject(1, {}, null, (err, res) => next(err, res.versionId)), + (id, next) => checkVersionAndMasterMatch(id, next), + ], + done + ); }); }); @@ -192,130 +197,152 @@ runIfMongo('MongoClientInterface', () => { it('should put new metadata', done => checkNewPutObject(2, {}, done)); it('should set new version id for master', done => { - async.waterfall([ - // first create new ver and master - next => putObject(1, {}, null, next), - // check master and version were created and match - (res, next) => checkVersionAndMasterMatch(res.versionId, - err => next(err, res.versionId)), - // call ver case 2 - (id, next) => putObject(2, {}, null, (err, res) => { - if (err) { - return next(err); - } - assert(id !== res.versionId); - return next(null, res.versionId); - }), - // assert master updated with new version id - (newId, next) => getObject({}, (err, res) => { - if (err) { - return next(err); - } - assert.strictEqual(res.versionId, newId); - return next(null, newId); - }), - // new version entry should not have been created - (id, next) => getObject({ versionId: id }, err => { - assert(err); - assert(err.is.NoSuchKey); - return next(); - }), - ], done); + async.waterfall( + [ + // first create new ver and master + next => putObject(1, {}, null, next), + // check master and version were created and match + (res, next) => checkVersionAndMasterMatch(res.versionId, err => next(err, res.versionId)), + // call ver case 2 + (id, next) => + putObject(2, {}, null, (err, res) => { + if (err) { + return next(err); + } + assert(id !== res.versionId); + return next(null, res.versionId); + }), + // assert master updated with new version id + (newId, next) => + getObject({}, (err, res) => { + if (err) { + return next(err); + } + assert.strictEqual(res.versionId, newId); + return next(null, newId); + }), + // new version entry should not have been created + (id, next) => + getObject({ versionId: id }, err => { + assert(err); + assert(err.is.NoSuchKey); + return next(); + }), + ], + done + ); }); }); describe('::putObjectVerCase3', () => { - it('should put new metadata', done => - checkNewPutObject(3, { versionId: VERSION_ID }, done)); + it('should put new metadata', done => checkNewPutObject(3, { versionId: VERSION_ID }, done)); it('should put new metadata and not update master', done => { - async.waterfall([ - // first create new ver and master - next => putObject(1, {}, null, next), - // check master and version were created and match - (res, next) => checkVersionAndMasterMatch(res.versionId, - err => next(err, res.versionId)), - // call ver case 3 - (id, next) => putObject(3, { versionId: VERSION_ID }, null, - (err, res) => { - if (err) { - return next(err); - } - // assert new version id created - assert(id !== res.versionId); - assert.strictEqual(res.versionId, VERSION_ID); - return next(null, id); - }), - // assert master did not update and matches old initial version - (oldId, next) => getObject({}, (err, res) => { - if (err) { - return next(err); - } - assert.strictEqual(oldId, res.versionId); - return next(); - }), - // assert new version was created - next => getObject({ versionId: VERSION_ID }, (err, res) => { - if (err) { - return next(err); - } - 
assert(res); - assert.strictEqual(res.versionId, VERSION_ID); - return next(); - }), - ], done); + async.waterfall( + [ + // first create new ver and master + next => putObject(1, {}, null, next), + // check master and version were created and match + (res, next) => checkVersionAndMasterMatch(res.versionId, err => next(err, res.versionId)), + // call ver case 3 + (id, next) => + putObject(3, { versionId: VERSION_ID }, null, (err, res) => { + if (err) { + return next(err); + } + // assert new version id created + assert(id !== res.versionId); + assert.strictEqual(res.versionId, VERSION_ID); + return next(null, id); + }), + // assert master did not update and matches old initial version + (oldId, next) => + getObject({}, (err, res) => { + if (err) { + return next(err); + } + assert.strictEqual(oldId, res.versionId); + return next(); + }), + // assert new version was created + next => + getObject({ versionId: VERSION_ID }, (err, res) => { + if (err) { + return next(err); + } + assert(res); + assert.strictEqual(res.versionId, VERSION_ID); + return next(); + }), + ], + done + ); }); - it('should put new metadata and update master if version id matches', - done => { - async.waterfall([ - // first create new ver and master - next => putObject(1, {}, null, next), - // check master and version were created and match - (res, next) => checkVersionAndMasterMatch(res.versionId, - err => next(err, res.versionId)), - // call ver case 3 w/ same version id and update - (id, next) => mongoClientInterface.putObjectVerCase3(collection, - BUCKET_NAME, OBJECT_NAME, updatedObjVal, - { versionId: id }, log, err => next(err, id)), - (oldId, next) => getObject({}, (err, res) => { - if (err) { - return next(err); - } - // assert updated - assert(res); - assert(res.updated); - // assert same version id - assert.strictEqual(oldId, res.versionId); - return next(); - }), - ], done); + it('should put new metadata and update master if version id matches', done => { + async.waterfall( + [ + // first create new ver and master + next => putObject(1, {}, null, next), + // check master and version were created and match + (res, next) => checkVersionAndMasterMatch(res.versionId, err => next(err, res.versionId)), + // call ver case 3 w/ same version id and update + (id, next) => + mongoClientInterface.putObjectVerCase3( + collection, + BUCKET_NAME, + OBJECT_NAME, + updatedObjVal, + { versionId: id }, + log, + err => next(err, id) + ), + (oldId, next) => + getObject({}, (err, res) => { + if (err) { + return next(err); + } + // assert updated + assert(res); + assert(res.updated); + // assert same version id + assert.strictEqual(oldId, res.versionId); + return next(); + }), + ], + done + ); }); }); describe('::putObjectVerCase4', () => { function putAndCheckCase4(versionId, cb) { const objectValue = Object.assign({}, objVal, { versionId }); - async.waterfall([ - // put object - next => putObject(4, { versionId }, objectValue, (err, res) => { - if (err) { - return next(err); - } - return next(null, res.versionId); - }), - (id, next) => getObject({}, (err, res) => { - if (err) { - return next(err); - } - // assert PHD was placed on master - assert(res); - assert.strictEqual(res.isPHD, true); - // assert same version id as master - assert.strictEqual(id, res.versionId); - return next(); - }), - ], cb); + async.waterfall( + [ + // put object + next => + putObject(4, { versionId }, objectValue, (err, res) => { + if (err) { + return next(err); + } + return next(null, res.versionId); + }), + (id, next) => + getObject({}, (err, res) => { + 
if (err) { + return next(err); + } + // assert PHD was placed on master + assert(res); + assert.strictEqual(res.isPHD, true); + // assert same version id as master + assert.strictEqual(id, res.versionId); + return next(); + }), + ], + cb + ); } it('should put new metadata and update master', done => { @@ -330,11 +357,14 @@ runIfMongo('MongoClientInterface', () => { const suffix = `22019.${count++}`; return `${prefix}${repID}${suffix}`; } - async.series([ - next => putAndCheckCase4(getNewVersion(), next), - next => putAndCheckCase4(getNewVersion(), next), - next => putAndCheckCase4(getNewVersion(), next), - ], done); + async.series( + [ + next => putAndCheckCase4(getNewVersion(), next), + next => putAndCheckCase4(getNewVersion(), next), + next => putAndCheckCase4(getNewVersion(), next), + ], + done + ); }); }); }); diff --git a/tests/functional/raw-node/package.json b/tests/functional/raw-node/package.json index dbebc265c3..85292b7692 100644 --- a/tests/functional/raw-node/package.json +++ b/tests/functional/raw-node/package.json @@ -1,18 +1,18 @@ { - "name": "Test-rawnode", - "version": "0.1.0", - "description": "Test-rawnode", - "main": "tests.js", - "repository": "", - "keywords": [ - "test" - ], - "scripts": { - "test-aws": "AWS_ON_AIR=true mocha -t 40000 test/ --exit", - "test-gcp": "mocha -t 40000 test/GCP/ --exit", - "test-routes": "mocha -t 40000 test/routes/ --exit", - "test": "mocha -t 40000 test/ --exit", - "test-debug": "_mocha -t 40000 test/ --exit" - }, - "author": "" + "name": "Test-rawnode", + "version": "0.1.0", + "description": "Test-rawnode", + "main": "tests.js", + "repository": "", + "keywords": [ + "test" + ], + "scripts": { + "test-aws": "AWS_ON_AIR=true mocha -t 40000 test/ --exit", + "test-gcp": "mocha -t 40000 test/GCP/ --exit", + "test-routes": "mocha -t 40000 test/routes/ --exit", + "test": "mocha -t 40000 test/ --exit", + "test-debug": "_mocha -t 40000 test/ --exit" + }, + "author": "" } diff --git a/tests/functional/raw-node/test/GCP/bucket/get.js b/tests/functional/raw-node/test/GCP/bucket/get.js index 7473c1b06b..274c5db2a1 100644 --- a/tests/functional/raw-node/test/GCP/bucket/get.js +++ b/tests/functional/raw-node/test/GCP/bucket/get.js @@ -4,8 +4,7 @@ const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); const { gcpRequestRetry, genUniqID } = require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const { listingHardLimit } = require('../../../../../../constants'); const credentialOne = 'gcpbackend'; @@ -16,139 +15,165 @@ const config = getRealAwsConfig(credentialOne); const gcpClient = new GCP(config); function populateBucket(createdObjects, callback) { - process.stdout.write( - `Putting ${createdObjects.length} objects into bucket\n`); - async.mapLimit(createdObjects, 10, - (object, moveOn) => { - makeGcpRequest({ - method: 'PUT', - bucket: bucketName, - objectKey: object, - authCredentials: config.credentials, - }, err => moveOn(err)); - }, err => { - if (err) { - process.stdout - .write(`err putting objects ${err.code}`); + process.stdout.write(`Putting ${createdObjects.length} objects into bucket\n`); + async.mapLimit( + createdObjects, + 10, + (object, moveOn) => { + makeGcpRequest( + { + method: 'PUT', + bucket: bucketName, + objectKey: object, + authCredentials: config.credentials, + }, + err => 
moveOn(err) + ); + }, + err => { + if (err) { + process.stdout.write(`err putting objects ${err.code}`); + } + return callback(err); } - return callback(err); - }); + ); } function removeObjects(createdObjects, callback) { - process.stdout.write( - `Deleting ${createdObjects.length} objects from bucket\n`); - async.mapLimit(createdObjects, 10, - (object, moveOn) => { - makeGcpRequest({ - method: 'DELETE', - bucket: bucketName, - objectKey: object, - authCredentials: config.credentials, - }, err => moveOn(err)); - }, err => { - if (err) { - process.stdout - .write(`err deleting objects ${err.code}`); + process.stdout.write(`Deleting ${createdObjects.length} objects from bucket\n`); + async.mapLimit( + createdObjects, + 10, + (object, moveOn) => { + makeGcpRequest( + { + method: 'DELETE', + bucket: bucketName, + objectKey: object, + authCredentials: config.credentials, + }, + err => moveOn(err) + ); + }, + err => { + if (err) { + process.stdout.write(`err deleting objects ${err.code}`); + } + return callback(err); } - return callback(err); - }); + ); } describe('GCP: GET Bucket', function testSuite() { this.timeout(180000); before(done => { - gcpRequestRetry({ - method: 'PUT', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); + gcpRequestRetry( + { + method: 'PUT', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); after(done => { - gcpRequestRetry({ - method: 'DELETE', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}\n`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); describe('without existing bucket', () => { it('should return 404 and NoSuchBucket', done => { const badBucketName = `nonexistingbucket-${genUniqID()}`; - gcpClient.getBucket({ - Bucket: badBucketName, - }, err => { - assert(err); - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NoSuchBucket'); - return done(); - }); + gcpClient.getBucket( + { + Bucket: badBucketName, + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NoSuchBucket'); + return done(); + } + ); }); }); describe('with existing bucket', () => { describe('with less than listingHardLimit number of objects', () => { - const createdObjects = Array.from( - Array(smallSize).keys()).map(i => `someObject-${i}`); + const createdObjects = Array.from(Array(smallSize).keys()).map(i => `someObject-${i}`); before(done => populateBucket(createdObjects, done)); after(done => removeObjects(createdObjects, done)); it(`should list all ${smallSize} created objects`, done => { - gcpClient.listObjects({ - Bucket: bucketName, - }, (err, res) => { - assert.equal(err, null, `Expected success, but got ${err}`); - assert.strictEqual(res.Contents.length, smallSize); - return done(); - }); + gcpClient.listObjects( + { + Bucket: bucketName, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got ${err}`); + assert.strictEqual(res.Contents.length, smallSize); + return done(); + } + ); }); describe('with MaxKeys at 10', () => { 
it('should list MaxKeys number of objects', done => { - gcpClient.listObjects({ - Bucket: bucketName, - MaxKeys: 10, - }, (err, res) => { - assert.equal(err, null, - `Expected success, but got ${err}`); - assert.strictEqual(res.Contents.length, 10); - return done(); - }); + gcpClient.listObjects( + { + Bucket: bucketName, + MaxKeys: 10, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got ${err}`); + assert.strictEqual(res.Contents.length, 10); + return done(); + } + ); }); }); }); describe('with more than listingHardLimit number of objects', () => { - const createdObjects = Array.from( - Array(bigSize).keys()).map(i => `someObject-${i}`); + const createdObjects = Array.from(Array(bigSize).keys()).map(i => `someObject-${i}`); before(done => populateBucket(createdObjects, done)); after(done => removeObjects(createdObjects, done)); it('should list at max 1000 of objects created', done => { - gcpClient.listObjects({ - Bucket: bucketName, - }, (err, res) => { - assert.equal(err, null, `Expected success, but got ${err}`); - assert.strictEqual(res.Contents.length, - listingHardLimit); - return done(); - }); + gcpClient.listObjects( + { + Bucket: bucketName, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got ${err}`); + assert.strictEqual(res.Contents.length, listingHardLimit); + return done(); + } + ); }); describe('with MaxKeys at 1001', () => { @@ -164,16 +189,17 @@ describe('GCP: GET Bucket', function testSuite() { // Actual behavior: it returns a list longer than 1000 objects when // max-keys is greater than 1000 it.skip('should list at max 1000, ignoring MaxKeys', done => { - gcpClient.listObjects({ - Bucket: bucketName, - MaxKeys: 1001, - }, (err, res) => { - assert.equal(err, null, - `Expected success, but got ${err}`); - assert.strictEqual(res.Contents.length, - listingHardLimit); - return done(); - }); + gcpClient.listObjects( + { + Bucket: bucketName, + MaxKeys: 1001, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got ${err}`); + assert.strictEqual(res.Contents.length, listingHardLimit); + return done(); + } + ); }); }); }); diff --git a/tests/functional/raw-node/test/GCP/bucket/getVersioning.js b/tests/functional/raw-node/test/GCP/bucket/getVersioning.js index dbe256115b..3660ad7047 100644 --- a/tests/functional/raw-node/test/GCP/bucket/getVersioning.js +++ b/tests/functional/raw-node/test/GCP/bucket/getVersioning.js @@ -4,8 +4,7 @@ const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); const { gcpRequestRetry, genUniqID } = require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const verEnabledObj = { Status: 'Enabled' }; @@ -27,78 +26,106 @@ describe('GCP: GET Bucket Versioning', () => { beforeEach(function beforeFn(done) { this.currentTest.bucketName = `somebucket-${genUniqID()}`; - gcpRequestRetry({ - method: 'PUT', - bucket: this.currentTest.bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); + gcpRequestRetry( + { + method: 'PUT', + bucket: this.currentTest.bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}\n`); + } + return done(err); } - return 
done(err); - }); + ); }); afterEach(function afterFn(done) { - gcpRequestRetry({ - method: 'DELETE', - bucket: this.currentTest.bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}\n`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: this.currentTest.bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); it('should verify bucket versioning is enabled', function testFn(done) { - return async.waterfall([ - next => makeGcpRequest({ - method: 'PUT', - bucket: this.test.bucketName, - authCredentials: config.credentials, - queryObj: { versioning: {} }, - requestBody: xmlEnable, - }, err => { - if (err) { - process.stdout.write(`err in setting versioning ${err}`); - } - return next(err); - }), - next => gcpClient.getBucketVersioning({ - Bucket: this.test.bucketName, - }, (err, res) => { - assert.equal(err, null, - `Expected success, but got err ${err}`); - assert.deepStrictEqual(res, verEnabledObj); - return next(); - }), - ], err => done(err)); + return async.waterfall( + [ + next => + makeGcpRequest( + { + method: 'PUT', + bucket: this.test.bucketName, + authCredentials: config.credentials, + queryObj: { versioning: {} }, + requestBody: xmlEnable, + }, + err => { + if (err) { + process.stdout.write(`err in setting versioning ${err}`); + } + return next(err); + } + ), + next => + gcpClient.getBucketVersioning( + { + Bucket: this.test.bucketName, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got err ${err}`); + assert.deepStrictEqual(res, verEnabledObj); + return next(); + } + ), + ], + err => done(err) + ); }); it('should verify bucket versioning is disabled', function testFn(done) { - return async.waterfall([ - next => makeGcpRequest({ - method: 'PUT', - bucket: this.test.bucketName, - authCredentials: config.credentials, - queryObj: { versioning: {} }, - requestBody: xmlDisable, - }, err => { - if (err) { - process.stdout.write(`err in setting versioning ${err}`); - } - return next(err); - }), - next => gcpClient.getBucketVersioning({ - Bucket: this.test.bucketName, - }, (err, res) => { - assert.equal(err, null, - `Expected success, but got err ${err}`); - assert.deepStrictEqual(res, verDisabledObj); - return next(); - }), - ], err => done(err)); + return async.waterfall( + [ + next => + makeGcpRequest( + { + method: 'PUT', + bucket: this.test.bucketName, + authCredentials: config.credentials, + queryObj: { versioning: {} }, + requestBody: xmlDisable, + }, + err => { + if (err) { + process.stdout.write(`err in setting versioning ${err}`); + } + return next(err); + } + ), + next => + gcpClient.getBucketVersioning( + { + Bucket: this.test.bucketName, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got err ${err}`); + assert.deepStrictEqual(res, verDisabledObj); + return next(); + } + ), + ], + err => done(err) + ); }); }); diff --git a/tests/functional/raw-node/test/GCP/bucket/head.js b/tests/functional/raw-node/test/GCP/bucket/head.js index 244c4baa65..b5a506dda3 100644 --- a/tests/functional/raw-node/test/GCP/bucket/head.js +++ b/tests/functional/raw-node/test/GCP/bucket/head.js @@ -2,8 +2,7 @@ const assert = require('assert'); const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { gcpRequestRetry, genUniqID } = require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = 
- require('../../../../aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; @@ -18,58 +17,70 @@ describe('GCP: HEAD Bucket', () => { }); it('should return 404', function testFn(done) { - gcpClient.headBucket({ - Bucket: this.test.bucketName, - }, err => { - assert(err); - assert.strictEqual(err.statusCode, 404); - return done(); - }); + gcpClient.headBucket( + { + Bucket: this.test.bucketName, + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 404); + return done(); + } + ); }); }); describe('with existing bucket', () => { beforeEach(function beforeFn(done) { this.currentTest.bucketName = `somebucket-${genUniqID()}`; - process.stdout - .write(`Creating test bucket ${this.currentTest.bucketName}\n`); - gcpRequestRetry({ - method: 'PUT', - bucket: this.currentTest.bucketName, - authCredentials: config.credentials, - }, 0, (err, res) => { - if (err) { - return done(err); + process.stdout.write(`Creating test bucket ${this.currentTest.bucketName}\n`); + gcpRequestRetry( + { + method: 'PUT', + bucket: this.currentTest.bucketName, + authCredentials: config.credentials, + }, + 0, + (err, res) => { + if (err) { + return done(err); + } + this.currentTest.bucketObj = { + MetaVersionId: res.headers['x-goog-metageneration'], + }; + return done(); } - this.currentTest.bucketObj = { - MetaVersionId: res.headers['x-goog-metageneration'], - }; - return done(); - }); + ); }); afterEach(function afterFn(done) { - gcpRequestRetry({ - method: 'DELETE', - bucket: this.currentTest.bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout - .write(`err deleting bucket: ${err.code}\n`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: this.currentTest.bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err deleting bucket: ${err.code}\n`); + } + return done(err); } - return done(err); - }); + ); }); it('should get bucket information', function testFn(done) { - gcpClient.headBucket({ - Bucket: this.test.bucketName, - }, (err, res) => { - assert.equal(err, null, `Expected success, but got ${err}`); - assert.deepStrictEqual(this.test.bucketObj, res); - return done(); - }); + gcpClient.headBucket( + { + Bucket: this.test.bucketName, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got ${err}`); + assert.deepStrictEqual(this.test.bucketObj, res); + return done(); + } + ); }); }); }); diff --git a/tests/functional/raw-node/test/GCP/bucket/putVersioning.js b/tests/functional/raw-node/test/GCP/bucket/putVersioning.js index 028e0b24e6..949059ac0f 100644 --- a/tests/functional/raw-node/test/GCP/bucket/putVersioning.js +++ b/tests/functional/raw-node/test/GCP/bucket/putVersioning.js @@ -5,8 +5,7 @@ const xml2js = require('xml2js'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); const { gcpRequestRetry, genUniqID } = require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const verEnabledObj = { VersioningConfiguration: { Status: ['Enabled'] } }; @@ -29,82 +28,110 @@ describe('GCP: PUT Bucket Versioning', () => { beforeEach(function beforeFn(done) { this.currentTest.bucketName = `somebucket-${genUniqID()}`; - 
gcpRequestRetry({ - method: 'PUT', - bucket: this.currentTest.bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); + gcpRequestRetry( + { + method: 'PUT', + bucket: this.currentTest.bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); afterEach(function afterFn(done) { - gcpRequestRetry({ - method: 'DELETE', - bucket: this.currentTest.bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}\n`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: this.currentTest.bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); it('should enable bucket versioning', function testFn(done) { - return async.waterfall([ - next => gcpClient.putBucketVersioning({ - Bucket: this.test.bucketName, - VersioningConfiguration: { - Status: 'Enabled', - }, - }, err => { - assert.equal(err, null, - `Expected success, but got err ${err}`); - return next(); - }), - next => makeGcpRequest({ - method: 'GET', - bucket: this.test.bucketName, - authCredentials: config.credentials, - queryObj: { versioning: {} }, - }, (err, res) => { - if (err) { - process.stdout.write(`err in retrieving bucket ${err}`); - return next(err); - } - return resParseAndAssert(res.body, verEnabledObj, next); - }), - ], err => done(err)); + return async.waterfall( + [ + next => + gcpClient.putBucketVersioning( + { + Bucket: this.test.bucketName, + VersioningConfiguration: { + Status: 'Enabled', + }, + }, + err => { + assert.equal(err, null, `Expected success, but got err ${err}`); + return next(); + } + ), + next => + makeGcpRequest( + { + method: 'GET', + bucket: this.test.bucketName, + authCredentials: config.credentials, + queryObj: { versioning: {} }, + }, + (err, res) => { + if (err) { + process.stdout.write(`err in retrieving bucket ${err}`); + return next(err); + } + return resParseAndAssert(res.body, verEnabledObj, next); + } + ), + ], + err => done(err) + ); }); it('should disable bucket versioning', function testFn(done) { - return async.waterfall([ - next => gcpClient.putBucketVersioning({ - Bucket: this.test.bucketName, - VersioningConfiguration: { - Status: 'Suspended', - }, - }, err => { - assert.equal(err, null, - `Expected success, but got err ${err}`); - return next(); - }), - next => makeGcpRequest({ - method: 'GET', - bucket: this.test.bucketName, - authCredentials: config.credentials, - queryObj: { versioning: {} }, - }, (err, res) => { - if (err) { - process.stdout.write(`err in retrieving bucket ${err}`); - return next(err); - } - return resParseAndAssert(res.body, verDisabledObj, next); - }), - ], err => done(err)); + return async.waterfall( + [ + next => + gcpClient.putBucketVersioning( + { + Bucket: this.test.bucketName, + VersioningConfiguration: { + Status: 'Suspended', + }, + }, + err => { + assert.equal(err, null, `Expected success, but got err ${err}`); + return next(); + } + ), + next => + makeGcpRequest( + { + method: 'GET', + bucket: this.test.bucketName, + authCredentials: config.credentials, + queryObj: { versioning: {} }, + }, + (err, res) => { + if (err) { + process.stdout.write(`err in retrieving bucket ${err}`); + return next(err); + } + return 
resParseAndAssert(res.body, verDisabledObj, next); + } + ), + ], + err => done(err) + ); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/completeMpu.js b/tests/functional/raw-node/test/GCP/object/completeMpu.js index 146cd55b73..c3bb066086 100644 --- a/tests/functional/raw-node/test/GCP/object/completeMpu.js +++ b/tests/functional/raw-node/test/GCP/object/completeMpu.js @@ -2,10 +2,8 @@ const assert = require('assert'); const async = require('async'); const arsenal = require('arsenal'); const { GCP, GcpUtils } = arsenal.storage.data.external; -const { gcpRequestRetry, setBucketClass, gcpMpuSetup, genUniqID } = - require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { gcpRequestRetry, setBucketClass, gcpMpuSetup, genUniqID } = require('../../../utils/gcpUtils'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const bucketNames = { @@ -42,67 +40,88 @@ describe('GCP: Complete MPU', function testSuite() { before(done => { config = getRealAwsConfig(credentialOne); gcpClient = new GCP(config); - async.eachSeries(bucketNames, - (bucket, next) => gcpRequestRetry({ - method: 'PUT', - bucket: bucket.Name, - authCredentials: config.credentials, - requestBody: setBucketClass(bucket.Type), - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); - } - return next(err); - }), - done); - }); - - after(done => { - async.eachSeries(bucketNames, - (bucket, next) => gcpClient.listObjects({ - Bucket: bucket.Name, - }, (err, res) => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - async.map(res.Contents, (object, moveOn) => { - const deleteParams = { - Bucket: bucket.Name, - Key: object.Key, - }; - gcpClient.deleteObject( - deleteParams, err => moveOn(err)); - }, err => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - gcpRequestRetry({ - method: 'DELETE', + async.eachSeries( + bucketNames, + (bucket, next) => + gcpRequestRetry( + { + method: 'PUT', bucket: bucket.Name, authCredentials: config.credentials, - }, 0, err => { + requestBody: setBucketClass(bucket.Type), + }, + 0, + err => { if (err) { - process.stdout.write( - `err in deleting bucket ${err}\n`); + process.stdout.write(`err in creating bucket ${err}\n`); } return next(err); - }); - }); - }), - done); + } + ), + done + ); + }); + + after(done => { + async.eachSeries( + bucketNames, + (bucket, next) => + gcpClient.listObjects( + { + Bucket: bucket.Name, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got error ${err}`); + async.map( + res.Contents, + (object, moveOn) => { + const deleteParams = { + Bucket: bucket.Name, + Key: object.Key, + }; + gcpClient.deleteObject(deleteParams, err => moveOn(err)); + }, + err => { + assert.equal(err, null, `Expected success, but got error ${err}`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucket.Name, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}\n`); + } + return next(err); + } + ); + } + ); + } + ), + done + ); }); describe('when MPU has 0 parts', () => { beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; - gcpMpuSetupWrapper.call(this, { - gcpClient, - bucketNames, - key: this.currentTest.key, - partCount: 0, partSize, - }, done); + gcpMpuSetupWrapper.call( + this, + { + gcpClient, + bucketNames, + key: 
this.currentTest.key, + partCount: 0, + partSize, + }, + done + ); }); - it('should return error if 0 parts are given in MPU complete', - function testFn(done) { + it('should return error if 0 parts are given in MPU complete', function testFn(done) { const params = { Bucket: bucketNames.main.Name, MPU: bucketNames.mpu.Name, @@ -121,20 +140,28 @@ describe('GCP: Complete MPU', function testSuite() { describe('when MPU has 1 uploaded part', () => { beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; - gcpMpuSetupWrapper.call(this, { - gcpClient, - bucketNames, - key: this.currentTest.key, - partCount: 1, partSize, - }, done); + gcpMpuSetupWrapper.call( + this, + { + gcpClient, + bucketNames, + key: this.currentTest.key, + partCount: 1, + partSize, + }, + done + ); }); - it('should successfully complete MPU', - function testFn(done) { - const parts = GcpUtils.createMpuList({ - Key: this.test.key, - UploadId: this.test.uploadId, - }, 'parts', 1).map(item => { + it('should successfully complete MPU', function testFn(done) { + const parts = GcpUtils.createMpuList( + { + Key: this.test.key, + UploadId: this.test.uploadId, + }, + 'parts', + 1 + ).map(item => { Object.assign(item, { ETag: this.test.etagList[item.PartNumber - 1], }); @@ -148,8 +175,7 @@ describe('GCP: Complete MPU', function testSuite() { MultipartUpload: { Parts: parts }, }; gcpClient.completeMultipartUpload(params, (err, res) => { - assert.equal(err, null, - `Expected success, but got error ${err}`); + assert.equal(err, null, `Expected success, but got error ${err}`); assert.strictEqual(res.ETag, `"${smallMD5}"`); return done(); }); @@ -159,20 +185,28 @@ describe('GCP: Complete MPU', function testSuite() { describe('when MPU has 1024 uploaded parts', () => { beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; - gcpMpuSetupWrapper.call(this, { - gcpClient, - bucketNames, - key: this.currentTest.key, - partCount: numParts, partSize, - }, done); + gcpMpuSetupWrapper.call( + this, + { + gcpClient, + bucketNames, + key: this.currentTest.key, + partCount: numParts, + partSize, + }, + done + ); }); - it('should successfully complete MPU', - function testFn(done) { - const parts = GcpUtils.createMpuList({ - Key: this.test.key, - UploadId: this.test.uploadId, - }, 'parts', numParts).map(item => { + it('should successfully complete MPU', function testFn(done) { + const parts = GcpUtils.createMpuList( + { + Key: this.test.key, + UploadId: this.test.uploadId, + }, + 'parts', + numParts + ).map(item => { Object.assign(item, { ETag: this.test.etagList[item.PartNumber - 1], }); @@ -186,8 +220,7 @@ describe('GCP: Complete MPU', function testSuite() { MultipartUpload: { Parts: parts }, }; gcpClient.completeMultipartUpload(params, (err, res) => { - assert.equal(err, null, - `Expected success, but got error ${err}`); + assert.equal(err, null, `Expected success, but got error ${err}`); assert.strictEqual(res.ETag, `"${bigMD5}"`); return done(); }); diff --git a/tests/functional/raw-node/test/GCP/object/copy.js b/tests/functional/raw-node/test/GCP/object/copy.js index e8476fffe5..42e36e85db 100644 --- a/tests/functional/raw-node/test/GCP/object/copy.js +++ b/tests/functional/raw-node/test/GCP/object/copy.js @@ -4,8 +4,7 @@ const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); const { gcpRequestRetry, genUniqID } = require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - 
require('../../../../aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const bucketName = `somebucket-${genUniqID()}`; @@ -16,45 +15,56 @@ describe('GCP: COPY Object', function testSuite() { const gcpClient = new GCP(config); before(done => { - gcpRequestRetry({ - method: 'PUT', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); + gcpRequestRetry( + { + method: 'PUT', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); after(done => { - gcpRequestRetry({ - method: 'DELETE', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); describe('without existing object in bucket', () => { - it('should return 404 and \'NoSuchKey\'', done => { + it("should return 404 and 'NoSuchKey'", done => { const missingObject = `nonexistingkey-${genUniqID()}`; const someKey = `somekey-${genUniqID()}`; - gcpClient.copyObject({ - Bucket: bucketName, - Key: someKey, - CopySource: `/${bucketName}/${missingObject}`, - }, err => { - assert(err); - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NoSuchKey'); - return done(); - }); + gcpClient.copyObject( + { + Bucket: bucketName, + Key: someKey, + CopySource: `/${bucketName}/${missingObject}`, + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NoSuchKey'); + return done(); + } + ); }); }); @@ -63,119 +73,144 @@ describe('GCP: COPY Object', function testSuite() { this.currentTest.key = `somekey-${genUniqID()}`; this.currentTest.copyKey = `copykey-${genUniqID()}`; this.currentTest.initValue = `${genUniqID()}`; - makeGcpRequest({ - method: 'PUT', - bucket: bucketName, - objectKey: this.currentTest.copyKey, - headers: { - 'x-goog-meta-value': this.currentTest.initValue, - }, - authCredentials: config.credentials, - }, (err, res) => { - if (err) { - process.stdout.write(`err in creating object ${err}\n`); - } - this.currentTest.contentHash = res.headers['x-goog-hash']; - return done(err); - }); - }); - - afterEach(function afterFn(done) { - async.parallel([ - next => makeGcpRequest({ - method: 'DELETE', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, err => { - if (err) { - process.stdout.write(`err in deleting object ${err}\n`); - } - return next(err); - }), - next => makeGcpRequest({ - method: 'DELETE', + makeGcpRequest( + { + method: 'PUT', bucket: bucketName, objectKey: this.currentTest.copyKey, + headers: { + 'x-goog-meta-value': this.currentTest.initValue, + }, authCredentials: config.credentials, - }, err => { + }, + (err, res) => { if (err) { - process.stdout - .write(`err in deleting copy object ${err}\n`); + process.stdout.write(`err in creating object ${err}\n`); } - return next(err); - }), - ], done); + this.currentTest.contentHash = res.headers['x-goog-hash']; + return done(err); + } + ); + }); + + 
afterEach(function afterFn(done) { + async.parallel( + [ + next => + makeGcpRequest( + { + method: 'DELETE', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + err => { + if (err) { + process.stdout.write(`err in deleting object ${err}\n`); + } + return next(err); + } + ), + next => + makeGcpRequest( + { + method: 'DELETE', + bucket: bucketName, + objectKey: this.currentTest.copyKey, + authCredentials: config.credentials, + }, + err => { + if (err) { + process.stdout.write(`err in deleting copy object ${err}\n`); + } + return next(err); + } + ), + ], + done + ); }); - it('should successfully copy with REPLACE directive', - function testFn(done) { + it('should successfully copy with REPLACE directive', function testFn(done) { const newValue = `${genUniqID()}`; - async.waterfall([ - next => gcpClient.copyObject({ - Bucket: bucketName, - Key: this.test.key, - CopySource: `/${bucketName}/${this.test.copyKey}`, - MetadataDirective: 'REPLACE', - Metadata: { - value: newValue, - }, - }, err => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - return next(); - }), - next => makeGcpRequest({ - method: 'HEAD', - bucket: bucketName, - objectKey: this.test.key, - authCredentials: config.credentials, - }, (err, res) => { - if (err) { - process.stdout - .write(`err in retrieving object ${err}\n`); - return next(err); - } - assert.strictEqual(this.test.contentHash, - res.headers['x-goog-hash']); - assert.notStrictEqual(res.headers['x-goog-meta-value'], - this.test.initValue); - return next(); - }), - ], done); + async.waterfall( + [ + next => + gcpClient.copyObject( + { + Bucket: bucketName, + Key: this.test.key, + CopySource: `/${bucketName}/${this.test.copyKey}`, + MetadataDirective: 'REPLACE', + Metadata: { + value: newValue, + }, + }, + err => { + assert.equal(err, null, `Expected success, but got error ${err}`); + return next(); + } + ), + next => + makeGcpRequest( + { + method: 'HEAD', + bucket: bucketName, + objectKey: this.test.key, + authCredentials: config.credentials, + }, + (err, res) => { + if (err) { + process.stdout.write(`err in retrieving object ${err}\n`); + return next(err); + } + assert.strictEqual(this.test.contentHash, res.headers['x-goog-hash']); + assert.notStrictEqual(res.headers['x-goog-meta-value'], this.test.initValue); + return next(); + } + ), + ], + done + ); }); - it('should successfully copy with COPY directive', - function testFn(done) { - async.waterfall([ - next => gcpClient.copyObject({ - Bucket: bucketName, - Key: this.test.key, - CopySource: `/${bucketName}/${this.test.copyKey}`, - MetadataDirective: 'COPY', - }, err => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - return next(); - }), - next => makeGcpRequest({ - method: 'HEAD', - bucket: bucketName, - objectKey: this.test.key, - authCredentials: config.credentials, - }, (err, res) => { - if (err) { - process.stdout - .write(`err in retrieving object ${err}\n`); - return next(err); - } - assert.strictEqual(this.test.contentHash, - res.headers['x-goog-hash']); - assert.strictEqual(res.headers['x-goog-meta-value'], - this.test.initValue); - return next(); - }), - ], done); + it('should successfully copy with COPY directive', function testFn(done) { + async.waterfall( + [ + next => + gcpClient.copyObject( + { + Bucket: bucketName, + Key: this.test.key, + CopySource: `/${bucketName}/${this.test.copyKey}`, + MetadataDirective: 'COPY', + }, + err => { + assert.equal(err, null, `Expected success, but got error 
${err}`); + return next(); + } + ), + next => + makeGcpRequest( + { + method: 'HEAD', + bucket: bucketName, + objectKey: this.test.key, + authCredentials: config.credentials, + }, + (err, res) => { + if (err) { + process.stdout.write(`err in retrieving object ${err}\n`); + return next(err); + } + assert.strictEqual(this.test.contentHash, res.headers['x-goog-hash']); + assert.strictEqual(res.headers['x-goog-meta-value'], this.test.initValue); + return next(); + } + ), + ], + done + ); }); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/delete.js b/tests/functional/raw-node/test/GCP/object/delete.js index e19cd28a37..6185e5e38c 100644 --- a/tests/functional/raw-node/test/GCP/object/delete.js +++ b/tests/functional/raw-node/test/GCP/object/delete.js @@ -4,8 +4,7 @@ const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); const { gcpRequestRetry, genUniqID } = require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const bucketName = `somebucket-${genUniqID()}`; @@ -18,82 +17,106 @@ describe('GCP: DELETE Object', function testSuite() { const gcpClient = new GCP(config); before(done => { - gcpRequestRetry({ - method: 'PUT', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); + gcpRequestRetry( + { + method: 'PUT', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); after(done => { - gcpRequestRetry({ - method: 'DELETE', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}\n`); - } - return done(err); - }); - }); - - describe('with existing object in bucket', () => { - beforeEach(done => { - makeGcpRequest({ - method: 'PUT', + gcpRequestRetry( + { + method: 'DELETE', bucket: bucketName, - objectKey, authCredentials: config.credentials, - }, err => { + }, + 0, + err => { if (err) { - process.stdout.write(`err in creating object ${err}\n`); + process.stdout.write(`err in deleting bucket ${err}\n`); } return done(err); - }); - }); + } + ); + }); - it('should successfully delete object', done => { - async.waterfall([ - next => gcpClient.deleteObject({ - Bucket: bucketName, - Key: objectKey, - }, err => { - assert.equal(err, null, - `Expected success, got error ${err}`); - return next(); - }), - next => makeGcpRequest({ - method: 'GET', + describe('with existing object in bucket', () => { + beforeEach(done => { + makeGcpRequest( + { + method: 'PUT', bucket: bucketName, objectKey, authCredentials: config.credentials, - }, err => { - assert(err); - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NoSuchKey'); - return next(); - }), - ], err => done(err)); + }, + err => { + if (err) { + process.stdout.write(`err in creating object ${err}\n`); + } + return done(err); + } + ); + }); + + it('should successfully delete object', done => { + async.waterfall( + [ + next => + gcpClient.deleteObject( + { + Bucket: bucketName, + Key: objectKey, + }, + err => { + assert.equal(err, null, `Expected success, got error ${err}`); + return next(); + } 
+ ), + next => + makeGcpRequest( + { + method: 'GET', + bucket: bucketName, + objectKey, + authCredentials: config.credentials, + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NoSuchKey'); + return next(); + } + ), + ], + err => done(err) + ); }); }); describe('without existing object in bucket', () => { it('should return 404 and NoSuchKey', done => { - gcpClient.deleteObject({ - Bucket: bucketName, - Key: badObjectKey, - }, err => { - assert(err); - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NoSuchKey'); - return done(); - }); + gcpClient.deleteObject( + { + Bucket: bucketName, + Key: badObjectKey, + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NoSuchKey'); + return done(); + } + ); }); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/deleteMpu.js b/tests/functional/raw-node/test/GCP/object/deleteMpu.js index 72b3c6f3c7..f0b0c8f09d 100644 --- a/tests/functional/raw-node/test/GCP/object/deleteMpu.js +++ b/tests/functional/raw-node/test/GCP/object/deleteMpu.js @@ -2,10 +2,8 @@ const assert = require('assert'); const async = require('async'); const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; -const { gcpRequestRetry, setBucketClass, gcpMpuSetup, genUniqID } = - require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { gcpRequestRetry, setBucketClass, gcpMpuSetup, genUniqID } = require('../../../utils/gcpUtils'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const bucketNames = { @@ -23,8 +21,7 @@ const partSize = 10; function gcpMpuSetupWrapper(params, callback) { gcpMpuSetup(params, (err, result) => { - assert.equal(err, null, - `Unable to setup MPU test, error ${err}`); + assert.equal(err, null, `Unable to setup MPU test, error ${err}`); const { uploadId, etagList } = result; this.currentTest.uploadId = uploadId; this.currentTest.etagList = etagList; @@ -40,135 +37,170 @@ describe('GCP: Abort MPU', function testSuite() { before(done => { config = getRealAwsConfig(credentialOne); gcpClient = new GCP(config); - async.eachSeries(bucketNames, - (bucket, next) => gcpRequestRetry({ - method: 'PUT', - bucket: bucket.Name, - authCredentials: config.credentials, - requestBody: setBucketClass(bucket.Type), - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); - } - return next(err); - }), - done); - }); - - after(done => { - async.eachSeries(bucketNames, - (bucket, next) => gcpClient.listObjects({ - Bucket: bucket.Name, - }, (err, res) => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - async.map(res.Contents, (object, moveOn) => { - const deleteParams = { - Bucket: bucket.Name, - Key: object.Key, - }; - gcpClient.deleteObject( - deleteParams, err => moveOn(err)); - }, err => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - gcpRequestRetry({ - method: 'DELETE', + async.eachSeries( + bucketNames, + (bucket, next) => + gcpRequestRetry( + { + method: 'PUT', bucket: bucket.Name, authCredentials: config.credentials, - }, 0, err => { + requestBody: setBucketClass(bucket.Type), + }, + 0, + err => { if (err) { - process.stdout.write( - `err in deleting bucket ${err}\n`); + process.stdout.write(`err in creating bucket ${err}\n`); } return next(err); - }); - }); - }), - done); 
+ } + ), + done + ); + }); + + after(done => { + async.eachSeries( + bucketNames, + (bucket, next) => + gcpClient.listObjects( + { + Bucket: bucket.Name, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got error ${err}`); + async.map( + res.Contents, + (object, moveOn) => { + const deleteParams = { + Bucket: bucket.Name, + Key: object.Key, + }; + gcpClient.deleteObject(deleteParams, err => moveOn(err)); + }, + err => { + assert.equal(err, null, `Expected success, but got error ${err}`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucket.Name, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}\n`); + } + return next(err); + } + ); + } + ); + } + ), + done + ); }); describe('when MPU has 0 parts', () => { beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; - gcpMpuSetupWrapper.call(this, { - gcpClient, - bucketNames, - key: this.currentTest.key, - partCount: 0, partSize, - }, done); + gcpMpuSetupWrapper.call( + this, + { + gcpClient, + bucketNames, + key: this.currentTest.key, + partCount: 0, + partSize, + }, + done + ); }); it('should abort MPU with 0 parts', function testFn(done) { - return async.waterfall([ - next => { - const params = { - Bucket: bucketNames.main.Name, - MPU: bucketNames.mpu.Name, - Key: this.test.key, - UploadId: this.test.uploadId, - }; - gcpClient.abortMultipartUpload(params, err => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - return next(); - }); - }, - next => { - const keyName = - `${this.test.key}-${this.test.uploadId}/init`; - gcpClient.headObject({ - Bucket: bucketNames.mpu.Name, - Key: keyName, - }, err => { - assert(err); - assert.strictEqual(err.code, 404); - return next(); - }); - }, - ], done); + return async.waterfall( + [ + next => { + const params = { + Bucket: bucketNames.main.Name, + MPU: bucketNames.mpu.Name, + Key: this.test.key, + UploadId: this.test.uploadId, + }; + gcpClient.abortMultipartUpload(params, err => { + assert.equal(err, null, `Expected success, but got error ${err}`); + return next(); + }); + }, + next => { + const keyName = `${this.test.key}-${this.test.uploadId}/init`; + gcpClient.headObject( + { + Bucket: bucketNames.mpu.Name, + Key: keyName, + }, + err => { + assert(err); + assert.strictEqual(err.code, 404); + return next(); + } + ); + }, + ], + done + ); }); }); describe('when MPU is incomplete', () => { beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; - gcpMpuSetupWrapper.call(this, { - gcpClient, - bucketNames, - key: this.currentTest.key, - partCount: numParts, partSize, - }, done); + gcpMpuSetupWrapper.call( + this, + { + gcpClient, + bucketNames, + key: this.currentTest.key, + partCount: numParts, + partSize, + }, + done + ); }); it('should abort incomplete MPU', function testFn(done) { - return async.waterfall([ - next => { - const params = { - Bucket: bucketNames.main.Name, - MPU: bucketNames.mpu.Name, - Key: this.test.key, - UploadId: this.test.uploadId, - }; - gcpClient.abortMultipartUpload(params, err => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - return next(); - }); - }, - next => { - const keyName = - `${this.test.key}-${this.test.uploadId}/init`; - gcpClient.headObject({ - Bucket: bucketNames.mpu.Name, - Key: keyName, - }, err => { - assert(err); - assert.strictEqual(err.code, 404); - return next(); - }); - }, - ], err => done(err)); + return async.waterfall( + [ + next => { + 
const params = { + Bucket: bucketNames.main.Name, + MPU: bucketNames.mpu.Name, + Key: this.test.key, + UploadId: this.test.uploadId, + }; + gcpClient.abortMultipartUpload(params, err => { + assert.equal(err, null, `Expected success, but got error ${err}`); + return next(); + }); + }, + next => { + const keyName = `${this.test.key}-${this.test.uploadId}/init`; + gcpClient.headObject( + { + Bucket: bucketNames.mpu.Name, + Key: keyName, + }, + err => { + assert(err); + assert.strictEqual(err.code, 404); + return next(); + } + ); + }, + ], + err => done(err) + ); }); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/deleteTagging.js b/tests/functional/raw-node/test/GCP/object/deleteTagging.js index 331d3bdf68..64679f6744 100644 --- a/tests/functional/raw-node/test/GCP/object/deleteTagging.js +++ b/tests/functional/raw-node/test/GCP/object/deleteTagging.js @@ -3,10 +3,8 @@ const async = require('async'); const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); -const { gcpRequestRetry, genDelTagObj, genUniqID } = - require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { gcpRequestRetry, genDelTagObj, genUniqID } = require('../../../utils/gcpUtils'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const { gcpTaggingPrefix } = require('../../../../../../constants'); const credentialOne = 'gcpbackend'; @@ -16,40 +14,41 @@ let config; let gcpClient; function assertObjectMetaTag(params, callback) { - return makeGcpRequest({ - method: 'HEAD', - bucket: params.bucket, - objectKey: params.key, - authCredentials: config.credentials, - headers: { - 'x-goog-generation': params.versionId, + return makeGcpRequest( + { + method: 'HEAD', + bucket: params.bucket, + objectKey: params.key, + authCredentials: config.credentials, + headers: { + 'x-goog-generation': params.versionId, + }, }, - }, (err, res) => { - if (err) { - process.stdout.write(`err in retrieving object ${err}`); - return callback(err); - } - const resObj = res.headers; - const tagRes = {}; - Object.keys(resObj).forEach( - header => { - if (header.startsWith(gcpTagPrefix)) { - tagRes[header] = resObj[header]; - delete resObj[header]; - } - }); - const metaRes = {}; - Object.keys(resObj).forEach( - header => { - if (header.startsWith('x-goog-meta-')) { - metaRes[header] = resObj[header]; - delete resObj[header]; + (err, res) => { + if (err) { + process.stdout.write(`err in retrieving object ${err}`); + return callback(err); } - }); - assert.deepStrictEqual(params.tag, tagRes); - assert.deepStrictEqual(params.meta, metaRes); - return callback(); - }); + const resObj = res.headers; + const tagRes = {}; + Object.keys(resObj).forEach(header => { + if (header.startsWith(gcpTagPrefix)) { + tagRes[header] = resObj[header]; + delete resObj[header]; + } + }); + const metaRes = {}; + Object.keys(resObj).forEach(header => { + if (header.startsWith('x-goog-meta-')) { + metaRes[header] = resObj[header]; + delete resObj[header]; + } + }); + assert.deepStrictEqual(params.tag, tagRes); + assert.deepStrictEqual(params.meta, metaRes); + return callback(); + } + ); } describe('GCP: DELETE Object Tagging', function testSuite() { @@ -58,93 +57,120 @@ describe('GCP: DELETE Object Tagging', function testSuite() { before(done => { config = getRealAwsConfig(credentialOne); gcpClient = new GCP(config); - gcpRequestRetry({ - method: 'PUT', - bucket: 
bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}`); + gcpRequestRetry( + { + method: 'PUT', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}`); + } + return done(err); } - return done(err); - }); + ); }); beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; this.currentTest.specialKey = `veryspecial-${genUniqID()}`; - const { headers, expectedTagObj, expectedMetaObj } = - genDelTagObj(10, gcpTagPrefix); + const { headers, expectedTagObj, expectedMetaObj } = genDelTagObj(10, gcpTagPrefix); this.currentTest.expectedTagObj = expectedTagObj; this.currentTest.expectedMetaObj = expectedMetaObj; - makeGcpRequest({ - method: 'PUT', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - headers, - }, (err, res) => { - if (err) { - process.stdout.write(`err in creating object ${err}`); - return done(err); + makeGcpRequest( + { + method: 'PUT', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + headers, + }, + (err, res) => { + if (err) { + process.stdout.write(`err in creating object ${err}`); + return done(err); + } + this.currentTest.versionId = res.headers['x-goog-generation']; + return done(); } - this.currentTest.versionId = res.headers['x-goog-generation']; - return done(); - }); + ); }); afterEach(function afterFn(done) { - makeGcpRequest({ - method: 'DELETE', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, err => { - if (err) { - process.stdout.write(`err in deleting object ${err}`); + makeGcpRequest( + { + method: 'DELETE', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + err => { + if (err) { + process.stdout.write(`err in deleting object ${err}`); + } + return done(err); } - return done(err); - }); + ); }); after(done => { - gcpRequestRetry({ - method: 'DELETE', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}`); + } + return done(err); } - return done(err); - }); + ); }); it('should successfully delete object tags', function testFn(done) { - async.waterfall([ - next => assertObjectMetaTag({ - bucket: bucketName, - key: this.test.key, - versionId: this.test.versionId, - meta: this.test.expectedMetaObj, - tag: this.test.expectedTagObj, - }, next), - next => gcpClient.deleteObjectTagging({ - Bucket: bucketName, - Key: this.test.key, - VersionId: this.test.versionId, - }, err => { - assert.equal(err, null, - `Expected success, got error ${err}`); - return next(); - }), - next => assertObjectMetaTag({ - bucket: bucketName, - key: this.test.key, - versionId: this.test.versionId, - meta: this.test.expectedMetaObj, - tag: {}, - }, next), - ], done); + async.waterfall( + [ + next => + assertObjectMetaTag( + { + bucket: bucketName, + key: this.test.key, + versionId: this.test.versionId, + meta: this.test.expectedMetaObj, + tag: this.test.expectedTagObj, + }, + next + ), + next => + gcpClient.deleteObjectTagging( + { + Bucket: bucketName, + Key: this.test.key, + VersionId: this.test.versionId, + }, + err 
=> { + assert.equal(err, null, `Expected success, got error ${err}`); + return next(); + } + ), + next => + assertObjectMetaTag( + { + bucket: bucketName, + key: this.test.key, + versionId: this.test.versionId, + meta: this.test.expectedMetaObj, + tag: {}, + }, + next + ), + ], + done + ); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/get.js b/tests/functional/raw-node/test/GCP/object/get.js index a16d28e106..d15f303328 100644 --- a/tests/functional/raw-node/test/GCP/object/get.js +++ b/tests/functional/raw-node/test/GCP/object/get.js @@ -3,8 +3,7 @@ const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); const { gcpRequestRetry, genUniqID } = require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const bucketName = `somebucket-${genUniqID()}`; @@ -15,91 +14,109 @@ describe('GCP: GET Object', function testSuite() { const gcpClient = new GCP(config); before(done => { - gcpRequestRetry({ - method: 'PUT', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); + gcpRequestRetry( + { + method: 'PUT', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); after(done => { - gcpRequestRetry({ - method: 'DELETE', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}\n`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); describe('with existing object in bucket', () => { beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; - makeGcpRequest({ - method: 'PUT', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, (err, res) => { - if (err) { - process.stdout.write(`err in creating object ${err}\n`); - return done(err); + makeGcpRequest( + { + method: 'PUT', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + (err, res) => { + if (err) { + process.stdout.write(`err in creating object ${err}\n`); + return done(err); + } + this.currentTest.uploadId = res.headers['x-goog-generation']; + this.currentTest.ETag = res.headers.etag; + return done(); } - this.currentTest.uploadId = - res.headers['x-goog-generation']; - this.currentTest.ETag = res.headers.etag; - return done(); - }); + ); }); afterEach(function afterFn(done) { - makeGcpRequest({ - method: 'DELETE', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, err => { - if (err) { - process.stdout.write(`err in deleting object ${err}\n`); + makeGcpRequest( + { + method: 'DELETE', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + err => { + if (err) { + process.stdout.write(`err in deleting object ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); it('should 
successfully retrieve object', function testFn(done) { - gcpClient.getObject({ - Bucket: bucketName, - Key: this.test.key, - }, (err, res) => { - assert.equal(err, null, - `Expected success, got error ${err}`); - assert.strictEqual(res.ETag, this.test.ETag); - assert.strictEqual(res.VersionId, this.test.uploadId); - return done(); - }); + gcpClient.getObject( + { + Bucket: bucketName, + Key: this.test.key, + }, + (err, res) => { + assert.equal(err, null, `Expected success, got error ${err}`); + assert.strictEqual(res.ETag, this.test.ETag); + assert.strictEqual(res.VersionId, this.test.uploadId); + return done(); + } + ); }); }); describe('without existing object in bucket', () => { it('should return 404 and NoSuchKey', done => { const badObjectKey = `nonexistingkey-${genUniqID()}`; - gcpClient.getObject({ - Bucket: bucketName, - Key: badObjectKey, - }, err => { - assert(err); - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NoSuchKey'); - return done(); - }); + gcpClient.getObject( + { + Bucket: bucketName, + Key: badObjectKey, + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NoSuchKey'); + return done(); + } + ); }); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/getTagging.js b/tests/functional/raw-node/test/GCP/object/getTagging.js index 3328290c27..8fda3fad01 100644 --- a/tests/functional/raw-node/test/GCP/object/getTagging.js +++ b/tests/functional/raw-node/test/GCP/object/getTagging.js @@ -2,10 +2,8 @@ const assert = require('assert'); const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); -const { gcpRequestRetry, genGetTagObj, genUniqID } = - require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { gcpRequestRetry, genGetTagObj, genUniqID } = require('../../../utils/gcpUtils'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const { gcpTaggingPrefix } = require('../../../../../../constants'); const credentialOne = 'gcpbackend'; @@ -20,77 +18,92 @@ describe('GCP: GET Object Tagging', () => { before(done => { config = getRealAwsConfig(credentialOne); gcpClient = new GCP(config); - gcpRequestRetry({ - method: 'PUT', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}`); + gcpRequestRetry( + { + method: 'PUT', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}`); + } + return done(err); } - return done(err); - }); + ); }); beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; this.currentTest.specialKey = `veryspecial-${genUniqID()}`; - const { tagHeader, expectedTagObj } = - genGetTagObj(tagSize, gcpTagPrefix); + const { tagHeader, expectedTagObj } = genGetTagObj(tagSize, gcpTagPrefix); this.currentTest.tagObj = expectedTagObj; - makeGcpRequest({ - method: 'PUT', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - headers: tagHeader, - }, (err, res) => { - if (err) { - process.stdout.write(`err in creating object ${err}`); - return done(err); + makeGcpRequest( + { + method: 'PUT', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + headers: tagHeader, + }, + (err, 
res) => { + if (err) { + process.stdout.write(`err in creating object ${err}`); + return done(err); + } + this.currentTest.versionId = res.headers['x-goog-generation']; + return done(); } - this.currentTest.versionId = res.headers['x-goog-generation']; - return done(); - }); + ); }); afterEach(function afterFn(done) { - makeGcpRequest({ - method: 'DELETE', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, err => { - if (err) { - process.stdout.write(`err in deleting object ${err}`); + makeGcpRequest( + { + method: 'DELETE', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + err => { + if (err) { + process.stdout.write(`err in deleting object ${err}`); + } + return done(err); } - return done(err); - }); + ); }); after(done => { - gcpRequestRetry({ - method: 'DELETE', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}`); + } + return done(err); } - return done(err); - }); + ); }); it('should successfully get object tags', function testFn(done) { - gcpClient.getObjectTagging({ - Bucket: bucketName, - Key: this.test.key, - VersionId: this.test.versionId, - }, (err, res) => { - assert.equal(err, null, - `Expected success, got error ${err}`); - assert.deepStrictEqual(res.TagSet, this.test.tagObj); - return done(); - }); + gcpClient.getObjectTagging( + { + Bucket: bucketName, + Key: this.test.key, + VersionId: this.test.versionId, + }, + (err, res) => { + assert.equal(err, null, `Expected success, got error ${err}`); + assert.deepStrictEqual(res.TagSet, this.test.tagObj); + return done(); + } + ); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/head.js b/tests/functional/raw-node/test/GCP/object/head.js index 0e1c66a8a1..01e0b85e11 100644 --- a/tests/functional/raw-node/test/GCP/object/head.js +++ b/tests/functional/raw-node/test/GCP/object/head.js @@ -3,8 +3,7 @@ const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); const { gcpRequestRetry, genUniqID } = require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const bucketName = `somebucket-${genUniqID()}`; @@ -15,90 +14,108 @@ describe('GCP: HEAD Object', function testSuite() { const gcpClient = new GCP(config); before(done => { - gcpRequestRetry({ - method: 'PUT', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); + gcpRequestRetry( + { + method: 'PUT', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); after(done => { - gcpRequestRetry({ - method: 'DELETE', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}\n`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucketName, + authCredentials: 
config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); describe('with existing object in bucket', () => { beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; - makeGcpRequest({ - method: 'PUT', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, (err, res) => { - if (err) { - process.stdout.write(`err in creating object ${err}\n`); - return done(err); + makeGcpRequest( + { + method: 'PUT', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + (err, res) => { + if (err) { + process.stdout.write(`err in creating object ${err}\n`); + return done(err); + } + this.currentTest.uploadId = res.headers['x-goog-generation']; + this.currentTest.ETag = res.headers.etag; + return done(); } - this.currentTest.uploadId = - res.headers['x-goog-generation']; - this.currentTest.ETag = res.headers.etag; - return done(); - }); + ); }); afterEach(function afterFn(done) { - makeGcpRequest({ - method: 'DELETE', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, err => { - if (err) { - process.stdout.write(`err in deleting object ${err}\n`); + makeGcpRequest( + { + method: 'DELETE', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + err => { + if (err) { + process.stdout.write(`err in deleting object ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); it('should successfully retrieve object', function testFn(done) { - gcpClient.headObject({ - Bucket: bucketName, - Key: this.test.key, - }, (err, res) => { - assert.equal(err, null, - `Expected success, got error ${err}`); - assert.strictEqual(res.ETag, this.test.ETag); - assert.strictEqual(res.VersionId, this.test.uploadId); - return done(); - }); + gcpClient.headObject( + { + Bucket: bucketName, + Key: this.test.key, + }, + (err, res) => { + assert.equal(err, null, `Expected success, got error ${err}`); + assert.strictEqual(res.ETag, this.test.ETag); + assert.strictEqual(res.VersionId, this.test.uploadId); + return done(); + } + ); }); }); describe('without existing object in bucket', () => { it('should return 404', done => { const badObjectkey = `nonexistingkey-${genUniqID()}`; - gcpClient.headObject({ - Bucket: bucketName, - Key: badObjectkey, - }, err => { - assert(err); - assert.strictEqual(err.statusCode, 404); - return done(); - }); + gcpClient.headObject( + { + Bucket: bucketName, + Key: badObjectkey, + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 404); + return done(); + } + ); }); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/initiateMpu.js b/tests/functional/raw-node/test/GCP/object/initiateMpu.js index 9ae4671d11..354c92b9a5 100644 --- a/tests/functional/raw-node/test/GCP/object/initiateMpu.js +++ b/tests/functional/raw-node/test/GCP/object/initiateMpu.js @@ -3,10 +3,8 @@ const async = require('async'); const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); -const { gcpRequestRetry, setBucketClass, genUniqID } = - require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { gcpRequestRetry, setBucketClass, genUniqID } = require('../../../utils/gcpUtils'); +const { getRealAwsConfig } 
= require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const bucketNames = { @@ -28,79 +26,103 @@ describe('GCP: Initiate MPU', function testSuite() { before(done => { config = getRealAwsConfig(credentialOne); gcpClient = new GCP(config); - async.eachSeries(bucketNames, - (bucket, next) => gcpRequestRetry({ - method: 'PUT', - bucket: bucket.Name, - authCredentials: config.credentials, - requestBody: setBucketClass(bucket.Type), - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); - } - return next(err); - }), - done); + async.eachSeries( + bucketNames, + (bucket, next) => + gcpRequestRetry( + { + method: 'PUT', + bucket: bucket.Name, + authCredentials: config.credentials, + requestBody: setBucketClass(bucket.Type), + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}\n`); + } + return next(err); + } + ), + done + ); }); after(done => { - async.eachSeries(bucketNames, - (bucket, next) => gcpRequestRetry({ - method: 'DELETE', - bucket: bucket.Name, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}\n`); - } - return next(err); - }), - done); + async.eachSeries( + bucketNames, + (bucket, next) => + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucket.Name, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}\n`); + } + return next(err); + } + ), + done + ); }); it('Should create a multipart upload object', done => { const keyName = `somekey-${genUniqID()}`; const specialKey = `special-${genUniqID()}`; - async.waterfall([ - next => gcpClient.createMultipartUpload({ - Bucket: bucketNames.mpu.Name, - Key: keyName, - Metadata: { - special: specialKey, + async.waterfall( + [ + next => + gcpClient.createMultipartUpload( + { + Bucket: bucketNames.mpu.Name, + Key: keyName, + Metadata: { + special: specialKey, + }, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got err ${err}`); + return next(null, res.UploadId); + } + ), + (uploadId, next) => { + const mpuInitKey = `${keyName}-${uploadId}/init`; + makeGcpRequest( + { + method: 'GET', + bucket: bucketNames.mpu.Name, + objectKey: mpuInitKey, + authCredentials: config.credentials, + }, + (err, res) => { + if (err) { + process.stdout.write(`err in retrieving object ${err}`); + return next(err); + } + assert.strictEqual(res.headers['x-goog-meta-special'], specialKey); + return next(null, uploadId); + } + ); }, - }, (err, res) => { - assert.equal(err, null, - `Expected success, but got err ${err}`); - return next(null, res.UploadId); - }), - (uploadId, next) => { - const mpuInitKey = `${keyName}-${uploadId}/init`; - makeGcpRequest({ - method: 'GET', - bucket: bucketNames.mpu.Name, - objectKey: mpuInitKey, - authCredentials: config.credentials, - }, (err, res) => { - if (err) { - process.stdout - .write(`err in retrieving object ${err}`); - return next(err); - } - assert.strictEqual(res.headers['x-goog-meta-special'], - specialKey); - return next(null, uploadId); - }); - }, - (uploadId, next) => gcpClient.abortMultipartUpload({ - Bucket: bucketNames.main.Name, - MPU: bucketNames.mpu.Name, - UploadId: uploadId, - Key: keyName, - }, err => { - assert.equal(err, null, - `Expected success, but got err ${err}`); - return next(); - }), - ], done); + (uploadId, next) => + gcpClient.abortMultipartUpload( + { + Bucket: bucketNames.main.Name, + MPU: bucketNames.mpu.Name, + UploadId: 
uploadId, + Key: keyName, + }, + err => { + assert.equal(err, null, `Expected success, but got err ${err}`); + return next(); + } + ), + ], + done + ); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/put.js b/tests/functional/raw-node/test/GCP/object/put.js index b7868fe142..0ca8eb86c4 100644 --- a/tests/functional/raw-node/test/GCP/object/put.js +++ b/tests/functional/raw-node/test/GCP/object/put.js @@ -3,8 +3,7 @@ const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); const { gcpRequestRetry, genUniqID } = require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const bucketName = `somebucket-${genUniqID()}`; @@ -15,99 +14,120 @@ describe('GCP: PUT Object', function testSuite() { const gcpClient = new GCP(config); before(done => { - gcpRequestRetry({ - method: 'PUT', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); + gcpRequestRetry( + { + method: 'PUT', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); after(done => { - gcpRequestRetry({ - method: 'DELETE', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}\n`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); afterEach(function afterFn(done) { - makeGcpRequest({ - method: 'DELETE', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, err => { - if (err) { - process.stdout.write(`err in deleting object ${err}\n`); + makeGcpRequest( + { + method: 'DELETE', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + err => { + if (err) { + process.stdout.write(`err in deleting object ${err}\n`); + } + return done(err); } - return done(err); - }); + ); }); describe('with existing object in bucket', () => { beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; - gcpRequestRetry({ - method: 'PUT', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, 0, (err, res) => { - if (err) { - process.stdout.write(`err in putting object ${err}\n`); - return done(err); + gcpRequestRetry( + { + method: 'PUT', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + 0, + (err, res) => { + if (err) { + process.stdout.write(`err in putting object ${err}\n`); + return done(err); + } + this.currentTest.uploadId = res.headers['x-goog-generation']; + return done(); } - this.currentTest.uploadId = - res.headers['x-goog-generation']; - return done(); - }); + ); }); it('should overwrite object', function testFn(done) { - gcpClient.putObject({ - Bucket: bucketName, - Key: this.test.key, - }, (err, res) => { - assert.notStrictEqual(res.VersionId, this.test.uploadId); - return done(); - }); + 
gcpClient.putObject( + { + Bucket: bucketName, + Key: this.test.key, + }, + (err, res) => { + assert.notStrictEqual(res.VersionId, this.test.uploadId); + return done(); + } + ); }); }); describe('without existing object in bucket', () => { it('should successfully put object', function testFn(done) { this.test.key = `somekey-${genUniqID()}`; - gcpClient.putObject({ - Bucket: bucketName, - Key: this.test.key, - }, (err, putRes) => { - assert.equal(err, null, - `Expected success, got error ${err}`); - makeGcpRequest({ - method: 'GET', - bucket: bucketName, - objectKey: this.test.key, - authCredentials: config.credentials, - }, (err, getRes) => { - if (err) { - process.stdout.write(`err in getting bucket ${err}\n`); - return done(err); - } - assert.strictEqual(getRes.headers['x-goog-generation'], - putRes.VersionId); - return done(); - }); - }); + gcpClient.putObject( + { + Bucket: bucketName, + Key: this.test.key, + }, + (err, putRes) => { + assert.equal(err, null, `Expected success, got error ${err}`); + makeGcpRequest( + { + method: 'GET', + bucket: bucketName, + objectKey: this.test.key, + authCredentials: config.credentials, + }, + (err, getRes) => { + if (err) { + process.stdout.write(`err in getting bucket ${err}\n`); + return done(err); + } + assert.strictEqual(getRes.headers['x-goog-generation'], putRes.VersionId); + return done(); + } + ); + } + ); }); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/putTagging.js b/tests/functional/raw-node/test/GCP/object/putTagging.js index b1b4ccc5ac..e8ffac9667 100644 --- a/tests/functional/raw-node/test/GCP/object/putTagging.js +++ b/tests/functional/raw-node/test/GCP/object/putTagging.js @@ -3,10 +3,8 @@ const async = require('async'); const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; const { makeGcpRequest } = require('../../../utils/makeRequest'); -const { gcpRequestRetry, genPutTagObj, genUniqID } = - require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { gcpRequestRetry, genPutTagObj, genUniqID } = require('../../../utils/gcpUtils'); +const { getRealAwsConfig } = require('../../../../aws-node-sdk/test/support/awsConfig'); const { gcpTaggingPrefix } = require('../../../../../../constants'); const credentialOne = 'gcpbackend'; @@ -20,174 +18,201 @@ describe('GCP: PUT Object Tagging', () => { before(done => { config = getRealAwsConfig(credentialOne); gcpClient = new GCP(config); - gcpRequestRetry({ - method: 'PUT', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}`); + gcpRequestRetry( + { + method: 'PUT', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in creating bucket ${err}`); + } + return done(err); } - return done(err); - }); + ); }); beforeEach(function beforeFn(done) { this.currentTest.key = `somekey-${genUniqID()}`; this.currentTest.specialKey = `veryspecial-${genUniqID()}`; - makeGcpRequest({ - method: 'PUT', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, (err, res) => { - if (err) { - process.stdout.write(`err in creating object ${err}`); - return done(err); + makeGcpRequest( + { + method: 'PUT', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + (err, res) => { + if (err) { + process.stdout.write(`err in creating object ${err}`); + 
return done(err); + } + this.currentTest.versionId = res.headers['x-goog-generation']; + return done(); } - this.currentTest.versionId = res.headers['x-goog-generation']; - return done(); - }); + ); }); afterEach(function afterFn(done) { - makeGcpRequest({ - method: 'DELETE', - bucket: bucketName, - objectKey: this.currentTest.key, - authCredentials: config.credentials, - }, err => { - if (err) { - process.stdout.write(`err in deleting object ${err}`); + makeGcpRequest( + { + method: 'DELETE', + bucket: bucketName, + objectKey: this.currentTest.key, + authCredentials: config.credentials, + }, + err => { + if (err) { + process.stdout.write(`err in deleting object ${err}`); + } + return done(err); } - return done(err); - }); + ); }); after(done => { - gcpRequestRetry({ - method: 'DELETE', - bucket: bucketName, - authCredentials: config.credentials, - }, 0, err => { - if (err) { - process.stdout.write(`err in deleting bucket ${err}`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucketName, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}`); + } + return done(err); } - return done(err); - }); + ); }); it('should successfully put object tags', function testFn(done) { - async.waterfall([ - next => gcpClient.putObjectTagging({ - Bucket: bucketName, - Key: this.test.key, - VersionId: this.test.versionId, - Tagging: { - TagSet: [ + async.waterfall( + [ + next => + gcpClient.putObjectTagging( { - Key: this.test.specialKey, - Value: this.test.specialKey, + Bucket: bucketName, + Key: this.test.key, + VersionId: this.test.versionId, + Tagging: { + TagSet: [ + { + Key: this.test.specialKey, + Value: this.test.specialKey, + }, + ], + }, }, - ], - }, - }, err => { - assert.equal(err, null, - `Expected success, got error ${err}`); - return next(); - }), - next => makeGcpRequest({ - method: 'HEAD', - bucket: bucketName, - objectKey: this.test.key, - authCredentials: config.credentials, - headers: { - 'x-goog-generation': this.test.versionId, - }, - }, (err, res) => { - if (err) { - process.stdout.write(`err in retrieving object ${err}`); - return next(err); - } - const toCompare = - res.headers[`${gcpTagPrefix}${this.test.specialKey}`]; - assert.strictEqual(toCompare, this.test.specialKey); - return next(); - }), - ], done); + err => { + assert.equal(err, null, `Expected success, got error ${err}`); + return next(); + } + ), + next => + makeGcpRequest( + { + method: 'HEAD', + bucket: bucketName, + objectKey: this.test.key, + authCredentials: config.credentials, + headers: { + 'x-goog-generation': this.test.versionId, + }, + }, + (err, res) => { + if (err) { + process.stdout.write(`err in retrieving object ${err}`); + return next(err); + } + const toCompare = res.headers[`${gcpTagPrefix}${this.test.specialKey}`]; + assert.strictEqual(toCompare, this.test.specialKey); + return next(); + } + ), + ], + done + ); }); describe('when tagging parameter is incorrect', () => { - it('should return 400 and BadRequest if more than ' + - '10 tags are given', function testFun(done) { - return gcpClient.putObjectTagging({ - Bucket: bucketName, - Key: this.test.key, - VersionId: this.test.versionId, - Tagging: { - TagSet: genPutTagObj(11), + it('should return 400 and BadRequest if more than ' + '10 tags are given', function testFun(done) { + return gcpClient.putObjectTagging( + { + Bucket: bucketName, + Key: this.test.key, + VersionId: this.test.versionId, + Tagging: { + TagSet: genPutTagObj(11), + }, }, - }, err => { - assert(err); - 
assert.strictEqual(err.code, 400); - assert.strictEqual(err.message, 'BadRequest'); - return done(); - }); + err => { + assert(err); + assert.strictEqual(err.code, 400); + assert.strictEqual(err.message, 'BadRequest'); + return done(); + } + ); }); - it('should return 400 and InvalidTag if given duplicate keys', - function testFn(done) { - return gcpClient.putObjectTagging({ - Bucket: bucketName, - Key: this.test.key, - VersionId: this.test.versionId, - Tagging: { - TagSet: genPutTagObj(10, true), + it('should return 400 and InvalidTag if given duplicate keys', function testFn(done) { + return gcpClient.putObjectTagging( + { + Bucket: bucketName, + Key: this.test.key, + VersionId: this.test.versionId, + Tagging: { + TagSet: genPutTagObj(10, true), + }, }, - }, err => { - assert(err); - assert.strictEqual(err.code, 400); - assert.strictEqual(err.message, 'InvalidTag'); - return done(); - }); + err => { + assert(err); + assert.strictEqual(err.code, 400); + assert.strictEqual(err.message, 'InvalidTag'); + return done(); + } + ); }); - it('should return 400 and InvalidTag if given invalid key', - function testFn(done) { - return gcpClient.putObjectTagging({ - Bucket: bucketName, - Key: this.test.key, - VersionId: this.test.versionId, - Tagging: { - TagSet: [ - { Key: Buffer.alloc(129, 'a'), Value: 'bad tag' }, - ], + it('should return 400 and InvalidTag if given invalid key', function testFn(done) { + return gcpClient.putObjectTagging( + { + Bucket: bucketName, + Key: this.test.key, + VersionId: this.test.versionId, + Tagging: { + TagSet: [{ Key: Buffer.alloc(129, 'a'), Value: 'bad tag' }], + }, }, - }, err => { - assert(err); - assert.strictEqual(err.code, 400); - assert.strictEqual(err.message, 'InvalidTag'); - return done(); - }); + err => { + assert(err); + assert.strictEqual(err.code, 400); + assert.strictEqual(err.message, 'InvalidTag'); + return done(); + } + ); }); - it('should return 400 and InvalidTag if given invalid value', - function testFn(done) { - return gcpClient.putObjectTagging({ - Bucket: bucketName, - Key: this.test.key, - VersionId: this.test.versionId, - Tagging: { - TagSet: [ - { Key: 'badtag', Value: Buffer.alloc(257, 'a') }, - ], + it('should return 400 and InvalidTag if given invalid value', function testFn(done) { + return gcpClient.putObjectTagging( + { + Bucket: bucketName, + Key: this.test.key, + VersionId: this.test.versionId, + Tagging: { + TagSet: [{ Key: 'badtag', Value: Buffer.alloc(257, 'a') }], + }, }, - }, err => { - assert(err); - assert.strictEqual(err.code, 400); - assert.strictEqual(err.message, 'InvalidTag'); - return done(); - }); + err => { + assert(err); + assert.strictEqual(err.code, 400); + assert.strictEqual(err.message, 'InvalidTag'); + return done(); + } + ); }); }); }); diff --git a/tests/functional/raw-node/test/GCP/object/upload.js b/tests/functional/raw-node/test/GCP/object/upload.js index fc3830308a..0bbb669a5b 100644 --- a/tests/functional/raw-node/test/GCP/object/upload.js +++ b/tests/functional/raw-node/test/GCP/object/upload.js @@ -2,10 +2,8 @@ const assert = require('assert'); const async = require('async'); const arsenal = require('arsenal'); const { GCP } = arsenal.storage.data.external; -const { gcpRequestRetry, setBucketClass, genUniqID } = - require('../../../utils/gcpUtils'); -const { getRealAwsConfig } = - require('../../../../aws-node-sdk/test/support/awsConfig'); +const { gcpRequestRetry, setBucketClass, genUniqID } = require('../../../utils/gcpUtils'); +const { getRealAwsConfig } = 
require('../../../../aws-node-sdk/test/support/awsConfig'); const credentialOne = 'gcpbackend'; const bucketNames = { @@ -32,81 +30,102 @@ describe('GCP: Upload Object', function testSuite() { before(done => { config = getRealAwsConfig(credentialOne); gcpClient = new GCP(config); - async.eachSeries(bucketNames, - (bucket, next) => gcpRequestRetry({ - method: 'PUT', - bucket: bucket.Name, - authCredentials: config.credentials, - requestBody: setBucketClass(bucket.Type), - }, 0, err => { - if (err) { - process.stdout.write(`err in creating bucket ${err}\n`); - } - return next(err); - }), - err => done(err)); - }); - - after(done => { - async.eachSeries(bucketNames, - (bucket, next) => gcpClient.listObjects({ - Bucket: bucket.Name, - }, (err, res) => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - async.map(res.Contents, (object, moveOn) => { - const deleteParams = { - Bucket: bucket.Name, - Key: object.Key, - }; - gcpClient.deleteObject( - deleteParams, err => moveOn(err)); - }, err => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - gcpRequestRetry({ - method: 'DELETE', + async.eachSeries( + bucketNames, + (bucket, next) => + gcpRequestRetry( + { + method: 'PUT', bucket: bucket.Name, authCredentials: config.credentials, - }, 0, err => { + requestBody: setBucketClass(bucket.Type), + }, + 0, + err => { if (err) { - process.stdout.write( - `err in deleting bucket ${err}\n`); + process.stdout.write(`err in creating bucket ${err}\n`); } return next(err); - }); - }); - }), - err => done(err)); + } + ), + err => done(err) + ); + }); + + after(done => { + async.eachSeries( + bucketNames, + (bucket, next) => + gcpClient.listObjects( + { + Bucket: bucket.Name, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got error ${err}`); + async.map( + res.Contents, + (object, moveOn) => { + const deleteParams = { + Bucket: bucket.Name, + Key: object.Key, + }; + gcpClient.deleteObject(deleteParams, err => moveOn(err)); + }, + err => { + assert.equal(err, null, `Expected success, but got error ${err}`); + gcpRequestRetry( + { + method: 'DELETE', + bucket: bucket.Name, + authCredentials: config.credentials, + }, + 0, + err => { + if (err) { + process.stdout.write(`err in deleting bucket ${err}\n`); + } + return next(err); + } + ); + } + ); + } + ), + err => done(err) + ); }); it('should put an object to GCP', done => { const key = `somekey-${genUniqID()}`; - gcpClient.upload({ - Bucket: bucketNames.main.Name, - MPU: bucketNames.mpu.Name, - Key: key, - Body: body, - }, (err, res) => { - assert.equal(err, null, - `Expected success, got error ${err}`); - assert.strictEqual(res.ETag, `"${smallMD5}"`); - return done(); - }); + gcpClient.upload( + { + Bucket: bucketNames.main.Name, + MPU: bucketNames.mpu.Name, + Key: key, + Body: body, + }, + (err, res) => { + assert.equal(err, null, `Expected success, got error ${err}`); + assert.strictEqual(res.ETag, `"${smallMD5}"`); + return done(); + } + ); }); it('should put a large object to GCP', done => { const key = `somekey-${genUniqID()}`; - gcpClient.upload({ - Bucket: bucketNames.main.Name, - MPU: bucketNames.mpu.Name, - Key: key, - Body: bigBody, - }, (err, res) => { - assert.equal(err, null, - `Expected success, got error ${err}`); - assert.strictEqual(res.ETag, `"${bigMD5}"`); - return done(); - }); + gcpClient.upload( + { + Bucket: bucketNames.main.Name, + MPU: bucketNames.mpu.Name, + Key: key, + Body: bigBody, + }, + (err, res) => { + assert.equal(err, null, `Expected success, got error 
${err}`); + assert.strictEqual(res.ETag, `"${bigMD5}"`); + return done(); + } + ); }); }); diff --git a/tests/functional/raw-node/test/badChunkSignatureV4.js b/tests/functional/raw-node/test/badChunkSignatureV4.js index 4a05e2d7d7..f694068f93 100644 --- a/tests/functional/raw-node/test/badChunkSignatureV4.js +++ b/tests/functional/raw-node/test/badChunkSignatureV4.js @@ -2,14 +2,12 @@ const http = require('http'); const async = require('async'); const assert = require('assert'); -const BucketUtility = - require('../../aws-node-sdk/lib/utility/bucket-util'); +const BucketUtility = require('../../aws-node-sdk/lib/utility/bucket-util'); const HttpRequestAuthV4 = require('../utils/HttpRequestAuthV4'); const config = require('../../config.json'); -const DUMMY_SIGNATURE = - 'baadc0debaadc0debaadc0debaadc0debaadc0debaadc0debaadc0debaadc0de'; +const DUMMY_SIGNATURE = 'baadc0debaadc0debaadc0debaadc0debaadc0debaadc0debaadc0debaadc0de'; http.globalAgent.keepAlive = true; @@ -31,10 +29,7 @@ function createBucket(bucketUtil, cb) { function cleanupBucket(bucketUtil, cb) { const emptyBucket = async.asyncify(bucketUtil.empty.bind(bucketUtil)); const deleteBucket = async.asyncify(bucketUtil.deleteOne.bind(bucketUtil)); - async.series([ - done => emptyBucket(BUCKET, done), - done => deleteBucket(BUCKET, done), - ], cb); + async.series([done => emptyBucket(BUCKET, done), done => deleteBucket(BUCKET, done)], cb); } class HttpChunkedUploadWithBadSignature extends HttpRequestAuthV4 { @@ -60,39 +55,45 @@ class HttpChunkedUploadWithBadSignature extends HttpRequestAuthV4 { function testChunkedPutWithBadSignature(n, alterSignatureChunkId, cb) { const req = new HttpChunkedUploadWithBadSignature( - `http://${config.ipAddress}:${PORT}/${BUCKET}/obj-${n}`, { + `http://${config.ipAddress}:${PORT}/${BUCKET}/obj-${n}`, + { accessKey: config.accessKey, secretKey: config.secretKey, method: 'PUT', headers: { 'content-length': N_DATA_CHUNKS * DATA_CHUNK_SIZE, - 'connection': 'keep-alive', + connection: 'keep-alive', }, alterSignatureChunkId, - }, res => { - if (alterSignatureChunkId >= 0 && - alterSignatureChunkId <= N_DATA_CHUNKS) { + }, + res => { + if (alterSignatureChunkId >= 0 && alterSignatureChunkId <= N_DATA_CHUNKS) { assert.strictEqual(res.statusCode, 403); } else { assert.strictEqual(res.statusCode, 200); } res.on('data', () => {}); res.on('end', cb); - }); + } + ); req.on('error', err => { assert.ifError(err); }); - async.timesSeries(N_DATA_CHUNKS, (chunkIndex, done) => { - // console.log(`SENDING NEXT CHUNK OF LENGTH ${CHUNK_DATA.length}`); - if (req.write(CHUNK_DATA)) { - process.nextTick(done); - } else { - req.once('drain', done); + async.timesSeries( + N_DATA_CHUNKS, + (chunkIndex, done) => { + // console.log(`SENDING NEXT CHUNK OF LENGTH ${CHUNK_DATA.length}`); + if (req.write(CHUNK_DATA)) { + process.nextTick(done); + } else { + req.once('drain', done); + } + }, + () => { + req.end(); } - }, () => { - req.end(); - }); + ); } describe('streaming V4 signature with bad chunk signature', () => { @@ -100,26 +101,32 @@ describe('streaming V4 signature with bad chunk signature', () => { before(done => createBucket(bucketUtil, done)); after(done => cleanupBucket(bucketUtil, done)); - it('Cloudserver should be robust against bad signature in streaming ' + - 'payload', function badSignatureInStreamingPayload(cb) { - this.timeout(120000); - async.timesLimit(N_PUTS, 10, (n, done) => { - // multiple test cases depend on the value of - // alterSignatureChunkId: - // alterSignatureChunkId >= 0 && - // alterSignatureChunkId 
< N_DATA_CHUNKS - // <=> alter the signature of the target data chunk - // alterSignatureChunkId == N_DATA_CHUNKS - // <=> alter the signature of the last empty chunk that - // carries the last payload signature - // alterSignatureChunkId > N_DATA_CHUNKS - // <=> no signature is altered (regular test case) - // By making n go from 0 to nDatachunks+1, we cover all - // above cases. - - const alterSignatureChunkId = ALTER_CHUNK_SIGNATURE ? - (n % (N_DATA_CHUNKS + 2)) : null; - testChunkedPutWithBadSignature(n, alterSignatureChunkId, done); - }, err => cb(err)); - }); + it( + 'Cloudserver should be robust against bad signature in streaming ' + 'payload', + function badSignatureInStreamingPayload(cb) { + this.timeout(120000); + async.timesLimit( + N_PUTS, + 10, + (n, done) => { + // multiple test cases depend on the value of + // alterSignatureChunkId: + // alterSignatureChunkId >= 0 && + // alterSignatureChunkId < N_DATA_CHUNKS + // <=> alter the signature of the target data chunk + // alterSignatureChunkId == N_DATA_CHUNKS + // <=> alter the signature of the last empty chunk that + // carries the last payload signature + // alterSignatureChunkId > N_DATA_CHUNKS + // <=> no signature is altered (regular test case) + // By making n go from 0 to nDatachunks+1, we cover all + // above cases. + + const alterSignatureChunkId = ALTER_CHUNK_SIGNATURE ? n % (N_DATA_CHUNKS + 2) : null; + testChunkedPutWithBadSignature(n, alterSignatureChunkId, done); + }, + err => cb(err) + ); + } + ); }); diff --git a/tests/functional/raw-node/test/headObject.js b/tests/functional/raw-node/test/headObject.js index c691dafd97..66219e6a47 100644 --- a/tests/functional/raw-node/test/headObject.js +++ b/tests/functional/raw-node/test/headObject.js @@ -10,36 +10,45 @@ const bucket = 'rawnodeapibucket'; describe('api tests', () => { before(done => { - makeS3Request({ - method: 'PUT', - authCredentials, - bucket, - }, err => { - assert.ifError(err); - done(); - }); + makeS3Request( + { + method: 'PUT', + authCredentials, + bucket, + }, + err => { + assert.ifError(err); + done(); + } + ); }); after(done => { - makeS3Request({ - method: 'DELETE', - authCredentials, - bucket, - }, err => { - assert.ifError(err); - done(); - }); + makeS3Request( + { + method: 'DELETE', + authCredentials, + bucket, + }, + err => { + assert.ifError(err); + done(); + } + ); }); it('should return 405 on headBucket when bucket is empty string', done => { - makeS3Request({ - method: 'HEAD', - authCredentials, - bucket: '', - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.statusCode, 405); - return done(); - }); + makeS3Request( + { + method: 'HEAD', + authCredentials, + bucket: '', + }, + (err, res) => { + assert.ifError(err); + assert.strictEqual(res.statusCode, 405); + return done(); + } + ); }); }); diff --git a/tests/functional/raw-node/test/lifecycle.js b/tests/functional/raw-node/test/lifecycle.js index af145845bb..a84423f6bd 100644 --- a/tests/functional/raw-node/test/lifecycle.js +++ b/tests/functional/raw-node/test/lifecycle.js @@ -25,109 +25,133 @@ function makeLifeCycleXML(date) { describe('api tests', () => { before(done => { - makeS3Request({ - method: 'PUT', - authCredentials, - bucket, - }, err => { - assert.ifError(err); - done(); - }); + makeS3Request( + { + method: 'PUT', + authCredentials, + bucket, + }, + err => { + assert.ifError(err); + done(); + } + ); }); after(done => { - makeS3Request({ - method: 'DELETE', - authCredentials, - bucket, - }, err => { - assert.ifError(err); - done(); - }); + makeS3Request( + 
{ + method: 'DELETE', + authCredentials, + bucket, + }, + err => { + assert.ifError(err); + done(); + } + ); }); it('should accept a lifecycle policy with a date at midnight', done => { - makeS3Request({ - method: 'PUT', - authCredentials, - bucket, - queryObj: { lifecycle: '' }, - requestBody: makeLifeCycleXML('2024-01-08T00:00:00Z'), - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.statusCode, 200); - return done(); - }); + makeS3Request( + { + method: 'PUT', + authCredentials, + bucket, + queryObj: { lifecycle: '' }, + requestBody: makeLifeCycleXML('2024-01-08T00:00:00Z'), + }, + (err, res) => { + assert.ifError(err); + assert.strictEqual(res.statusCode, 200); + return done(); + } + ); }); it('should accept a lifecycle policy with a date at midnight', done => { - makeS3Request({ - method: 'PUT', - authCredentials, - bucket, - queryObj: { lifecycle: '' }, - requestBody: makeLifeCycleXML('2024-01-08T00:00:00'), - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.statusCode, 200); - return done(); - }); + makeS3Request( + { + method: 'PUT', + authCredentials, + bucket, + queryObj: { lifecycle: '' }, + requestBody: makeLifeCycleXML('2024-01-08T00:00:00'), + }, + (err, res) => { + assert.ifError(err); + assert.strictEqual(res.statusCode, 200); + return done(); + } + ); }); it('should accept a lifecycle policy with a date at midnight', done => { - makeS3Request({ - method: 'PUT', - authCredentials, - bucket, - queryObj: { lifecycle: '' }, - requestBody: makeLifeCycleXML('2024-01-08T06:00:00+06:00'), - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.statusCode, 200); - return done(); - }); + makeS3Request( + { + method: 'PUT', + authCredentials, + bucket, + queryObj: { lifecycle: '' }, + requestBody: makeLifeCycleXML('2024-01-08T06:00:00+06:00'), + }, + (err, res) => { + assert.ifError(err); + assert.strictEqual(res.statusCode, 200); + return done(); + } + ); }); it('should reject a lifecycle policy with a date not at midnight', done => { - makeS3Request({ - method: 'PUT', - authCredentials, - bucket, - queryObj: { lifecycle: '' }, - requestBody: makeLifeCycleXML('2024-01-08T12:34:56Z'), - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - return done(); - }); + makeS3Request( + { + method: 'PUT', + authCredentials, + bucket, + queryObj: { lifecycle: '' }, + requestBody: makeLifeCycleXML('2024-01-08T12:34:56Z'), + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + assert.strictEqual(err.statusCode, 400); + return done(); + } + ); }); it('should reject a lifecycle policy with an illegal date', done => { - makeS3Request({ - method: 'PUT', - authCredentials, - bucket, - queryObj: { lifecycle: '' }, - requestBody: makeLifeCycleXML('2024-01-08T00:00:00+34:00'), - }, err => { - // This value is catched by AWS during XML parsing - assert(err.code === 'InvalidArgument' || err.code === 'MalformedXML'); - assert.strictEqual(err.statusCode, 400); - return done(); - }); + makeS3Request( + { + method: 'PUT', + authCredentials, + bucket, + queryObj: { lifecycle: '' }, + requestBody: makeLifeCycleXML('2024-01-08T00:00:00+34:00'), + }, + err => { + // This value is catched by AWS during XML parsing + assert(err.code === 'InvalidArgument' || err.code === 'MalformedXML'); + assert.strictEqual(err.statusCode, 400); + return done(); + } + ); }); it('should reject a lifecycle policy with a date not at midnight', done => { - makeS3Request({ - method: 'PUT', - authCredentials, - bucket, - 
queryObj: { lifecycle: '' }, - requestBody: makeLifeCycleXML('2024-01-08T00:00:00.123Z'), - }, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - return done(); - }); + makeS3Request( + { + method: 'PUT', + authCredentials, + bucket, + queryObj: { lifecycle: '' }, + requestBody: makeLifeCycleXML('2024-01-08T00:00:00.123Z'), + }, + err => { + assert.strictEqual(err.code, 'InvalidArgument'); + assert.strictEqual(err.statusCode, 400); + return done(); + } + ); }); }); diff --git a/tests/functional/raw-node/test/routes/routeMetadata.js b/tests/functional/raw-node/test/routes/routeMetadata.js index 9a916ada5a..3b69ecdb1e 100644 --- a/tests/functional/raw-node/test/routes/routeMetadata.js +++ b/tests/functional/raw-node/test/routes/routeMetadata.js @@ -17,8 +17,7 @@ const metadataAuthCredentials = { }; function makeMetadataRequest(params, callback) { - const { method, headers, authCredentials, - requestBody, queryObj, path } = params; + const { method, headers, authCredentials, requestBody, queryObj, path } = params; const options = { authCredentials, hostname: ipAddress, @@ -34,8 +33,7 @@ function makeMetadataRequest(params, callback) { } describe('metadata routes with metadata', () => { - const bucketUtil = new BucketUtility( - 'default', { signatureVersion: 'v4' }); + const bucketUtil = new BucketUtility('default', { signatureVersion: 'v4' }); const s3 = bucketUtil.s3; const bucket1 = 'bucket1'; @@ -44,100 +42,125 @@ describe('metadata routes with metadata', () => { // E2E tests use S3C metadata, whereas functional tests use mocked metadata. if (process.env.S3_END_TO_END) { - before(done => s3.createBucket({ Bucket: bucket1 }).promise() - .then(() => s3.putObject({ Bucket: bucket1, Key: keyName, Body: '' }).promise()) - .then(() => s3.createBucket({ Bucket: bucket2 }).promise()) - .then(() => done(), err => done(err)) + before(done => + s3 + .createBucket({ Bucket: bucket1 }) + .promise() + .then(() => s3.putObject({ Bucket: bucket1, Key: keyName, Body: '' }).promise()) + .then(() => s3.createBucket({ Bucket: bucket2 }).promise()) + .then( + () => done(), + err => done(err) + ) ); - after(done => bucketUtil.empty(bucket1) - .then(() => s3.deleteBucket({ Bucket: bucket1 }).promise()) - .then(() => bucketUtil.empty(bucket2)) - .then(() => s3.deleteBucket({ Bucket: bucket2 }).promise()) - .then(() => done(), err => done(err)) + after(done => + bucketUtil + .empty(bucket1) + .then(() => s3.deleteBucket({ Bucket: bucket1 }).promise()) + .then(() => bucketUtil.empty(bucket2)) + .then(() => s3.deleteBucket({ Bucket: bucket2 }).promise()) + .then( + () => done(), + err => done(err) + ) ); } else { let httpServer; before(done => { - httpServer = http.createServer( - (req, res) => metadataMock.onRequest(req, res)).listen(9000, done); + httpServer = http.createServer((req, res) => metadataMock.onRequest(req, res)).listen(9000, done); }); after(() => httpServer.close()); } it('should retrieve list of buckets', done => { - makeMetadataRequest({ - method: 'GET', - authCredentials: metadataAuthCredentials, - path: '/_/metadata/admin/raft_sessions/1/bucket', - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.statusCode, 200); - assert(res.body); - const expectedArray = [bucket1, 'users..bucket', bucket2]; - const responseArray = JSON.parse(res.body); - - expectedArray.sort(); - responseArray.sort(); - - assert.deepStrictEqual(responseArray, expectedArray); - return done(); - }); + makeMetadataRequest( + { + method: 'GET', + 
authCredentials: metadataAuthCredentials, + path: '/_/metadata/admin/raft_sessions/1/bucket', + }, + (err, res) => { + assert.ifError(err); + assert.strictEqual(res.statusCode, 200); + assert(res.body); + const expectedArray = [bucket1, 'users..bucket', bucket2]; + const responseArray = JSON.parse(res.body); + + expectedArray.sort(); + responseArray.sort(); + + assert.deepStrictEqual(responseArray, expectedArray); + return done(); + } + ); }); it('should retrieve list of objects from bucket', done => { - makeMetadataRequest({ - method: 'GET', - authCredentials: metadataAuthCredentials, - path: `/_/metadata/default/bucket/${bucket1}`, - queryObj: { listingType: 'Delimiter' }, - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.statusCode, 200); - const body = JSON.parse(res.body); - assert.strictEqual(body.Contents[0].key, 'testobject1'); - return done(); - }); + makeMetadataRequest( + { + method: 'GET', + authCredentials: metadataAuthCredentials, + path: `/_/metadata/default/bucket/${bucket1}`, + queryObj: { listingType: 'Delimiter' }, + }, + (err, res) => { + assert.ifError(err); + assert.strictEqual(res.statusCode, 200); + const body = JSON.parse(res.body); + assert.strictEqual(body.Contents[0].key, 'testobject1'); + return done(); + } + ); }); it('should retrieve metadata of bucket', done => { - makeMetadataRequest({ - method: 'GET', - authCredentials: metadataAuthCredentials, - path: `/_/metadata/default/attributes/${bucket1}`, - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.statusCode, 200); - assert(res.body); - return done(); - }); + makeMetadataRequest( + { + method: 'GET', + authCredentials: metadataAuthCredentials, + path: `/_/metadata/default/attributes/${bucket1}`, + }, + (err, res) => { + assert.ifError(err); + assert.strictEqual(res.statusCode, 200); + assert(res.body); + return done(); + } + ); }); it('should retrieve metadata of object', done => { - makeMetadataRequest({ - method: 'GET', - authCredentials: metadataAuthCredentials, - path: `/_/metadata/default/bucket/${bucket1}/${keyName}`, - }, (err, res) => { - assert.ifError(err); - assert(res.body); - assert.strictEqual(res.statusCode, 200); - const body = JSON.parse(res.body); - assert(body['owner-id']); - return done(); - }); + makeMetadataRequest( + { + method: 'GET', + authCredentials: metadataAuthCredentials, + path: `/_/metadata/default/bucket/${bucket1}/${keyName}`, + }, + (err, res) => { + assert.ifError(err); + assert(res.body); + assert.strictEqual(res.statusCode, 200); + const body = JSON.parse(res.body); + assert(body['owner-id']); + return done(); + } + ); }); it('should get an error for accessing invalid routes', done => { - makeMetadataRequest({ - method: 'GET', - authCredentials: metadataAuthCredentials, - path: '/_/metadata/admin/raft_sessions', - }, err => { - assert.strictEqual(err.code, 'NotImplemented'); - return done(); - }); + makeMetadataRequest( + { + method: 'GET', + authCredentials: metadataAuthCredentials, + path: '/_/metadata/admin/raft_sessions', + }, + err => { + assert.strictEqual(err.code, 'NotImplemented'); + return done(); + } + ); }); }); diff --git a/tests/functional/raw-node/test/trailingChecksums.js b/tests/functional/raw-node/test/trailingChecksums.js index bad429c3c8..bee5fb925d 100644 --- a/tests/functional/raw-node/test/trailingChecksums.js +++ b/tests/functional/raw-node/test/trailingChecksums.js @@ -7,9 +7,8 @@ const bucket = 'testunsupportedchecksumsbucket'; const objectKey = 'key'; const objData = Buffer.alloc(1024, 'a'); // note this is 
not the correct checksum in objDataWithTrailingChecksum -const objDataWithTrailingChecksum = '10\r\n0123456789abcdef\r\n' + - '10\r\n0123456789abcdef\r\n' + - '0\r\nx-amz-checksum-crc64nvme:YeIDuLa7tU0=\r\n'; +const objDataWithTrailingChecksum = + '10\r\n0123456789abcdef\r\n' + '10\r\n0123456789abcdef\r\n' + '0\r\nx-amz-checksum-crc64nvme:YeIDuLa7tU0=\r\n'; const objDataWithoutTrailingChecksum = '0123456789abcdef0123456789abcdef'; const config = require('../../config.json'); @@ -22,33 +21,47 @@ const itSkipIfAWS = process.env.AWS_ON_AIR ? it.skip : it; describe('trailing checksum requests:', () => { before(done => { - makeS3Request({ - method: 'PUT', - authCredentials, - bucket, - }, err => { - assert.ifError(err); - done(); - }); + makeS3Request( + { + method: 'PUT', + authCredentials, + bucket, + }, + err => { + assert.ifError(err); + done(); + } + ); }); after(done => { - async.series([ - next => makeS3Request({ - method: 'DELETE', - authCredentials, - bucket, - objectKey, - }, next), - next => makeS3Request({ - method: 'DELETE', - authCredentials, - bucket, - }, next), - ], err => { - assert.ifError(err); - done(); - }); + async.series( + [ + next => + makeS3Request( + { + method: 'DELETE', + authCredentials, + bucket, + objectKey, + }, + next + ), + next => + makeS3Request( + { + method: 'DELETE', + authCredentials, + bucket, + }, + next + ), + ], + err => { + assert.ifError(err); + done(); + } + ); }); it('should accept unsigned trailing checksum', done => { @@ -85,18 +98,21 @@ describe('trailing checksum requests:', () => { }); it('should have correct object content for unsigned trailing checksum', done => { - makeS3Request({ - method: 'GET', - authCredentials, - bucket, - objectKey, - }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.statusCode, 200); - // check that the object data is the input stripped of the trailing checksum - assert.strictEqual(res.body, objDataWithoutTrailingChecksum); - return done(); - }); + makeS3Request( + { + method: 'GET', + authCredentials, + bucket, + objectKey, + }, + (err, res) => { + assert.ifError(err); + assert.strictEqual(res.statusCode, 200); + // check that the object data is the input stripped of the trailing checksum + assert.strictEqual(res.body, objDataWithoutTrailingChecksum); + return done(); + } + ); }); itSkipIfAWS('should respond with BadRequest for signed trailing checksum', done => { diff --git a/tests/functional/raw-node/test/unsupportedChecksums.js b/tests/functional/raw-node/test/unsupportedChecksums.js index dd04be0f5f..50b363c444 100644 --- a/tests/functional/raw-node/test/unsupportedChecksums.js +++ b/tests/functional/raw-node/test/unsupportedChecksums.js @@ -15,25 +15,31 @@ const itSkipIfAWS = process.env.AWS_ON_AIR ? 
it.skip : it; describe('unsupported checksum requests:', () => { before(done => { - makeS3Request({ - method: 'PUT', - authCredentials, - bucket, - }, err => { - assert.ifError(err); - done(); - }); + makeS3Request( + { + method: 'PUT', + authCredentials, + bucket, + }, + err => { + assert.ifError(err); + done(); + } + ); }); after(done => { - makeS3Request({ - method: 'DELETE', - authCredentials, - bucket, - }, err => { - assert.ifError(err); - done(); - }); + makeS3Request( + { + method: 'DELETE', + authCredentials, + bucket, + }, + err => { + assert.ifError(err); + done(); + } + ); }); itSkipIfAWS('should respond with BadRequest for trailing checksum', done => { diff --git a/tests/functional/raw-node/test/unsupportedQuries.js b/tests/functional/raw-node/test/unsupportedQuries.js index 9f3995f666..20e09f72ce 100644 --- a/tests/functional/raw-node/test/unsupportedQuries.js +++ b/tests/functional/raw-node/test/unsupportedQuries.js @@ -12,10 +12,8 @@ describe('unsupported query requests:', () => { const queryObj = {}; queryObj[query] = ''; - itSkipIfAWS(`should respond with NotImplemented for ?${query} request`, - done => { - makeS3Request({ method: 'GET', queryObj, bucket, objectKey }, - err => { + itSkipIfAWS(`should respond with NotImplemented for ?${query} request`, done => { + makeS3Request({ method: 'GET', queryObj, bucket, objectKey }, err => { assert.strictEqual(err.code, 'NotImplemented'); assert.strictEqual(err.statusCode, 501); done(); @@ -29,10 +27,8 @@ describe('unsupported bucket query requests:', () => { const queryObj = {}; queryObj[query] = ''; - itSkipIfAWS(`should respond with NotImplemented for ?${query} request`, - done => { - makeS3Request({ method: 'GET', queryObj, bucket }, - err => { + itSkipIfAWS(`should respond with NotImplemented for ?${query} request`, done => { + makeS3Request({ method: 'GET', queryObj, bucket }, err => { assert.strictEqual(err.code, 'NotImplemented'); assert.strictEqual(err.statusCode, 501); done(); diff --git a/tests/functional/raw-node/utils/HttpRequestAuthV4.js b/tests/functional/raw-node/utils/HttpRequestAuthV4.js index f766421eac..f5492ec565 100644 --- a/tests/functional/raw-node/utils/HttpRequestAuthV4.js +++ b/tests/functional/raw-node/utils/HttpRequestAuthV4.js @@ -63,30 +63,24 @@ class HttpRequestAuthV4 extends stream.Writable { getCredentialScope() { const signingDate = this._timestamp.slice(0, 8); - const credentialScope = - `${signingDate}/${REGION}/${SERVICE}/aws4_request`; + const credentialScope = `${signingDate}/${REGION}/${SERVICE}/aws4_request`; // console.log(`CREDENTIAL SCOPE: "${credentialScope}"`); return credentialScope; } getSigningKey() { const signingDate = this._timestamp.slice(0, 8); - const dateKey = crypto.createHmac('sha256', `AWS4${this._secretKey}`) - .update(signingDate, 'binary').digest(); - const dateRegionKey = crypto.createHmac('sha256', dateKey) - .update(REGION, 'binary').digest(); - const dateRegionServiceKey = crypto.createHmac('sha256', dateRegionKey) - .update(SERVICE, 'binary').digest(); - this._signingKey = crypto.createHmac('sha256', dateRegionServiceKey) - .update('aws4_request', 'binary').digest(); + const dateKey = crypto.createHmac('sha256', `AWS4${this._secretKey}`).update(signingDate, 'binary').digest(); + const dateRegionKey = crypto.createHmac('sha256', dateKey).update(REGION, 'binary').digest(); + const dateRegionServiceKey = crypto.createHmac('sha256', dateRegionKey).update(SERVICE, 'binary').digest(); + this._signingKey = crypto.createHmac('sha256', 
dateRegionServiceKey).update('aws4_request', 'binary').digest(); } createSignature(stringToSign) { if (!this._signingKey) { this.getSigningKey(); } - return crypto.createHmac('sha256', this._signingKey) - .update(stringToSign).digest('hex'); + return crypto.createHmac('sha256', this._signingKey).update(stringToSign).digest('hex'); } getCanonicalRequest(urlObj, signedHeaders) { @@ -96,19 +90,16 @@ class HttpRequestAuthV4 extends stream.Writable { urlObj.searchParams.forEach((value, key) => { qsParams.push({ key, value }); }); - const canonicalQueryString = - qsParams - .sort((a, b) => { - if (a.key !== b.key) { - return a.key < b.key ? -1 : 1; - } - return a.value < b.value ? -1 : 1; - }) - .map(param => `${encodeURI(param.key)}=${encodeURI(param.value)}`) - .join('&'); - const canonicalSignedHeaders = signedHeadersList - .map(header => `${header}:${signedHeaders[header]}\n`) - .join(''); + const canonicalQueryString = qsParams + .sort((a, b) => { + if (a.key !== b.key) { + return a.key < b.key ? -1 : 1; + } + return a.value < b.value ? -1 : 1; + }) + .map(param => `${encodeURI(param.key)}=${encodeURI(param.value)}`) + .join('&'); + const canonicalSignedHeaders = signedHeadersList.map(header => `${header}:${signedHeaders[header]}\n`).join(''); const canonicalRequest = [ method, urlObj.pathname, @@ -123,41 +114,37 @@ class HttpRequestAuthV4 extends stream.Writable { } constructRequestStringToSign(canonicalReq) { - const canonicalReqHash = - crypto.createHash('sha256').update(canonicalReq).digest('hex'); - const stringToSign = `AWS4-HMAC-SHA256\n${this._timestamp}\n` + - `${this.getCredentialScope()}\n${canonicalReqHash}`; + const canonicalReqHash = crypto.createHash('sha256').update(canonicalReq).digest('hex'); + const stringToSign = + `AWS4-HMAC-SHA256\n${this._timestamp}\n` + `${this.getCredentialScope()}\n${canonicalReqHash}`; // console.log(`STRING TO SIGN: "${stringToSign}"`); return stringToSign; } getAuthorizationSignature(urlObj, signedHeaders) { - const canonicalRequest = - this.getCanonicalRequest(urlObj, signedHeaders); - this._lastSignature = this.createSignature( - this.constructRequestStringToSign(canonicalRequest)); + const canonicalRequest = this.getCanonicalRequest(urlObj, signedHeaders); + this._lastSignature = this.createSignature(this.constructRequestStringToSign(canonicalRequest)); return this._lastSignature; } getAuthorizationHeader(urlObj, signedHeaders) { - const authorizationSignature = - this.getAuthorizationSignature(urlObj, signedHeaders); + const authorizationSignature = this.getAuthorizationSignature(urlObj, signedHeaders); const signedHeadersList = Object.keys(signedHeaders).sort(); - return ['AWS4-HMAC-SHA256', - `Credential=${this._accessKey}/${this.getCredentialScope()},`, - `SignedHeaders=${signedHeadersList.join(';')},`, - `Signature=${authorizationSignature}`, - ].join(' '); + return [ + 'AWS4-HMAC-SHA256', + `Credential=${this._accessKey}/${this.getCredentialScope()},`, + `SignedHeaders=${signedHeadersList.join(';')},`, + `Signature=${authorizationSignature}`, + ].join(' '); } constructChunkStringToSign(chunkData) { - const currentChunkHash = - crypto.createHash('sha256').update(chunkData.toString()) - .digest('hex'); - const stringToSign = `AWS4-HMAC-SHA256-PAYLOAD\n${this._timestamp}\n` + - `${this.getCredentialScope()}\n${this._lastSignature}\n` + - `${EMPTY_STRING_HASH}\n${currentChunkHash}`; + const currentChunkHash = crypto.createHash('sha256').update(chunkData.toString()).digest('hex'); + const stringToSign = + 
`AWS4-HMAC-SHA256-PAYLOAD\n${this._timestamp}\n` + + `${this.getCredentialScope()}\n${this._lastSignature}\n` + + `${EMPTY_STRING_HASH}\n${currentChunkHash}`; // console.log(`CHUNK STRING TO SIGN: "${stringToSign}"`); return stringToSign; } @@ -173,13 +160,7 @@ class HttpRequestAuthV4 extends stream.Writable { return chunkData; } const chunkSignature = this.getChunkSignature(chunkData); - return [chunkData.length.toString(16), - ';chunk-signature=', - chunkSignature, - '\r\n', - chunkData, - '\r\n', - ].join(''); + return [chunkData.length.toString(16), ';chunk-signature=', chunkSignature, '\r\n', chunkData, '\r\n'].join(''); } _constructRequest(hasDataToSend) { @@ -196,7 +177,7 @@ class HttpRequestAuthV4 extends stream.Writable { const urlObj = new url.URL(this._url); const signedHeaders = { - 'host': urlObj.host, + host: urlObj.host, 'x-amz-date': this._timestamp, }; const httpHeaders = Object.assign({}, this._httpParams.headers); @@ -206,20 +187,17 @@ class HttpRequestAuthV4 extends stream.Writable { if (lowerHeader === 'content-length') { contentLengthHeader = header; } - if (!['connection', - 'transfer-encoding'].includes(lowerHeader)) { + if (!['connection', 'transfer-encoding'].includes(lowerHeader)) { signedHeaders[lowerHeader] = httpHeaders[header]; } }); if (!signedHeaders['x-amz-content-sha256']) { if (hasDataToSend) { - signedHeaders['x-amz-content-sha256'] = - 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'; + signedHeaders['x-amz-content-sha256'] = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'; signedHeaders['content-encoding'] = 'aws-chunked'; this._chunkedUpload = true; if (contentLengthHeader !== undefined) { - signedHeaders['x-amz-decoded-content-length'] = - httpHeaders[contentLengthHeader]; + signedHeaders['x-amz-decoded-content-length'] = httpHeaders[contentLengthHeader]; delete signedHeaders['content-length']; delete httpHeaders[contentLengthHeader]; httpHeaders['transfer-encoding'] = 'chunked'; @@ -228,8 +206,7 @@ class HttpRequestAuthV4 extends stream.Writable { signedHeaders['x-amz-content-sha256'] = EMPTY_STRING_HASH; } } - httpHeaders.Authorization = - this.getAuthorizationHeader(urlObj, signedHeaders); + httpHeaders.Authorization = this.getAuthorizationHeader(urlObj, signedHeaders); return Object.assign(httpHeaders, signedHeaders); } diff --git a/tests/functional/raw-node/utils/MetadataMock.js b/tests/functional/raw-node/utils/MetadataMock.js index 008a284b41..cf49560535 100644 --- a/tests/functional/raw-node/utils/MetadataMock.js +++ b/tests/functional/raw-node/utils/MetadataMock.js @@ -8,10 +8,10 @@ const dummyBucketMD = { WRITE: [], WRITE_ACP: [], READ: [], - READ_ACP: [] }, + READ_ACP: [], + }, _name: 'xxxfriday10', - _owner: - '94224c921648ada653f584f3caf42654ccf3f1cbd2e569a24e88eb460f2f84d8', + _owner: '94224c921648ada653f584f3caf42654ccf3f1cbd2e569a24e88eb460f2f84d8', _ownerDisplayName: 'test_1518720219', _creationDate: '2018-02-16T21:55:16.415Z', _mdBucketModelVersion: 5, @@ -33,10 +33,10 @@ const dummyBucketMD = { WRITE: [], WRITE_ACP: [], READ: [], - READ_ACP: [] }, + READ_ACP: [], + }, _name: 'xxxfriday11', - _owner: - '94224c921648ada653f584f3caf42654ccf3f1cbd2e569a24e88eb460f2f84d8', + _owner: '94224c921648ada653f584f3caf42654ccf3f1cbd2e569a24e88eb460f2f84d8', _ownerDisplayName: 'test_1518720219', _creationDate: '2018-02-16T21:55:16.415Z', _mdBucketModelVersion: 5, @@ -55,157 +55,207 @@ const dummyBucketMD = { const objectList = { Contents: [ - { key: 'testobject1', - value: JSON.stringify({ - 'owner-display-name': 'test_1518720219', - 'owner-id': - 
'94224c921648ada653f584f3caf42654ccf3f1cbd2e569a24e88eb460f2f84d8', - 'content-length': 0, - 'content-md5': 'd41d8cd98f00b204e9800998ecf8427e', - 'x-amz-version-id': 'null', - 'x-amz-server-version-id': '', - 'x-amz-storage-class': 'STANDARD', - 'x-amz-server-side-encryption': '', - 'x-amz-server-side-encryption-aws-kms-key-id': '', - 'x-amz-server-side-encryption-customer-algorithm': '', - 'x-amz-website-redirect-location': '', - 'acl': { - Canned: 'private', - FULL_CONTROL: [], - WRITE_ACP: [], - READ: [], - READ_ACP: [], - }, - 'key': '', - 'location': null, - 'isDeleteMarker': false, - 'tags': {}, - 'replicationInfo': { - status: '', - backends: [], - content: [], - destination: '', - storageClass: '', - role: '', - storageType: '', - dataStoreVersionId: '', - }, - 'dataStoreName': 'us-east-1', - 'last-modified': '2018-02-16T22:43:37.174Z', - 'md-model-version': 3, - }) }, + { + key: 'testobject1', + value: JSON.stringify({ + 'owner-display-name': 'test_1518720219', + 'owner-id': '94224c921648ada653f584f3caf42654ccf3f1cbd2e569a24e88eb460f2f84d8', + 'content-length': 0, + 'content-md5': 'd41d8cd98f00b204e9800998ecf8427e', + 'x-amz-version-id': 'null', + 'x-amz-server-version-id': '', + 'x-amz-storage-class': 'STANDARD', + 'x-amz-server-side-encryption': '', + 'x-amz-server-side-encryption-aws-kms-key-id': '', + 'x-amz-server-side-encryption-customer-algorithm': '', + 'x-amz-website-redirect-location': '', + acl: { + Canned: 'private', + FULL_CONTROL: [], + WRITE_ACP: [], + READ: [], + READ_ACP: [], + }, + key: '', + location: null, + isDeleteMarker: false, + tags: {}, + replicationInfo: { + status: '', + backends: [], + content: [], + destination: '', + storageClass: '', + role: '', + storageType: '', + dataStoreVersionId: '', + }, + dataStoreName: 'us-east-1', + 'last-modified': '2018-02-16T22:43:37.174Z', + 'md-model-version': 3, + }), + }, ], }; const mockLogs = { info: { start: 1, cseq: 7, prune: 1 }, log: [ - { db: 'friday', method: 0, entries: [ - { value: '{\"attributes\":\"{\\\"name\\\":\\\"friday\\\",' + - '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + - 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + - '\\\"test_1518720219\\\",\\\"creationDate\\\":' + - '\\\"2018-02-16T19:59:31.664Z\\\",\\\"mdBucketModelVersion\\\":5,' + - '\\\"transient\\\":true,\\\"deleted\\\":false,' + - '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + - '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + - '\\\":null,\\\"replicationConfiguration\\\":null}\"}' }, - ] }, - { db: 'friday', method: 7, entries: [ - { value: '{\"attributes\":\"{\\\"name\\\":\\\"friday\\\",' + - '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + - 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + - '\\\"test_1518720219\\\",\\\"creationDate\\\":' + - '\\\"2018-02-16T19:59:31.664Z\\\",\\\"mdBucketModelVersion\\\":5,' + - '\\\"transient\\\":false,\\\"deleted\\\":false,' + - '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + - '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + - '\\\":null,\\\"replicationConfiguration\\\":null}\",' + - '\"raftSession\":1}' }, - ] }, - { db: 'friday7', method: 0, entries: [ - { value: '{\"attributes\":\"{\\\"name\\\":\\\"friday7\\\",' + - '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + - 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + - '\\\"test_1518720219\\\",\\\"creationDate\\\":' + - '\\\"2018-02-16T20:41:34.253Z\\\",\\\"mdBucketModelVersion\\\":5,' + - 
'\\\"transient\\\":true,\\\"deleted\\\":false,' + - '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + - '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + - '\\\":null,\\\"replicationConfiguration\\\":null}\"}' }, - ] }, - { db: 'friday7', method: 7, entries: [ - { value: '{\"attributes\":\"{\\\"name\\\":\\\"friday7\\\",' + - '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + - 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + - '\\\"test_1518720219\\\",\\\"creationDate\\\":' + - '\\\"2018-02-16T20:41:34.253Z\\\",\\\"mdBucketModelVersion\\\":5,' + - '\\\"transient\\\":false,\\\"deleted\\\":false,' + - '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + - '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + - '\\\":null,\\\"replicationConfiguration\\\":null}\",' + - '\"raftSession\":1}' }, - ] }, - { db: 'xxxfriday10', method: 0, entries: [ - { value: '{\"attributes\":\"{\\\"name\\\":\\\"xxxfriday10\\\",' + - '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + - 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + - '\\\"test_1518720219\\\",\\\"creationDate\\\":' + - '\\\"2018-02-16T21:55:16.415Z\\\",\\\"mdBucketModelVersion\\\":5,' + - '\\\"transient\\\":true,\\\"deleted\\\":false,' + - '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + - '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + - '\\\":null,\\\"replicationConfiguration\\\":null}\"}' }, - ] }, - { db: 'xxxfriday10', method: 7, entries: [ - { value: '{\"attributes\":\"{\\\"name\\\":\\\"xxxfriday10\\\",' + - '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + - 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + - '\\\"test_1518720219\\\",\\\"creationDate\\\":' + - '\\\"2018-02-16T21:55:16.415Z\\\",\\\"mdBucketModelVersion\\\":5,' + - '\\\"transient\\\":false,\\\"deleted\\\":false,' + - '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + - '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + - '\\\":null,\\\"replicationConfiguration\\\":null}\",' + - '\"raftSession\":1}' }, - ] }, - { db: 'xxxfriday10', method: 8, entries: [ - { - key: 'afternoon', - value: '{\"owner-display-name\":\"test_1518720219\",' + - '\"owner-id\":\"94224c921648ada653f584f3caf42654ccf3f1cb' + - 'd2e569a24e88eb460f2f84d8\",\"content-length\":0,' + - '\"content-md5\":\"d41d8cd98f00b204e9800998ecf8427e\",' + - '\"x-amz-version-id\":\"null\",' + - '\"x-amz-server-version-id\":\"\",\"x-amz-storage-class' + - '\":\"STANDARD\",\"x-amz-server-side-encryption\":\"\",' + - '\"x-amz-server-side-encryption-aws-kms-key-id\":\"\",' + - '\"x-amz-server-side-encryption-customer-algorithm\":' + - '\"\",\"x-amz-website-redirect-location\":\"\",\"acl\":' + - '{\"Canned\":\"private\",\"FULL_CONTROL\":[],' + - '\"WRITE_ACP\":[],\"READ\":[],\"READ_ACP\":[]},\"key\":' + - '\"\",\"location\":null,\"isDeleteMarker\":false,\"tags' + - '\":{},\"replicationInfo\":{\"status\":\"\",\"backends\":' + - '[],\"content\":[],\"destination\":\"\",\"storageClass\":' + - '\"\",\"role\":\"\",\"storageType\":\"\",' + - '\"dataStoreVersionId\":\"\"},\"dataStoreName\":' + - '\"us-east-1\",\"last-modified\":\"2018-02-16T21:56:52.' 
+ - '690Z\",\"md-model-version\":3}', - }, - ] }, - ] }; + { + db: 'friday', + method: 0, + entries: [ + { + value: + '{\"attributes\":\"{\\\"name\\\":\\\"friday\\\",' + + '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + + 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + + '\\\"test_1518720219\\\",\\\"creationDate\\\":' + + '\\\"2018-02-16T19:59:31.664Z\\\",\\\"mdBucketModelVersion\\\":5,' + + '\\\"transient\\\":true,\\\"deleted\\\":false,' + + '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + + '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + + '\\\":null,\\\"replicationConfiguration\\\":null}\"}', + }, + ], + }, + { + db: 'friday', + method: 7, + entries: [ + { + value: + '{\"attributes\":\"{\\\"name\\\":\\\"friday\\\",' + + '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + + 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + + '\\\"test_1518720219\\\",\\\"creationDate\\\":' + + '\\\"2018-02-16T19:59:31.664Z\\\",\\\"mdBucketModelVersion\\\":5,' + + '\\\"transient\\\":false,\\\"deleted\\\":false,' + + '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + + '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + + '\\\":null,\\\"replicationConfiguration\\\":null}\",' + + '\"raftSession\":1}', + }, + ], + }, + { + db: 'friday7', + method: 0, + entries: [ + { + value: + '{\"attributes\":\"{\\\"name\\\":\\\"friday7\\\",' + + '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + + 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + + '\\\"test_1518720219\\\",\\\"creationDate\\\":' + + '\\\"2018-02-16T20:41:34.253Z\\\",\\\"mdBucketModelVersion\\\":5,' + + '\\\"transient\\\":true,\\\"deleted\\\":false,' + + '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + + '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + + '\\\":null,\\\"replicationConfiguration\\\":null}\"}', + }, + ], + }, + { + db: 'friday7', + method: 7, + entries: [ + { + value: + '{\"attributes\":\"{\\\"name\\\":\\\"friday7\\\",' + + '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + + 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + + '\\\"test_1518720219\\\",\\\"creationDate\\\":' + + '\\\"2018-02-16T20:41:34.253Z\\\",\\\"mdBucketModelVersion\\\":5,' + + '\\\"transient\\\":false,\\\"deleted\\\":false,' + + '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + + '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + + '\\\":null,\\\"replicationConfiguration\\\":null}\",' + + '\"raftSession\":1}', + }, + ], + }, + { + db: 'xxxfriday10', + method: 0, + entries: [ + { + value: + '{\"attributes\":\"{\\\"name\\\":\\\"xxxfriday10\\\",' + + '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + + 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + + '\\\"test_1518720219\\\",\\\"creationDate\\\":' + + '\\\"2018-02-16T21:55:16.415Z\\\",\\\"mdBucketModelVersion\\\":5,' + + '\\\"transient\\\":true,\\\"deleted\\\":false,' + + '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + + '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + + '\\\":null,\\\"replicationConfiguration\\\":null}\"}', + }, + ], + }, + { + db: 'xxxfriday10', + method: 7, + entries: [ + { + value: + '{\"attributes\":\"{\\\"name\\\":\\\"xxxfriday10\\\",' + + '\\\"owner\\\":\\\"94224c921648ada653f584f3caf42654ccf3f1cb' + + 'd2e569a24e88eb460f2f84d8\\\",\\\"ownerDisplayName\\\":' + + '\\\"test_1518720219\\\",\\\"creationDate\\\":' + + 
'\\\"2018-02-16T21:55:16.415Z\\\",\\\"mdBucketModelVersion\\\":5,' + + '\\\"transient\\\":false,\\\"deleted\\\":false,' + + '\\\"serverSideEncryption\\\":null,\\\"versioningConfiguration' + + '\\\":null,\\\"locationConstraint\\\":\\\"us-east-1\\\",\\\"cors' + + '\\\":null,\\\"replicationConfiguration\\\":null}\",' + + '\"raftSession\":1}', + }, + ], + }, + { + db: 'xxxfriday10', + method: 8, + entries: [ + { + key: 'afternoon', + value: + '{\"owner-display-name\":\"test_1518720219\",' + + '\"owner-id\":\"94224c921648ada653f584f3caf42654ccf3f1cb' + + 'd2e569a24e88eb460f2f84d8\",\"content-length\":0,' + + '\"content-md5\":\"d41d8cd98f00b204e9800998ecf8427e\",' + + '\"x-amz-version-id\":\"null\",' + + '\"x-amz-server-version-id\":\"\",\"x-amz-storage-class' + + '\":\"STANDARD\",\"x-amz-server-side-encryption\":\"\",' + + '\"x-amz-server-side-encryption-aws-kms-key-id\":\"\",' + + '\"x-amz-server-side-encryption-customer-algorithm\":' + + '\"\",\"x-amz-website-redirect-location\":\"\",\"acl\":' + + '{\"Canned\":\"private\",\"FULL_CONTROL\":[],' + + '\"WRITE_ACP\":[],\"READ\":[],\"READ_ACP\":[]},\"key\":' + + '\"\",\"location\":null,\"isDeleteMarker\":false,\"tags' + + '\":{},\"replicationInfo\":{\"status\":\"\",\"backends\":' + + '[],\"content\":[],\"destination\":\"\",\"storageClass\":' + + '\"\",\"role\":\"\",\"storageType\":\"\",' + + '\"dataStoreVersionId\":\"\"},\"dataStoreName\":' + + '\"us-east-1\",\"last-modified\":\"2018-02-16T21:56:52.' + + '690Z\",\"md-model-version\":3}', + }, + ], + }, + ], +}; -const mockLogString = '\\/_\\/raft_sessions\\/[\\d]*\\/log\\?begin=' + - '[\\d]*&limit=[\\d]*&targetLeader=false'; +const mockLogString = '\\/_\\/raft_sessions\\/[\\d]*\\/log\\?begin=' + '[\\d]*&limit=[\\d]*&targetLeader=false'; const mockLogURLRegex = new RegExp(mockLogString); class MetadataMock { onRequest(req, res) { if (req.method !== 'GET') { res.writeHead(501); - return res.end(JSON.stringify({ - error: 'mock server only supports GET requests', - })); + return res.end( + JSON.stringify({ + error: 'mock server only supports GET requests', + }) + ); } if (/\/_\/raft_sessions\/[1-8]\/bucket/.test(req.url)) { const value = ['bucket1', 'bucket2', 'users..bucket']; @@ -214,30 +264,41 @@ class MetadataMock { } else if (/\/default\/attributes\/[a-z0-9]/.test(req.url)) { const bucketName = req.url.split('/'); const bucketMd = dummyBucketMD[bucketName[bucketName.length - 1]]; - const dummyBucketMdObj = new BucketInfo(bucketMd._name, - bucketMd._owner, bucketMd._ownerDisplayName, - bucketMd._creationDate, bucketMd._mdBucketModelVersion, - bucketMd._acl, bucketMd._transient, bucketMd._deleted, + const dummyBucketMdObj = new BucketInfo( + bucketMd._name, + bucketMd._owner, + bucketMd._ownerDisplayName, + bucketMd._creationDate, + bucketMd._mdBucketModelVersion, + bucketMd._acl, + bucketMd._transient, + bucketMd._deleted, bucketMd._serverSideEncryption, - bucketMd.versioningConfiguration, bucketMd._locationContraint, - bucketMd._websiteConfiguration, bucketMd._cors, - bucketMd._lifeCycle); + bucketMd.versioningConfiguration, + bucketMd._locationContraint, + bucketMd._websiteConfiguration, + bucketMd._cors, + bucketMd._lifeCycle + ); return res.end(dummyBucketMdObj.serialize()); - } else if - (/\/default\/bucket\/.*?listingType=Delimiter/.test(req.url)) { + } else if (/\/default\/bucket\/.*?listingType=Delimiter/.test(req.url)) { return res.end(JSON.stringify(objectList)); } else if (/\/default\/bucket\/.*\/.*?/.test(req.url)) { - return res.end(JSON.stringify({ - 'owner-id': '123', - 'metadata': 
'dogsAreGood', - })); + return res.end( + JSON.stringify({ + 'owner-id': '123', + metadata: 'dogsAreGood', + }) + ); } else if (mockLogURLRegex.test(req.url)) { return res.end(JSON.stringify(mockLogs)); } res.writeHead(404); - return res.end(JSON.stringify({ - error: 'invalid path', - })); + return res.end( + JSON.stringify({ + error: 'invalid path', + }) + ); } } diff --git a/tests/functional/raw-node/utils/gcpUtils.js b/tests/functional/raw-node/utils/gcpUtils.js index 6a8db5deb3..776bf9210a 100644 --- a/tests/functional/raw-node/utils/gcpUtils.js +++ b/tests/functional/raw-node/utils/gcpUtils.js @@ -37,54 +37,67 @@ function gcpClientRetry(fn, params, callback, retry = 0) { // mpu test helpers function gcpMpuSetup(params, callback) { const { gcpClient, bucketNames, key, partCount, partSize } = params; - return async.waterfall([ - next => gcpClient.createMultipartUpload({ - Bucket: bucketNames.mpu.Name, - Key: key, - }, (err, res) => { - assert.equal(err, null, - `Expected success, but got error ${err}`); - return next(null, res.UploadId); - }), - (uploadId, next) => { - if (partCount <= 0) { - return next('SkipPutPart', { uploadId }); - } - const arrayData = Array.from(Array(partCount).keys()); - const etagList = Array(partCount); - let count = 0; - return async.eachLimit(arrayData, 10, - (info, moveOn) => { - gcpClient.uploadPart({ - Bucket: bucketNames.mpu.Name, - Key: key, - UploadId: uploadId, - PartNumber: info + 1, - Body: Buffer.alloc(partSize), - ContentLength: partSize, - }, (err, res) => { - if (err) { - return moveOn(err); + return async.waterfall( + [ + next => + gcpClient.createMultipartUpload( + { + Bucket: bucketNames.mpu.Name, + Key: key, + }, + (err, res) => { + assert.equal(err, null, `Expected success, but got error ${err}`); + return next(null, res.UploadId); } - if (!(++count % 100)) { - process.stdout.write(`Uploaded Parts: ${count}\n`); + ), + (uploadId, next) => { + if (partCount <= 0) { + return next('SkipPutPart', { uploadId }); + } + const arrayData = Array.from(Array(partCount).keys()); + const etagList = Array(partCount); + let count = 0; + return async.eachLimit( + arrayData, + 10, + (info, moveOn) => { + gcpClient.uploadPart( + { + Bucket: bucketNames.mpu.Name, + Key: key, + UploadId: uploadId, + PartNumber: info + 1, + Body: Buffer.alloc(partSize), + ContentLength: partSize, + }, + (err, res) => { + if (err) { + return moveOn(err); + } + if (!(++count % 100)) { + process.stdout.write(`Uploaded Parts: ${count}\n`); + } + etagList[info] = res.ETag; + return moveOn(null); + } + ); + }, + err => { + next(err, { uploadId, etagList }); } - etagList[info] = res.ETag; - return moveOn(null); - }); - }, err => { - next(err, { uploadId, etagList }); - }); - }, - ], (err, result) => { - if (err) { - if (err === 'SkipPutPart') { - return callback(null, result); + ); + }, + ], + (err, result) => { + if (err) { + if (err === 'SkipPutPart') { + return callback(null, result); + } + return callback(err); } - return callback(err); + return callback(null, result); } - return callback(null, result); - }); + ); } function genPutTagObj(size, duplicate) { @@ -133,12 +146,13 @@ function genDelTagObj(size, tagPrefix) { const regionalLoc = 'us-west1'; const multiRegionalLoc = 'us'; function setBucketClass(storageClass) { - const locationConstraint = - storageClass === 'REGIONAL' ? regionalLoc : multiRegionalLoc; - return '' + + const locationConstraint = storageClass === 'REGIONAL' ? 
regionalLoc : multiRegionalLoc; + return ( + '' + `${locationConstraint}` + `${storageClass}` + - ''; + '' + ); } module.exports = { diff --git a/tests/functional/raw-node/utils/makeRequest.js b/tests/functional/raw-node/utils/makeRequest.js index 4f0b8f070a..73749becb7 100644 --- a/tests/functional/raw-node/utils/makeRequest.js +++ b/tests/functional/raw-node/utils/makeRequest.js @@ -53,9 +53,18 @@ function _decodeURI(uri) { * @return {undefined} - and call callback */ function makeRequest(params, callback) { - const { hostname, port, method, queryObj, headers, path, - authCredentials, requestBody, jsonResponse, - urlForSignature } = params; + const { + hostname, + port, + method, + queryObj, + headers, + path, + authCredentials, + requestBody, + jsonResponse, + urlForSignature, + } = params; const options = { hostname, port, @@ -115,8 +124,16 @@ function makeRequest(params, callback) { // decode path because signing code re-encodes it req.path = _decodeURI(encodedPath); if (authCredentials && !params.GCP) { - auth.client.generateV4Headers(req, queryObj || '', - authCredentials.accessKey, authCredentials.secretKey, 's3', undefined, undefined, requestBody); + auth.client.generateV4Headers( + req, + queryObj || '', + authCredentials.accessKey, + authCredentials.secretKey, + 's3', + undefined, + undefined, + requestBody + ); } // restore original URL-encoded path req.path = savedPath; @@ -143,8 +160,7 @@ function makeRequest(params, callback) { * @return {undefined} - and call callback */ function makeS3Request(params, callback) { - const { method, queryObj, headers, bucket, objectKey, authCredentials, requestBody } - = params; + const { method, queryObj, headers, bucket, objectKey, authCredentials, requestBody } = params; const options = { authCredentials, hostname: process.env.AWS_ON_AIR ? 
's3.amazonaws.com' : ipAddress, @@ -175,8 +191,7 @@ function makeS3Request(params, callback) { * @return {undefined} - and call callback */ function makeGcpRequest(params, callback) { - const { method, queryObj, headers, bucket, objectKey, authCredentials, - requestBody } = params; + const { method, queryObj, headers, bucket, objectKey, authCredentials, requestBody } = params; const options = { authCredentials, requestBody, @@ -211,8 +226,7 @@ function makeGcpRequest(params, callback) { * @return {undefined} - and call callback */ function makeBackbeatRequest(params, callback) { - const { method, headers, bucket, objectKey, resourceType, - authCredentials, requestBody, queryObj } = params; + const { method, headers, bucket, objectKey, resourceType, authCredentials, requestBody, queryObj } = params; const options = { authCredentials, hostname: ipAddress, diff --git a/tests/functional/report/master.json b/tests/functional/report/master.json index cc83b902dd..9fe2fc66b9 100644 --- a/tests/functional/report/master.json +++ b/tests/functional/report/master.json @@ -1,6 +1,6 @@ { "tests": { - "files": [ "/test" ], + "files": ["/test"], "on": "aggressor" } } diff --git a/tests/functional/report/monitoring.js b/tests/functional/report/monitoring.js index 529aeaebef..bc12480ffb 100644 --- a/tests/functional/report/monitoring.js +++ b/tests/functional/report/monitoring.js @@ -7,13 +7,20 @@ describe('Monitoring - getting metrics', () => { const conf = require('../config.json'); async function query(path, method = 'GET', token = 'report-token-1') { - return new Promise(resolve => http.request({ - method, - host: conf.ipAddress, - path, - port: 8000, - headers: { 'x-scal-report-token': token }, - }, () => resolve()).end()); + return new Promise(resolve => + http + .request( + { + method, + host: conf.ipAddress, + path, + port: 8000, + headers: { 'x-scal-report-token': token }, + }, + () => resolve() + ) + .end() + ); } async function getMetrics() { @@ -22,14 +29,18 @@ describe('Monitoring - getting metrics', () => { assert.strictEqual(res.statusCode, 200); const body = []; - res.on('data', chunk => { body.push(chunk); }); + res.on('data', chunk => { + body.push(chunk); + }); res.on('end', () => resolve(body.join(''))); }); }); } function parseMetric(metrics, name, labels) { - const labelsString = Object.entries(labels).map(e => `${e[0]}="${e[1]}"`).join(','); + const labelsString = Object.entries(labels) + .map(e => `${e[0]}="${e[1]}"`) + .join(','); const metric = metrics.match(new RegExp(`^${name}{${labelsString}} (.*)$`, 'm')); return metric ? 
metric[1] : null; } @@ -51,26 +62,26 @@ describe('Monitoring - getting metrics', () => { [ // Check all methods are reported (on unsupported route) - ['/_/fooooo', { method: 'GET', code: '400' }], - ['/_/fooooo', { method: 'PUT', code: '400' }], - ['/_/fooooo', { method: 'POST', code: '400' }], - ['/_/fooooo', { method: 'DELETE', code: '400' }], + ['/_/fooooo', { method: 'GET', code: '400' }], + ['/_/fooooo', { method: 'PUT', code: '400' }], + ['/_/fooooo', { method: 'POST', code: '400' }], + ['/_/fooooo', { method: 'DELETE', code: '400' }], // S3/api routes - ['/', { method: 'GET', code: '403', action: 'serviceGet' }], - ['/foo', { method: 'GET', code: '404', action: 'bucketGet' }], - ['/foo/bar', { method: 'GET', code: '404', action: 'objectGet' }], + ['/', { method: 'GET', code: '403', action: 'serviceGet' }], + ['/foo', { method: 'GET', code: '404', action: 'bucketGet' }], + ['/foo/bar', { method: 'GET', code: '404', action: 'objectGet' }], // Internal handlers - ['/_/report', { method: 'GET', code: '200', action: 'report' }], - ['/_/backbeat', { method: 'GET', code: '405', action: 'routeBackbeat' }], - ['/_/metadata', { method: 'GET', code: '403', action: 'routeMetadata' }], - ['/_/workflow-engine-operator', - { method: 'GET', code: '405', action: 'routeWorkflowEngineOperator' }], + ['/_/report', { method: 'GET', code: '200', action: 'report' }], + ['/_/backbeat', { method: 'GET', code: '405', action: 'routeBackbeat' }], + ['/_/metadata', { method: 'GET', code: '403', action: 'routeMetadata' }], + ['/_/workflow-engine-operator', { method: 'GET', code: '405', action: 'routeWorkflowEngineOperator' }], ].forEach(([path, labels]) => { it(`should count http ${labels.method} requests metrics on ${path}`, async () => { const count = parseRequestsCount(await getMetrics(), labels); - for (let i = 1; i <= 3; i++) { /* eslint no-await-in-loop: "off" */ + for (let i = 1; i <= 3; i++) { + /* eslint no-await-in-loop: "off" */ await query(path, labels.method); const c = parseRequestsCount(await getMetrics(), labels); diff --git a/tests/functional/s3cmd/tests.js b/tests/functional/s3cmd/tests.js index 9140714ad6..466accbae1 100644 --- a/tests/functional/s3cmd/tests.js +++ b/tests/functional/s3cmd/tests.js @@ -14,11 +14,7 @@ const emptyUpload = 'Utest0B'; const emptyDownload = 'Dtest0B'; const download = 'tmpfile'; const MPUpload = 'test60MB'; -const MPUploadSplitter = [ - 'test60..|..MB', - '..|..test60MB', - 'test60MB..|..', -]; +const MPUploadSplitter = ['test60..|..MB', '..|..test60MB', 'test60MB..|..']; const MPDownload = 'MPtmpfile'; const MPDownloadCopy = 'MPtmpfile2'; const downloadCopy = 'tmpfile2'; @@ -51,8 +47,7 @@ function diff(putFile, receivedFile, done) { function createFile(name, bytes, callback) { process.stdout.write(`dd if=/dev/urandom of=${name} bs=${bytes} count=1\n`); - const ret = proc.spawnSync('dd', ['if=/dev/urandom', `of=${name}`, - `bs=${bytes}`, 'count=1'], { stdio: 'inherit' }); + const ret = proc.spawnSync('dd', ['if=/dev/urandom', `of=${name}`, `bs=${bytes}`, 'count=1'], { stdio: 'inherit' }); assert.strictEqual(ret.status, 0); callback(); } @@ -81,8 +76,7 @@ function exec(args, done, exitCode) { } process.stdout.write(`${program} ${av}\n`); const ret = proc.spawnSync(program, av, { stdio: 'inherit' }); - assert.strictEqual(ret.status, exit, - 's3cmd did not yield expected exit status.'); + assert.strictEqual(ret.status, exit, 's3cmd did not yield expected exit status.'); done(); } @@ -106,12 +100,16 @@ function checkRawOutput(args, lineFinder, testString, stream, cb) { 
}); child.on('close', () => { if (stream === 'stderr') { - const foundIt = allErrData.join('').split('\n') + const foundIt = allErrData + .join('') + .split('\n') .filter(item => item.indexOf(lineFinder) > -1) .some(item => item.indexOf(testString) > -1); return cb(foundIt); } - const foundIt = allData.join('').split('\n') + const foundIt = allData + .join('') + .split('\n') .filter(item => item.indexOf(lineFinder) > -1) .some(item => item.indexOf(testString) > -1); return cb(foundIt); @@ -123,12 +121,12 @@ function findEndString(data, start) { const end = data.length; for (let i = start + 1; i < end; ++i) { if (data[i] === delimiter) { - return (i); + return i; } else if (data[i] === '\\') { ++i; } } - return (-1); + return -1; } function findEndJson(data, start) { @@ -143,10 +141,10 @@ function findEndJson(data, start) { i = findEndString(data, i); } if (count === 0) { - return (i); + return i; } } - return (-1); + return -1; } function readJsonFromChild(child, lineFinder, cb) { @@ -160,9 +158,11 @@ function readJsonFromChild(child, lineFinder, cb) { const findLine = data.indexOf(lineFinder); const findBrace = data.indexOf('{', findLine); const findEnd = findEndJson(data, findBrace); - const endJson = data.substring(findBrace, findEnd + 1) - .replace(/"/g, '\\"').replace(/'/g, '"') - .replace(/b'/g, '\'') + const endJson = data + .substring(findBrace, findEnd + 1) + .replace(/"/g, '\\"') + .replace(/'/g, '"') + .replace(/b'/g, "'") .replace(/b"/g, '"'); return cb(JSON.parse(endJson)); }); @@ -213,37 +213,32 @@ function retrieveInfo() { function createEncryptedBucket(name, cb) { const res = retrieveInfo(); const prog = `${__dirname}/../../../bin/create_encrypted_bucket.js`; - let args = [ - prog, - '-a', res.accessKey, - '-k', res.secretKey, - '-b', name, - '-h', res.host, - '-p', res.port, - '-v', - ]; + let args = [prog, '-a', res.accessKey, '-k', res.secretKey, '-b', name, '-h', res.host, '-p', res.port, '-v']; if (conf.https) { args = args.concat('-s'); } const body = []; - const child = proc.spawn(args[0], args) - .on('exit', () => { - const hasSucceed = body.join('').split('\n').find(item => { - const json = safeJSONParse(item); - const test = !(json instanceof Error) && json.name === 'S3' && - json.statusCode === 200; - if (test) { - return true; + const child = proc + .spawn(args[0], args) + .on('exit', () => { + const hasSucceed = body + .join('') + .split('\n') + .find(item => { + const json = safeJSONParse(item); + const test = !(json instanceof Error) && json.name === 'S3' && json.statusCode === 200; + if (test) { + return true; + } + return false; + }); + if (!hasSucceed) { + process.stderr.write(`${body.join('')}\n`); + return cb(new Error('Cannot create encrypted bucket')); } - return false; - }); - if (!hasSucceed) { - process.stderr.write(`${body.join('')}\n`); - return cb(new Error('Cannot create encrypted bucket')); - } - return cb(); - }) - .on('error', cb); + return cb(); + }) + .on('error', cb); child.stdout.on('data', chunk => body.push(chunk.toString())); } @@ -257,10 +252,7 @@ describe('s3cmd putBucket', () => { // pass by returning error. 
If legacyAWSBehvior, request // would return a 200 it('put the same bucket, should fail', done => { - exec([ - 'mb', `s3://${bucket}`, - '--bucket-location=scality-us-west-1', - ], done, 13); + exec(['mb', `s3://${bucket}`, '--bucket-location=scality-us-west-1'], done, 13); }); it('put an invalid bucket, should fail', done => { @@ -276,16 +268,15 @@ describe('s3cmd putBucket', () => { }); if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { - it('creates a valid bucket with server side encryption', - function f(done) { - this.timeout(5000); - exec(['rb', `s3://${bucket}`], err => { - if (err) { - return done(err); - } - return createEncryptedBucket(bucket, done); - }); - }); + it('creates a valid bucket with server side encryption', function f(done) { + this.timeout(5000); + exec(['rb', `s3://${bucket}`], err => { + if (err) { + return done(err); + } + return createEncryptedBucket(bucket, done); + }); + }); } }); @@ -299,23 +290,18 @@ describe('s3cmd put and get bucket ACLs', function aclBuck() { }); it('should get canned ACL that was set', done => { - checkRawOutput(['info', `s3://${bucket}`], 'ACL', '*anon*: READ', - 'stdout', foundIt => { + checkRawOutput(['info', `s3://${bucket}`], 'ACL', '*anon*: READ', 'stdout', foundIt => { assert(foundIt); done(); }); }); it('should set a specific ACL', done => { - exec([ - 'setacl', `s3://${bucket}`, - `--acl-grant=write:${emailAccount}`, - ], done); + exec(['setacl', `s3://${bucket}`, `--acl-grant=write:${emailAccount}`], done); }); it('should get specific ACL that was set', done => { - checkRawOutput(['info', `s3://${bucket}`], 'ACL', - `${lowerCaseEmail}: WRITE`, 'stdout', foundIt => { + checkRawOutput(['info', `s3://${bucket}`], 'ACL', `${lowerCaseEmail}: WRITE`, 'stdout', foundIt => { assert(foundIt); done(); }); @@ -340,23 +326,18 @@ describe('s3cmd getService', () => { }); }); - it("should have response headers matching AWS's response headers", - done => { - provideLineOfInterest(['ls', '--debug'], '\'headers\': {', - parsedObject => { - assert(parsedObject['x-amz-id-2']); - assert(parsedObject['transfer-encoding']); - assert(parsedObject['x-amz-request-id']); - const gmtDate = new Date(parsedObject.date) - .toUTCString(); - assert.strictEqual(parsedObject.date, gmtDate); - assert.strictEqual(parsedObject - ['content-type'], 'application/xml'); - assert.strictEqual(parsedObject - ['set-cookie'], undefined); - done(); - }); + it("should have response headers matching AWS's response headers", done => { + provideLineOfInterest(['ls', '--debug'], "'headers': {", parsedObject => { + assert(parsedObject['x-amz-id-2']); + assert(parsedObject['transfer-encoding']); + assert(parsedObject['x-amz-request-id']); + const gmtDate = new Date(parsedObject.date).toUTCString(); + assert.strictEqual(parsedObject.date, gmtDate); + assert.strictEqual(parsedObject['content-type'], 'application/xml'); + assert.strictEqual(parsedObject['set-cookie'], undefined); + done(); }); + }); }); describe('s3cmd putObject', function toto() { @@ -409,10 +390,7 @@ describe('s3cmd copyObject without MPU to same bucket', function copyStuff() { }); it('should copy an object to the same bucket', done => { - exec([ - 'cp', `s3://${bucket}/${upload}`, - `s3://${bucket}/${upload}copy`, - ], done); + exec(['cp', `s3://${bucket}/${upload}`, `s3://${bucket}/${upload}copy`], done); }); it('should get an object that was copied', done => { @@ -428,42 +406,36 @@ describe('s3cmd copyObject without MPU to same bucket', function copyStuff() { }); }); -describe('s3cmd copyObject without MPU to 
different bucket ' + - '(always unencrypted)', - function copyStuff() { - const copyBucket = 'receiverbucket'; - this.timeout(40000); +describe('s3cmd copyObject without MPU to different bucket ' + '(always unencrypted)', function copyStuff() { + const copyBucket = 'receiverbucket'; + this.timeout(40000); - before('create receiver bucket', done => { - exec(['mb', `s3://${copyBucket}`], done); - }); + before('create receiver bucket', done => { + exec(['mb', `s3://${copyBucket}`], done); + }); - after('delete downloaded file and receiver bucket' + - 'copied', done => { - deleteFile(downloadCopy, () => { - exec(['rb', `s3://${copyBucket}`], done); - }); + after('delete downloaded file and receiver bucket' + 'copied', done => { + deleteFile(downloadCopy, () => { + exec(['rb', `s3://${copyBucket}`], done); }); + }); - it('should copy an object to the new bucket', done => { - exec([ - 'cp', `s3://${bucket}/${upload}`, - `s3://${copyBucket}/${upload}`, - ], done); - }); + it('should copy an object to the new bucket', done => { + exec(['cp', `s3://${bucket}/${upload}`, `s3://${copyBucket}/${upload}`], done); + }); - it('should get an object that was copied', done => { - exec(['get', `s3://${copyBucket}/${upload}`, downloadCopy], done); - }); + it('should get an object that was copied', done => { + exec(['get', `s3://${copyBucket}/${upload}`, downloadCopy], done); + }); - it('downloaded copy file should equal original uploaded file', done => { - diff(upload, downloadCopy, done); - }); + it('downloaded copy file should equal original uploaded file', done => { + diff(upload, downloadCopy, done); + }); - it('should delete copy of object', done => { - exec(['rm', `s3://${copyBucket}/${upload}`], done); - }); + it('should delete copy of object', done => { + exec(['rm', `s3://${copyBucket}/${upload}`], done); }); +}); describe('s3cmd put and get object ACLs', function aclObj() { this.timeout(60000); @@ -475,30 +447,25 @@ describe('s3cmd put and get object ACLs', function aclObj() { }); it('should get canned ACL that was set', done => { - checkRawOutput(['info', `s3://${bucket}/${upload}`], 'ACL', - '*anon*: READ', 'stdout', foundIt => { + checkRawOutput(['info', `s3://${bucket}/${upload}`], 'ACL', '*anon*: READ', 'stdout', foundIt => { assert(foundIt); done(); }); }); it('should set a specific ACL', done => { - exec(['setacl', `s3://${bucket}/${upload}`, - `--acl-grant=read:${emailAccount}`], done); + exec(['setacl', `s3://${bucket}/${upload}`, `--acl-grant=read:${emailAccount}`], done); }); it('should get specific ACL that was set', done => { - checkRawOutput(['info', `s3://${bucket}/${upload}`], 'ACL', - `${lowerCaseEmail}: READ`, 'stdout', foundIt => { + checkRawOutput(['info', `s3://${bucket}/${upload}`], 'ACL', `${lowerCaseEmail}: READ`, 'stdout', foundIt => { assert(foundIt); done(); }); }); - it('should return error if set acl for ' + - 'nonexistent object', done => { - exec(['setacl', `s3://${bucket}/${nonexist}`, - '--acl-public'], done, 12); + it('should return error if set acl for ' + 'nonexistent object', done => { + exec(['setacl', `s3://${bucket}/${nonexist}`, '--acl-public'], done, 12); }); }); @@ -508,16 +475,14 @@ describe('s3cmd delObject', () => { }); it('delete an already deleted object, should return a 204', done => { - provideLineOfInterest(['rm', `s3://${bucket}/${upload}`, '--debug'], - 'DEBUG: Response:\n{', parsedObject => { + provideLineOfInterest(['rm', `s3://${bucket}/${upload}`, '--debug'], 'DEBUG: Response:\n{', parsedObject => { assert.strictEqual(parsedObject.status, 
204); done(); }); }); it('delete non-existing object, should return a 204', done => { - provideLineOfInterest(['rm', `s3://${bucket}/${nonexist}`, '--debug'], - 'DEBUG: Response:\n{', parsedObject => { + provideLineOfInterest(['rm', `s3://${bucket}/${nonexist}`, '--debug'], 'DEBUG: Response:\n{', parsedObject => { assert.strictEqual(parsedObject.status, 204); done(); }); @@ -596,10 +561,7 @@ describe('s3cmd multipart upload', function titi() { }); it('should copy an object that was put via multipart upload', done => { - exec([ - 'cp', `s3://${bucket}/${MPUpload}`, - `s3://${bucket}/${MPUpload}copy`, - ], done); + exec(['cp', `s3://${bucket}/${MPUpload}`, `s3://${bucket}/${MPUpload}copy`], done); }); it('should get an object that was copied', done => { @@ -663,9 +625,7 @@ MPUploadSplitter.forEach(file => { }); }); - -describe('s3cmd put, get and delete object with spaces ' + - 'in object key names', function test() { +describe('s3cmd put, get and delete object with spaces ' + 'in object key names', function test() { this.timeout(0); const keyWithSpacesAndPluses = 'key with spaces and + pluses +'; before('create file to put', done => { @@ -688,13 +648,11 @@ describe('s3cmd put, get and delete object with spaces ' + }); it('should get file with spaces', done => { - exec(['get', `s3://${bucket}/${keyWithSpacesAndPluses}`, download], - done); + exec(['get', `s3://${bucket}/${keyWithSpacesAndPluses}`, download], done); }); it('should list bucket showing file with spaces', done => { - checkRawOutput(['ls', `s3://${bucket}`], `s3://${bucket}`, - keyWithSpacesAndPluses, 'stdout', foundIt => { + checkRawOutput(['ls', `s3://${bucket}`], `s3://${bucket}`, keyWithSpacesAndPluses, 'stdout', foundIt => { assert(foundIt); done(); }); @@ -726,27 +684,26 @@ describe('s3cmd info', () => { // test that POLICY and CORS are returned as 'none' it('should find that policy has a value of none', done => { - checkRawOutput(['info', `s3://${bucket}`], 'Policy', 'none', - 'stdout', foundIt => { + checkRawOutput(['info', `s3://${bucket}`], 'Policy', 'none', 'stdout', foundIt => { assert(foundIt); done(); }); }); it('should find that cors has a value of none', done => { - checkRawOutput(['info', `s3://${bucket}`], 'CORS', 'none', - 'stdout', foundIt => { + checkRawOutput(['info', `s3://${bucket}`], 'CORS', 'none', 'stdout', foundIt => { assert(foundIt); done(); }); }); describe('after putting cors configuration', () => { - const corsConfig = '' + - 'PUT' + - 'http://www.allowedorigin.com' + - ''; + const corsConfig = + '' + + 'PUT' + + 'http://www.allowedorigin.com' + + ''; const filename = 'corss3cmdfile'; beforeEach(done => { @@ -760,8 +717,7 @@ describe('s3cmd info', () => { }); it('should find that cors has a value', done => { - checkRawOutput(['info', `s3://${bucket}`], 'CORS', corsConfig, - 'stdout', foundIt => { + checkRawOutput(['info', `s3://${bucket}`], 'CORS', corsConfig, 'stdout', foundIt => { assert(foundIt, 'Did not find value for cors'); done(); }); @@ -793,12 +749,14 @@ describe('s3cmd recursive delete with objects put by MPU', () => { this.timeout(120000); exec(['mb', `s3://${bucket}`], () => { createFile(upload16MB, 16777216, () => { - async.timesLimit(50, 1, (n, next) => { - exec([ - 'put', upload16MB, `s3://${bucket}/key${n}`, - '--multipart-chunk-size-mb=5', - ], next); - }, done); + async.timesLimit( + 50, + 1, + (n, next) => { + exec(['put', upload16MB, `s3://${bucket}/key${n}`, '--multipart-chunk-size-mb=5'], next); + }, + done + ); }); }); }); @@ -822,9 +780,7 @@ describeSkipIfE2E('If no 
location is sent with the request', () => { // WARNING: change "us-east-1" to another locationConstraint depending // on the restEndpoints (./config.json) it('endpoint should be used to determine the locationConstraint', done => { - checkRawOutput(['info', `s3://${bucket}`], 'Location', 'us-east-1', - 'stdout', - foundIt => { + checkRawOutput(['info', `s3://${bucket}`], 'Location', 'us-east-1', 'stdout', foundIt => { assert(foundIt); done(); }); diff --git a/tests/functional/s3curl/tests.js b/tests/functional/s3curl/tests.js index c6562eff97..27fa79ae59 100644 --- a/tests/functional/s3curl/tests.js +++ b/tests/functional/s3curl/tests.js @@ -22,8 +22,7 @@ const aclBucket = 'acluniverse'; const nonexist = 'nonexist'; const prefix = 'topLevel'; const delimiter = '/'; -let ownerCanonicalId = '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d52' - + '18e7cd47ef2be'; +let ownerCanonicalId = '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d52' + '18e7cd47ef2be'; const endpoint = `${transport}://${ipAddress}:8000`; // Let's precompute a few paths @@ -51,11 +50,9 @@ function diff(putFile, receivedFile, done) { }); } - function createFile(name, bytes, callback) { process.stdout.write(`dd if=/dev/urandom of=${name} bs=${bytes} count=1\n`); - let ret = proc.spawnSync('dd', ['if=/dev/urandom', `of=${name}`, - `bs=${bytes}`, 'count=1'], { stdio: 'inherit' }); + let ret = proc.spawnSync('dd', ['if=/dev/urandom', `of=${name}`, `bs=${bytes}`, 'count=1'], { stdio: 'inherit' }); assert.strictEqual(ret.status, 0); process.stdout.write(`chmod ugoa+rw ${name}\n`); ret = proc.spawnSync('chmod', ['ugo+rw', name], { stdio: 'inherit' }); @@ -101,15 +98,13 @@ function provideRawOutput(args, cb) { httpCode = lines.find(line => { const trimmed = line.trim().toUpperCase(); // ignore 100 Continue HTTP code - if (trimmed.startsWith('HTTP/1.1 ') && - !trimmed.includes('100 CONTINUE')) { + if (trimmed.startsWith('HTTP/1.1 ') && !trimmed.includes('100 CONTINUE')) { return true; } return false; }); if (httpCode) { - httpCode = httpCode.trim().replace('HTTP/1.1 ', '') - .toUpperCase(); + httpCode = httpCode.trim().replace('HTTP/1.1 ', '').toUpperCase(); } else { process.stdout.write(`${lines.join('\n')}\n`); return cb(new Error("Can't find line in http response code")); @@ -134,15 +129,13 @@ function provideRawOutput(args, cb) { * @return {undefined} */ function putObjects(filepath, objectPaths, cb) { - provideRawOutput( - [`--put=${filepath}`, '--', objectPaths[0], '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - if (objectPaths.length > 1) { - return putObjects(filepath, objectPaths.slice(1), cb); - } - return cb(); - }); + provideRawOutput([`--put=${filepath}`, '--', objectPaths[0], '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + if (objectPaths.length > 1) { + return putObjects(filepath, objectPaths.slice(1), cb); + } + return cb(); + }); } /** @@ -157,15 +150,13 @@ function putObjects(filepath, objectPaths, cb) { * @return {undefined} */ function deleteRemoteItems(items, cb) { - provideRawOutput( - ['--delete', '--', items[0], '-v'], - httpCode => { - assert.strictEqual(httpCode, '204 NO CONTENT'); - if (items.length > 1) { - return deleteRemoteItems(items.slice(1), cb); - } - return cb(); - }); + provideRawOutput(['--delete', '--', items[0], '-v'], httpCode => { + assert.strictEqual(httpCode, '204 NO CONTENT'); + if (items.length > 1) { + return deleteRemoteItems(items.slice(1), cb); + } + return cb(); + }); } describe('s3curl put delete buckets', () => { @@ -175,84 +166,68 @@ 
describe('s3curl put delete buckets', () => { }); it('should put a valid bucket', done => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + }); }); - it('should return 409 error in new regions and 200 in us-east-1 ' + - '(legacyAWSBehvior) when try to put a bucket with a name ' + - 'already being used', done => { - provideRawOutput(['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert(httpCode === '200 OK' - || httpCode === '409 CONFLICT'); + it( + 'should return 409 error in new regions and 200 in us-east-1 ' + + '(legacyAWSBehvior) when try to put a bucket with a name ' + + 'already being used', + done => { + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert(httpCode === '200 OK' || httpCode === '409 CONFLICT'); done(); }); - }); + } + ); - it('should not be able to put a bucket with invalid xml' + - ' in the post body', done => { - provideRawOutput([ - '--createBucket', - '--', - '--data', - 'malformedxml', - bucketPath, - '-v', - ], (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'MalformedXML', - done); - }); + it('should not be able to put a bucket with invalid xml' + ' in the post body', done => { + provideRawOutput( + ['--createBucket', '--', '--data', 'malformedxml', bucketPath, '-v'], + (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '400 BAD REQUEST'); + assertError(rawOutput.stdout, 'MalformedXML', done); + } + ); }); - it('should not be able to put a bucket with xml that does' + - ' not conform to s3 docs for locationConstraint', done => { - provideRawOutput([ - '--createBucket', - '--', - '--data', - 'a', - bucketPath, - '-v', - ], (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'MalformedXML', - done); - }); - }); + it( + 'should not be able to put a bucket with xml that does' + ' not conform to s3 docs for locationConstraint', + done => { + provideRawOutput( + ['--createBucket', '--', '--data', 'a', bucketPath, '-v'], + (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '400 BAD REQUEST'); + assertError(rawOutput.stdout, 'MalformedXML', done); + } + ); + } + ); it('should not be able to put a bucket with an invalid name', done => { - provideRawOutput( - ['--createBucket', '--', `${endpoint}/2`, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'InvalidBucketName', done); - }); + provideRawOutput(['--createBucket', '--', `${endpoint}/2`, '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '400 BAD REQUEST'); + assertError(rawOutput.stdout, 'InvalidBucketName', done); + }); }); it('should not be able to put a bucket with an empty name', done => { - provideRawOutput( - ['--createBucket', '--', `${endpoint}/`, '-v'], - httpCode => { - assert.strictEqual(httpCode, '405 METHOD NOT ALLOWED'); - done(); - }); + provideRawOutput(['--createBucket', '--', `${endpoint}/`, '-v'], httpCode => { + assert.strictEqual(httpCode, '405 METHOD NOT ALLOWED'); + done(); + }); }); }); describe('s3curl delete bucket', () => { before(done => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + 
provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + }); }); after(done => { @@ -264,316 +239,256 @@ describe('s3curl put delete buckets', () => { }); it('should not be able to get a bucket that was deleted', done => { - provideRawOutput( - ['--', bucketPath, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '404 NOT FOUND'); - assertError(rawOutput.stdout, 'NoSuchBucket', done); - }); + provideRawOutput(['--', bucketPath, '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '404 NOT FOUND'); + assertError(rawOutput.stdout, 'NoSuchBucket', done); + }); }); - it('should be able to create a bucket with a name' + - 'of a bucket that has previously been deleted', done => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + it('should be able to create a bucket with a name' + 'of a bucket that has previously been deleted', done => { + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + }); }); }); }); describe('s3curl put and get bucket ACLs', () => { after(done => { - deleteRemoteItems([ - `${endpoint}/${aclBucket}`, - `${endpoint}/${aclBucket}2`, - ], done); + deleteRemoteItems([`${endpoint}/${aclBucket}`, `${endpoint}/${aclBucket}2`], done); }); it('should be able to create a bucket with a canned ACL', done => { - provideRawOutput([ - '--createBucket', - '--', - '-H', - 'x-amz-acl:public-read', - `${endpoint}/${aclBucket}`, - '-v', - ], httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); - }); - - it('should be able to get a canned ACL', done => { provideRawOutput( - ['--', `${endpoint}/${aclBucket}?acl`, '-v'], - (httpCode, rawOutput) => { + ['--createBucket', '--', '-H', 'x-amz-acl:public-read', `${endpoint}/${aclBucket}`, '-v'], + httpCode => { assert.strictEqual(httpCode, '200 OK'); - parseString(rawOutput.stdout, (err, xml) => { - if (err) { - assert.ifError(err); - } - assert.strictEqual(xml.AccessControlPolicy - .Owner[0].ID[0], ownerCanonicalId); - assert.strictEqual(xml.AccessControlPolicy - .AccessControlList[0].Grant[0] - .Grantee[0].ID[0], ownerCanonicalId); - assert.strictEqual(xml.AccessControlPolicy - .AccessControlList[0].Grant[0] - .Permission[0], 'FULL_CONTROL'); - assert.strictEqual(xml.AccessControlPolicy - .AccessControlList[0].Grant[1] - .Grantee[0].URI[0], - 'http://acs.amazonaws.com/groups/global/AllUsers'); - assert.strictEqual(xml.AccessControlPolicy - .AccessControlList[0].Grant[1] - .Permission[0], 'READ'); - done(); - }); - }); + done(); + } + ); }); - it('should be able to create a bucket with a specific ACL', done => { - provideRawOutput([ - '--createBucket', - '--', - '-H', - 'x-amz-grant-read:uri=' + - 'http://acs.amazonaws.com/groups/global/AllUsers', - `${endpoint}/${aclBucket}2`, - '-v', - ], httpCode => { + it('should be able to get a canned ACL', done => { + provideRawOutput(['--', `${endpoint}/${aclBucket}?acl`, '-v'], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '200 OK'); - done(); + parseString(rawOutput.stdout, (err, xml) => { + if (err) { + assert.ifError(err); + } + assert.strictEqual(xml.AccessControlPolicy.Owner[0].ID[0], ownerCanonicalId); + assert.strictEqual( + xml.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + ownerCanonicalId + ); + assert.strictEqual(xml.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], 
'FULL_CONTROL'); + assert.strictEqual( + xml.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].URI[0], + 'http://acs.amazonaws.com/groups/global/AllUsers' + ); + assert.strictEqual(xml.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], 'READ'); + done(); + }); }); }); - it('should be able to get a specifically set ACL', done => { + it('should be able to create a bucket with a specific ACL', done => { provideRawOutput( - ['--', `${endpoint}/${aclBucket}2?acl`, '-v'], - (httpCode, rawOutput) => { + [ + '--createBucket', + '--', + '-H', + 'x-amz-grant-read:uri=' + 'http://acs.amazonaws.com/groups/global/AllUsers', + `${endpoint}/${aclBucket}2`, + '-v', + ], + httpCode => { assert.strictEqual(httpCode, '200 OK'); - parseString(rawOutput.stdout, (err, xml) => { - if (err) { - assert.ifError(err); - } - assert.strictEqual(xml.AccessControlPolicy - .Owner[0].ID[0], ownerCanonicalId); - assert.strictEqual(xml.AccessControlPolicy - .AccessControlList[0].Grant[0] - .Grantee[0].URI[0], - 'http://acs.amazonaws.com/groups/global/AllUsers'); - assert.strictEqual(xml.AccessControlPolicy - .AccessControlList[0].Grant[0] - .Permission[0], 'READ'); - done(); - }); + done(); + } + ); + }); + + it('should be able to get a specifically set ACL', done => { + provideRawOutput(['--', `${endpoint}/${aclBucket}2?acl`, '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '200 OK'); + parseString(rawOutput.stdout, (err, xml) => { + if (err) { + assert.ifError(err); + } + assert.strictEqual(xml.AccessControlPolicy.Owner[0].ID[0], ownerCanonicalId); + assert.strictEqual( + xml.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].URI[0], + 'http://acs.amazonaws.com/groups/global/AllUsers' + ); + assert.strictEqual(xml.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], 'READ'); + done(); }); + }); }); }); describe('s3curl getService', () => { before(done => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + provideRawOutput(['--createBucket', '--', `${endpoint}/${aclBucket}`, '-v'], httpCode => { assert.strictEqual(httpCode, '200 OK'); - provideRawOutput( - ['--createBucket', '--', `${endpoint}/${aclBucket}`, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + done(); }); + }); }); after(done => { - deleteRemoteItems([ - bucketPath, - `${endpoint}/${aclBucket}`, - ], done); + deleteRemoteItems([bucketPath, `${endpoint}/${aclBucket}`], done); }); it('should get a list of all buckets created by user account', done => { - provideRawOutput( - ['--', `${endpoint}`, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '200 OK'); - parseString(rawOutput.stdout, (err, xml) => { - if (err) { - assert.ifError(err); - } - const bucketNames = xml.ListAllMyBucketsResult - .Buckets[0].Bucket - .map(item => item.Name[0]); - const whereIsMyBucket = bucketNames.indexOf(bucket); - assert(whereIsMyBucket > -1); - const whereIsMyAclBucket = bucketNames.indexOf(aclBucket); - assert(whereIsMyAclBucket > -1); - done(); - }); + provideRawOutput(['--', `${endpoint}`, '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '200 OK'); + parseString(rawOutput.stdout, (err, xml) => { + if (err) { + assert.ifError(err); + } + const bucketNames = xml.ListAllMyBucketsResult.Buckets[0].Bucket.map(item => item.Name[0]); + const whereIsMyBucket = bucketNames.indexOf(bucket); + 
assert(whereIsMyBucket > -1); + const whereIsMyAclBucket = bucketNames.indexOf(aclBucket); + assert(whereIsMyAclBucket > -1); + done(); }); + }); }); }); describe('s3curl putObject', () => { before(done => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - createFile(upload, 1048576, done); - }); + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + createFile(upload, 1048576, done); + }); }); after(done => { - deleteRemoteItems([ - `${prefixedPath}${upload}1`, - `${prefixedPath}${upload}2`, - `${prefixedPath}${upload}3`, - bucketPath, - ], done); + deleteRemoteItems( + [`${prefixedPath}${upload}1`, `${prefixedPath}${upload}2`, `${prefixedPath}${upload}3`, bucketPath], + done + ); }); // curl behavior is not consistent across the environments // skipping the test for now - it.skip('should not be able to put an object if request does not have ' + - 'content-length header', - done => { - provideRawOutput([ - '--debug', - `--put=${upload}`, - '--', - '-H', - 'content-length:', - `${prefixedPath}${upload}1`, - '-v', - ], (httpCode, rawOutput) => { + it.skip('should not be able to put an object if request does not have ' + 'content-length header', done => { + provideRawOutput( + ['--debug', `--put=${upload}`, '--', '-H', 'content-length:', `${prefixedPath}${upload}1`, '-v'], + (httpCode, rawOutput) => { assert.strictEqual(httpCode, '411 LENGTH REQUIRED'); assertError(rawOutput.stdout, 'MissingContentLength', done); - }); - }); - - it('should not be able to put an object if content-md5 header is ' + - 'invalid', - done => { - provideRawOutput(['--debug', `--put=${upload}`, - '--contentMd5', 'toto', '--', - `${endpoint}/${bucket}/` + - `${prefix}${delimiter}${upload}1`, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'InvalidDigest', done); - }); - }); - - // skip until we figure out how to parse the response in the CI - it.skip('should not be able to put an object if content-md5 header is ' + - 'mismatched MD5', - done => { - provideRawOutput(['--debug', `--put=${upload}`, - '--contentMd5', 'rL0Y20zC+Fzt72VPzMSk2A==', '--', - `${endpoint}/${bucket}/` + - `${prefix}${delimiter}${upload}1`, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'BadDigest', done); - }); - }); + } + ); + }); - it('should not be able to put an object if using streaming ' + - 'chunked-upload with a valid V2 signature', - done => { - provideRawOutput([ + it('should not be able to put an object if content-md5 header is ' + 'invalid', done => { + provideRawOutput( + [ '--debug', `--put=${upload}`, + '--contentMd5', + 'toto', '--', - '-H', - 'x-amz-content-sha256: STREAMING-AWS4-HMAC-SHA256-PAYLOAD', - `${endpoint}/${bucket}/${prefix}${delimiter}${upload}1`, - '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'InvalidArgument', done); - }); - }); + `${endpoint}/${bucket}/` + `${prefix}${delimiter}${upload}1`, + '-v', + ], + (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '400 BAD REQUEST'); + assertError(rawOutput.stdout, 'InvalidDigest', done); + } + ); + }); - it('should not be able to put an object in a bucket with an invalid name', - done => { - provideRawOutput([ + // skip until we figure out how to parse the response in the CI + it.skip('should not be able to put 
an object if content-md5 header is ' + 'mismatched MD5', done => { + provideRawOutput( + [ '--debug', `--put=${upload}`, + '--contentMd5', + 'rL0Y20zC+Fzt72VPzMSk2A==', '--', - `${endpoint}/2/${basePath}${upload}1`, + `${endpoint}/${bucket}/` + `${prefix}${delimiter}${upload}1`, '-v', - ], (httpCode, rawOutput) => { + ], + (httpCode, rawOutput) => { assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'InvalidBucketName', done); - }); - }); + assertError(rawOutput.stdout, 'BadDigest', done); + } + ); + }); - it('should not be able to put an object in a bucket that does not exist', - done => { - provideRawOutput([ + it('should not be able to put an object if using streaming ' + 'chunked-upload with a valid V2 signature', done => { + provideRawOutput( + [ '--debug', `--put=${upload}`, '--', - `${endpoint}/${nonexist}/${basePath}${upload}1`, + '-H', + 'x-amz-content-sha256: STREAMING-AWS4-HMAC-SHA256-PAYLOAD', + `${endpoint}/${bucket}/${prefix}${delimiter}${upload}1`, '-v', - ], (httpCode, rawOutput) => { + ], + (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '400 BAD REQUEST'); + assertError(rawOutput.stdout, 'InvalidArgument', done); + } + ); + }); + + it('should not be able to put an object in a bucket with an invalid name', done => { + provideRawOutput( + ['--debug', `--put=${upload}`, '--', `${endpoint}/2/${basePath}${upload}1`, '-v'], + (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '400 BAD REQUEST'); + assertError(rawOutput.stdout, 'InvalidBucketName', done); + } + ); + }); + + it('should not be able to put an object in a bucket that does not exist', done => { + provideRawOutput( + ['--debug', `--put=${upload}`, '--', `${endpoint}/${nonexist}/${basePath}${upload}1`, '-v'], + (httpCode, rawOutput) => { assert.strictEqual(httpCode, '404 NOT FOUND'); assertError(rawOutput.stdout, 'NoSuchBucket', done); - }); - }); + } + ); + }); - it('should not be able to put an object in a bucket with an empty name', - done => { - provideRawOutput([ - '--debug', - `--put=${upload}`, - '--', - `${endpoint}//${basePath}/${upload}1`, - '-v', - ], httpCode => { - assert.strictEqual(httpCode, '405 METHOD NOT ALLOWED'); - done(); - }); + it('should not be able to put an object in a bucket with an empty name', done => { + provideRawOutput( + ['--debug', `--put=${upload}`, '--', `${endpoint}//${basePath}/${upload}1`, '-v'], + httpCode => { + assert.strictEqual(httpCode, '405 METHOD NOT ALLOWED'); + done(); + } + ); }); - it('should put first object in existing bucket with prefix ' + - 'and delimiter', done => { - provideRawOutput([ - '--debug', - `--put=${upload}`, - '--', - `${prefixedPath}${upload}1`, - '-v', - ], httpCode => { + it('should put first object in existing bucket with prefix ' + 'and delimiter', done => { + provideRawOutput(['--debug', `--put=${upload}`, '--', `${prefixedPath}${upload}1`, '-v'], httpCode => { assert.strictEqual(httpCode, '200 OK'); done(); }); }); - it('should put second object in existing bucket with prefix ' + - 'and delimiter', done => { - provideRawOutput( - [`--put=${upload}`, '--', `${prefixedPath}${upload}2`, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + it('should put second object in existing bucket with prefix ' + 'and delimiter', done => { + provideRawOutput([`--put=${upload}`, '--', `${prefixedPath}${upload}2`, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + }); }); - it('should put third object in existing bucket with prefix ' + - 'and delimiter', done => 
{ - provideRawOutput([ - `--put=${upload}`, - '--', - `${prefixedPath}${upload}3`, - '-v', - ], httpCode => { + it('should put third object in existing bucket with prefix ' + 'and delimiter', done => { + provideRawOutput([`--put=${upload}`, '--', `${prefixedPath}${upload}3`, '-v'], httpCode => { assert.strictEqual(httpCode, '200 OK'); done(); }); @@ -581,21 +496,15 @@ describe('s3curl putObject', () => { }); describe('s3curl getBucket', () => { - const objects = [ - `${prefixedPath}${upload}1`, - `${prefixedPath}${upload}2`, - `${prefixedPath}${upload}3`, - ]; + const objects = [`${prefixedPath}${upload}1`, `${prefixedPath}${upload}2`, `${prefixedPath}${upload}3`]; before(done => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - createFile(upload, 1048576, () => { - putObjects(upload, objects, done); - }); + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + createFile(upload, 1048576, () => { + putObjects(upload, objects, done); }); + }); }); after(done => { @@ -604,143 +513,114 @@ describe('s3curl getBucket', () => { }); it('should list all objects if no prefix or delimiter specified', done => { - provideRawOutput( - ['--', bucketPath, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '200 OK'); - parseString(rawOutput.stdout, (err, result) => { - if (err) { - assert.ifError(err); - } - assert.strictEqual(result.ListBucketResult - .Contents[0].Key[0], `${basePath}${upload}1`); - assert.strictEqual(result.ListBucketResult - .Contents[1].Key[0], `${basePath}${upload}2`); - assert.strictEqual(result.ListBucketResult - .Contents[2].Key[0], `${basePath}${upload}3`); - done(); - }); - }); - }); - - it('should list a common prefix if a common prefix and delimiter are ' + - 'specified', done => { - provideRawOutput([ - '--', - `${bucketPath}?delimiter=${delimiter}&prefix=${prefix}`, - '-v', - ], (httpCode, rawOutput) => { + provideRawOutput(['--', bucketPath, '-v'], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '200 OK'); parseString(rawOutput.stdout, (err, result) => { if (err) { assert.ifError(err); } - assert.strictEqual(result.ListBucketResult - .CommonPrefixes[0].Prefix[0], basePath); + assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], `${basePath}${upload}1`); + assert.strictEqual(result.ListBucketResult.Contents[1].Key[0], `${basePath}${upload}2`); + assert.strictEqual(result.ListBucketResult.Contents[2].Key[0], `${basePath}${upload}3`); done(); }); }); }); - it('should not list a common prefix if no delimiter is specified', done => { + it('should list a common prefix if a common prefix and delimiter are ' + 'specified', done => { provideRawOutput( - ['--', `${bucketPath}?&prefix=${prefix}`, '-v'], + ['--', `${bucketPath}?delimiter=${delimiter}&prefix=${prefix}`, '-v'], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '200 OK'); parseString(rawOutput.stdout, (err, result) => { if (err) { assert.ifError(err); } - const keys = Object.keys(result.ListBucketResult); - const location = keys.indexOf('CommonPrefixes'); - assert.strictEqual(location, -1); - assert.strictEqual(result.ListBucketResult - .Contents[0].Key[0], `${basePath}${upload}1`); + assert.strictEqual(result.ListBucketResult.CommonPrefixes[0].Prefix[0], basePath); done(); }); + } + ); + }); + + it('should not list a common prefix if no delimiter is specified', done => { + provideRawOutput(['--', `${bucketPath}?&prefix=${prefix}`, '-v'], 
(httpCode, rawOutput) => { + assert.strictEqual(httpCode, '200 OK'); + parseString(rawOutput.stdout, (err, result) => { + if (err) { + assert.ifError(err); + } + const keys = Object.keys(result.ListBucketResult); + const location = keys.indexOf('CommonPrefixes'); + assert.strictEqual(location, -1); + assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], `${basePath}${upload}1`); + done(); }); + }); }); - it('should provide a next marker if maxs keys exceeded ' + - 'and delimiter specified', done => { - provideRawOutput( - ['--', `${bucketPath}?delimiter=x&max-keys=2`, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '200 OK'); - parseString(rawOutput.stdout, (err, result) => { - if (err) { - assert.ifError(err); - } - assert.strictEqual(result.ListBucketResult - .NextMarker[0], `${basePath}${upload}2`); - assert.strictEqual(result.ListBucketResult - .IsTruncated[0], 'true'); - done(); - }); + it('should provide a next marker if maxs keys exceeded ' + 'and delimiter specified', done => { + provideRawOutput(['--', `${bucketPath}?delimiter=x&max-keys=2`, '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '200 OK'); + parseString(rawOutput.stdout, (err, result) => { + if (err) { + assert.ifError(err); + } + assert.strictEqual(result.ListBucketResult.NextMarker[0], `${basePath}${upload}2`); + assert.strictEqual(result.ListBucketResult.IsTruncated[0], 'true'); + done(); }); + }); }); it('should return InvalidArgument error with negative max-keys', done => { - provideRawOutput( - ['--', `${bucketPath}?&max-keys=-2`, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'InvalidArgument', done); - }); + provideRawOutput(['--', `${bucketPath}?&max-keys=-2`, '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '400 BAD REQUEST'); + assertError(rawOutput.stdout, 'InvalidArgument', done); + }); }); it('should return InvalidArgument error with invalid max-keys', done => { - provideRawOutput( - ['--', `${bucketPath}?max-keys='slash'`, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'InvalidArgument', done); - }); + provideRawOutput(['--', `${bucketPath}?max-keys='slash'`, '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '400 BAD REQUEST'); + assertError(rawOutput.stdout, 'InvalidArgument', done); + }); }); it('should return an EncodingType XML tag with the value "url"', done => { - provideRawOutput( - ['--', bucketPath, '-G', '-d', 'encoding-type=url', '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '200 OK'); - parseString(rawOutput.stdout, (err, result) => { - if (err) { - assert.ifError(err); - } - assert.strictEqual(result.ListBucketResult - .EncodingType[0], 'url'); - done(); - }); + provideRawOutput(['--', bucketPath, '-G', '-d', 'encoding-type=url', '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '200 OK'); + parseString(rawOutput.stdout, (err, result) => { + if (err) { + assert.ifError(err); + } + assert.strictEqual(result.ListBucketResult.EncodingType[0], 'url'); + done(); }); + }); }); - it('should return an InvalidArgument error when given an invalid ' + - 'encoding type', done => { - provideRawOutput( - ['--', bucketPath, '-G', '-d', 'encoding-type=invalidURI', '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '400 BAD REQUEST'); - parseString(rawOutput.stdout, (err, result) => { - if (err) { - assert.ifError(err); - } - 
assert.strictEqual(result.Error.Code[0], 'InvalidArgument'); - assert.strictEqual(result.Error.Message[0], - 'Invalid Encoding Method specified in Request'); - done(); - }); + it('should return an InvalidArgument error when given an invalid ' + 'encoding type', done => { + provideRawOutput(['--', bucketPath, '-G', '-d', 'encoding-type=invalidURI', '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '400 BAD REQUEST'); + parseString(rawOutput.stdout, (err, result) => { + if (err) { + assert.ifError(err); + } + assert.strictEqual(result.Error.Code[0], 'InvalidArgument'); + assert.strictEqual(result.Error.Message[0], 'Invalid Encoding Method specified in Request'); + done(); }); + }); }); }); describe('s3curl head bucket', () => { before(done => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + }); }); after(done => { @@ -748,78 +628,59 @@ describe('s3curl head bucket', () => { }); it('should return a 404 response if bucket does not exist', done => { - provideRawOutput( - ['--head', '--', `${endpoint}/${nonexist}`, '-v'], - httpCode => { - assert.strictEqual(httpCode, '404 NOT FOUND'); - done(); - }); + provideRawOutput(['--head', '--', `${endpoint}/${nonexist}`, '-v'], httpCode => { + assert.strictEqual(httpCode, '404 NOT FOUND'); + done(); + }); }); - it('should return a 200 response if bucket exists' + - ' and user is authorized', done => { - provideRawOutput( - ['--head', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + it('should return a 200 response if bucket exists' + ' and user is authorized', done => { + provideRawOutput(['--head', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + }); }); }); describe('s3curl getObject', () => { before(done => { createFile(upload, 1048576, () => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + }); }); }); after('delete created file and downloaded file', done => { - const objects = [ - `${bucketPath}/getter`, - bucketPath, - ]; + const objects = [`${bucketPath}/getter`, bucketPath]; deleteRemoteItems(objects, () => { deleteFile(upload, () => deleteFile(download, done)); }); }); it('should put object with metadata', done => { - provideRawOutput([ - `--put=${upload}`, - '--', - '-H', - 'x-amz-meta-mine:BestestObjectEver', - `${bucketPath}/getter`, - '-v', - ], httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); - }); - - it('should get an existing file in an existing bucket', done => { provideRawOutput( - ['--', '-o', download, `${bucketPath}/getter`, '-v'], + [`--put=${upload}`, '--', '-H', 'x-amz-meta-mine:BestestObjectEver', `${bucketPath}/getter`, '-v'], httpCode => { assert.strictEqual(httpCode, '200 OK'); done(); - }); + } + ); }); - it('should return an error if getting object with empty bucket name', - done => { - provideRawOutput( - ['--', '-o', download, `${endpoint}//getter`, '-v'], - httpCode => { - assert.strictEqual(httpCode, '405 METHOD NOT ALLOWED'); - done(); - }); + it('should get an existing file in an existing bucket', done => { + provideRawOutput(['--', '-o', 
download, `${bucketPath}/getter`, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + }); + }); + + it('should return an error if getting object with empty bucket name', done => { + provideRawOutput(['--', '-o', download, `${endpoint}//getter`, '-v'], httpCode => { + assert.strictEqual(httpCode, '405 METHOD NOT ALLOWED'); + done(); + }); }); it.skip('downloaded file should equal uploaded file', done => { @@ -830,160 +691,135 @@ describe('s3curl getObject', () => { describe('s3curl head object', () => { before(done => { createFile(upload, 1048576, () => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - provideRawOutput([ - `--put=${upload}`, - '--', - '-H', - 'x-amz-meta-mine:BestestObjectEver', - `${bucketPath}/getter`, - '-v', - ], httpCode => { + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + provideRawOutput( + [`--put=${upload}`, '--', '-H', 'x-amz-meta-mine:BestestObjectEver', `${bucketPath}/getter`, '-v'], + httpCode => { assert.strictEqual(httpCode, '200 OK'); done(); - }); - }); + } + ); + }); }); }); after(done => { - deleteRemoteItems([ - `${bucketPath}/getter`, - bucketPath, - ], done); + deleteRemoteItems([`${bucketPath}/getter`, bucketPath], done); }); it("should get object's metadata", done => { - provideRawOutput( - ['--head', '--', `${bucketPath}/getter`, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '200 OK'); - const lines = rawOutput.stdout.split('\n'); - const userMetadata = 'x-amz-meta-mine: BestestObjectEver\r'; - assert(lines.indexOf(userMetadata) > -1); - assert(rawOutput.stdout.indexOf('ETag') > -1); - done(); - }); + provideRawOutput(['--head', '--', `${bucketPath}/getter`, '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '200 OK'); + const lines = rawOutput.stdout.split('\n'); + const userMetadata = 'x-amz-meta-mine: BestestObjectEver\r'; + assert(lines.indexOf(userMetadata) > -1); + assert(rawOutput.stdout.indexOf('ETag') > -1); + done(); + }); }); }); describe('s3curl object ACLs', () => { before(done => { createFile(aclUpload, 512000, () => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + }); }); }); after(done => { - deleteRemoteItems([ - `${bucketPath}/${aclUpload}withcannedacl`, - `${bucketPath}/${aclUpload}withspecificacl`, - bucketPath, - ], () => deleteFile(aclUpload, done)); + deleteRemoteItems( + [`${bucketPath}/${aclUpload}withcannedacl`, `${bucketPath}/${aclUpload}withspecificacl`, bucketPath], + () => deleteFile(aclUpload, done) + ); }); it('should put an object with a canned ACL', done => { - provideRawOutput([ - `--put=${aclUpload}`, - '--', - '-H', - 'x-amz-acl:public-read', - `${bucketPath}/${aclUpload}withcannedacl`, - '-v', - ], httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + provideRawOutput( + [ + `--put=${aclUpload}`, + '--', + '-H', + 'x-amz-acl:public-read', + `${bucketPath}/${aclUpload}withcannedacl`, + '-v', + ], + httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + } + ); }); it("should get an object's canned ACL", done => { - provideRawOutput([ - '--', - `${bucketPath}/${aclUpload}withcannedacl?acl`, - '-v', - ], (httpCode, rawOutput) => { + provideRawOutput(['--', 
`${bucketPath}/${aclUpload}withcannedacl?acl`, '-v'], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '200 OK'); parseString(rawOutput.stdout, (err, result) => { if (err) { assert.ifError(err); } - assert.strictEqual(result.AccessControlPolicy - .Owner[0].ID[0], ownerCanonicalId); - assert.strictEqual(result.AccessControlPolicy - .AccessControlList[0].Grant[0] - .Grantee[0].ID[0], ownerCanonicalId); - assert.strictEqual(result.AccessControlPolicy - .AccessControlList[0].Grant[0] - .Permission[0], 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy - .AccessControlList[0].Grant[1] - .Grantee[0].URI[0], - 'http://acs.amazonaws.com/groups/global/AllUsers'); - assert.strictEqual(result.AccessControlPolicy - .AccessControlList[0].Grant[1] - .Permission[0], 'READ'); + assert.strictEqual(result.AccessControlPolicy.Owner[0].ID[0], ownerCanonicalId); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + ownerCanonicalId + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].URI[0], + 'http://acs.amazonaws.com/groups/global/AllUsers' + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], 'READ'); done(); }); }); }); it('should put an object with a specific ACL', done => { - provideRawOutput([ - `--put=${aclUpload}`, - '--', - '-H', - 'x-amz-grant-read:uri=' + - 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers', - `${bucketPath}/${aclUpload}withspecificacl`, - '-v', - ], httpCode => { - assert.strictEqual(httpCode, '200 OK'); - done(); - }); + provideRawOutput( + [ + `--put=${aclUpload}`, + '--', + '-H', + 'x-amz-grant-read:uri=' + 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers', + `${bucketPath}/${aclUpload}withspecificacl`, + '-v', + ], + httpCode => { + assert.strictEqual(httpCode, '200 OK'); + done(); + } + ); }); it("should get an object's specific ACL", done => { - provideRawOutput([ - '--', - `${bucketPath}/${aclUpload}withspecificacl?acl`, - '-v', - ], (httpCode, rawOutput) => { + provideRawOutput(['--', `${bucketPath}/${aclUpload}withspecificacl?acl`, '-v'], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '200 OK'); parseString(rawOutput.stdout, (err, result) => { if (err) { assert.ifError(err); } - assert.strictEqual(result.AccessControlPolicy - .Owner[0].ID[0], ownerCanonicalId); - assert.strictEqual(result.AccessControlPolicy - .AccessControlList[0].Grant[0] - .Grantee[0].URI[0], - 'http://acs.amazonaws.com/groups/global/' + - 'AuthenticatedUsers'); - assert.strictEqual(result.AccessControlPolicy - .AccessControlList[0].Grant[0] - .Permission[0], 'READ'); + assert.strictEqual(result.AccessControlPolicy.Owner[0].ID[0], ownerCanonicalId); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].URI[0], + 'http://acs.amazonaws.com/groups/global/' + 'AuthenticatedUsers' + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], 'READ'); done(); }); }); }); - it('should return a NoSuchKey error if try to get an object' + - 'ACL for an object that does not exist', done => { - provideRawOutput( - ['--', `${bucketPath}/keydoesnotexist?acl`, '-v'], - (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '404 NOT FOUND'); - assertError(rawOutput.stdout, 'NoSuchKey', done); - }); + it('should return a NoSuchKey error if try to 
get an object' + 'ACL for an object that does not exist', done => { + provideRawOutput(['--', `${bucketPath}/keydoesnotexist?acl`, '-v'], (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '404 NOT FOUND'); + assertError(rawOutput.stdout, 'NoSuchKey', done); + }); }); }); @@ -993,104 +829,70 @@ describe('s3curl multipart upload', () => { let uploadId = null; before(done => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - // initiate mpu - provideRawOutput([ - '--', - '-X', - 'POST', - `${bucketPath}/${key}?uploads`, - '-v', - ], (httpCode, rawOutput) => { - parseString(rawOutput.stdout, (err, result) => { - if (err) { - assert.ifError(err); - } - uploadId = - result.InitiateMultipartUploadResult.UploadId[0]; - // create file to copy - createFile(upload, 100, () => { - // put file to copy - putObjects(upload, [`${bucketPath}/copyme`], done); - }); + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + // initiate mpu + provideRawOutput(['--', '-X', 'POST', `${bucketPath}/${key}?uploads`, '-v'], (httpCode, rawOutput) => { + parseString(rawOutput.stdout, (err, result) => { + if (err) { + assert.ifError(err); + } + uploadId = result.InitiateMultipartUploadResult.UploadId[0]; + // create file to copy + createFile(upload, 100, () => { + // put file to copy + putObjects(upload, [`${bucketPath}/copyme`], done); }); }); }); + }); }); after(done => { - deleteRemoteItems([ - `${bucketPath}/copyme`, - `${bucketPath}/${key}?uploadId=${uploadId}`, - bucketPath, - ], () => deleteFile(upload, done)); + deleteRemoteItems([`${bucketPath}/copyme`, `${bucketPath}/${key}?uploadId=${uploadId}`, bucketPath], () => + deleteFile(upload, done) + ); }); it('should return error for list parts call if no key sent', done => { - provideRawOutput([ - '--', - `${bucketPath}?uploadId=${uploadId}`, - '-v', - ], (httpCode, rawOutput) => { + provideRawOutput(['--', `${bucketPath}?uploadId=${uploadId}`, '-v'], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '400 BAD REQUEST'); assertError(rawOutput.stdout, 'InvalidRequest', done); }); }); it('should return error for put part call if no key sent', done => { - provideRawOutput([ - '--', - '-X', 'PUT', - `${bucketPath}?partNumber=1&uploadId=${uploadId}`, - '-v', - ], (httpCode, rawOutput) => { - assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'InvalidRequest', done); - }); + provideRawOutput( + ['--', '-X', 'PUT', `${bucketPath}?partNumber=1&uploadId=${uploadId}`, '-v'], + (httpCode, rawOutput) => { + assert.strictEqual(httpCode, '400 BAD REQUEST'); + assertError(rawOutput.stdout, 'InvalidRequest', done); + } + ); }); it('should return error for complete mpu call if no key sent', done => { - provideRawOutput([ - '--', - '-X', 'POST', - `${bucketPath}?uploadId=${uploadId}`, - '-v', - ], (httpCode, rawOutput) => { + provideRawOutput(['--', '-X', 'POST', `${bucketPath}?uploadId=${uploadId}`, '-v'], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '400 BAD REQUEST'); assertError(rawOutput.stdout, 'InvalidRequest', done); }); }); it('should return error for abort mpu call if no key sent', done => { - provideRawOutput([ - '--', - '-X', 'DELETE', - `${bucketPath}?uploadId=${uploadId}`, - '-v', - ], (httpCode, rawOutput) => { + provideRawOutput(['--', '-X', 'DELETE', `${bucketPath}?uploadId=${uploadId}`, '-v'], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '400 BAD REQUEST'); 
assertError(rawOutput.stdout, 'InvalidRequest', done); }); }); it('should list parts of multipart upload with no parts', done => { - provideRawOutput([ - '--', - `${bucketPath}/${key}?uploadId=${uploadId}`, - '-v', - ], (httpCode, rawOutput) => { + provideRawOutput(['--', `${bucketPath}/${key}?uploadId=${uploadId}`, '-v'], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '200 OK'); parseString(rawOutput.stdout, (err, result) => { - assert.strictEqual(result.ListPartsResult.UploadId[0], - uploadId); - assert.strictEqual(result.ListPartsResult.Bucket[0], - bucket); + assert.strictEqual(result.ListPartsResult.UploadId[0], uploadId); + assert.strictEqual(result.ListPartsResult.Bucket[0], bucket); assert.strictEqual(result.ListPartsResult.Key[0], key); - assert.strictEqual(result.ListPartsResult.Part, - undefined); + assert.strictEqual(result.ListPartsResult.Part, undefined); done(); }); }); @@ -1098,68 +900,76 @@ describe('s3curl multipart upload', () => { it('should copy a part and return lastModified as ISO', done => { provideRawOutput( - ['--', `${bucketPath}/${key}?uploadId=${uploadId}&partNumber=1`, - '-X', 'PUT', '-H', - `x-amz-copy-source:${bucket}/copyme`, '-v'], + [ + '--', + `${bucketPath}/${key}?uploadId=${uploadId}&partNumber=1`, + '-X', + 'PUT', + '-H', + `x-amz-copy-source:${bucket}/copyme`, + '-v', + ], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '200 OK'); parseString(rawOutput.stdout, (err, result) => { - const lastModified = result.CopyPartResult - .LastModified[0]; + const lastModified = result.CopyPartResult.LastModified[0]; const isoDateString = new Date(lastModified).toISOString(); assert.strictEqual(lastModified, isoDateString); done(); }); - }); + } + ); }); }); describe('s3curl copy object', () => { before(done => { createFile(upload, 1048576, () => { - provideRawOutput( - ['--createBucket', '--', bucketPath, '-v'], - httpCode => { - assert.strictEqual(httpCode, '200 OK'); - putObjects(upload, [`${bucketPath}/copyme`], done); - }); + provideRawOutput(['--createBucket', '--', bucketPath, '-v'], httpCode => { + assert.strictEqual(httpCode, '200 OK'); + putObjects(upload, [`${bucketPath}/copyme`], done); + }); }); }); after(done => { - deleteRemoteItems([ - `${bucketPath}/copyme`, - `${bucketPath}/iamacopy`, - bucketPath, - ], () => deleteFile(upload, done)); + deleteRemoteItems([`${bucketPath}/copyme`, `${bucketPath}/iamacopy`, bucketPath], () => + deleteFile(upload, done) + ); }); it('should copy an object and return lastModified as ISO', done => { provideRawOutput( - ['--', `${bucketPath}/iamacopy`, '-X', 'PUT', '-H', - `x-amz-copy-source:${bucket}/copyme`, '-v'], + ['--', `${bucketPath}/iamacopy`, '-X', 'PUT', '-H', `x-amz-copy-source:${bucket}/copyme`, '-v'], (httpCode, rawOutput) => { assert.strictEqual(httpCode, '200 OK'); parseString(rawOutput.stdout, (err, result) => { - const lastModified = result.CopyObjectResult - .LastModified[0]; + const lastModified = result.CopyObjectResult.LastModified[0]; const isoDateString = new Date(lastModified).toISOString(); assert.strictEqual(lastModified, isoDateString); done(); }); - }); + } + ); }); }); describe('s3curl multi-object delete', () => { it('should return an error if md5 is wrong', done => { - provideRawOutput(['--post', 'multiDelete.xml', '--contentMd5', - 'p5/WA/oEr30qrEEl21PAqw==', '--', - `${endpoint}/${bucket}/?delete`, '-v'], + provideRawOutput( + [ + '--post', + 'multiDelete.xml', + '--contentMd5', + 'p5/WA/oEr30qrEEl21PAqw==', + '--', + `${endpoint}/${bucket}/?delete`, + '-v', + ], 
(httpCode, rawOutput) => { assert.strictEqual(httpCode, '400 BAD REQUEST'); - assertError(rawOutput.stdout, 'BadDigest', - done); - }); + assertError(rawOutput.stdout, 'BadDigest', done); + } + ); }); }); diff --git a/tests/functional/sse-kms-migration/arnPrefix.js b/tests/functional/sse-kms-migration/arnPrefix.js index da93645ddc..1515a9ea46 100644 --- a/tests/functional/sse-kms-migration/arnPrefix.js +++ b/tests/functional/sse-kms-migration/arnPrefix.js @@ -30,53 +30,61 @@ describe('SSE KMS arnPrefix', () => { bkts[bktConf.name] = bkt; if (bktConf.algo && bktConf.masterKeyId) { bkt.kmsKeyInfo = await helpers.createKmsKey(log); - bkt.kmsKey = bktConf.arnPrefix - ? bkt.kmsKeyInfo.masterKeyArn - : bkt.kmsKeyInfo.masterKeyId; + bkt.kmsKey = bktConf.arnPrefix ? bkt.kmsKeyInfo.masterKeyArn : bkt.kmsKeyInfo.masterKeyId; } - await helpers.s3.createBucket(({ Bucket: bkt.name })).promise(); - await helpers.s3.createBucket(({ Bucket: bkt.vname })).promise(); + await helpers.s3.createBucket({ Bucket: bkt.name }).promise(); + await helpers.s3.createBucket({ Bucket: bkt.vname }).promise(); if (bktConf.deleteSSE) { await scenarios.deleteBucketSSEBeforeEach(bkt.name, log); await scenarios.deleteBucketSSEBeforeEach(bkt.vname, log); } if (bktConf.algo) { // bucket encryption will be asserted in bucket test - await helpers.s3.putBucketEncryption({ - Bucket: bkt.name, - ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ - algo: bktConf.algo, masterKeyId: bkt.kmsKey }), - }).promise(); - await helpers.s3.putBucketEncryption({ - Bucket: bkt.vname, - ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ - algo: bktConf.algo, masterKeyId: bkt.kmsKey }), - }).promise(); + await helpers.s3 + .putBucketEncryption({ + Bucket: bkt.name, + ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ + algo: bktConf.algo, + masterKeyId: bkt.kmsKey, + }), + }) + .promise(); + await helpers.s3 + .putBucketEncryption({ + Bucket: bkt.vname, + ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ + algo: bktConf.algo, + masterKeyId: bkt.kmsKey, + }), + }) + .promise(); } // Put an object for each SSE conf in each bucket - await Promise.all(scenarios.testCases.map(async objConf => { - const obj = { - name: `for-copy-enc-obj-${objConf.name}`, - kmsKeyInfo: null, - kmsKey: null, - body: `BODY(for-copy-enc-obj-${objConf.name})`, - }; - bkt.objs[objConf.name] = obj; - if (objConf.algo && objConf.masterKeyId) { - obj.kmsKeyInfo = await helpers.createKmsKey(log); - obj.kmsKey = objConf.arnPrefix - ? obj.kmsKeyInfo.masterKeyArn - : obj.kmsKeyInfo.masterKeyId; - } + await Promise.all( + scenarios.testCases.map(async objConf => { + const obj = { + name: `for-copy-enc-obj-${objConf.name}`, + kmsKeyInfo: null, + kmsKey: null, + body: `BODY(for-copy-enc-obj-${objConf.name})`, + }; + bkt.objs[objConf.name] = obj; + if (objConf.algo && objConf.masterKeyId) { + obj.kmsKeyInfo = await helpers.createKmsKey(log); + obj.kmsKey = objConf.arnPrefix ? 
obj.kmsKeyInfo.masterKeyArn : obj.kmsKeyInfo.masterKeyId; + } - return await helpers.putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); - })); + return await helpers.putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); + }) + ); }; before('setup', async () => { - console.log('Run arnPrefix', - { profile: helpers.credsProfile, accessKeyId: helpers.s3.config.credentials.accessKeyId }); + console.log('Run arnPrefix', { + profile: helpers.credsProfile, + accessKeyId: helpers.s3.config.credentials.accessKeyId, + }); const allBuckets = (await helpers.s3.listBuckets().promise()).Buckets.map(b => b.Name); console.log('List buckets:', allBuckets); await helpers.MD.setup(); @@ -85,19 +93,28 @@ describe('SSE KMS arnPrefix', () => { // pre cleanup await helpers.cleanup(copyBkt); await helpers.cleanup(mpuCopyBkt); - await Promise.all(Object.values(bkts).map(async bkt => { - await helpers.cleanup(bkt.name); - return await helpers.cleanup(bkt.vname); - })); - } catch (e) { void e; } + await Promise.all( + Object.values(bkts).map(async bkt => { + await helpers.cleanup(bkt.name); + return await helpers.cleanup(bkt.vname); + }) + ); + } catch (e) { + void e; + } // init copy bucket - await helpers.s3.createBucket(({ Bucket: copyBkt })).promise(); - await helpers.s3.createBucket(({ Bucket: mpuCopyBkt })).promise(); - await helpers.s3.putBucketEncryption({ - Bucket: copyBkt, - ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ algo: 'aws:kms', masterKeyId: copyKmsKey }), - }).promise(); + await helpers.s3.createBucket({ Bucket: copyBkt }).promise(); + await helpers.s3.createBucket({ Bucket: mpuCopyBkt }).promise(); + await helpers.s3 + .putBucketEncryption({ + Bucket: copyBkt, + ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ + algo: 'aws:kms', + masterKeyId: copyKmsKey, + }), + }) + .promise(); await helpers.s3.putObject({ Bucket: copyBkt, Key: copyObj, Body: 'BODY(copy)' }).promise(); // Prepare every buckets with 1 object (for copy) @@ -108,269 +125,367 @@ describe('SSE KMS arnPrefix', () => { await helpers.cleanup(copyBkt); await helpers.cleanup(mpuCopyBkt); // Clean every bucket - await Promise.all(Object.values(bkts).map(async bkt => { - await helpers.cleanup(bkt.name); - return await helpers.cleanup(bkt.vname); - })); + await Promise.all( + Object.values(bkts).map(async bkt => { + await helpers.cleanup(bkt.name); + return await helpers.cleanup(bkt.vname); + }) + ); }); - scenarios.testCases.forEach(bktConf => describe(`bucket enc-bkt-${bktConf.name}`, () => { - let bkt = bkts[bktConf.name]; + scenarios.testCases.forEach(bktConf => + describe(`bucket enc-bkt-${bktConf.name}`, () => { + let bkt = bkts[bktConf.name]; - before(() => { - bkt = bkts[bktConf.name]; - }); - - if (bktConf.deleteSSE) { - beforeEach(async () => { - await scenarios.deleteBucketSSEBeforeEach(bkt.name, log); - await scenarios.deleteBucketSSEBeforeEach(bkt.vname, log); + before(() => { + bkt = bkts[bktConf.name]; }); - } - if (!bktConf.algo) { - if (!bktConf.deleteSSE && helpers.config.globalEncryptionEnabled) { - it('GetBucketEncryption should return AES256 because of globalEncryptionEnabled', - async () => await scenarios.tests.getBucketSSE(bkt.name, log, 'AES256', null, 'after')); - } else { - it('GetBucketEncryption should return ServerSideEncryptionConfigurationNotFoundError', - async () => await scenarios.tests.getBucketSSEError(bkt.name)); - if (!bktConf.deleteSSE) { - it('should have non mandatory SSE in bucket MD as test init put an object with AES256', - async 
() => await scenarios.tests.getBucketNonMandatorySSE(bkt.name, log, 'after')); - } + if (bktConf.deleteSSE) { + beforeEach(async () => { + await scenarios.deleteBucketSSEBeforeEach(bkt.name, log); + await scenarios.deleteBucketSSEBeforeEach(bkt.vname, log); + }); } - } else { - it('GetBucketEncryption should return SSE with arnPrefix to key', - async () => await scenarios.tests.getBucketSSE(bkt.name, log, bktConf.algo, - bktConf.masterKeyId ? bkt.kmsKeyInfo.masterKeyArn : null, 'after')); - } - scenarios.testCasesObj.forEach(objConf => it(`should assert uploaded objects with SSE ${objConf.name}`, - async () => scenarios.tests.getPreUploadedObject(bkt.name, - { objConf, obj: bkt.objs[objConf.name] }, { bktConf, bkt }, 'after'))); - - scenarios.testCasesObj.forEach(objConf => describe(`object enc-obj-${objConf.name}`, () => { - const obj = { - name: `enc-obj-${objConf.name}`, - kmsKeyInfo: null, - kmsKey: null, - body: `BODY(enc-obj-${objConf.name})`, - }; - /** to be used as source of copy */ - let objForCopy; - - before(async () => { - if (objConf.algo && objConf.masterKeyId) { - obj.kmsKeyInfo = await helpers.createKmsKey(log); - obj.kmsKey = objConf.arnPrefix - ? obj.kmsKeyInfo.masterKeyArn - : obj.kmsKeyInfo.masterKeyId; + if (!bktConf.algo) { + if (!bktConf.deleteSSE && helpers.config.globalEncryptionEnabled) { + it('GetBucketEncryption should return AES256 because of globalEncryptionEnabled', async () => + await scenarios.tests.getBucketSSE(bkt.name, log, 'AES256', null, 'after')); + } else { + it('GetBucketEncryption should return ServerSideEncryptionConfigurationNotFoundError', async () => + await scenarios.tests.getBucketSSEError(bkt.name)); + if (!bktConf.deleteSSE) { + it('should have non mandatory SSE in bucket MD as test init put an object with AES256', async () => + await scenarios.tests.getBucketNonMandatorySSE(bkt.name, log, 'after')); + } } - objForCopy = bkt.objs[objConf.name]; - }); + } else { + it('GetBucketEncryption should return SSE with arnPrefix to key', async () => + await scenarios.tests.getBucketSSE( + bkt.name, + log, + bktConf.algo, + bktConf.masterKeyId ? 
bkt.kmsKeyInfo.masterKeyArn : null, + 'after' + )); + } - it(`should PutObject ${obj.name} overriding bucket SSE`, - async () => scenarios.tests.putObjectOverrideSSE({ objConf, obj }, { bktConf, bkt }, 'after')); - - // CopyObject scenarios - [ - { name: `${obj.name} into encrypted destination bucket`, forceBktSSE: true }, - { name: `${obj.name} into same bucket with object SSE config` }, - { name: `from encrypted source into ${obj.name} with object SSE config` }, - ].forEach(({ name, forceBktSSE }, index) => - it(`should CopyObject ${name}`, async () => - await scenarios.tests.copyObjectAndSSE( - { copyBkt, objForCopy, copyObj }, - { objConf, obj }, + scenarios.testCasesObj.forEach(objConf => + it(`should assert uploaded objects with SSE ${objConf.name}`, async () => + scenarios.tests.getPreUploadedObject( + bkt.name, + { objConf, obj: bkt.objs[objConf.name] }, { bktConf, bkt }, - { index, forceBktSSE }, - 'after', - ))); - - // after SSE migration implementation all mpu with sse are fixed - it('should encrypt MPU and put 2 encrypted parts', async () => { - const mpuKey = `${obj.name}-mpu`; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); - const partsBody = [`${obj.body}-MPU1`, `${obj.body}-MPU2`]; - const newParts = []; - for (const [index, body] of partsBody.entries()) { - const part = await scenarios.tests.mpuUploadPart({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Body: body, - Key: mpuKey, - PartNumber: index + 1, - }, mpu, objConf.algo || bktConf.algo, 'after'); - newParts.push(part); - } - await scenarios.tests.mpuComplete( - { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, - { existingParts: [], newParts }, - mpu, objConf.algo || bktConf.algo, 'after'); - const assertion = { - Bucket: bkt.name, - Key: mpuKey, - Body: `${obj.body}-MPU1${obj.body}-MPU2`, - }; - await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, {}, 'after'); - }); - - it('should encrypt MPU and copy an encrypted parts from encrypted bucket', async () => { - const mpuKey = `${obj.name}-mpucopy`; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); - const part1 = await scenarios.tests.mpuUploadPartCopy({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Key: mpuKey, - PartNumber: 1, - CopySource: `${copyBkt}/${copyObj}`, - }, mpu, objConf.algo || bktConf.algo, 'after'); - const part2 = await scenarios.tests.mpuUploadPart({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Body: `${obj.body}-MPU2`, - Key: mpuKey, - PartNumber: 2, - }, mpu, objConf.algo || bktConf.algo, 'after'); - - await scenarios.tests.mpuComplete( - { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, - { existingParts: [], newParts: [part1, part2] }, - mpu, objConf.algo || bktConf.algo, 'after'); - const assertion = { - Bucket: bkt.name, - Key: mpuKey, - Body: `BODY(copy)${obj.body}-MPU2`, - }; - await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, {}, 'after'); - }); - - it('should encrypt MPU and copy an encrypted range parts from encrypted bucket', async () => { - const mpuKey = `${obj.name}-mpucopy`; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); - // source body is "BODY(copy)" - // [copy, BODY] - const sourceRanges = ['bytes=5-8', 'bytes=0-3']; - const newParts = []; - for (const [index, range] of sourceRanges.entries()) { - const part = 
await scenarios.tests.mpuUploadPartCopy({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Key: mpuKey, - PartNumber: index + 1, - CopySource: `${copyBkt}/${copyObj}`, - CopySourceRange: range, - }, mpu, objConf.algo || bktConf.algo, 'after'); - newParts.push(part); - } - - await scenarios.tests.mpuComplete( - { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, - { existingParts: [], newParts }, - mpu, objConf.algo || bktConf.algo, 'after'); - const assertion = { - Bucket: bkt.name, - Key: mpuKey, - Body: 'copyBODY', - }; - await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, {}, 'after'); - }); + 'after' + )) + ); - it(`should PutObject versioned with SSE ${obj.name}`, async () => { - // ensure versioned bucket is empty - await helpers.bucketUtil.empty(bkt.vname); - let { Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise(); - // regularly count versioned objects - assert.strictEqual(Versions.length, 0); - - const bodyBase = `BODY(${obj.name})-base`; - await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyBase); - const baseAssertion = { Bucket: bkt.vname, Key: obj.name }; - await scenarios.assertObjectSSE( - { ...baseAssertion, Body: bodyBase }, - { objConf, obj }, { bktConf, bkt }, {}, 'after'); - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 1); - - await helpers.s3.putBucketVersioning({ Bucket: bkt.vname, - VersioningConfiguration: { Status: 'Enabled' }, - }).promise(); - - const bodyV1 = `BODY(${obj.name})-v1`; - const v1 = await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV1); - const bodyV2 = `BODY(${obj.name})-v2`; - const v2 = await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV2); - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 3); - - const current = await helpers.s3.headObject({ Bucket: bkt.vname, Key: obj.name }).promise(); - assert.strictEqual(current.VersionId, v2.VersionId); // ensure versioning as expected - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 3); - - await scenarios.assertObjectSSE( - { ...baseAssertion, Body: bodyV2 }, { objConf, obj }, { bktConf, bkt }, - {}, 'after'); // v2 - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: 'null', Body: bodyBase }, { objConf, obj }, { bktConf, bkt }, - {}, 'after'); - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: v1.VersionId, Body: bodyV1 }, { objConf, obj }, { bktConf, bkt }, - {}, 'after'); - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: v2.VersionId, Body: bodyV2 }, { objConf, obj }, { bktConf, bkt }, - {}, 'after'); - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 3); - - await helpers.s3.putBucketVersioning({ Bucket: bkt.vname, - VersioningConfiguration: { Status: 'Suspended' }, - }).promise(); - - // should be fine after version suspension - await scenarios.assertObjectSSE( - { ...baseAssertion, Body: bodyV2 }, { objConf, obj }, { bktConf, bkt }, - {}, 'after'); // v2 - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: 'null', Body: bodyBase }, { objConf, obj }, { bktConf, bkt }, - {}, 'after'); - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: v1.VersionId, Body: bodyV1 }, { 
objConf, obj }, { bktConf, bkt }, - {}, 'after'); - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: v2.VersionId, Body: bodyV2 }, { objConf, obj }, { bktConf, bkt }, - {}, 'after'); - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 3); - - // put a new null version - const bodyFinal = `BODY(${obj.name})-final`; - await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyFinal); - await scenarios.assertObjectSSE( - { ...baseAssertion, Body: bodyFinal }, { objConf, obj }, { bktConf, bkt }, - {}, 'after'); // null - await scenarios.assertObjectSSE( - { ...baseAssertion, Body: bodyFinal }, { objConf, obj }, { bktConf, bkt }, - 'null', 'after'); - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 3); - }); - })); - })); + scenarios.testCasesObj.forEach(objConf => + describe(`object enc-obj-${objConf.name}`, () => { + const obj = { + name: `enc-obj-${objConf.name}`, + kmsKeyInfo: null, + kmsKey: null, + body: `BODY(enc-obj-${objConf.name})`, + }; + /** to be used as source of copy */ + let objForCopy; + + before(async () => { + if (objConf.algo && objConf.masterKeyId) { + obj.kmsKeyInfo = await helpers.createKmsKey(log); + obj.kmsKey = objConf.arnPrefix ? obj.kmsKeyInfo.masterKeyArn : obj.kmsKeyInfo.masterKeyId; + } + objForCopy = bkt.objs[objConf.name]; + }); + + it(`should PutObject ${obj.name} overriding bucket SSE`, async () => + scenarios.tests.putObjectOverrideSSE({ objConf, obj }, { bktConf, bkt }, 'after')); + + // CopyObject scenarios + [ + { name: `${obj.name} into encrypted destination bucket`, forceBktSSE: true }, + { name: `${obj.name} into same bucket with object SSE config` }, + { name: `from encrypted source into ${obj.name} with object SSE config` }, + ].forEach(({ name, forceBktSSE }, index) => + it(`should CopyObject ${name}`, async () => + await scenarios.tests.copyObjectAndSSE( + { copyBkt, objForCopy, copyObj }, + { objConf, obj }, + { bktConf, bkt }, + { index, forceBktSSE }, + 'after' + )) + ); + + // after SSE migration implementation all mpu with sse are fixed + it('should encrypt MPU and put 2 encrypted parts', async () => { + const mpuKey = `${obj.name}-mpu`; + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)) + .promise(); + const partsBody = [`${obj.body}-MPU1`, `${obj.body}-MPU2`]; + const newParts = []; + for (const [index, body] of partsBody.entries()) { + const part = await scenarios.tests.mpuUploadPart( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: body, + Key: mpuKey, + PartNumber: index + 1, + }, + mpu, + objConf.algo || bktConf.algo, + 'after' + ); + newParts.push(part); + } + await scenarios.tests.mpuComplete( + { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, + { existingParts: [], newParts }, + mpu, + objConf.algo || bktConf.algo, + 'after' + ); + const assertion = { + Bucket: bkt.name, + Key: mpuKey, + Body: `${obj.body}-MPU1${obj.body}-MPU2`, + }; + await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, {}, 'after'); + }); + + it('should encrypt MPU and copy an encrypted parts from encrypted bucket', async () => { + const mpuKey = `${obj.name}-mpucopy`; + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)) + .promise(); + const part1 = await scenarios.tests.mpuUploadPartCopy( + { + UploadId: 
mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: 1, + CopySource: `${copyBkt}/${copyObj}`, + }, + mpu, + objConf.algo || bktConf.algo, + 'after' + ); + const part2 = await scenarios.tests.mpuUploadPart( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: `${obj.body}-MPU2`, + Key: mpuKey, + PartNumber: 2, + }, + mpu, + objConf.algo || bktConf.algo, + 'after' + ); + + await scenarios.tests.mpuComplete( + { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, + { existingParts: [], newParts: [part1, part2] }, + mpu, + objConf.algo || bktConf.algo, + 'after' + ); + const assertion = { + Bucket: bkt.name, + Key: mpuKey, + Body: `BODY(copy)${obj.body}-MPU2`, + }; + await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, {}, 'after'); + }); + + it('should encrypt MPU and copy an encrypted range parts from encrypted bucket', async () => { + const mpuKey = `${obj.name}-mpucopy`; + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)) + .promise(); + // source body is "BODY(copy)" + // [copy, BODY] + const sourceRanges = ['bytes=5-8', 'bytes=0-3']; + const newParts = []; + for (const [index, range] of sourceRanges.entries()) { + const part = await scenarios.tests.mpuUploadPartCopy( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: index + 1, + CopySource: `${copyBkt}/${copyObj}`, + CopySourceRange: range, + }, + mpu, + objConf.algo || bktConf.algo, + 'after' + ); + newParts.push(part); + } + + await scenarios.tests.mpuComplete( + { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, + { existingParts: [], newParts }, + mpu, + objConf.algo || bktConf.algo, + 'after' + ); + const assertion = { + Bucket: bkt.name, + Key: mpuKey, + Body: 'copyBODY', + }; + await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, {}, 'after'); + }); + + it(`should PutObject versioned with SSE ${obj.name}`, async () => { + // ensure versioned bucket is empty + await helpers.bucketUtil.empty(bkt.vname); + let { Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise(); + // regularly count versioned objects + assert.strictEqual(Versions.length, 0); + + const bodyBase = `BODY(${obj.name})-base`; + await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyBase); + const baseAssertion = { Bucket: bkt.vname, Key: obj.name }; + await scenarios.assertObjectSSE( + { ...baseAssertion, Body: bodyBase }, + { objConf, obj }, + { bktConf, bkt }, + {}, + 'after' + ); + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 1); + + await helpers.s3 + .putBucketVersioning({ Bucket: bkt.vname, VersioningConfiguration: { Status: 'Enabled' } }) + .promise(); + + const bodyV1 = `BODY(${obj.name})-v1`; + const v1 = await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV1); + const bodyV2 = `BODY(${obj.name})-v2`; + const v2 = await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV2); + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + const current = await helpers.s3.headObject({ Bucket: bkt.vname, Key: obj.name }).promise(); + assert.strictEqual(current.VersionId, v2.VersionId); // ensure versioning as expected + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 
3); + + await scenarios.assertObjectSSE( + { ...baseAssertion, Body: bodyV2 }, + { objConf, obj }, + { bktConf, bkt }, + {}, + 'after' + ); // v2 + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: 'null', Body: bodyBase }, + { objConf, obj }, + { bktConf, bkt }, + {}, + 'after' + ); + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: v1.VersionId, Body: bodyV1 }, + { objConf, obj }, + { bktConf, bkt }, + {}, + 'after' + ); + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: v2.VersionId, Body: bodyV2 }, + { objConf, obj }, + { bktConf, bkt }, + {}, + 'after' + ); + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + await helpers.s3 + .putBucketVersioning({ + Bucket: bkt.vname, + VersioningConfiguration: { Status: 'Suspended' }, + }) + .promise(); + + // should be fine after version suspension + await scenarios.assertObjectSSE( + { ...baseAssertion, Body: bodyV2 }, + { objConf, obj }, + { bktConf, bkt }, + {}, + 'after' + ); // v2 + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: 'null', Body: bodyBase }, + { objConf, obj }, + { bktConf, bkt }, + {}, + 'after' + ); + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: v1.VersionId, Body: bodyV1 }, + { objConf, obj }, + { bktConf, bkt }, + {}, + 'after' + ); + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: v2.VersionId, Body: bodyV2 }, + { objConf, obj }, + { bktConf, bkt }, + {}, + 'after' + ); + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + // put a new null version + const bodyFinal = `BODY(${obj.name})-final`; + await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyFinal); + await scenarios.assertObjectSSE( + { ...baseAssertion, Body: bodyFinal }, + { objConf, obj }, + { bktConf, bkt }, + {}, + 'after' + ); // null + await scenarios.assertObjectSSE( + { ...baseAssertion, Body: bodyFinal }, + { objConf, obj }, + { bktConf, bkt }, + 'null', + 'after' + ); + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + }); + }) + ); + }) + ); it('should encrypt MPU and copy parts from every buckets and objects matrice', async () => { - await helpers.s3.putBucketEncryption({ - Bucket: mpuCopyBkt, - // AES256 because input key is broken for now - ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ algo: 'AES256' }), - }).promise(); + await helpers.s3 + .putBucketEncryption({ + Bucket: mpuCopyBkt, + // AES256 because input key is broken for now + ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ algo: 'AES256' }), + }) + .promise(); const mpuKey = 'mpucopy'; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(mpuCopyBkt, mpuKey, {}, null)).promise(); + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(mpuCopyBkt, mpuKey, {}, null)) + .promise(); const copyPartArg = { UploadId: mpu.UploadId, Bucket: mpuCopyBkt, @@ -380,37 +495,48 @@ describe('SSE KMS arnPrefix', () => { const uploadPromises = scenarios.testCases.reduce((acc, bktConf, bktIdx) => { const bkt = bkts[bktConf.name]; - return acc.concat(scenarios.testCasesObj.map(async (objConf, objIdx) => { - const obj = bkt.objs[objConf.name]; - - const partNumber = bktIdx * scenarios.testCasesObj.length + objIdx + 1; - const res = await helpers.s3.uploadPartCopy({ - 
...copyPartArg, - PartNumber: partNumber, - CopySource: `${bkt.name}/${obj.name}`, - }).promise(); - - return { partNumber, body: obj.body, res: res.CopyPartResult }; - })); + return acc.concat( + scenarios.testCasesObj.map(async (objConf, objIdx) => { + const obj = bkt.objs[objConf.name]; + + const partNumber = bktIdx * scenarios.testCasesObj.length + objIdx + 1; + const res = await helpers.s3 + .uploadPartCopy({ + ...copyPartArg, + PartNumber: partNumber, + CopySource: `${bkt.name}/${obj.name}`, + }) + .promise(); + + return { partNumber, body: obj.body, res: res.CopyPartResult }; + }) + ); }, []); const parts = await Promise.all(uploadPromises); - await helpers.s3.completeMultipartUpload({ - UploadId: mpu.UploadId, - Bucket: mpuCopyBkt, - Key: mpuKey, - MultipartUpload: { - Parts: parts.map(part => ({ PartNumber: part.partNumber, ETag: part.res.ETag })), - }, - }).promise(); + await helpers.s3 + .completeMultipartUpload({ + UploadId: mpu.UploadId, + Bucket: mpuCopyBkt, + Key: mpuKey, + MultipartUpload: { + Parts: parts.map(part => ({ PartNumber: part.partNumber, ETag: part.res.ETag })), + }, + }) + .promise(); const assertion = { Bucket: mpuCopyBkt, Key: mpuKey, Body: parts.reduce((acc, part) => `${acc}${part.body}`, ''), }; - await scenarios.assertObjectSSE(assertion, { objConf: {}, obj: {} }, - { bktConf: { algo: 'AES256' }, bkt: {} }, {}, 'after'); + await scenarios.assertObjectSSE( + assertion, + { objConf: {}, obj: {} }, + { bktConf: { algo: 'AES256' }, bkt: {} }, + {}, + 'after' + ); }); }); @@ -422,10 +548,15 @@ describe('ensure MPU use good SSE', () => { kmsKeympuKmsBkt = (await helpers.createKmsKey(log)).masterKeyArn; await helpers.MD.setup(); await helpers.s3.createBucket({ Bucket: mpuKmsBkt }).promise(); - await helpers.s3.putBucketEncryption({ - Bucket: mpuKmsBkt, - ServerSideEncryptionConfiguration: - helpers.hydrateSSEConfig({ algo: 'aws:kms', masterKeyId: kmsKeympuKmsBkt }) }).promise(); + await helpers.s3 + .putBucketEncryption({ + Bucket: mpuKmsBkt, + ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ + algo: 'aws:kms', + masterKeyId: kmsKeympuKmsBkt, + }), + }) + .promise(); }); after(async () => { @@ -434,8 +565,12 @@ describe('ensure MPU use good SSE', () => { it('mpu upload part should fail with sse header', async () => { const key = 'mpuKeyBadUpload'; - const mpu = await helpers.s3.createMultipartUpload({ - Bucket: mpuKmsBkt, Key: key }).promise(); + const mpu = await helpers.s3 + .createMultipartUpload({ + Bucket: mpuKmsBkt, + Key: key, + }) + .promise(); const res = await promisify(makeRequest)({ method: 'PUT', hostname: helpers.s3.endpoint.hostname, @@ -464,22 +599,36 @@ describe('ensure MPU use good SSE', () => { it('mpu should use encryption from createMPU', async () => { const key = 'mpuKey'; const mpuKms = (await helpers.createKmsKey(log)).masterKeyArn; - const mpu = await helpers.s3.createMultipartUpload({ - Bucket: mpuKmsBkt, Key: key, ServerSideEncryption: 'aws:kms', SSEKMSKeyId: mpuKms }).promise(); + const mpu = await helpers.s3 + .createMultipartUpload({ + Bucket: mpuKmsBkt, + Key: key, + ServerSideEncryption: 'aws:kms', + SSEKMSKeyId: mpuKms, + }) + .promise(); assert.strictEqual(mpu.ServerSideEncryption, 'aws:kms'); assert.strictEqual(mpu.SSEKMSKeyId, helpers.getKey(mpuKms)); - const part1 = await scenarios.tests.mpuUploadPart({ - UploadId: mpu.UploadId, - Bucket: mpuKmsBkt, - Body: 'Scality', - Key: key, - PartNumber: 1, - }, mpu, 'aws:kms', 'after'); + const part1 = await scenarios.tests.mpuUploadPart( + { + UploadId: mpu.UploadId, + 
Bucket: mpuKmsBkt, + Body: 'Scality', + Key: key, + PartNumber: 1, + }, + mpu, + 'aws:kms', + 'after' + ); await scenarios.tests.mpuComplete( { UploadId: mpu.UploadId, Bucket: mpuKmsBkt, Key: key }, { existingParts: [], newParts: [part1] }, - mpu, 'aws:kms', 'after'); + mpu, + 'aws:kms', + 'after' + ); const assertion = { Bucket: mpuKmsBkt, @@ -492,16 +641,12 @@ describe('ensure MPU use good SSE', () => { }; const bktForAssert = { bktConf: { algo: 'aws:kms', masterKeyId: true }, - bkt: { kmsKey: kmsKeympuKmsBkt, - kmsKeyInfo: { masterKeyId: kmsKeympuKmsBkt, masterKeyArn: kmsKeympuKmsBkt } }, + bkt: { + kmsKey: kmsKeympuKmsBkt, + kmsKeyInfo: { masterKeyId: kmsKeympuKmsBkt, masterKeyArn: kmsKeympuKmsBkt }, + }, }; - await scenarios.assertObjectSSE( - assertion, - objForAssert, - bktForAssert, - {}, - 'after', - ); + await scenarios.assertObjectSSE(assertion, objForAssert, bktForAssert, {}, 'after'); }); }); @@ -533,10 +678,11 @@ describe('KMS error', () => { */ const expectedLocalKms = { code: 'KMS.AccessDeniedException', - msg: () => new RegExp( - 'The ciphertext refers to a customer master key that does not exist, ' + - 'does not exist in this region, or you are not allowed to access\\.' - ), + msg: () => + new RegExp( + 'The ciphertext refers to a customer master key that does not exist, ' + + 'does not exist in this region, or you are not allowed to access\\.' + ), }; if (helpers.config.backends.kms === 'kmip') { expected = expectedKMIP; @@ -562,13 +708,16 @@ describe('KMS error', () => { before(async () => { await helpers.s3.createBucket({ Bucket }).promise(); - await helpers.s3.putObject({ - ...helpers.putObjParams(Bucket, 'plaintext', {}, null), - Body: body, - }).promise(); + await helpers.s3 + .putObject({ + ...helpers.putObjParams(Bucket, 'plaintext', {}, null), + Body: body, + }) + .promise(); - mpuPlaintext = await helpers.s3.createMultipartUpload( - helpers.putObjParams(Bucket, 'mpuPlaintext', {}, null)).promise(); + mpuPlaintext = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(Bucket, 'mpuPlaintext', {}, null)) + .promise(); ({ masterKeyId, masterKeyArn } = await helpers.createKmsKey(log)); @@ -577,8 +726,9 @@ describe('KMS error', () => { const obj = await helpers.s3.getObject({ Bucket, Key }).promise(); assert.strictEqual(obj.Body.toString(), body); - mpuEncrypted = await helpers.s3.createMultipartUpload( - helpers.putObjParams(Bucket, 'mpuEncrypted', sseConfig, masterKeyArn)).promise(); + mpuEncrypted = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(Bucket, 'mpuEncrypted', sseConfig, masterKeyArn)) + .promise(); // make key unavailable await helpers.destroyKmsKey(masterKeyArn, log); @@ -589,70 +739,97 @@ describe('KMS error', () => { if (masterKeyArn) { try { await helpers.destroyKmsKey(masterKeyArn, log); - } catch (e) { void e; } + } catch (e) { + void e; + } [masterKeyArn, masterKeyId] = [null, null]; } }); const testCases = [ { - action: 'putObject', kmsAction: 'Encrypt', - fct: async ({ masterKeyArn }) => - helpers.putEncryptedObject(Bucket, 'fail', sseConfig, masterKeyArn, body), + action: 'putObject', + kmsAction: 'Encrypt', + fct: async ({ masterKeyArn }) => helpers.putEncryptedObject(Bucket, 'fail', sseConfig, masterKeyArn, body), }, { - action: 'getObject', kmsAction: 'Decrypt', + action: 'getObject', + kmsAction: 'Decrypt', fct: async () => helpers.s3.getObject({ Bucket, Key }).promise(), }, { - action: 'copyObject', detail: ' when getting from source', kmsAction: 'Decrypt', - fct: async () => - helpers.s3.copyObject({ Bucket, Key: 
'copy', CopySource: `${Bucket}/${Key}` }).promise(), + action: 'copyObject', + detail: ' when getting from source', + kmsAction: 'Decrypt', + fct: async () => helpers.s3.copyObject({ Bucket, Key: 'copy', CopySource: `${Bucket}/${Key}` }).promise(), }, { - action: 'copyObject', detail: ' when putting to destination', kmsAction: 'Encrypt', - fct: async ({ masterKeyArn }) => helpers.s3.copyObject({ - Bucket, - Key: 'copyencrypted', - CopySource: `${Bucket}/plaintext`, - ServerSideEncryption: 'aws:kms', - SSEKMSKeyId: masterKeyArn, - }).promise(), + action: 'copyObject', + detail: ' when putting to destination', + kmsAction: 'Encrypt', + fct: async ({ masterKeyArn }) => + helpers.s3 + .copyObject({ + Bucket, + Key: 'copyencrypted', + CopySource: `${Bucket}/plaintext`, + ServerSideEncryption: 'aws:kms', + SSEKMSKeyId: masterKeyArn, + }) + .promise(), }, { - action: 'createMPU', kmsAction: 'Encrypt', - fct: async ({ masterKeyArn }) => helpers.s3.createMultipartUpload( - helpers.putObjParams(Bucket, 'mpuKeyEncryptedFail', sseConfig, masterKeyArn)).promise(), + action: 'createMPU', + kmsAction: 'Encrypt', + fct: async ({ masterKeyArn }) => + helpers.s3 + .createMultipartUpload(helpers.putObjParams(Bucket, 'mpuKeyEncryptedFail', sseConfig, masterKeyArn)) + .promise(), }, { - action: 'mpu uploadPartCopy', detail: ' when getting from source', kmsAction: 'Decrypt', - fct: async ({ mpuPlaintext }) => helpers.s3.uploadPartCopy({ - UploadId: mpuPlaintext.UploadId, - Bucket, - Key: 'mpuPlaintext', - PartNumber: 1, - CopySource: `${Bucket}/${Key}`, - }).promise(), + action: 'mpu uploadPartCopy', + detail: ' when getting from source', + kmsAction: 'Decrypt', + fct: async ({ mpuPlaintext }) => + helpers.s3 + .uploadPartCopy({ + UploadId: mpuPlaintext.UploadId, + Bucket, + Key: 'mpuPlaintext', + PartNumber: 1, + CopySource: `${Bucket}/${Key}`, + }) + .promise(), }, { - action: 'mpu uploadPart', detail: ' when putting to destination', kmsAction: 'Encrypt', - fct: async ({ mpuEncrypted }) => helpers.s3.uploadPart({ - UploadId: mpuEncrypted.UploadId, - Bucket, - Key: 'mpuEncrypted', - PartNumber: 1, - Body: body, - }).promise(), + action: 'mpu uploadPart', + detail: ' when putting to destination', + kmsAction: 'Encrypt', + fct: async ({ mpuEncrypted }) => + helpers.s3 + .uploadPart({ + UploadId: mpuEncrypted.UploadId, + Bucket, + Key: 'mpuEncrypted', + PartNumber: 1, + Body: body, + }) + .promise(), }, { - action: 'mpu uploadPartCopy', detail: ' when putting to destination', kmsAction: 'Encrypt', - fct: async ({ mpuEncrypted }) => helpers.s3.uploadPartCopy({ - UploadId: mpuEncrypted.UploadId, - Bucket, - Key: 'mpuEncrypted', - PartNumber: 1, - CopySource: `${Bucket}/plaintext`, - }).promise(), + action: 'mpu uploadPartCopy', + detail: ' when putting to destination', + kmsAction: 'Encrypt', + fct: async ({ mpuEncrypted }) => + helpers.s3 + .uploadPartCopy({ + UploadId: mpuEncrypted.UploadId, + Bucket, + Key: 'mpuEncrypted', + PartNumber: 1, + CopySource: `${Bucket}/plaintext`, + }) + .promise(), }, ]; @@ -660,7 +837,7 @@ describe('KMS error', () => { it(`${action} should fail with kms error${detail || ''}`, async () => { await assert.rejects( fct({ masterKeyArn, mpuEncrypted, mpuPlaintext }), - assertKmsError(kmsAction, masterKeyId), + assertKmsError(kmsAction, masterKeyId) ); }); }); diff --git a/tests/functional/sse-kms-migration/beforeMigration.js b/tests/functional/sse-kms-migration/beforeMigration.js index e2c06272d1..984e72b7ac 100644 --- a/tests/functional/sse-kms-migration/beforeMigration.js +++ 
b/tests/functional/sse-kms-migration/beforeMigration.js @@ -12,7 +12,11 @@ const scenarios = require('./scenarios'); // copy part of aws-node-sdk/test/object/encryptionHeaders.js and add more tests // Fix for before migration run to not add a prefix -Object.defineProperty(kms, 'arnPrefix', { get() { return ''; } }); +Object.defineProperty(kms, 'arnPrefix', { + get() { + return ''; + }, +}); describe('SSE KMS before migration', () => { /** Bucket to test CopyObject from and to */ @@ -36,398 +40,536 @@ describe('SSE KMS before migration', () => { if (bktConf.algo && bktConf.masterKeyId) { const key = crypto.randomBytes(32).toString('hex'); bkt.kmsKeyInfo = { masterKeyId: key, masterKeyArn: `${kms.arnPrefix}${key}` }; - bkt.kmsKey = bktConf.arnPrefix - ? bkt.kmsKeyInfo.masterKeyArn - : bkt.kmsKeyInfo.masterKeyId; + bkt.kmsKey = bktConf.arnPrefix ? bkt.kmsKeyInfo.masterKeyArn : bkt.kmsKeyInfo.masterKeyId; } - await helpers.s3.createBucket(({ Bucket: bkt.name })).promise(); - await helpers.s3.createBucket(({ Bucket: bkt.vname })).promise(); + await helpers.s3.createBucket({ Bucket: bkt.name }).promise(); + await helpers.s3.createBucket({ Bucket: bkt.vname }).promise(); if (bktConf.algo) { // bucket encryption will be asserted in bucket test - await helpers.s3.putBucketEncryption({ - Bucket: bkt.name, - ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ - algo: bktConf.algo, masterKeyId: bkt.kmsKey }), - }).promise(); - await helpers.s3.putBucketEncryption({ - Bucket: bkt.vname, - ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ - algo: bktConf.algo, masterKeyId: bkt.kmsKey }), - }).promise(); + await helpers.s3 + .putBucketEncryption({ + Bucket: bkt.name, + ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ + algo: bktConf.algo, + masterKeyId: bkt.kmsKey, + }), + }) + .promise(); + await helpers.s3 + .putBucketEncryption({ + Bucket: bkt.vname, + ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ + algo: bktConf.algo, + masterKeyId: bkt.kmsKey, + }), + }) + .promise(); } // Put an object for each SSE conf in each bucket - await Promise.all(scenarios.testCases.map(async objConf => { - const obj = { - name: `for-copy-enc-obj-${objConf.name}`, - kmsKeyInfo: null, - kmsKey: null, - body: `BODY(for-copy-enc-obj-${objConf.name})`, - }; - bkt.objs[objConf.name] = obj; - if (objConf.algo && objConf.masterKeyId) { - const key = crypto.randomBytes(32).toString('hex'); - obj.kmsKeyInfo = { masterKeyId: key, masterKeyArn: `${kms.arnPrefix}${key}` }; - obj.kmsKey = objConf.arnPrefix - ? obj.kmsKeyInfo.masterKeyArn - : obj.kmsKeyInfo.masterKeyId; - } - return await helpers.putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); - })); + await Promise.all( + scenarios.testCases.map(async objConf => { + const obj = { + name: `for-copy-enc-obj-${objConf.name}`, + kmsKeyInfo: null, + kmsKey: null, + body: `BODY(for-copy-enc-obj-${objConf.name})`, + }; + bkt.objs[objConf.name] = obj; + if (objConf.algo && objConf.masterKeyId) { + const key = crypto.randomBytes(32).toString('hex'); + obj.kmsKeyInfo = { masterKeyId: key, masterKeyArn: `${kms.arnPrefix}${key}` }; + obj.kmsKey = objConf.arnPrefix ? 
obj.kmsKeyInfo.masterKeyArn : obj.kmsKeyInfo.masterKeyId; + } + return await helpers.putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); + }) + ); }; before(async () => { - console.log('Run before migration', - { profile: helpers.credsProfile, accessKeyId: helpers.s3.config.credentials.accessKeyId }); + console.log('Run before migration', { + profile: helpers.credsProfile, + accessKeyId: helpers.s3.config.credentials.accessKeyId, + }); const allBuckets = (await helpers.s3.listBuckets().promise()).Buckets.map(b => b.Name); console.log('List buckets:', allBuckets); await promisify(metadata.setup.bind(metadata))(); // init copy bucket - await helpers.s3.createBucket(({ Bucket: copyBkt })).promise(); - await helpers.s3.createBucket(({ Bucket: mpuCopyBkt })).promise(); - await helpers.s3.putBucketEncryption({ - Bucket: copyBkt, - ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ algo: 'aws:kms', masterKeyId: copyKmsKey }), - }).promise(); + await helpers.s3.createBucket({ Bucket: copyBkt }).promise(); + await helpers.s3.createBucket({ Bucket: mpuCopyBkt }).promise(); + await helpers.s3 + .putBucketEncryption({ + Bucket: copyBkt, + ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ + algo: 'aws:kms', + masterKeyId: copyKmsKey, + }), + }) + .promise(); await helpers.s3.putObject({ Bucket: copyBkt, Key: copyObj, Body: 'BODY(copy)' }).promise(); // Prepare every buckets with 1 object (for copy) await Promise.all(scenarios.testCases.map(async bktConf => this.initBucket(bktConf))); }); - scenarios.testCases.forEach(bktConf => describe(`bucket enc-bkt-${bktConf.name}`, () => { - let bkt = bkts[bktConf.name]; - - before(() => { - bkt = bkts[bktConf.name]; - }); - - if (bktConf.deleteSSE) { - beforeEach(async () => scenarios.deleteBucketSSEBeforeEach(bkt.name, log)); - } - - if (!bktConf.algo) { - it('GetBucketEncryption should return ServerSideEncryptionConfigurationNotFoundError', - async () => await scenarios.tests.getBucketSSEError(bkt.name)); - - if (!bktConf.deleteSSE) { - it('should have non mandatory SSE in bucket MD as test init put an object with AES256', - async () => await scenarios.tests.getBucketNonMandatorySSE(bkt.name, log, 'before')); - } - } else { - it('GetBucketEncryption should return SSE with arnPrefix to key', - async () => await scenarios.tests.getBucketSSE(bkt.name, log, bktConf.algo, - bktConf.masterKeyId ? bkt.kmsKeyInfo.masterKeyArn : null, 'before')); - } - - scenarios.testCasesObj.forEach(objConf => it(`should have pre uploaded object with SSE ${objConf.name}`, - async () => scenarios.tests.getPreUploadedObject(bkt.name, - { objConf, obj: bkt.objs[objConf.name] }, { bktConf, bkt }))); - - scenarios.testCasesObj.forEach(objConf => describe(`object enc-obj-${objConf.name}`, () => { - const obj = { - name: `enc-obj-${objConf.name}`, - kmsKeyInfo: null, - kmsKey: null, - body: `BODY(enc-obj-${objConf.name})`, - }; - /** to be used as source of copy */ - let objForCopy; - - before(async () => { - if (objConf.algo && objConf.masterKeyId) { - const key = crypto.randomBytes(32).toString('hex'); - obj.kmsKeyInfo = { masterKeyId: key, masterKeyArn: `${kms.arnPrefix}${key}` }; - obj.kmsKey = objConf.arnPrefix - ? 
obj.kmsKeyInfo.masterKeyArn - : obj.kmsKeyInfo.masterKeyId; - } - objForCopy = bkt.objs[objConf.name]; - }); - - it(`should PutObject ${obj.name} overriding bucket SSE`, - async () => scenarios.tests.putObjectOverrideSSE({ objConf, obj }, { bktConf, bkt })); - - // CopyObject scenarios - [ - { name: `${obj.name} into encrypted destination bucket`, forceBktSSE: true }, - { name: `${obj.name} into same bucket with object SSE config` }, - { name: `from encrypted source into ${obj.name} with object SSE config` }, - ].forEach(({ name, forceBktSSE }, index) => - it(`should CopyObject ${name}`, async () => - await scenarios.tests.copyObjectAndSSE( - { copyBkt, objForCopy, copyObj }, - { objConf, obj }, - { bktConf, bkt }, - { index, forceBktSSE }, - 'before', - ))); - - // S3C-9996 The SSE was bugged with MPU, where the completion takes only the masterKeyId from bucket - // Fixed at the same time as migration, some scenario can pass only in newer version above migration - const optionalSkip = objConf.algo || bktConf.masterKeyId || (!bktConf.algo && !bktConf.deleteSSE) - ? it.skip - : it; - optionalSkip('should encrypt MPU and put 2 encrypted parts', async () => { - const mpuKey = `${obj.name}-mpu`; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); - const partsBody = [`${obj.body}-MPU1`, `${obj.body}-MPU2`]; - const newParts = []; - for (const [index, body] of partsBody.entries()) { - const part = await scenarios.tests.mpuUploadPart({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Body: body, - Key: mpuKey, - PartNumber: index + 1, - }, mpu, objConf.algo || bktConf.algo, 'before'); - newParts.push(part); - } - await scenarios.tests.mpuComplete( - { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, - { existingParts: [], newParts }, - mpu, objConf.algo || bktConf.algo, 'before'); - const assertion = { - Bucket: bkt.name, - Key: mpuKey, - Body: `${obj.body}-MPU1${obj.body}-MPU2`, - }; - await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }); - }); - - optionalSkip('should encrypt MPU and copy an encrypted parts from encrypted bucket', async () => { - const mpuKey = `${obj.name}-mpucopy`; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); - const part1 = await scenarios.tests.mpuUploadPartCopy({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Key: mpuKey, - PartNumber: 1, - CopySource: `${copyBkt}/${copyObj}`, - }, mpu, objConf.algo || bktConf.algo, 'before'); - const part2 = await scenarios.tests.mpuUploadPart({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Body: `${obj.body}-MPU2`, - Key: mpuKey, - PartNumber: 2, - }, mpu, objConf.algo || bktConf.algo, 'before'); - - await scenarios.tests.mpuComplete( - { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, - { existingParts: [], newParts: [part1, part2] }, - mpu, objConf.algo || bktConf.algo, 'before'); - const assertion = { - Bucket: bkt.name, - Key: mpuKey, - Body: `BODY(copy)${obj.body}-MPU2`, - }; - await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }); - }); - - optionalSkip('should encrypt MPU and copy an encrypted range parts from encrypted bucket', async () => { - const mpuKey = `${obj.name}-mpucopyrange`; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); - // source body is "BODY(copy)" - // [copy, BODY] - const sourceRanges = ['bytes=5-8', 
'bytes=0-3']; - const newParts = []; - for (const [index, range] of sourceRanges.entries()) { - const part = await scenarios.tests.mpuUploadPartCopy({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Key: mpuKey, - PartNumber: index + 1, - CopySource: `${copyBkt}/${copyObj}`, - CopySourceRange: range, - }, mpu, objConf.algo || bktConf.algo, 'before'); - newParts.push(part); - } - - await scenarios.tests.mpuComplete( - { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, - { existingParts: [], newParts }, - mpu, objConf.algo || bktConf.algo, 'before'); - const assertion = { - Bucket: bkt.name, - Key: mpuKey, - Body: 'copyBODY', - }; - await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }); - }); + scenarios.testCases.forEach(bktConf => + describe(`bucket enc-bkt-${bktConf.name}`, () => { + let bkt = bkts[bktConf.name]; - optionalSkip('should prepare empty encrypted MPU without completion', async () => { - const mpuKey = `${obj.name}-migration-mpu-empty`; - await helpers.s3.createMultipartUpload( - helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); + before(() => { + bkt = bkts[bktConf.name]; }); - optionalSkip('should prepare encrypte MPU and put 2 encrypted parts without completion', async () => { - const mpuKey = `${obj.name}-migration-mpu`; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); - const partsBody = [`${obj.body}-MPU1`, `${obj.body}-MPU2`]; - for (const [index, body] of partsBody.entries()) { - await scenarios.tests.mpuUploadPart({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Body: body, - Key: mpuKey, - PartNumber: index + 1, - }, mpu, objConf.algo || bktConf.algo, 'before'); - } - }); + if (bktConf.deleteSSE) { + beforeEach(async () => scenarios.deleteBucketSSEBeforeEach(bkt.name, log)); + } - optionalSkip('should prepare encrypted MPU and copy an encrypted parts ' + - 'from encrypted bucket without completion', async () => { - const mpuKey = `${obj.name}-migration-mpucopy`; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); - await scenarios.tests.mpuUploadPartCopy({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Key: mpuKey, - PartNumber: 1, - CopySource: `${copyBkt}/${copyObj}`, - }, mpu, objConf.algo || bktConf.algo, 'before'); - await scenarios.tests.mpuUploadPart({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Body: `${obj.body}-MPU2`, - Key: mpuKey, - PartNumber: 2, - }, mpu, objConf.algo || bktConf.algo, 'before'); - }); + if (!bktConf.algo) { + it('GetBucketEncryption should return ServerSideEncryptionConfigurationNotFoundError', async () => + await scenarios.tests.getBucketSSEError(bkt.name)); - optionalSkip('should prepare encrypte MPU and copy an encrypted range parts ' + - 'from encrypted bucket without completion', async () => { - const mpuKey = `${obj.name}-migration-mpucopyrange`; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)).promise(); - // source body is "BODY(copy)" - // [copy, BODY] - const sourceRanges = ['bytes=5-8', 'bytes=0-3']; - for (const [index, range] of sourceRanges.entries()) { - await scenarios.tests.mpuUploadPartCopy({ - UploadId: mpu.UploadId, - Bucket: bkt.name, - Key: mpuKey, - PartNumber: index + 1, - CopySource: `${copyBkt}/${copyObj}`, - CopySourceRange: range, - }, mpu, objConf.algo || bktConf.algo, 'before'); + if (!bktConf.deleteSSE) { + it('should have non 
mandatory SSE in bucket MD as test init put an object with AES256', async () => + await scenarios.tests.getBucketNonMandatorySSE(bkt.name, log, 'before')); } - }); - - it(`should PutObject versioned with SSE ${obj.name}`, async () => { - // ensure versioned bucket is empty - await helpers.bucketUtil.empty(bkt.vname); - let { Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise(); - // regularly count versioned objects - assert.strictEqual(Versions.length, 0); - - const bodyBase = `BODY(${obj.name})-base`; - await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyBase); - const baseAssertion = { Bucket: bkt.vname, Key: obj.name }; - await scenarios.assertObjectSSE( - { ...baseAssertion, Body: bodyBase }, - { objConf, obj }, { bktConf, bkt }); - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 1); - - await helpers.s3.putBucketVersioning({ Bucket: bkt.vname, - VersioningConfiguration: { Status: 'Enabled' }, - }).promise(); - - const bodyV1 = `BODY(${obj.name})-v1`; - const v1 = await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV1); - const bodyV2 = `BODY(${obj.name})-v2`; - const v2 = await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV2); - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 3); - - const current = await helpers.s3.headObject({ Bucket: bkt.vname, Key: obj.name }).promise(); - assert.strictEqual(current.VersionId, v2.VersionId); // ensure versioning as expected - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 3); - - await scenarios.assertObjectSSE( - { ...baseAssertion, Body: bodyV2 }, { objConf, obj }, { bktConf, bkt }); // v2 - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: 'null', Body: bodyBase }, { objConf, obj }, { bktConf, bkt }); - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: v1.VersionId, Body: bodyV1 }, { objConf, obj }, { bktConf, bkt }); - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: v2.VersionId, Body: bodyV2 }, { objConf, obj }, { bktConf, bkt }); - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 3); - - await helpers.s3.putBucketVersioning({ Bucket: bkt.vname, - VersioningConfiguration: { Status: 'Suspended' }, - }).promise(); - - // should be fine after version suspension - await scenarios.assertObjectSSE( - { ...baseAssertion, Body: bodyV2 }, { objConf, obj }, { bktConf, bkt }); // v2 - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: 'null', Body: bodyBase }, { objConf, obj }, { bktConf, bkt }); - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: v1.VersionId, Body: bodyV1 }, { objConf, obj }, { bktConf, bkt }); - await scenarios.assertObjectSSE( - { ...baseAssertion, VersionId: v2.VersionId, Body: bodyV2 }, { objConf, obj }, { bktConf, bkt }); - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 3); - - // put a new null version - const bodyFinal = `BODY(${obj.name})-final`; - await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyFinal); - await scenarios.assertObjectSSE( - { ...baseAssertion, Body: bodyFinal }, { objConf, obj }, { bktConf, bkt }); // null - 
await scenarios.assertObjectSSE( - { ...baseAssertion, Body: bodyFinal }, { objConf, obj }, { bktConf, bkt }, 'null'); - ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); - assert.strictEqual(Versions.length, 3); - }); - })); - })); - - it('should prepare encrypted MPU and copy parts from ' + - 'every buckets and objects matrice without completion', async () => { - await helpers.s3.putBucketEncryption({ - Bucket: mpuCopyBkt, - // AES256 because input key is broken for now - ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ algo: 'AES256' }), - }).promise(); - const mpuKey = 'mpucopy'; - const mpu = await helpers.s3.createMultipartUpload( - helpers.putObjParams(mpuCopyBkt, mpuKey, {}, null)).promise(); - const copyPartArg = { - UploadId: mpu.UploadId, - Bucket: mpuCopyBkt, - Key: mpuKey, - }; - // For each test Case bucket and object copy a part - const uploadPromises = scenarios.testCases.reduce((acc, bktConf, bktIdx) => { - const bkt = bkts[bktConf.name]; - - return acc.concat(scenarios.testCasesObj.map(async (objConf, objIdx) => { - const obj = bkt.objs[objConf.name]; - - const partNumber = bktIdx * scenarios.testCasesObj.length + objIdx + 1; - const res = await helpers.s3.uploadPartCopy({ - ...copyPartArg, - PartNumber: partNumber, - CopySource: `${bkt.name}/${obj.name}`, - }).promise(); - - return { partNumber, body: obj.body, res: res.CopyPartResult }; - })); - }, []); + } else { + it('GetBucketEncryption should return SSE with arnPrefix to key', async () => + await scenarios.tests.getBucketSSE( + bkt.name, + log, + bktConf.algo, + bktConf.masterKeyId ? bkt.kmsKeyInfo.masterKeyArn : null, + 'before' + )); + } - await Promise.all(uploadPromises); - }); + scenarios.testCasesObj.forEach(objConf => + it(`should have pre uploaded object with SSE ${objConf.name}`, async () => + scenarios.tests.getPreUploadedObject( + bkt.name, + { objConf, obj: bkt.objs[objConf.name] }, + { bktConf, bkt } + )) + ); + + scenarios.testCasesObj.forEach(objConf => + describe(`object enc-obj-${objConf.name}`, () => { + const obj = { + name: `enc-obj-${objConf.name}`, + kmsKeyInfo: null, + kmsKey: null, + body: `BODY(enc-obj-${objConf.name})`, + }; + /** to be used as source of copy */ + let objForCopy; + + before(async () => { + if (objConf.algo && objConf.masterKeyId) { + const key = crypto.randomBytes(32).toString('hex'); + obj.kmsKeyInfo = { masterKeyId: key, masterKeyArn: `${kms.arnPrefix}${key}` }; + obj.kmsKey = objConf.arnPrefix ? 
obj.kmsKeyInfo.masterKeyArn : obj.kmsKeyInfo.masterKeyId; + } + objForCopy = bkt.objs[objConf.name]; + }); + + it(`should PutObject ${obj.name} overriding bucket SSE`, async () => + scenarios.tests.putObjectOverrideSSE({ objConf, obj }, { bktConf, bkt })); + + // CopyObject scenarios + [ + { name: `${obj.name} into encrypted destination bucket`, forceBktSSE: true }, + { name: `${obj.name} into same bucket with object SSE config` }, + { name: `from encrypted source into ${obj.name} with object SSE config` }, + ].forEach(({ name, forceBktSSE }, index) => + it(`should CopyObject ${name}`, async () => + await scenarios.tests.copyObjectAndSSE( + { copyBkt, objForCopy, copyObj }, + { objConf, obj }, + { bktConf, bkt }, + { index, forceBktSSE }, + 'before' + )) + ); + + // S3C-9996 The SSE was bugged with MPU, where the completion takes only the masterKeyId from bucket + // Fixed at the same time as migration, some scenario can pass only in newer version above migration + const optionalSkip = + objConf.algo || bktConf.masterKeyId || (!bktConf.algo && !bktConf.deleteSSE) ? it.skip : it; + optionalSkip('should encrypt MPU and put 2 encrypted parts', async () => { + const mpuKey = `${obj.name}-mpu`; + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)) + .promise(); + const partsBody = [`${obj.body}-MPU1`, `${obj.body}-MPU2`]; + const newParts = []; + for (const [index, body] of partsBody.entries()) { + const part = await scenarios.tests.mpuUploadPart( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: body, + Key: mpuKey, + PartNumber: index + 1, + }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + newParts.push(part); + } + await scenarios.tests.mpuComplete( + { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, + { existingParts: [], newParts }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + const assertion = { + Bucket: bkt.name, + Key: mpuKey, + Body: `${obj.body}-MPU1${obj.body}-MPU2`, + }; + await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }); + }); + + optionalSkip('should encrypt MPU and copy an encrypted parts from encrypted bucket', async () => { + const mpuKey = `${obj.name}-mpucopy`; + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)) + .promise(); + const part1 = await scenarios.tests.mpuUploadPartCopy( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: 1, + CopySource: `${copyBkt}/${copyObj}`, + }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + const part2 = await scenarios.tests.mpuUploadPart( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: `${obj.body}-MPU2`, + Key: mpuKey, + PartNumber: 2, + }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + + await scenarios.tests.mpuComplete( + { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, + { existingParts: [], newParts: [part1, part2] }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + const assertion = { + Bucket: bkt.name, + Key: mpuKey, + Body: `BODY(copy)${obj.body}-MPU2`, + }; + await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }); + }); + + optionalSkip( + 'should encrypt MPU and copy an encrypted range parts from encrypted bucket', + async () => { + const mpuKey = `${obj.name}-mpucopyrange`; + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)) + .promise(); + // source body is "BODY(copy)" 
+ // [copy, BODY] + const sourceRanges = ['bytes=5-8', 'bytes=0-3']; + const newParts = []; + for (const [index, range] of sourceRanges.entries()) { + const part = await scenarios.tests.mpuUploadPartCopy( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: index + 1, + CopySource: `${copyBkt}/${copyObj}`, + CopySourceRange: range, + }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + newParts.push(part); + } + + await scenarios.tests.mpuComplete( + { UploadId: mpu.UploadId, Bucket: bkt.name, Key: mpuKey }, + { existingParts: [], newParts }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + const assertion = { + Bucket: bkt.name, + Key: mpuKey, + Body: 'copyBODY', + }; + await scenarios.assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }); + } + ); + + optionalSkip('should prepare empty encrypted MPU without completion', async () => { + const mpuKey = `${obj.name}-migration-mpu-empty`; + await helpers.s3 + .createMultipartUpload(helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)) + .promise(); + }); + + optionalSkip( + 'should prepare encrypte MPU and put 2 encrypted parts without completion', + async () => { + const mpuKey = `${obj.name}-migration-mpu`; + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)) + .promise(); + const partsBody = [`${obj.body}-MPU1`, `${obj.body}-MPU2`]; + for (const [index, body] of partsBody.entries()) { + await scenarios.tests.mpuUploadPart( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: body, + Key: mpuKey, + PartNumber: index + 1, + }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + } + } + ); + + optionalSkip( + 'should prepare encrypted MPU and copy an encrypted parts ' + + 'from encrypted bucket without completion', + async () => { + const mpuKey = `${obj.name}-migration-mpucopy`; + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)) + .promise(); + await scenarios.tests.mpuUploadPartCopy( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: 1, + CopySource: `${copyBkt}/${copyObj}`, + }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + await scenarios.tests.mpuUploadPart( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Body: `${obj.body}-MPU2`, + Key: mpuKey, + PartNumber: 2, + }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + } + ); + + optionalSkip( + 'should prepare encrypte MPU and copy an encrypted range parts ' + + 'from encrypted bucket without completion', + async () => { + const mpuKey = `${obj.name}-migration-mpucopyrange`; + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(bkt.name, mpuKey, objConf, obj.kmsKey)) + .promise(); + // source body is "BODY(copy)" + // [copy, BODY] + const sourceRanges = ['bytes=5-8', 'bytes=0-3']; + for (const [index, range] of sourceRanges.entries()) { + await scenarios.tests.mpuUploadPartCopy( + { + UploadId: mpu.UploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: index + 1, + CopySource: `${copyBkt}/${copyObj}`, + CopySourceRange: range, + }, + mpu, + objConf.algo || bktConf.algo, + 'before' + ); + } + } + ); + + it(`should PutObject versioned with SSE ${obj.name}`, async () => { + // ensure versioned bucket is empty + await helpers.bucketUtil.empty(bkt.vname); + let { Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise(); + // regularly count versioned objects + assert.strictEqual(Versions.length, 0); 
+ + const bodyBase = `BODY(${obj.name})-base`; + await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyBase); + const baseAssertion = { Bucket: bkt.vname, Key: obj.name }; + await scenarios.assertObjectSSE( + { ...baseAssertion, Body: bodyBase }, + { objConf, obj }, + { bktConf, bkt } + ); + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 1); + + await helpers.s3 + .putBucketVersioning({ Bucket: bkt.vname, VersioningConfiguration: { Status: 'Enabled' } }) + .promise(); + + const bodyV1 = `BODY(${obj.name})-v1`; + const v1 = await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV1); + const bodyV2 = `BODY(${obj.name})-v2`; + const v2 = await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyV2); + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + const current = await helpers.s3.headObject({ Bucket: bkt.vname, Key: obj.name }).promise(); + assert.strictEqual(current.VersionId, v2.VersionId); // ensure versioning as expected + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + await scenarios.assertObjectSSE( + { ...baseAssertion, Body: bodyV2 }, + { objConf, obj }, + { bktConf, bkt } + ); // v2 + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: 'null', Body: bodyBase }, + { objConf, obj }, + { bktConf, bkt } + ); + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: v1.VersionId, Body: bodyV1 }, + { objConf, obj }, + { bktConf, bkt } + ); + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: v2.VersionId, Body: bodyV2 }, + { objConf, obj }, + { bktConf, bkt } + ); + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + await helpers.s3 + .putBucketVersioning({ + Bucket: bkt.vname, + VersioningConfiguration: { Status: 'Suspended' }, + }) + .promise(); + + // should be fine after version suspension + await scenarios.assertObjectSSE( + { ...baseAssertion, Body: bodyV2 }, + { objConf, obj }, + { bktConf, bkt } + ); // v2 + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: 'null', Body: bodyBase }, + { objConf, obj }, + { bktConf, bkt } + ); + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: v1.VersionId, Body: bodyV1 }, + { objConf, obj }, + { bktConf, bkt } + ); + await scenarios.assertObjectSSE( + { ...baseAssertion, VersionId: v2.VersionId, Body: bodyV2 }, + { objConf, obj }, + { bktConf, bkt } + ); + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + + // put a new null version + const bodyFinal = `BODY(${obj.name})-final`; + await helpers.putEncryptedObject(bkt.vname, obj.name, objConf, obj.kmsKey, bodyFinal); + await scenarios.assertObjectSSE( + { ...baseAssertion, Body: bodyFinal }, + { objConf, obj }, + { bktConf, bkt } + ); // null + await scenarios.assertObjectSSE( + { ...baseAssertion, Body: bodyFinal }, + { objConf, obj }, + { bktConf, bkt }, + 'null' + ); + ({ Versions } = await helpers.s3.listObjectVersions({ Bucket: bkt.vname }).promise()); + assert.strictEqual(Versions.length, 3); + }); + }) + ); + }) + ); + + it( + 'should prepare encrypted MPU and copy parts from ' + 'every buckets and objects matrice without completion', + async () => 
{ + await helpers.s3 + .putBucketEncryption({ + Bucket: mpuCopyBkt, + // AES256 because input key is broken for now + ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ algo: 'AES256' }), + }) + .promise(); + const mpuKey = 'mpucopy'; + const mpu = await helpers.s3 + .createMultipartUpload(helpers.putObjParams(mpuCopyBkt, mpuKey, {}, null)) + .promise(); + const copyPartArg = { + UploadId: mpu.UploadId, + Bucket: mpuCopyBkt, + Key: mpuKey, + }; + // For each test Case bucket and object copy a part + const uploadPromises = scenarios.testCases.reduce((acc, bktConf, bktIdx) => { + const bkt = bkts[bktConf.name]; + + return acc.concat( + scenarios.testCasesObj.map(async (objConf, objIdx) => { + const obj = bkt.objs[objConf.name]; + + const partNumber = bktIdx * scenarios.testCasesObj.length + objIdx + 1; + const res = await helpers.s3 + .uploadPartCopy({ + ...copyPartArg, + PartNumber: partNumber, + CopySource: `${bkt.name}/${obj.name}`, + }) + .promise(); + + return { partNumber, body: obj.body, res: res.CopyPartResult }; + }) + ); + }, []); + + await Promise.all(uploadPromises); + } + ); }); diff --git a/tests/functional/sse-kms-migration/cleanup.js b/tests/functional/sse-kms-migration/cleanup.js index a184255e0b..4a3670e50c 100644 --- a/tests/functional/sse-kms-migration/cleanup.js +++ b/tests/functional/sse-kms-migration/cleanup.js @@ -16,8 +16,10 @@ describe('SSE KMS Cleanup', () => { const mpuCopyBkt = 'enc-bkt-mpu-copy'; it('Empty and delete buckets for SSE KMS Migration', async () => { - console.log('Run cleanup', - { profile: helpers.credsProfile, accessKeyId: helpers.s3.config.credentials.accessKeyId }); + console.log('Run cleanup', { + profile: helpers.credsProfile, + accessKeyId: helpers.s3.config.credentials.accessKeyId, + }); const allBuckets = (await helpers.s3.listBuckets().promise()).Buckets.map(b => b.Name); console.log('List buckets:', allBuckets); @@ -26,10 +28,14 @@ describe('SSE KMS Cleanup', () => { try { await cleanup(copyBkt); await cleanup(mpuCopyBkt); - await Promise.all(scenarios.testCases.map(async bktConf => { - await cleanup(`enc-bkt-${bktConf.name}`); - return await cleanup(`versioned-enc-bkt-${bktConf.name}`); - })); - } catch (e) { void e; } + await Promise.all( + scenarios.testCases.map(async bktConf => { + await cleanup(`enc-bkt-${bktConf.name}`); + return await cleanup(`versioned-enc-bkt-${bktConf.name}`); + }) + ); + } catch (e) { + void e; + } }); }); diff --git a/tests/functional/sse-kms-migration/configs/aws.json b/tests/functional/sse-kms-migration/configs/aws.json index c593f310f5..913e031c99 100644 --- a/tests/functional/sse-kms-migration/configs/aws.json +++ b/tests/functional/sse-kms-migration/configs/aws.json @@ -1,9 +1,8 @@ { - "kmsAWS": { "noAwsArn": true, "providerName": "local", - "region": "us-east-1", + "region": "us-east-1", "endpoint": "http://0:8080", "ak": "456", "sk": "123" diff --git a/tests/functional/sse-kms-migration/configs/base.json b/tests/functional/sse-kms-migration/configs/base.json index 841aa341c7..9db4bea239 100644 --- a/tests/functional/sse-kms-migration/configs/base.json +++ b/tests/functional/sse-kms-migration/configs/base.json @@ -12,28 +12,33 @@ "127.0.0.2": "us-east-1", "s3.amazonaws.com": "us-east-1" }, - "websiteEndpoints": ["s3-website-us-east-1.amazonaws.com", - "s3-website.us-east-2.amazonaws.com", - "s3-website-us-west-1.amazonaws.com", - "s3-website-us-west-2.amazonaws.com", - "s3-website.ap-south-1.amazonaws.com", - "s3-website.ap-northeast-2.amazonaws.com", - 
"s3-website-ap-southeast-1.amazonaws.com", - "s3-website-ap-southeast-2.amazonaws.com", - "s3-website-ap-northeast-1.amazonaws.com", - "s3-website.eu-central-1.amazonaws.com", - "s3-website-eu-west-1.amazonaws.com", - "s3-website-sa-east-1.amazonaws.com", - "s3-website.localhost", - "s3-website.scality.test"], - "replicationEndpoints": [{ - "site": "zenko", - "servers": ["127.0.0.1:8000"], - "default": true - }, { - "site": "us-east-2", - "type": "aws_s3" - }], + "websiteEndpoints": [ + "s3-website-us-east-1.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ap-northeast-2.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + "s3-website.localhost", + "s3-website.scality.test" + ], + "replicationEndpoints": [ + { + "site": "zenko", + "servers": ["127.0.0.1:8000"], + "default": true + }, + { + "site": "us-east-2", + "type": "aws_s3" + } + ], "cdmi": { "host": "localhost", "port": 81, diff --git a/tests/functional/sse-kms-migration/configs/kmip-cluster.json b/tests/functional/sse-kms-migration/configs/kmip-cluster.json index 70ffcaf1b5..0723ab7260 100644 --- a/tests/functional/sse-kms-migration/configs/kmip-cluster.json +++ b/tests/functional/sse-kms-migration/configs/kmip-cluster.json @@ -4,7 +4,7 @@ "client": { "compoundCreateActivate": false }, - "transport": [ + "transport": [ { "pipelineDepth": 8, "tls": { diff --git a/tests/functional/sse-kms-migration/helpers.js b/tests/functional/sse-kms-migration/helpers.js index bd9ad70c91..5d1cc3b6fe 100644 --- a/tests/functional/sse-kms-migration/helpers.js +++ b/tests/functional/sse-kms-migration/helpers.js @@ -22,12 +22,18 @@ const bucketUtil = new BucketUtility(credsProfile); function hydrateSSEConfig({ algo: SSEAlgorithm, masterKeyId: KMSMasterKeyID }) { // stringify and parse to strip undefined values - return JSON.parse(JSON.stringify({ Rules: [{ - ApplyServerSideEncryptionByDefault: { - SSEAlgorithm, - KMSMasterKeyID, - }, - }] })); + return JSON.parse( + JSON.stringify({ + Rules: [ + { + ApplyServerSideEncryptionByDefault: { + SSEAlgorithm, + KMSMasterKeyID, + }, + }, + ], + }) + ); } function putObjParams(Bucket, Key, sseConfig, kmsKeyId) { @@ -52,17 +58,16 @@ const MD = { async function getBucketSSE(Bucket) { const sse = await s3.getBucketEncryption({ Bucket }).promise(); - return sse - .ServerSideEncryptionConfiguration - .Rules[0] - .ApplyServerSideEncryptionByDefault; + return sse.ServerSideEncryptionConfiguration.Rules[0].ApplyServerSideEncryptionByDefault; } async function putEncryptedObject(Bucket, Key, sseConfig, kmsKeyId, Body) { - return s3.putObject({ - ...putObjParams(Bucket, Key, sseConfig, kmsKeyId), - Body, - }).promise(); + return s3 + .putObject({ + ...putObjParams(Bucket, Key, sseConfig, kmsKeyId), + Body, + }) + .promise(); } async function getObjectMDSSE(Bucket, Key) { diff --git a/tests/functional/sse-kms-migration/load.js b/tests/functional/sse-kms-migration/load.js index 9ae2b43eed..bf93983ed7 100644 --- a/tests/functional/sse-kms-migration/load.js +++ b/tests/functional/sse-kms-migration/load.js @@ -37,8 +37,7 @@ async function spawnTcpdump(port, packetCount) { detached: true, stdio: ['ignore', 'pipe', 'pipe'], // ignored stdin shell: false, // no need as 
it's detached - - }, + } ); let stderr = ''; child.stderr.on('data', data => { @@ -51,7 +50,8 @@ async function spawnTcpdump(port, packetCount) { spawnTimeout = setTimeout(() => { if (child.exitCode !== null || child.signalCode !== null) { const err = `countPacketsByIp.sh stopped after spawn with code ${ - child.exitCode} and signal ${child.signalCode}.\nStderr: ${stderr}`; + child.exitCode + } and signal ${child.signalCode}.\nStderr: ${stderr}`; reject(new Error(err)); } else { resolve(child); @@ -70,9 +70,7 @@ async function spawnTcpdump(port, packetCount) { if (spawnTimeout) { clearTimeout(spawnTimeout); } - reject(new Error( - `tcpdump script closed with code ${code} and signal ${signal}.\nStderr: ${stderr}` - )); + reject(new Error(`tcpdump script closed with code ${code} and signal ${signal}.\nStderr: ${stderr}`)); } }); }); @@ -89,8 +87,9 @@ async function stopTcpdump(tcpdump) { }); } -describe(`KMS load (kmip cluster ${KMS_NODES} nodes): ${OBJECT_NUMBER - } objs each in ${BUCKET_NUMBER} bkts (${TOTAL_OBJECTS} objs)`, () => { +describe(`KMS load (kmip cluster ${KMS_NODES} nodes): ${ + OBJECT_NUMBER +} objs each in ${BUCKET_NUMBER} bkts (${TOTAL_OBJECTS} objs)`, () => { let buckets = []; let tcpdumpProcess; let stdout; @@ -104,21 +103,28 @@ describe(`KMS load (kmip cluster ${KMS_NODES} nodes): ${OBJECT_NUMBER const { masterKeyArn } = await helpers.createKmsKey(log); await helpers.s3.createBucket({ Bucket }).promise(); - await helpers.s3.putBucketEncryption({ - Bucket, - ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ - algo: 'aws:kms', masterKeyId: masterKeyArn }), - }).promise(); + await helpers.s3 + .putBucketEncryption({ + Bucket, + ServerSideEncryptionConfiguration: helpers.hydrateSSEConfig({ + algo: 'aws:kms', + masterKeyId: masterKeyArn, + }), + }) + .promise(); return { Bucket, masterKeyArn }; - })); + }) + ); }); after(async () => { - await Promise.all(buckets.map(async ({ Bucket, masterKeyArn }) => { - await helpers.cleanup(Bucket); - return helpers.destroyKmsKey(masterKeyArn, log); - })); + await Promise.all( + buckets.map(async ({ Bucket, masterKeyArn }) => { + await helpers.cleanup(Bucket); + return helpers.destroyKmsKey(masterKeyArn, log); + }) + ); await promisify(kms.client.stop.bind(kms.client))(); }); @@ -175,27 +181,36 @@ describe(`KMS load (kmip cluster ${KMS_NODES} nodes): ${OBJECT_NUMBER const repartitionCount = repartition.map(({ count }) => count); assert.strictEqual(code, 0, `tcpdump script closed with code ${code} and signal ${signal}`); assert(repartition.length === KMS_NODES, `Expected ${KMS_NODES} IPs but got ${repartition.length}`); - assert(repartitionCount.every(count => - count >= EXPECTED_MIN && count <= EXPECTED_MAX), - `Repartition counts should be around ${TOTAL_OBJECTS_PER_NODE} but got ${repartitionCount}`); + assert( + repartitionCount.every(count => count >= EXPECTED_MIN && count <= EXPECTED_MAX), + `Repartition counts should be around ${TOTAL_OBJECTS_PER_NODE} but got ${repartitionCount}` + ); } it(`should encrypt ${TOTAL_OBJECTS} times in parallel, ~${TOTAL_OBJECTS_PER_NODE} per node`, async () => { - await (Promise.all( - buckets.map(async ({ Bucket }) => Promise.all( - new Array(OBJECT_NUMBER).fill(0).map(async (_, i) => - helpers.s3.putObject({ Bucket, Key: `obj-${i}`, Body: `body-${i}` }).promise()) - )) - )); + await Promise.all( + buckets.map(async ({ Bucket }) => + Promise.all( + new Array(OBJECT_NUMBER) + .fill(0) + .map(async (_, i) => + helpers.s3.putObject({ Bucket, Key: `obj-${i}`, Body: `body-${i}` }).promise() + ) 
+ ) + ) + ); await assertRepartition(closePromise); }); it(`should decrypt ${TOTAL_OBJECTS} times in parallel, ~${TOTAL_OBJECTS_PER_NODE} per node`, async () => { await Promise.all( - buckets.map(async ({ Bucket }) => Promise.all( - new Array(OBJECT_NUMBER).fill(0).map(async (_, i) => - helpers.s3.getObject({ Bucket, Key: `obj-${i}` }).promise()) - )) + buckets.map(async ({ Bucket }) => + Promise.all( + new Array(OBJECT_NUMBER) + .fill(0) + .map(async (_, i) => helpers.s3.getObject({ Bucket, Key: `obj-${i}` }).promise()) + ) + ) ); await assertRepartition(closePromise); }); diff --git a/tests/functional/sse-kms-migration/migration.js b/tests/functional/sse-kms-migration/migration.js index 1fedacaee0..7bf013ea1b 100644 --- a/tests/functional/sse-kms-migration/migration.js +++ b/tests/functional/sse-kms-migration/migration.js @@ -20,21 +20,22 @@ async function assertObjectSSE( { obj, objConf }, { bkt, bktConf }, // headers come from the command like putObject, CopyObject, MPUs... - { arnPrefix = kms.arnPrefix, put, headers } = { arnPrefix: kms.arnPrefix }, + { arnPrefix = kms.arnPrefix, put, headers } = { arnPrefix: kms.arnPrefix } ) { const sseMD = await helpers.getObjectMDSSE(Bucket, Key); const head = await helpers.s3.headObject({ Bucket, Key, VersionId }).promise(); const sseMDMigrated = await helpers.getObjectMDSSE(Bucket, Key); - const expectedKey = `${sseMD.SSEKMSKeyId && isScalityKmsArn(sseMD.SSEKMSKeyId) - ? '' : arnPrefix}${sseMD.SSEKMSKeyId}`; + const expectedKey = `${ + sseMD.SSEKMSKeyId && isScalityKmsArn(sseMD.SSEKMSKeyId) ? '' : arnPrefix + }${sseMD.SSEKMSKeyId}`; if (!put && sseMD.SSEKMSKeyId) { assert.doesNotMatch(sseMD.SSEKMSKeyId, SCAL_KMS_ARN_REG); } // obj precedence over bkt - assert.strictEqual(head.ServerSideEncryption, (objConf.algo || bktConf.algo)); - headers && assert.strictEqual(headers.ServerSideEncryption, (objConf.algo || bktConf.algo)); + assert.strictEqual(head.ServerSideEncryption, objConf.algo || bktConf.algo); + headers && assert.strictEqual(headers.ServerSideEncryption, objConf.algo || bktConf.algo); if (sseMDMigrated.SSEKMSKeyId) { // on metadata verify the full key with arn prefix @@ -82,12 +83,10 @@ describe('SSE KMS migration', () => { bkts[bktConf.name] = bkt; if (bktConf.algo && bktConf.masterKeyId) { bkt.kmsKeyInfo = await helpers.createKmsKey(log); - bkt.kmsKey = bktConf.arnPrefix - ? bkt.kmsKeyInfo.masterKeyArn - : bkt.kmsKeyInfo.masterKeyId; + bkt.kmsKey = bktConf.arnPrefix ? bkt.kmsKeyInfo.masterKeyArn : bkt.kmsKeyInfo.masterKeyId; } - await helpers.s3.headBucket(({ Bucket: bkt.name })).promise(); - await helpers.s3.headBucket(({ Bucket: bkt.vname })).promise(); + await helpers.s3.headBucket({ Bucket: bkt.name }).promise(); + await helpers.s3.headBucket({ Bucket: bkt.vname }).promise(); if (bktConf.algo) { const bktSSE = await helpers.getBucketSSE(bkt.name); assert.strictEqual(bktSSE.SSEAlgorithm, bktConf.algo); @@ -103,38 +102,40 @@ describe('SSE KMS migration', () => { } // Check object SSE using MD api, not S3 to avoid triggering migration - await Promise.all(scenarios.testCases.map(async objConf => { - const obj = { - name: `for-copy-enc-obj-${objConf.name}`, - kmsKeyInfo: null, - kmsKey: null, - body: `BODY(for-copy-enc-obj-${objConf.name})`, - }; - bkt.objs[objConf.name] = obj; - if (objConf.algo && objConf.masterKeyId) { - obj.kmsKeyInfo = await helpers.createKmsKey(log); - obj.kmsKey = objConf.arnPrefix - ? 
obj.kmsKeyInfo.masterKeyArn - : obj.kmsKeyInfo.masterKeyId; - } - const objSSE = await helpers.getObjectMDSSE(bkt.name, obj.name); - assert.strictEqual(objSSE.ServerSideEncryption, objConf.algo || bktConf.algo || ''); - assert.doesNotMatch(objSSE.SSEKMSKeyId, SCAL_KMS_ARN_REG); - return undefined; - })); + await Promise.all( + scenarios.testCases.map(async objConf => { + const obj = { + name: `for-copy-enc-obj-${objConf.name}`, + kmsKeyInfo: null, + kmsKey: null, + body: `BODY(for-copy-enc-obj-${objConf.name})`, + }; + bkt.objs[objConf.name] = obj; + if (objConf.algo && objConf.masterKeyId) { + obj.kmsKeyInfo = await helpers.createKmsKey(log); + obj.kmsKey = objConf.arnPrefix ? obj.kmsKeyInfo.masterKeyArn : obj.kmsKeyInfo.masterKeyId; + } + const objSSE = await helpers.getObjectMDSSE(bkt.name, obj.name); + assert.strictEqual(objSSE.ServerSideEncryption, objConf.algo || bktConf.algo || ''); + assert.doesNotMatch(objSSE.SSEKMSKeyId, SCAL_KMS_ARN_REG); + return undefined; + }) + ); }; before('setup', async () => { - console.log('Run migration', - { profile: helpers.credsProfile, accessKeyId: helpers.s3.config.credentials.accessKeyId }); + console.log('Run migration', { + profile: helpers.credsProfile, + accessKeyId: helpers.s3.config.credentials.accessKeyId, + }); const allBuckets = (await helpers.s3.listBuckets().promise()).Buckets.map(b => b.Name); console.log('List buckets:', allBuckets); await helpers.MD.setup(); await helpers.s3.headBucket({ Bucket: copyBkt }).promise(); - await helpers.s3.headBucket(({ Bucket: mpuCopyBkt })).promise(); + await helpers.s3.headBucket({ Bucket: mpuCopyBkt }).promise(); const copySSE = await helpers.s3.getBucketEncryption({ Bucket: copyBkt }).promise(); - const { SSEAlgorithm, KMSMasterKeyID } = copySSE - .ServerSideEncryptionConfiguration.Rules[0].ApplyServerSideEncryptionByDefault; + const { SSEAlgorithm, KMSMasterKeyID } = + copySSE.ServerSideEncryptionConfiguration.Rules[0].ApplyServerSideEncryptionByDefault; assert.strictEqual(SSEAlgorithm, 'aws:kms'); assert.doesNotMatch(KMSMasterKeyID, SCAL_KMS_ARN_REG); @@ -146,241 +147,291 @@ describe('SSE KMS migration', () => { await helpers.cleanup(copyBkt); await helpers.cleanup(mpuCopyBkt); // Clean every bucket - await Promise.all(Object.values(bkts).map(async bkt => { - await helpers.cleanup(bkt.name); - return await helpers.cleanup(bkt.vname); - })); + await Promise.all( + Object.values(bkts).map(async bkt => { + await helpers.cleanup(bkt.name); + return await helpers.cleanup(bkt.vname); + }) + ); }); - scenarios.testCases.forEach(bktConf => describe(`bucket enc-bkt-${bktConf.name}`, () => { - let bkt = bkts[bktConf.name]; + scenarios.testCases.forEach(bktConf => + describe(`bucket enc-bkt-${bktConf.name}`, () => { + let bkt = bkts[bktConf.name]; - before(() => { - bkt = bkts[bktConf.name]; - }); - - if (bktConf.deleteSSE) { - beforeEach(async () => scenarios.deleteBucketSSEBeforeEach(bkt.name, log)); - } - - if (!bktConf.algo) { - it('GetBucketEncryption should return ServerSideEncryptionConfigurationNotFoundError', - async () => await scenarios.tests.getBucketSSEError(bkt.name)); + before(() => { + bkt = bkts[bktConf.name]; + }); - if (!bktConf.deleteSSE) { - it('should have non mandatory SSE in bucket MD as test init put an object with AES256', - async () => scenarios.tests.getBucketNonMandatorySSE(bkt.name, log, 'migration')); + if (bktConf.deleteSSE) { + beforeEach(async () => scenarios.deleteBucketSSEBeforeEach(bkt.name, log)); } - } else { - it('ensure old SSE KMS key setup', - async () => await 
scenarios.tests.getBucketSSE(bkt.name, log, bktConf.algo, - bktConf.masterKeyId ? bkt.kmsKeyInfo.masterKeyArn : null, 'migration')); - } - scenarios.testCasesObj.forEach(objConf => it(`should have pre uploaded object with SSE ${objConf.name}`, - async () => { - const obj = bkt.objs[objConf.name]; - // use MD here to avoid triggering a migration - const sseMD = await helpers.getObjectMDSSE(bkt.name, obj.name); - if (sseMD.SSEKMSKeyId) { - assert.doesNotMatch(sseMD.SSEKMSKeyId, SCAL_KMS_ARN_REG); - } - })); - - scenarios.testCasesObj.forEach(objConf => describe(`object enc-obj-${objConf.name}`, () => { - const obj = { - name: `enc-obj-${objConf.name}`, - kmsKeyInfo: null, - kmsKey: null, - body: `BODY(enc-obj-${objConf.name})`, - }; - /** to be used as source of copy */ - let objForCopy; - - before(async () => { - if (objConf.algo && objConf.masterKeyId) { - obj.kmsKeyInfo = await helpers.createKmsKey(log); - obj.kmsKey = objConf.arnPrefix - ? obj.kmsKeyInfo.masterKeyArn - : obj.kmsKeyInfo.masterKeyId; - } - objForCopy = bkt.objs[objConf.name]; - }); + if (!bktConf.algo) { + it('GetBucketEncryption should return ServerSideEncryptionConfigurationNotFoundError', async () => + await scenarios.tests.getBucketSSEError(bkt.name)); - const mpus = {}; - before('retrieve MPUS', async () => { - const listed = await helpers.s3.listMultipartUploads({ Bucket: bkt.name }).promise(); - assert.strictEqual(listed.IsTruncated, false, 'Too much MPUs, need to loop on pagination'); - for (const mpu of listed.Uploads) { - mpus[mpu.Key] = mpu.UploadId; + if (!bktConf.deleteSSE) { + it('should have non mandatory SSE in bucket MD as test init put an object with AES256', async () => + scenarios.tests.getBucketNonMandatorySSE(bkt.name, log, 'migration')); } - }); - - it(`should PutObject ${obj.name} overriding bucket SSE`, async () => { - await helpers.putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); - const assertion = { - Bucket: bkt.name, - Key: obj.name, - Body: obj.body, - }; - await assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, { put: true }); - }); - - // CopyObject scenarios - [ - { name: `${obj.name} into encrypted destination bucket`, forceBktSSE: true }, - { name: `${obj.name} into same bucket with object SSE config` }, - { name: `from encrypted source into ${obj.name} with object SSE config` }, - ].forEach(({ name, forceBktSSE }, index) => - it(`should CopyObject ${name}`, async () => - await scenarios.tests.copyObjectAndSSE( - { copyBkt, objForCopy, copyObj }, - { objConf, obj }, - { bktConf, bkt }, - { index, forceBktSSE, assertObjectSSEFct: assertObjectSSE }, - ))); - - // S3C-9996 The SSE was bugged with MPU, where the completion takes only the masterKeyId from bucket - // Fixed at the same time as migration, some scenario can pass only in newer version above migration - const optionalSkip = objConf.algo || bktConf.masterKeyId || (!bktConf.algo && !bktConf.deleteSSE) - ? 
it.skip - : it; - - // completed MPU should behave like regular objects - [ - { name: '', keySuffix: '', body: `${obj.body}-MPU1${obj.body}-MPU2` }, - { name: 'that has copy', keySuffix: 'copy', body: `BODY(copy)${obj.body}-MPU2` }, - { name: 'that has byte range copy', keySuffix: 'copyrange', body: 'copyBODY' }, - ].forEach(({ name, keySuffix, body }) => - optionalSkip(`should migrate completed MPU ${name}`, async () => { - const mpuKey = `${obj.name}-mpu${keySuffix}`; - const assertion = { Bucket: bkt.name, Key: mpuKey, Body: body }; - await assertObjectSSE( - assertion, { objConf, obj }, { bktConf, bkt }, fileArnPrefix); - })); - - async function prepareMPUTest(mpuKey, expectedExistingParts) { - const uploadId = mpus[mpuKey]; - assert(uploadId, 'Missing MPU, it should have been prepared before'); - const MPUBucketName = `${mpuBucketPrefix}${bkt.name}`; - const longMPUIdentifier = `overview${splitter}${mpuKey}${splitter}${uploadId}`; - const mpuOverviewMDSSE = await helpers.getObjectMDSSE(MPUBucketName, longMPUIdentifier); - - const existingParts = await helpers.s3.listParts({ - Bucket: bkt.name, Key: mpuKey, UploadId: uploadId }).promise(); - const partCount = (existingParts.Parts || []).length || 0; - assert.strictEqual(existingParts.IsTruncated, false, 'Too much parts, need to loop on pagination'); - assert.strictEqual(partCount, expectedExistingParts); - return { mpuKey, uploadId, mpuOverviewMDSSE, partCount, existingParts: existingParts.Parts || [] }; + } else { + it('ensure old SSE KMS key setup', async () => + await scenarios.tests.getBucketSSE( + bkt.name, + log, + bktConf.algo, + bktConf.masterKeyId ? bkt.kmsKeyInfo.masterKeyArn : null, + 'migration' + )); } - // ongoing MPU with regular uploadPart - [ - { - name: 'empty', - keySuffix: '-empty', - existingPartsCount: 0, - partsBody: [`${obj.body}-MPU1`, `${obj.body}-MPU2`], - body: `${obj.body}-MPU1${obj.body}-MPU2`, - }, - { - name: 'with 2 parts', - keySuffix: '', - existingPartsCount: 2, - partsBody: [`${obj.body}-MPU1`, `${obj.body}-MPU2`], - body: `${obj.body}-MPU1${obj.body}-MPU2`.repeat(2), - }, - ].forEach(({ name, keySuffix, existingPartsCount, partsBody, body }) => - optionalSkip(`should finish ongoing encrypted MPU ${name} by adding 2 parts`, async () => { - const { mpuKey, uploadId, mpuOverviewMDSSE, existingParts, partCount } = - await prepareMPUTest(`${obj.name}-migration-mpu${keySuffix}`, existingPartsCount); - const newParts = []; - for (const [index, body] of partsBody.entries()) { - const part = await scenarios.tests.mpuUploadPart({ - UploadId: uploadId, - Bucket: bkt.name, - Body: body, - Key: mpuKey, - PartNumber: partCount + index + 1, - }, mpuOverviewMDSSE, objConf.algo || bktConf.algo); - newParts.push(part); + scenarios.testCasesObj.forEach(objConf => + it(`should have pre uploaded object with SSE ${objConf.name}`, async () => { + const obj = bkt.objs[objConf.name]; + // use MD here to avoid triggering a migration + const sseMD = await helpers.getObjectMDSSE(bkt.name, obj.name); + if (sseMD.SSEKMSKeyId) { + assert.doesNotMatch(sseMD.SSEKMSKeyId, SCAL_KMS_ARN_REG); } - await scenarios.tests.mpuComplete( - { UploadId: uploadId, Bucket: bkt.name, Key: mpuKey }, - { existingParts, newParts }, - mpuOverviewMDSSE, objConf.algo || bktConf.algo); - const assertion = { - Bucket: bkt.name, - Key: mpuKey, - Body: body, + }) + ); + + scenarios.testCasesObj.forEach(objConf => + describe(`object enc-obj-${objConf.name}`, () => { + const obj = { + name: `enc-obj-${objConf.name}`, + kmsKeyInfo: null, + kmsKey: null, + 
body: `BODY(enc-obj-${objConf.name})`, }; - await assertObjectSSE( - assertion, { objConf, obj }, { bktConf, bkt }, fileArnPrefix); - })); - - optionalSkip('should finish ongoing encrypted MPU with 2 parts by copy and upload part', async () => { - const { mpuKey, uploadId, mpuOverviewMDSSE, existingParts, partCount } = - await prepareMPUTest(`${obj.name}-migration-mpucopy`, 2); - const part1 = await scenarios.tests.mpuUploadPartCopy({ - UploadId: uploadId, - Bucket: bkt.name, - Key: mpuKey, - PartNumber: partCount + 1, - CopySource: `${copyBkt}/${copyObj}`, - }, mpuOverviewMDSSE, objConf.algo || bktConf.algo); - const part2 = await scenarios.tests.mpuUploadPart({ - UploadId: uploadId, - Bucket: bkt.name, - Body: `${obj.body}-MPU2`, - Key: mpuKey, - PartNumber: partCount + 2, - }, mpuOverviewMDSSE, objConf.algo || bktConf.algo); - await scenarios.tests.mpuComplete( - { UploadId: uploadId, Bucket: bkt.name, Key: mpuKey }, - { existingParts, newParts: [part1, part2] }, - mpuOverviewMDSSE, objConf.algo || bktConf.algo); - const assertion = { - Bucket: bkt.name, - Key: mpuKey, - Body: `BODY(copy)${obj.body}-MPU2`.repeat(2), - }; - await assertObjectSSE( - assertion, { objConf, obj }, { bktConf, bkt }, fileArnPrefix); - }); - - optionalSkip('should finish ongoing encrypted MPU with 2 parts by 2 copy byte range', async () => { - const { mpuKey, uploadId, mpuOverviewMDSSE, existingParts, partCount } = - await prepareMPUTest(`${obj.name}-migration-mpucopyrange`, 2); - // source body is "BODY(copy)" - // [copy, BODY] - const sourceRanges = ['bytes=5-8', 'bytes=0-3']; - const newParts = []; - for (const [index, range] of sourceRanges.entries()) { - const part = await scenarios.tests.mpuUploadPartCopy({ - UploadId: uploadId, - Bucket: bkt.name, - Key: mpuKey, - PartNumber: partCount + index + 1, - CopySource: `${copyBkt}/${copyObj}`, - CopySourceRange: range, - }, mpuOverviewMDSSE, objConf.algo || bktConf.algo); - newParts.push(part); - } + /** to be used as source of copy */ + let objForCopy; + + before(async () => { + if (objConf.algo && objConf.masterKeyId) { + obj.kmsKeyInfo = await helpers.createKmsKey(log); + obj.kmsKey = objConf.arnPrefix ? 
obj.kmsKeyInfo.masterKeyArn : obj.kmsKeyInfo.masterKeyId; + } + objForCopy = bkt.objs[objConf.name]; + }); + + const mpus = {}; + before('retrieve MPUS', async () => { + const listed = await helpers.s3.listMultipartUploads({ Bucket: bkt.name }).promise(); + assert.strictEqual(listed.IsTruncated, false, 'Too much MPUs, need to loop on pagination'); + for (const mpu of listed.Uploads) { + mpus[mpu.Key] = mpu.UploadId; + } + }); + + it(`should PutObject ${obj.name} overriding bucket SSE`, async () => { + await helpers.putEncryptedObject(bkt.name, obj.name, objConf, obj.kmsKey, obj.body); + const assertion = { + Bucket: bkt.name, + Key: obj.name, + Body: obj.body, + }; + await assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, { put: true }); + }); + + // CopyObject scenarios + [ + { name: `${obj.name} into encrypted destination bucket`, forceBktSSE: true }, + { name: `${obj.name} into same bucket with object SSE config` }, + { name: `from encrypted source into ${obj.name} with object SSE config` }, + ].forEach(({ name, forceBktSSE }, index) => + it(`should CopyObject ${name}`, async () => + await scenarios.tests.copyObjectAndSSE( + { copyBkt, objForCopy, copyObj }, + { objConf, obj }, + { bktConf, bkt }, + { index, forceBktSSE, assertObjectSSEFct: assertObjectSSE } + )) + ); + + // S3C-9996 The SSE was bugged with MPU, where the completion takes only the masterKeyId from bucket + // Fixed at the same time as migration, some scenario can pass only in newer version above migration + const optionalSkip = + objConf.algo || bktConf.masterKeyId || (!bktConf.algo && !bktConf.deleteSSE) ? it.skip : it; + + // completed MPU should behave like regular objects + [ + { name: '', keySuffix: '', body: `${obj.body}-MPU1${obj.body}-MPU2` }, + { name: 'that has copy', keySuffix: 'copy', body: `BODY(copy)${obj.body}-MPU2` }, + { name: 'that has byte range copy', keySuffix: 'copyrange', body: 'copyBODY' }, + ].forEach(({ name, keySuffix, body }) => + optionalSkip(`should migrate completed MPU ${name}`, async () => { + const mpuKey = `${obj.name}-mpu${keySuffix}`; + const assertion = { Bucket: bkt.name, Key: mpuKey, Body: body }; + await assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, fileArnPrefix); + }) + ); + + async function prepareMPUTest(mpuKey, expectedExistingParts) { + const uploadId = mpus[mpuKey]; + assert(uploadId, 'Missing MPU, it should have been prepared before'); + const MPUBucketName = `${mpuBucketPrefix}${bkt.name}`; + const longMPUIdentifier = `overview${splitter}${mpuKey}${splitter}${uploadId}`; + const mpuOverviewMDSSE = await helpers.getObjectMDSSE(MPUBucketName, longMPUIdentifier); + + const existingParts = await helpers.s3 + .listParts({ + Bucket: bkt.name, + Key: mpuKey, + UploadId: uploadId, + }) + .promise(); + const partCount = (existingParts.Parts || []).length || 0; + assert.strictEqual( + existingParts.IsTruncated, + false, + 'Too much parts, need to loop on pagination' + ); + assert.strictEqual(partCount, expectedExistingParts); + return { + mpuKey, + uploadId, + mpuOverviewMDSSE, + partCount, + existingParts: existingParts.Parts || [], + }; + } - await scenarios.tests.mpuComplete( - { UploadId: uploadId, Bucket: bkt.name, Key: mpuKey }, - { existingParts, newParts }, - mpuOverviewMDSSE, objConf.algo || bktConf.algo); - const assertion = { - Bucket: bkt.name, - Key: mpuKey, - Body: 'copyBODY'.repeat(2), - }; - await assertObjectSSE( - assertion, { objConf, obj }, { bktConf, bkt }, fileArnPrefix); - }); - })); - })); + // ongoing MPU with regular 
uploadPart + [ + { + name: 'empty', + keySuffix: '-empty', + existingPartsCount: 0, + partsBody: [`${obj.body}-MPU1`, `${obj.body}-MPU2`], + body: `${obj.body}-MPU1${obj.body}-MPU2`, + }, + { + name: 'with 2 parts', + keySuffix: '', + existingPartsCount: 2, + partsBody: [`${obj.body}-MPU1`, `${obj.body}-MPU2`], + body: `${obj.body}-MPU1${obj.body}-MPU2`.repeat(2), + }, + ].forEach(({ name, keySuffix, existingPartsCount, partsBody, body }) => + optionalSkip(`should finish ongoing encrypted MPU ${name} by adding 2 parts`, async () => { + const { mpuKey, uploadId, mpuOverviewMDSSE, existingParts, partCount } = + await prepareMPUTest(`${obj.name}-migration-mpu${keySuffix}`, existingPartsCount); + const newParts = []; + for (const [index, body] of partsBody.entries()) { + const part = await scenarios.tests.mpuUploadPart( + { + UploadId: uploadId, + Bucket: bkt.name, + Body: body, + Key: mpuKey, + PartNumber: partCount + index + 1, + }, + mpuOverviewMDSSE, + objConf.algo || bktConf.algo + ); + newParts.push(part); + } + await scenarios.tests.mpuComplete( + { UploadId: uploadId, Bucket: bkt.name, Key: mpuKey }, + { existingParts, newParts }, + mpuOverviewMDSSE, + objConf.algo || bktConf.algo + ); + const assertion = { + Bucket: bkt.name, + Key: mpuKey, + Body: body, + }; + await assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, fileArnPrefix); + }) + ); + + optionalSkip( + 'should finish ongoing encrypted MPU with 2 parts by copy and upload part', + async () => { + const { mpuKey, uploadId, mpuOverviewMDSSE, existingParts, partCount } = + await prepareMPUTest(`${obj.name}-migration-mpucopy`, 2); + const part1 = await scenarios.tests.mpuUploadPartCopy( + { + UploadId: uploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: partCount + 1, + CopySource: `${copyBkt}/${copyObj}`, + }, + mpuOverviewMDSSE, + objConf.algo || bktConf.algo + ); + const part2 = await scenarios.tests.mpuUploadPart( + { + UploadId: uploadId, + Bucket: bkt.name, + Body: `${obj.body}-MPU2`, + Key: mpuKey, + PartNumber: partCount + 2, + }, + mpuOverviewMDSSE, + objConf.algo || bktConf.algo + ); + await scenarios.tests.mpuComplete( + { UploadId: uploadId, Bucket: bkt.name, Key: mpuKey }, + { existingParts, newParts: [part1, part2] }, + mpuOverviewMDSSE, + objConf.algo || bktConf.algo + ); + const assertion = { + Bucket: bkt.name, + Key: mpuKey, + Body: `BODY(copy)${obj.body}-MPU2`.repeat(2), + }; + await assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, fileArnPrefix); + } + ); + + optionalSkip('should finish ongoing encrypted MPU with 2 parts by 2 copy byte range', async () => { + const { mpuKey, uploadId, mpuOverviewMDSSE, existingParts, partCount } = await prepareMPUTest( + `${obj.name}-migration-mpucopyrange`, + 2 + ); + // source body is "BODY(copy)" + // [copy, BODY] + const sourceRanges = ['bytes=5-8', 'bytes=0-3']; + const newParts = []; + for (const [index, range] of sourceRanges.entries()) { + const part = await scenarios.tests.mpuUploadPartCopy( + { + UploadId: uploadId, + Bucket: bkt.name, + Key: mpuKey, + PartNumber: partCount + index + 1, + CopySource: `${copyBkt}/${copyObj}`, + CopySourceRange: range, + }, + mpuOverviewMDSSE, + objConf.algo || bktConf.algo + ); + newParts.push(part); + } + + await scenarios.tests.mpuComplete( + { UploadId: uploadId, Bucket: bkt.name, Key: mpuKey }, + { existingParts, newParts }, + mpuOverviewMDSSE, + objConf.algo || bktConf.algo + ); + const assertion = { + Bucket: bkt.name, + Key: mpuKey, + Body: 'copyBODY'.repeat(2), + }; + await 
assertObjectSSE(assertion, { objConf, obj }, { bktConf, bkt }, fileArnPrefix); + }); + }) + ); + }) + ); it('should finish ongoing encrypted MPU by copy parts from all bkt and objects matrice', async () => { const mpuKey = 'mpucopy'; @@ -403,39 +454,49 @@ describe('SSE KMS migration', () => { const uploadPromises = scenarios.testCases.reduce((acc, bktConf, bktIdx) => { const bkt = bkts[bktConf.name]; - return acc.concat(scenarios.testCasesObj.map(async (objConf, objIdx) => { - const obj = bkt.objs[objConf.name]; - - const partNumber = partCount + bktIdx * scenarios.testCasesObj.length + objIdx + 1; - const res = await helpers.s3.uploadPartCopy({ - ...copyPartArg, - PartNumber: partNumber, - CopySource: `${bkt.name}/${obj.name}`, - }).promise(); - - return { partNumber, body: obj.body, res: res.CopyPartResult }; - })); + return acc.concat( + scenarios.testCasesObj.map(async (objConf, objIdx) => { + const obj = bkt.objs[objConf.name]; + + const partNumber = partCount + bktIdx * scenarios.testCasesObj.length + objIdx + 1; + const res = await helpers.s3 + .uploadPartCopy({ + ...copyPartArg, + PartNumber: partNumber, + CopySource: `${bkt.name}/${obj.name}`, + }) + .promise(); + + return { partNumber, body: obj.body, res: res.CopyPartResult }; + }) + ); }, []); const parts = await Promise.all(uploadPromises); - await helpers.s3.completeMultipartUpload({ - UploadId: uploadId, - Bucket: mpuCopyBkt, - Key: mpuKey, - MultipartUpload: { - Parts: [ - ...existingParts.Parts.map(part => ({ PartNumber: part.PartNumber, ETag: part.ETag })), - ...parts.map(part => ({ PartNumber: part.partNumber, ETag: part.res.ETag })), - ], - }, - }).promise(); + await helpers.s3 + .completeMultipartUpload({ + UploadId: uploadId, + Bucket: mpuCopyBkt, + Key: mpuKey, + MultipartUpload: { + Parts: [ + ...existingParts.Parts.map(part => ({ PartNumber: part.PartNumber, ETag: part.ETag })), + ...parts.map(part => ({ PartNumber: part.partNumber, ETag: part.res.ETag })), + ], + }, + }) + .promise(); const assertion = { Bucket: mpuCopyBkt, Key: mpuKey, Body: parts.reduce((acc, part) => `${acc}${part.body}`, '').repeat(2), }; await assertObjectSSE( - assertion, { objConf: {}, obj: {} }, { bktConf: { algo: 'AES256' }, bkt: {} }, fileArnPrefix); + assertion, + { objConf: {}, obj: {} }, + { bktConf: { algo: 'AES256' }, bkt: {} }, + fileArnPrefix + ); }); }); diff --git a/tests/functional/sse-kms-migration/scenarios.js b/tests/functional/sse-kms-migration/scenarios.js index cf8f9e923b..d7c43891e1 100644 --- a/tests/functional/sse-kms-migration/scenarios.js +++ b/tests/functional/sse-kms-migration/scenarios.js @@ -46,16 +46,16 @@ async function assertObjectSSE( { bkt, bktConf }, // headers come from the command like putObject, CopyObject, MPUs... { arnPrefix = kms.arnPrefix, headers } = { arnPrefix: kms.arnPrefix }, - testCase, + testCase ) { const head = await helpers.s3.headObject({ Bucket, Key, VersionId }).promise(); const sseMD = await helpers.getObjectMDSSE(Bucket, Key); const arnPrefixReg = new RegExp(`^${arnPrefix}`); - const expectedAlgo = (objConf.algo || bktConf.algo) || - (testCase === 'after' && helpers.config.globalEncryptionEnabled && !bktConf.deleteSSE - ? 'AES256' - : undefined); + const expectedAlgo = + objConf.algo || + bktConf.algo || + (testCase === 'after' && helpers.config.globalEncryptionEnabled && !bktConf.deleteSSE ? 
'AES256' : undefined); // obj precedence over bkt assert.strictEqual(head.ServerSideEncryption, expectedAlgo); @@ -171,7 +171,7 @@ async function copyObjectAndSSE( { bktConf, bkt }, // migration has its own assert object function { index, forceBktSSE, assertObjectSSEFct = assertObjectSSE }, - testCase, + testCase ) { // variables are defined in before hook, can only be accessed inside test const tests = [ @@ -207,8 +207,7 @@ async function copyObjectAndSSE( const { SSEAlgorithm, KMSMasterKeyID } = await helpers.getBucketSSE(copyBkt); assert.strictEqual(headers.ServerSideEncryption, SSEAlgorithm); testCase !== 'before' && assert.strictEqual(headers.SSEKMSKeyId, KMSMasterKeyID); - const keyArn = `${KMSMasterKeyID && isScalityKmsArn(KMSMasterKeyID) - ? '' : kms.arnPrefix}${KMSMasterKeyID}`; + const keyArn = `${KMSMasterKeyID && isScalityKmsArn(KMSMasterKeyID) ? '' : kms.arnPrefix}${KMSMasterKeyID}`; const kmsKeyInfo = { masterKeyId: getKeyIdFromArn(keyArn), masterKeyArn: keyArn, @@ -226,7 +225,7 @@ async function copyObjectAndSSE( forcedSSE ? { objConf: {}, obj: {} } : { objConf, obj }, forcedSSE || { bktConf, bkt }, { headers: testCase === 'before' ? null : headers, put: true }, - testCase, + testCase ); } @@ -247,13 +246,15 @@ function assertMPUSSEHeaders(actual, expected, algo) { // before has no headers to assert async function mpuUploadPart({ UploadId, Bucket, Key, Body, PartNumber }, mpuOverviewMDSSE, algo, testCase) { - const part = await helpers.s3.uploadPart({ - UploadId, - Bucket, - Body, - Key, - PartNumber, - }).promise(); + const part = await helpers.s3 + .uploadPart({ + UploadId, + Bucket, + Body, + Key, + PartNumber, + }) + .promise(); testCase !== 'before' && assertMPUSSEHeaders(part, mpuOverviewMDSSE, algo); return part; } @@ -261,33 +262,39 @@ async function mpuUploadPart({ UploadId, Bucket, Key, Body, PartNumber }, mpuOve // before has no headers to assert async function mpuUploadPartCopy( { UploadId, Bucket, Key, PartNumber, CopySource, CopySourceRange }, - mpuOverviewMDSSE, algo, testCase + mpuOverviewMDSSE, + algo, + testCase ) { - const part = await helpers.s3.uploadPartCopy({ - UploadId, - Bucket, - Key, - PartNumber, - CopySource, - CopySourceRange, - }).promise(); + const part = await helpers.s3 + .uploadPartCopy({ + UploadId, + Bucket, + Key, + PartNumber, + CopySource, + CopySourceRange, + }) + .promise(); testCase !== 'before' && assertMPUSSEHeaders(part, mpuOverviewMDSSE, algo); return part; } // before has no headers to assert async function mpuComplete({ UploadId, Bucket, Key }, { existingParts, newParts }, mpuOverviewMDSSE, algo, testCase) { - const complete = await helpers.s3.completeMultipartUpload({ - UploadId, - Bucket, - Key, - MultipartUpload: { - Parts: [ - ...existingParts.map(part => ({ PartNumber: part.PartNumber, ETag: part.ETag })), - ...newParts.map((part, idx) => ({ PartNumber: existingParts.length + idx + 1, ETag: part.ETag })), - ], - }, - }).promise(); + const complete = await helpers.s3 + .completeMultipartUpload({ + UploadId, + Bucket, + Key, + MultipartUpload: { + Parts: [ + ...existingParts.map(part => ({ PartNumber: part.PartNumber, ETag: part.ETag })), + ...newParts.map((part, idx) => ({ PartNumber: existingParts.length + idx + 1, ETag: part.ETag })), + ], + }, + }) + .promise(); testCase !== 'before' && assertMPUSSEHeaders(complete, mpuOverviewMDSSE, algo); return complete; } diff --git a/tests/functional/utilities/reportHandler.js b/tests/functional/utilities/reportHandler.js index dc5948eeb7..762c4722bd 100644 --- 
a/tests/functional/utilities/reportHandler.js +++ b/tests/functional/utilities/reportHandler.js @@ -142,16 +142,16 @@ function requestHandler(req, res) { } } else { switch (req.url) { - case '/_/crr/status': - case '/_/ingestion/status': - res.write(JSON.stringify(expectedStatusResults)); - break; - case '/_/crr/resume/all': - case '/_/ingestion/resume/all': - res.write(JSON.stringify(expectedScheduleResults)); - break; - default: - break; + case '/_/crr/status': + case '/_/ingestion/status': + res.write(JSON.stringify(expectedStatusResults)); + break; + case '/_/crr/resume/all': + case '/_/ingestion/resume/all': + res.write(JSON.stringify(expectedScheduleResults)); + break; + default: + break; } } res.end(); @@ -168,8 +168,7 @@ function requestHandler(req, res) { describe('Test Request Failure Cases', () => { before(done => { - httpServer = http.createServer(requestFailHandler) - .listen(testPort); + httpServer = http.createServer(requestFailHandler).listen(testPort); httpServer.on('listening', done); httpServer.on('error', err => { process.stdout.write(`https server: ${err.stack}\n`); @@ -181,8 +180,7 @@ function requestHandler(req, res) { httpServer.close(); }); - it('should return empty object if a request error occurs', - done => { + it('should return empty object if a request error occurs', done => { const endpoint = 'http://nonexists:4242'; item.method(endpoint, 'all', logger, (err, res) => { assert.ifError(err); @@ -191,8 +189,7 @@ function requestHandler(req, res) { }); }); - it('should return empty object if response status code is >= 400', - done => { + it('should return empty object if response status code is >= 400', done => { const endpoint = 'http://localhost:4242'; item.method(endpoint, 'all', logger, (err, res) => { assert.ifError(err); @@ -205,8 +202,7 @@ function requestHandler(req, res) { describe('Test Request Success Cases', () => { const endpoint = 'http://localhost:4242'; before(done => { - httpServer = http.createServer(requestHandler) - .listen(testPort); + httpServer = http.createServer(requestHandler).listen(testPort); httpServer.on('listening', done); httpServer.on('error', err => { process.stdout.write(`https server: ${err.stack}\n`); @@ -221,8 +217,7 @@ function requestHandler(req, res) { it('should return correct location metrics', done => { item.method(endpoint, 'site1', logger, (err, res) => { assert.ifError(err); - assert.deepStrictEqual( - res, item.result.byLocation.site1); + assert.deepStrictEqual(res, item.result.byLocation.site1); done(); }); }); @@ -256,27 +251,33 @@ function requestHandler(req, res) { it('should return correct results', done => { if (item.method.name === 'getIngestionMetrics') { const sites = ['site1', 'site2']; - item.method(sites, logger, (err, res) => { - assert.ifError(err); - assert.deepStrictEqual(res, item.result); - done(); - }, config); + item.method( + sites, + logger, + (err, res) => { + assert.ifError(err); + assert.deepStrictEqual(res, item.result); + done(); + }, + config + ); } else { - item.method(logger, (err, res) => { - assert.ifError(err); - assert.deepStrictEqual(res, item.result); - done(); - }, config); + item.method( + logger, + (err, res) => { + assert.ifError(err); + assert.deepStrictEqual(res, item.result); + done(); + }, + config + ); } }); }); }); }); -[ - { method: getReplicationStates }, - { method: getIngestionStates }, -].forEach(item => { +[{ method: getReplicationStates }, { method: getIngestionStates }].forEach(item => { describe(`reportHandler::${item.method.name}`, function testSuite() { 
this.timeout(20000); const testPort = '4242'; @@ -284,8 +285,7 @@ function requestHandler(req, res) { describe('Test Request Failure Cases', () => { before(done => { - httpServer = http.createServer(requestFailHandler) - .listen(testPort); + httpServer = http.createServer(requestFailHandler).listen(testPort); httpServer.on('listening', done); httpServer.on('error', err => { process.stdout.write(`https server: ${err.stack}\n`); @@ -297,29 +297,34 @@ function requestHandler(req, res) { httpServer.close(); }); - it('should return empty object if a request error occurs', - done => { - item.method(logger, (err, res) => { - assert.ifError(err); - assert.deepStrictEqual(res, {}); - done(); - }, { backbeat: { host: 'nonexisthost', port: testPort } }); + it('should return empty object if a request error occurs', done => { + item.method( + logger, + (err, res) => { + assert.ifError(err); + assert.deepStrictEqual(res, {}); + done(); + }, + { backbeat: { host: 'nonexisthost', port: testPort } } + ); }); - it('should return empty object if response status code is >= 400', - done => { - item.method(logger, (err, res) => { - assert.ifError(err); - assert.deepStrictEqual(res, {}); - done(); - }, { backbeat: { host: 'localhost', port: testPort } }); + it('should return empty object if response status code is >= 400', done => { + item.method( + logger, + (err, res) => { + assert.ifError(err); + assert.deepStrictEqual(res, {}); + done(); + }, + { backbeat: { host: 'localhost', port: testPort } } + ); }); }); describe('Test Request Success Cases', () => { before(done => { - httpServer = http.createServer(requestHandler) - .listen(testPort); + httpServer = http.createServer(requestHandler).listen(testPort); httpServer.on('listening', done); httpServer.on('error', err => { process.stdout.write(`https server: ${err.stack}\n`); @@ -332,20 +337,24 @@ function requestHandler(req, res) { }); it('should return correct results', done => { - item.method(logger, (err, res) => { - const expectedResults = { - states: { - site1: 'enabled', - site2: 'disabled', - }, - schedules: { - site2: expectedScheduleResults.site2, - }, - }; - assert.ifError(err); - assert.deepStrictEqual(res, expectedResults); - done(); - }, { backbeat: { host: 'localhost', port: testPort } }); + item.method( + logger, + (err, res) => { + const expectedResults = { + states: { + site1: 'enabled', + site2: 'disabled', + }, + schedules: { + site2: expectedScheduleResults.site2, + }, + }; + assert.ifError(err); + assert.deepStrictEqual(res, expectedResults); + done(); + }, + { backbeat: { host: 'localhost', port: testPort } } + ); }); }); }); @@ -358,8 +367,7 @@ describe('reportHanlder::getIngestionInfo', function testSuite() { describe('Test Request Success Cases', () => { before(done => { - httpServer = http.createServer(requestHandler) - .listen(testPort); + httpServer = http.createServer(requestHandler).listen(testPort); httpServer.on('listening', done); httpServer.on('error', err => { process.stdout.write(`https server: ${err.stack}\n`); @@ -372,25 +380,28 @@ describe('reportHanlder::getIngestionInfo', function testSuite() { }); it('should return correct results', done => { - getIngestionInfo(logger, (err, res) => { - const expectedStatusResults = { - states: { - site1: 'enabled', - site2: 'disabled', - }, - schedules: { - site2: expectedScheduleResults.site2, - }, - }; - assert.ifError(err); + getIngestionInfo( + logger, + (err, res) => { + const expectedStatusResults = { + states: { + site1: 'enabled', + site2: 'disabled', + }, + schedules: { + 
site2: expectedScheduleResults.site2, + }, + }; + assert.ifError(err); - assert(res.metrics); - assert(res.status); - assert.deepStrictEqual(res.status, expectedStatusResults); - assert.deepStrictEqual(res.metrics, - ingestionExpectedResultsRef); - done(); - }, config); + assert(res.metrics); + assert(res.status); + assert.deepStrictEqual(res.status, expectedStatusResults); + assert.deepStrictEqual(res.metrics, ingestionExpectedResultsRef); + done(); + }, + config + ); }); it('should return empty if no ingestion locations exist', done => { diff --git a/tests/locationConfig/locationConfigLegacy.json b/tests/locationConfig/locationConfigLegacy.json index bd22731bfd..51d6534a9f 100644 --- a/tests/locationConfig/locationConfigLegacy.json +++ b/tests/locationConfig/locationConfigLegacy.json @@ -1,4 +1,3 @@ - { "legacy": { "type": "mem", diff --git a/tests/multipleBackend/backendHealthcheckResponse.js b/tests/multipleBackend/backendHealthcheckResponse.js index b89bbcc7f9..3446ef9e86 100644 --- a/tests/multipleBackend/backendHealthcheckResponse.js +++ b/tests/multipleBackend/backendHealthcheckResponse.js @@ -2,8 +2,7 @@ const assert = require('assert'); const DummyRequestLogger = require('../unit/helpers').DummyRequestLogger; -const clientCheck - = require('../../lib/utilities/healthcheckHandler').clientCheck; +const clientCheck = require('../../lib/utilities/healthcheckHandler').clientCheck; const { config } = require('../../lib/Config'); const { getAzureClient, @@ -16,55 +15,59 @@ const locConstraints = Object.keys(config.locationConstraints); const azureClient = getAzureClient(); describe('Healthcheck response', () => { - it('should return result for every location constraint in ' + - 'locationConfig and every external locations with flightCheckOnStartUp ' + - 'set to true', done => { - clientCheck(true, log, (err, results) => { - const resultKeys = Object.keys(results); - locConstraints.forEach(constraint => { - if (constraint === 'location-dmf-v1') { - // FIXME: location-dmf-v1 is not in results, see CLDSRV-440 - return; - } - assert(resultKeys.includes(constraint), `constraint: ${constraint} not in results: ${resultKeys}`); + it( + 'should return result for every location constraint in ' + + 'locationConfig and every external locations with flightCheckOnStartUp ' + + 'set to true', + done => { + clientCheck(true, log, (err, results) => { + const resultKeys = Object.keys(results); + locConstraints.forEach(constraint => { + if (constraint === 'location-dmf-v1') { + // FIXME: location-dmf-v1 is not in results, see CLDSRV-440 + return; + } + assert(resultKeys.includes(constraint), `constraint: ${constraint} not in results: ${resultKeys}`); + }); + done(); }); - done(); - }); - }); - it('should return no error with flightCheckOnStartUp set to false', - done => { + } + ); + it('should return no error with flightCheckOnStartUp set to false', done => { clientCheck(false, log, err => { - assert.strictEqual(err, null, - `Expected success but got error ${err}`); + assert.strictEqual(err, null, `Expected success but got error ${err}`); done(); }); }); - it('should return result for every location constraint in ' + - 'locationConfig and at least one of every external locations with ' + - 'flightCheckOnStartUp set to false', done => { - clientCheck(false, log, (err, results) => { - assert.notStrictEqual(results.length, locConstraints.length); - locConstraints.forEach(constraint => { - if (constraint === 'location-dmf-v1') { - // FIXME: location-dmf-v1 is not in results, see CLDSRV-440 - return; - } - if 
(Object.keys(results).indexOf(constraint) === -1) { - const locationType = config - .locationConstraints[constraint].type; - assert(Object.keys(results).some(result => - config.locationConstraints[result].type - === locationType)); - } + it( + 'should return result for every location constraint in ' + + 'locationConfig and at least one of every external locations with ' + + 'flightCheckOnStartUp set to false', + done => { + clientCheck(false, log, (err, results) => { + assert.notStrictEqual(results.length, locConstraints.length); + locConstraints.forEach(constraint => { + if (constraint === 'location-dmf-v1') { + // FIXME: location-dmf-v1 is not in results, see CLDSRV-440 + return; + } + if (Object.keys(results).indexOf(constraint) === -1) { + const locationType = config.locationConstraints[constraint].type; + assert( + Object.keys(results).some( + result => config.locationConstraints[result].type === locationType + ) + ); + } + }); + done(); }); - done(); - }); - }); + } + ); // FIXME: does not pass, see CLDSRV-441 describe.skip('Azure container creation', () => { - const containerName = - getAzureContainerName(azureLocationNonExistContainer); + const containerName = getAzureContainerName(azureLocationNonExistContainer); beforeEach(async () => { await azureClient.getContainerClient(containerName).deleteIfExists(); @@ -74,44 +77,57 @@ describe('Healthcheck response', () => { await azureClient.getContainerClient(containerName).deleteIfExists(); }); - it('should create an azure location\'s container if it is missing ' + - 'and the check is a flightCheckOnStartUp', done => { - clientCheck(true, log, (err, results) => { - const azureLocationNonExistContainerError = - results[azureLocationNonExistContainer].error; - if (err) { - assert(err.is.InternalError, `got unexpected err in clientCheck: ${err}`); - assert(azureLocationNonExistContainerError.startsWith( - 'The specified container is being deleted.')); - return done(); - } - return azureClient.getContainerClient(containerName).getProperties( - azureResult => { - assert.strictEqual(azureResult.metadata.name, containerName); + it( + "should create an azure location's container if it is missing " + 'and the check is a flightCheckOnStartUp', + done => { + clientCheck(true, log, (err, results) => { + const azureLocationNonExistContainerError = results[azureLocationNonExistContainer].error; + if (err) { + assert(err.is.InternalError, `got unexpected err in clientCheck: ${err}`); + assert( + azureLocationNonExistContainerError.startsWith('The specified container is being deleted.') + ); return done(); - }, err => { - assert.strictEqual(err, null, 'got unexpected err ' + - `heading azure container: ${err}`); - return done(); - }); - }); - }); + } + return azureClient.getContainerClient(containerName).getProperties( + azureResult => { + assert.strictEqual(azureResult.metadata.name, containerName); + return done(); + }, + err => { + assert.strictEqual(err, null, 'got unexpected err ' + `heading azure container: ${err}`); + return done(); + } + ); + }); + } + ); - it('should not create an azure location\'s container even if it is ' + - 'missing if the check is not a flightCheckOnStartUp', done => { - clientCheck(false, log, err => { - assert.strictEqual(err, null, - `got unexpected err in clientCheck: ${err}`); - return azureClient.getContainerClient(containerName).getProperties().then( - () => { - assert(err, 'Expected err but did not find one'); - return done(); - }, err => { - assert.strictEqual(err.code, 'NotFound', - `got unexpected err code in 
clientCheck: ${err.code}`); - return done(); - }); - }); - }); + it( + "should not create an azure location's container even if it is " + + 'missing if the check is not a flightCheckOnStartUp', + done => { + clientCheck(false, log, err => { + assert.strictEqual(err, null, `got unexpected err in clientCheck: ${err}`); + return azureClient + .getContainerClient(containerName) + .getProperties() + .then( + () => { + assert(err, 'Expected err but did not find one'); + return done(); + }, + err => { + assert.strictEqual( + err.code, + 'NotFound', + `got unexpected err code in clientCheck: ${err.code}` + ); + return done(); + } + ); + }); + } + ); }); }); diff --git a/tests/multipleBackend/multipartUpload.js b/tests/multipleBackend/multipartUpload.js index 90a3d9a736..2d8d57f996 100644 --- a/tests/multipleBackend/multipartUpload.js +++ b/tests/multipleBackend/multipartUpload.js @@ -5,10 +5,8 @@ const { parseString } = require('xml2js'); const { models } = require('arsenal'); const BucketInfo = models.BucketInfo; -const { getRealAwsConfig } = - require('../functional/aws-node-sdk/test/support/awsConfig'); -const { cleanup, DummyRequestLogger, makeAuthInfo, versioningTestUtils } = - require('../unit/helpers'); +const { getRealAwsConfig } = require('../functional/aws-node-sdk/test/support/awsConfig'); +const { cleanup, DummyRequestLogger, makeAuthInfo, versioningTestUtils } = require('../unit/helpers'); const DummyRequest = require('../unit/DummyRequest'); const { config } = require('../../lib/Config'); const { metadata } = require('arsenal').storage.metadata.inMemory.metadata; @@ -17,13 +15,11 @@ const { bucketPut } = require('../../lib/api/bucketPut'); const objectPut = require('../../lib/api/objectPut'); const objectGet = require('../../lib/api/objectGet'); const bucketPutVersioning = require('../../lib/api/bucketPutVersioning'); -const initiateMultipartUpload = - require('../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../lib/api/initiateMultipartUpload'); const multipartDelete = require('../../lib/api/multipartDelete'); const objectPutCopyPart = require('../../lib/api/objectPutCopyPart'); const objectPutPart = require('../../lib/api/objectPutPart'); -const completeMultipartUpload = - require('../../lib/api/completeMultipartUpload'); +const completeMultipartUpload = require('../../lib/api/completeMultipartUpload'); const listParts = require('../../lib/api/listParts'); const listMultipartUploads = require('../../lib/api/listMultipartUploads'); const constants = require('../../constants'); @@ -44,15 +40,13 @@ const namespace = 'default'; const bucketName = 'bucketname'; const mpuBucket = `${constants.mpuBucketPrefix}${bucketName}`; const awsBucket = config.locationConstraints[awsLocation].details.bucketName; -const awsMismatchBucket = config.locationConstraints[awsLocationMismatch] - .details.bucketName; +const awsMismatchBucket = config.locationConstraints[awsLocationMismatch].details.bucketName; const smallBody = Buffer.from('I am a body', 'utf8'); const bigBody = Buffer.alloc(10485760); const locMetaHeader = 'scal-location-constraint'; -const isCEPH = (config.locationConstraints[awsLocation] - .details.awsEndpoint !== undefined && - config.locationConstraints[awsLocation] - .details.awsEndpoint.indexOf('amazon') === -1); +const isCEPH = + config.locationConstraints[awsLocation].details.awsEndpoint !== undefined && + config.locationConstraints[awsLocation].details.awsEndpoint.indexOf('amazon') === -1; const itSkipCeph = isCEPH ? 
it.skip : it; const bucketPutRequest = { bucketName, @@ -67,7 +61,8 @@ const bucketPutRequest = { const awsETag = 'be747eb4b75517bf6b3cf7c5fbb62f3a'; const awsETagBigObj = 'f1c9645dbc14efddc7d8a322685f26eb'; const tagSet = 'key1=value1&key2=value2'; -const completeBody = '' + +const completeBody = + '' + '' + '1' + `"${awsETagBigObj}"` + @@ -85,29 +80,38 @@ const basicParams = { }; function getObjectGetRequest(objectKey) { - return Object.assign({ - objectKey, - headers: {}, - url: `/${bucketName}/${objectKey}`, - }, basicParams); + return Object.assign( + { + objectKey, + headers: {}, + url: `/${bucketName}/${objectKey}`, + }, + basicParams + ); } function getDeleteParams(objectKey, uploadId) { - return Object.assign({ - url: `/${objectKey}?uploadId=${uploadId}`, - query: { uploadId }, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - }, basicParams); + return Object.assign( + { + url: `/${objectKey}?uploadId=${uploadId}`, + query: { uploadId }, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + }, + basicParams + ); } function getPartParams(objectKey, uploadId, partNumber) { - return Object.assign({ - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=${partNumber}&uploadId=${uploadId}`, - query: { partNumber, uploadId }, - }, basicParams); + return Object.assign( + { + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=${partNumber}&uploadId=${uploadId}`, + query: { partNumber, uploadId }, + }, + basicParams + ); } function _getOverviewKey(objectKey, uploadId) { @@ -115,23 +119,29 @@ function _getOverviewKey(objectKey, uploadId) { } function getCompleteParams(objectKey, uploadId) { - return Object.assign({ - objectKey, - parsedHost: 's3.amazonaws.com', - headers: { host: `${bucketName}.s3.amazonaws.com` }, - post: completeBody, - url: `/${objectKey}?uploadId=${uploadId}`, - query: { uploadId }, - }, basicParams); + return Object.assign( + { + objectKey, + parsedHost: 's3.amazonaws.com', + headers: { host: `${bucketName}.s3.amazonaws.com` }, + post: completeBody, + url: `/${objectKey}?uploadId=${uploadId}`, + query: { uploadId }, + }, + basicParams + ); } function getListParams(objectKey, uploadId) { - return Object.assign({ - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?uploadId=${uploadId}`, - query: { uploadId }, - }, basicParams); + return Object.assign( + { + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?uploadId=${uploadId}`, + query: { uploadId }, + }, + basicParams + ); } function getAwsParams(objectKey) { @@ -145,10 +155,8 @@ function getAwsParamsBucketNotMatch(objectKey) { function assertMpuInitResults(initResult, key, cb) { parseString(initResult, (err, json) => { assert.equal(err, null, `Error parsing mpu init results: ${err}`); - assert.strictEqual(json.InitiateMultipartUploadResult - .Bucket[0], bucketName); - assert.strictEqual(json.InitiateMultipartUploadResult - .Key[0], key); + assert.strictEqual(json.InitiateMultipartUploadResult.Bucket[0], bucketName); + assert.strictEqual(json.InitiateMultipartUploadResult.Key[0], key); assert(json.InitiateMultipartUploadResult.UploadId[0]); cb(json.InitiateMultipartUploadResult.UploadId[0]); }); @@ -156,16 +164,13 @@ function assertMpuInitResults(initResult, key, cb) { function assertMpuCompleteResults(compResult, objectKey) { parseString(compResult, (err, json) => { - assert.equal(err, null, - `Error parsing mpu 
complete results: ${err}`); + assert.equal(err, null, `Error parsing mpu complete results: ${err}`); assert.strictEqual( json.CompleteMultipartUploadResult.Location[0], - `http://${bucketName}.s3.amazonaws.com/${objectKey}`); - assert.strictEqual( - json.CompleteMultipartUploadResult.Bucket[0], - bucketName); - assert.strictEqual( - json.CompleteMultipartUploadResult.Key[0], objectKey); + `http://${bucketName}.s3.amazonaws.com/${objectKey}` + ); + assert.strictEqual(json.CompleteMultipartUploadResult.Bucket[0], bucketName); + assert.strictEqual(json.CompleteMultipartUploadResult.Key[0], objectKey); const MD = metadata.keyMaps.get(bucketName).get(objectKey); assert(MD); }); @@ -176,53 +181,42 @@ function assertListResults(listResult, testAttribute, uploadId, objectKey) { assert.equal(err, null, `Error parsing list part results: ${err}`); assert.strictEqual(json.ListPartsResult.Key[0], objectKey); assert.strictEqual(json.ListPartsResult.UploadId[0], uploadId); - assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], - authInfo.getCanonicalID()); + assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], authInfo.getCanonicalID()); // attributes to test specific to PartNumberMarker being set // in listParts if (testAttribute === 'partNumMarker') { - assert.strictEqual(json.ListPartsResult.NextPartNumberMarker, - undefined); + assert.strictEqual(json.ListPartsResult.NextPartNumberMarker, undefined); assert.strictEqual(json.ListPartsResult.IsTruncated[0], 'false'); assert.strictEqual(json.ListPartsResult.Part.length, 1); assert.strictEqual(json.ListPartsResult.PartNumberMarker[0], '1'); // data of second part put assert.strictEqual(json.ListPartsResult.Part[0].PartNumber[0], '2'); - assert.strictEqual(json.ListPartsResult.Part[0].ETag[0], - `"${awsETag}"`); + assert.strictEqual(json.ListPartsResult.Part[0].ETag[0], `"${awsETag}"`); assert.strictEqual(json.ListPartsResult.Part[0].Size[0], '11'); } else { // common attributes to test if MaxParts set or // neither MaxParts nor PartNumberMarker set - assert.strictEqual(json.ListPartsResult.PartNumberMarker, - undefined); + assert.strictEqual(json.ListPartsResult.PartNumberMarker, undefined); assert.strictEqual(json.ListPartsResult.Part[0].PartNumber[0], '1'); - assert.strictEqual(json.ListPartsResult.Part[0].ETag[0], - `"${awsETagBigObj}"`); - assert.strictEqual(json.ListPartsResult.Part[0].Size[0], - '10485760'); + assert.strictEqual(json.ListPartsResult.Part[0].ETag[0], `"${awsETagBigObj}"`); + assert.strictEqual(json.ListPartsResult.Part[0].Size[0], '10485760'); // attributes to test specific to MaxParts being set in listParts if (testAttribute === 'maxParts') { - assert.strictEqual(json.ListPartsResult.NextPartNumberMarker[0], - '1'); + assert.strictEqual(json.ListPartsResult.NextPartNumberMarker[0], '1'); assert.strictEqual(json.ListPartsResult.IsTruncated[0], 'true'); assert.strictEqual(json.ListPartsResult.Part.length, 1); assert.strictEqual(json.ListPartsResult.MaxParts[0], '1'); } else { // attributes to test if neither MaxParts nor // PartNumberMarker set - assert.strictEqual(json.ListPartsResult.NextPartNumberMarker, - undefined); - assert.strictEqual(json.ListPartsResult.IsTruncated[0], - 'false'); + assert.strictEqual(json.ListPartsResult.NextPartNumberMarker, undefined); + assert.strictEqual(json.ListPartsResult.IsTruncated[0], 'false'); assert.strictEqual(json.ListPartsResult.Part.length, 2); assert.strictEqual(json.ListPartsResult.MaxParts[0], '1000'); - assert.strictEqual(json.ListPartsResult.Part[1].PartNumber[0], - '2'); 
- assert.strictEqual(json.ListPartsResult.Part[1].ETag[0], - `"${awsETag}"`); + assert.strictEqual(json.ListPartsResult.Part[1].PartNumber[0], '2'); + assert.strictEqual(json.ListPartsResult.Part[1].ETag[0], `"${awsETag}"`); assert.strictEqual(json.ListPartsResult.Part[1].Size[0], '11'); } } @@ -239,15 +233,12 @@ function _getZenkoObjectKey(objectKey) { function assertObjOnBackend(expectedBackend, objectKey, cb) { const zenkoObjectKey = _getZenkoObjectKey(objectKey); - return objectGet(authInfo, getObjectGetRequest(zenkoObjectKey), false, log, - (err, result, metaHeaders) => { + return objectGet(authInfo, getObjectGetRequest(zenkoObjectKey), false, log, (err, result, metaHeaders) => { assert.equal(err, null, `Error getting object on S3: ${err}`); assert.strictEqual(metaHeaders[`x-amz-meta-${locMetaHeader}`], expectedBackend); if (expectedBackend === awsLocation) { - return s3.headObject({ Bucket: awsBucket, Key: objectKey }, - (err, result) => { - assert.equal(err, null, 'Error on headObject call to AWS: ' + - `${err}`); + return s3.headObject({ Bucket: awsBucket, Key: objectKey }, (err, result) => { + assert.equal(err, null, 'Error on headObject call to AWS: ' + `${err}`); assert.strictEqual(result.Metadata[locMetaHeader], awsLocation); return cb(); }); @@ -275,14 +266,12 @@ function mpuSetup(location, key, cb) { bucketName, namespace, objectKey: key, - headers: { 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-meta-scal-location-constraint': location }, + headers: { host: `${bucketName}.s3.amazonaws.com`, 'x-amz-meta-scal-location-constraint': location }, url: `/${key}?uploads`, parsedHost: 'localhost', actionImplicitDenies: false, }; - initiateMultipartUpload(authInfo, initiateRequest, log, - (err, result) => { + initiateMultipartUpload(authInfo, initiateRequest, log, (err, result) => { assert.strictEqual(err, null, 'Error initiating MPU'); assertMpuInitResults(result, key, uploadId => { putParts(uploadId, key, () => { @@ -293,14 +282,17 @@ function mpuSetup(location, key, cb) { } function putObject(putBackend, objectKey, cb) { - const putParams = Object.assign({ - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-meta-scal-location-constraint': putBackend, + const putParams = Object.assign( + { + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-meta-scal-location-constraint': putBackend, + }, + url: '/', + objectKey, }, - url: '/', - objectKey, - }, basicParams); + basicParams + ); const objectPutRequest = new DummyRequest(putParams, smallBody); return objectPut(authInfo, objectPutRequest, undefined, log, err => { assert.equal(err, null, `Error putting object to ${putBackend} ${err}`); @@ -317,15 +309,19 @@ function abortMPU(uploadId, awsParams, cb) { } function abortMultipleMpus(backendsInfo, callback) { - async.forEach(backendsInfo, (backend, cb) => { - const delParams = getDeleteParams(backend.key, backend.uploadId); - multipartDelete(authInfo, delParams, log, err => { - cb(err); - }); - }, err => { - assert.equal(err, null, `Error aborting MPU: ${err}`); - callback(); - }); + async.forEach( + backendsInfo, + (backend, cb) => { + const delParams = getDeleteParams(backend.key, backend.uploadId); + multipartDelete(authInfo, delParams, log, err => { + cb(err); + }); + }, + err => { + assert.equal(err, null, `Error aborting MPU: ${err}`); + callback(); + } + ); } describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { @@ -348,15 +344,16 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { bucketName, namespace, 
objectKey, - headers: { 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-meta-scal-location-constraint': `${awsLocation}` }, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-meta-scal-location-constraint': `${awsLocation}`, + }, url: `/${objectKey}?uploads`, parsedHost: 'localhost', actionImplicitDenies: false, }; - initiateMultipartUpload(authInfo, initiateRequest, log, - (err, result) => { + initiateMultipartUpload(authInfo, initiateRequest, log, (err, result) => { assert.strictEqual(err, null, 'Error initiating MPU'); assertMpuInitResults(result, objectKey, uploadId => { abortMPU(uploadId, getAwsParams(objectKey), done); @@ -364,23 +361,22 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { }); }); - it('should initiate a multipart upload on AWS location with ' + - 'bucketMatch equals false', done => { + it('should initiate a multipart upload on AWS location with ' + 'bucketMatch equals false', done => { const objectKey = `key-${Date.now()}`; const initiateRequest = { bucketName, namespace, objectKey, - headers: { 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-meta-scal-location-constraint': - `${awsLocationMismatch}` }, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-meta-scal-location-constraint': `${awsLocationMismatch}`, + }, url: `/${objectKey}?uploads`, parsedHost: 'localhost', actionImplicitDenies: false, }; - initiateMultipartUpload(authInfo, initiateRequest, log, - (err, result) => { + initiateMultipartUpload(authInfo, initiateRequest, log, (err, result) => { assert.strictEqual(err, null, 'Error initiating MPU'); assertMpuInitResults(result, objectKey, uploadId => { abortMPU(uploadId, getAwsParamsBucketNotMatch(objectKey), done); @@ -395,7 +391,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { namespace, objectKey, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-meta-scal-location-constraint': `${awsLocation}`, 'x-amz-tagging': tagSet, }, @@ -404,8 +400,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { actionImplicitDenies: false, }; - initiateMultipartUpload(authInfo, initiateRequest, log, - (err, result) => { + initiateMultipartUpload(authInfo, initiateRequest, log, (err, result) => { assert.ifError(err); assertMpuInitResults(result, objectKey, uploadId => { abortMPU(uploadId, getAwsParams(objectKey), done); @@ -425,8 +420,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { }); }); - it('should list the parts of a multipart upload on real AWS location ' + - 'with bucketMatch set to false', done => { + it('should list the parts of a multipart upload on real AWS location ' + 'with bucketMatch set to false', done => { const objectKey = `key-${Date.now()}`; mpuSetup(awsLocationMismatch, objectKey, uploadId => { const listParams = getListParams(objectKey, uploadId); @@ -438,8 +432,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { }); }); - it('should only return number of parts equal to specified maxParts', - function itF(done) { + it('should only return number of parts equal to specified maxParts', function itF(done) { this.timeout(90000); const objectKey = `key-${Date.now()}`; mpuSetup(awsLocation, objectKey, uploadId => { @@ -473,7 +466,8 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { abortMPU(uploadId, getAwsParams(objectKey), () => { const listParams = getListParams(objectKey, uploadId); listParts(authInfo, 
listParams, log, err => { - let wantedDesc = 'Error returned from AWS: ' + + let wantedDesc = + 'Error returned from AWS: ' + 'The specified upload does not exist. The upload ID ' + 'may be invalid, or the upload may have been aborted' + ' or completed.'; @@ -494,35 +488,40 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { const delParams = getDeleteParams(objectKey, uploadId); multipartDelete(authInfo, delParams, log, err => { assert.equal(err, null, `Error aborting MPU: ${err}`); - s3.listParts({ - Bucket: awsBucket, - Key: objectKey, - UploadId: uploadId, - }, err => { - const wantedError = isCEPH ? 'NoSuchKey' : 'NoSuchUpload'; - assert.strictEqual(err.code, wantedError); - done(); - }); + s3.listParts( + { + Bucket: awsBucket, + Key: objectKey, + UploadId: uploadId, + }, + err => { + const wantedError = isCEPH ? 'NoSuchKey' : 'NoSuchUpload'; + assert.strictEqual(err.code, wantedError); + done(); + } + ); }); }); }); - it('should abort a multipart upload on real AWS location with' + - 'bucketMatch set to false', done => { + it('should abort a multipart upload on real AWS location with' + 'bucketMatch set to false', done => { const objectKey = `key-${Date.now()}`; mpuSetup(awsLocationMismatch, objectKey, uploadId => { const delParams = getDeleteParams(objectKey, uploadId); multipartDelete(authInfo, delParams, log, err => { assert.equal(err, null, `Error aborting MPU: ${err}`); - s3.listParts({ - Bucket: awsBucket, - Key: `${bucketName}/${objectKey}`, - UploadId: uploadId, - }, err => { - const wantedError = isCEPH ? 'NoSuchKey' : 'NoSuchUpload'; - assert.strictEqual(err.code, wantedError); - done(); - }); + s3.listParts( + { + Bucket: awsBucket, + Key: `${bucketName}/${objectKey}`, + UploadId: uploadId, + }, + err => { + const wantedError = isCEPH ? 
'NoSuchKey' : 'NoSuchUpload'; + assert.strictEqual(err.code, wantedError); + done(); + } + ); }); }); }); @@ -537,8 +536,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { }); }); - it('should return ServiceUnavailable if MPU deleted directly from AWS ' + - 'and try to complete from S3', done => { + it('should return ServiceUnavailable if MPU deleted directly from AWS ' + 'and try to complete from S3', done => { const objectKey = `key-${Date.now()}`; mpuSetup(awsLocation, objectKey, uploadId => { abortMPU(uploadId, getAwsParams(objectKey), () => { @@ -555,8 +553,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { const objectKey = `key-${Date.now()}`; mpuSetup(awsLocation, objectKey, uploadId => { const compParams = getCompleteParams(objectKey, uploadId); - completeMultipartUpload(authInfo, compParams, log, - (err, result) => { + completeMultipartUpload(authInfo, compParams, log, (err, result) => { assert.equal(err, null, `Error completing mpu on AWS: ${err}`); assertMpuCompleteResults(result, objectKey); assertObjOnBackend(awsLocation, objectKey, done); @@ -564,31 +561,25 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { }); }); - it('should complete a multipart upload on real AWS location with ' + - 'bucketMatch set to false', done => { + it('should complete a multipart upload on real AWS location with ' + 'bucketMatch set to false', done => { const objectKey = `key-${Date.now()}`; mpuSetup(awsLocationMismatch, objectKey, uploadId => { const compParams = getCompleteParams(objectKey, uploadId); - completeMultipartUpload(authInfo, compParams, log, - (err, result) => { + completeMultipartUpload(authInfo, compParams, log, (err, result) => { assert.equal(err, null, `Error completing mpu on AWS: ${err}`); assertMpuCompleteResults(result, objectKey); - assertObjOnBackend(awsLocationMismatch, - `${bucketName}/${objectKey}`, done); + assertObjOnBackend(awsLocationMismatch, `${bucketName}/${objectKey}`, done); }); }); }); - it('should complete MPU on AWS with same key as object put to file', - done => { + it('should complete MPU on AWS with same key as object put to file', done => { const objectKey = `key-${Date.now()}`; return putObject(fileLocation, objectKey, () => { mpuSetup(awsLocation, objectKey, uploadId => { const compParams = getCompleteParams(objectKey, uploadId); - completeMultipartUpload(authInfo, compParams, log, - (err, result) => { - assert.equal(err, null, 'Error completing mpu on AWS ' + - `${err}`); + completeMultipartUpload(authInfo, compParams, log, (err, result) => { + assert.equal(err, null, 'Error completing mpu on AWS ' + `${err}`); assertMpuCompleteResults(result, objectKey); assertObjOnBackend(awsLocation, objectKey, done); }); @@ -596,16 +587,13 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { }); }); - it('should complete MPU on file with same key as object put to AWS', - done => { + it('should complete MPU on file with same key as object put to AWS', done => { const objectKey = `key-${Date.now()}`; putObject(awsLocation, objectKey, () => { mpuSetup(fileLocation, objectKey, uploadId => { const compParams = getCompleteParams(objectKey, uploadId); - completeMultipartUpload(authInfo, compParams, log, - (err, result) => { - assert.equal(err, null, 'Error completing mpu on file ' + - `${err}`); + completeMultipartUpload(authInfo, compParams, log, (err, result) => { + assert.equal(err, null, 'Error completing mpu on file ' + `${err}`); 
assertMpuCompleteResults(result, objectKey); assertObjOnBackend(fileLocation, objectKey, done); }); @@ -613,28 +601,26 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { }); }); - it('should be successful initiating MPU on AWS with Scality ' + - 'S3 versioning enabled', done => { + it('should be successful initiating MPU on AWS with Scality ' + 'S3 versioning enabled', done => { const objectKey = `key-${Date.now()}`; // putting null version: put obj before versioning configured putObject(awsLocation, objectKey, () => { - const enableVersioningRequest = versioningTestUtils. - createBucketPutVersioningReq(bucketName, 'Enabled'); + const enableVersioningRequest = versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Enabled'); bucketPutVersioning(authInfo, enableVersioningRequest, log, err => { - assert.equal(err, null, 'Error enabling bucket versioning: ' + - `${err}`); + assert.equal(err, null, 'Error enabling bucket versioning: ' + `${err}`); const initiateRequest = { bucketName, namespace, objectKey, - headers: { 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-meta-scal-location-constraint': awsLocation }, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-meta-scal-location-constraint': awsLocation, + }, url: `/${objectKey}?uploads`, parsedHost: 'localhost', actionImplicitDenies: false, }; - initiateMultipartUpload(authInfo, initiateRequest, log, - err => { + initiateMultipartUpload(authInfo, initiateRequest, log, err => { assert.strictEqual(err, null); done(); }); @@ -645,7 +631,8 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { it('should return invalidPart error', done => { const objectKey = `key-${Date.now()}`; mpuSetup(awsLocation, objectKey, uploadId => { - const errorBody = '' + + const errorBody = + '' + '' + '1' + `"${awsETag}"` + @@ -667,7 +654,8 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { itSkipCeph('should return invalidPartOrder error', done => { const objectKey = `key-${Date.now()}`; mpuSetup(awsLocation, objectKey, uploadId => { - const errorBody = '' + + const errorBody = + '' + '' + '2' + `"${awsETag}"` + @@ -692,7 +680,8 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { const partRequest3 = new DummyRequest(putPartParam, smallBody); objectPutPart(authInfo, partRequest3, undefined, log, err => { assert.equal(err, null, `Error putting part: ${err}`); - const errorBody = '' + + const errorBody = + '' + '' + '1' + `"${awsETagBigObj}"` + @@ -720,57 +709,54 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { const objectKey = `testkey-${Date.now()}`; const fileKey = `fileKey-${Date.now()}`; const memKey = `memKey-${Date.now()}`; - async.series([ - cb => mpuSetup(fileLocation, fileKey, - fileUploadId => cb(null, fileUploadId)), - cb => mpuSetup(memLocation, memKey, memUploadId => - cb(null, memUploadId)), - cb => mpuSetup(awsLocation, objectKey, awsUploadId => - cb(null, awsUploadId)), - ], (err, uploadIds) => { - assert.equal(err, null, `Error setting up MPUs: ${err}`); - const listMpuParams = { - bucketName, - namespace, - headers: { host: '/' }, - url: `/${bucketName}?uploads`, - query: {}, - actionImplicitDenies: false, - }; - listMultipartUploads(authInfo, listMpuParams, log, - (err, mpuListXml) => { - assert.equal(err, null, `Error listing MPUs: ${err}`); - parseString(mpuListXml, (err, json) => { - const mpuListing = json.ListMultipartUploadsResult.Upload; - assert.strictEqual(fileKey, 
mpuListing[0].Key[0]); - assert.strictEqual(uploadIds[0], mpuListing[0].UploadId[0]); - assert.strictEqual(memKey, mpuListing[1].Key[0]); - assert.strictEqual(uploadIds[1], mpuListing[1].UploadId[0]); - assert.strictEqual(objectKey, mpuListing[2].Key[0]); - assert.strictEqual(uploadIds[2], mpuListing[2].UploadId[0]); - const backendsInfo = [ - { backend: fileLocation, key: fileKey, - uploadId: uploadIds[0] }, - { backend: memLocation, key: memKey, - uploadId: uploadIds[1] }, - { backend: 'aws', key: objectKey, - uploadId: uploadIds[2] }, - ]; - abortMultipleMpus(backendsInfo, done); + async.series( + [ + cb => mpuSetup(fileLocation, fileKey, fileUploadId => cb(null, fileUploadId)), + cb => mpuSetup(memLocation, memKey, memUploadId => cb(null, memUploadId)), + cb => mpuSetup(awsLocation, objectKey, awsUploadId => cb(null, awsUploadId)), + ], + (err, uploadIds) => { + assert.equal(err, null, `Error setting up MPUs: ${err}`); + const listMpuParams = { + bucketName, + namespace, + headers: { host: '/' }, + url: `/${bucketName}?uploads`, + query: {}, + actionImplicitDenies: false, + }; + listMultipartUploads(authInfo, listMpuParams, log, (err, mpuListXml) => { + assert.equal(err, null, `Error listing MPUs: ${err}`); + parseString(mpuListXml, (err, json) => { + const mpuListing = json.ListMultipartUploadsResult.Upload; + assert.strictEqual(fileKey, mpuListing[0].Key[0]); + assert.strictEqual(uploadIds[0], mpuListing[0].UploadId[0]); + assert.strictEqual(memKey, mpuListing[1].Key[0]); + assert.strictEqual(uploadIds[1], mpuListing[1].UploadId[0]); + assert.strictEqual(objectKey, mpuListing[2].Key[0]); + assert.strictEqual(uploadIds[2], mpuListing[2].UploadId[0]); + const backendsInfo = [ + { backend: fileLocation, key: fileKey, uploadId: uploadIds[0] }, + { backend: memLocation, key: memKey, uploadId: uploadIds[1] }, + { backend: 'aws', key: objectKey, uploadId: uploadIds[2] }, + ]; + abortMultipleMpus(backendsInfo, done); + }); }); - }); - }); + } + ); }); describe('with mpu initiated on legacy version', () => { beforeEach(function beFn() { this.currentTest.lcObj = config.locationConstraints; - const legacyObj = Object.assign(config.locationConstraints, - { legacy: { + const legacyObj = Object.assign(config.locationConstraints, { + legacy: { type: 'mem', legacyAwsBehavior: true, details: {}, - } }); + }, + }); config.setLocationConstraints(legacyObj); }); @@ -782,40 +768,37 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { const objectKey = `testkey-${Date.now()}`; mpuSetup('scality-internal-mem', objectKey, uploadId => { const mpuOverviewKey = _getOverviewKey(objectKey, uploadId); - async.waterfall([ - next => { - const bucketMD = BucketInfo.fromObj( - metadata.buckets.get(bucketName)); - const objMD = - metadata.keyMaps.get(mpuBucket).get(mpuOverviewKey); - // remove location constraints to mimic legacy behavior - bucketMD.setLocationConstraint(undefined); - objMD.controllingLocationConstraint = undefined; - objMD.dataStoreName = undefined; - objMD[constants.objectLocationConstraintHeader] = - undefined; - next(null, uploadId, bucketMD, objMD); - }, - (uploadId, bucketMD, objMD, next) => { - metadata.buckets.set(bucketName, bucketMD); - metadata.keyMaps.get(mpuBucket). 
- set(mpuOverviewKey, objMD); - next(null, uploadId); - }, - (uploadId, next) => { - const compParams = getCompleteParams( - objectKey, uploadId); - completeMultipartUpload( - authInfo, compParams, log, next); - }, - (completeRes, resHeaders, next) => { - assertMpuCompleteResults(completeRes, objectKey); - next(); - }, - ], err => { - assert.ifError(err); - done(); - }); + async.waterfall( + [ + next => { + const bucketMD = BucketInfo.fromObj(metadata.buckets.get(bucketName)); + const objMD = metadata.keyMaps.get(mpuBucket).get(mpuOverviewKey); + // remove location constraints to mimic legacy behavior + bucketMD.setLocationConstraint(undefined); + objMD.controllingLocationConstraint = undefined; + objMD.dataStoreName = undefined; + objMD[constants.objectLocationConstraintHeader] = undefined; + next(null, uploadId, bucketMD, objMD); + }, + (uploadId, bucketMD, objMD, next) => { + metadata.buckets.set(bucketName, bucketMD); + metadata.keyMaps.get(mpuBucket).set(mpuOverviewKey, objMD); + next(null, uploadId); + }, + (uploadId, next) => { + const compParams = getCompleteParams(objectKey, uploadId); + completeMultipartUpload(authInfo, compParams, log, next); + }, + (completeRes, resHeaders, next) => { + assertMpuCompleteResults(completeRes, objectKey); + next(); + }, + ], + err => { + assert.ifError(err); + done(); + } + ); }); }); @@ -823,39 +806,37 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { const objectKey = `testkey-${Date.now()}`; mpuSetup('scality-internal-mem', objectKey, uploadId => { const mpuOverviewKey = _getOverviewKey(objectKey, uploadId); - async.waterfall([ - next => { - const bucketMD = BucketInfo.fromObj( - metadata.buckets.get(bucketName)); - const objMD = - metadata.keyMaps.get(mpuBucket).get(mpuOverviewKey); - // remove location constraints to mimic legacy behavior - bucketMD.setLocationConstraint(undefined); - objMD.controllingLocationConstraint = undefined; - objMD.dataStoreName = undefined; - objMD[constants.objectLocationConstraintHeader] = - undefined; - metadata.buckets.set(bucketName, bucketMD); - metadata.keyMaps.get(mpuBucket). 
- set(mpuOverviewKey, objMD); - next(null, uploadId); - }, - (uploadId, next) => { - const delParams = getDeleteParams(objectKey, uploadId); - multipartDelete(authInfo, delParams, log, - err => next(err, uploadId)); - }, - (uploadId, next) => { - const listParams = getListParams(objectKey, uploadId); - listParts(authInfo, listParams, log, err => { - assert(err.is.NoSuchUpload); - next(); - }); - }, - ], err => { - assert.ifError(err); - done(); - }); + async.waterfall( + [ + next => { + const bucketMD = BucketInfo.fromObj(metadata.buckets.get(bucketName)); + const objMD = metadata.keyMaps.get(mpuBucket).get(mpuOverviewKey); + // remove location constraints to mimic legacy behavior + bucketMD.setLocationConstraint(undefined); + objMD.controllingLocationConstraint = undefined; + objMD.dataStoreName = undefined; + objMD[constants.objectLocationConstraintHeader] = undefined; + metadata.buckets.set(bucketName, bucketMD); + metadata.keyMaps.get(mpuBucket).set(mpuOverviewKey, objMD); + next(null, uploadId); + }, + (uploadId, next) => { + const delParams = getDeleteParams(objectKey, uploadId); + multipartDelete(authInfo, delParams, log, err => next(err, uploadId)); + }, + (uploadId, next) => { + const listParams = getListParams(objectKey, uploadId); + listParts(authInfo, listParams, log, err => { + assert(err.is.NoSuchUpload); + next(); + }); + }, + ], + err => { + assert.ifError(err); + done(); + } + ); }); }); @@ -863,40 +844,38 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { const objectKey = `testkey-${Date.now()}`; mpuSetup('scality-internal-mem', objectKey, uploadId => { const mpuOverviewKey = _getOverviewKey(objectKey, uploadId); - async.waterfall([ - next => { - const bucketMD = BucketInfo.fromObj( - metadata.buckets.get(bucketName)); - const objMD = - metadata.keyMaps.get(mpuBucket).get(mpuOverviewKey); - // remove location constraints to mimic legacy behavior - bucketMD.setLocationConstraint(undefined); - objMD.controllingLocationConstraint = undefined; - objMD.dataStoreName = undefined; - objMD[constants.objectLocationConstraintHeader] = - undefined; - metadata.buckets.set(bucketName, bucketMD); - metadata.keyMaps.get(mpuBucket). 
- set(mpuOverviewKey, objMD); - next(null, uploadId); - }, - (uploadId, next) => { - const listParams = getListParams(objectKey, uploadId); - listParts(authInfo, listParams, log, (err, res) => { - assert.ifError(err); - assertListResults(res, null, uploadId, objectKey); + async.waterfall( + [ + next => { + const bucketMD = BucketInfo.fromObj(metadata.buckets.get(bucketName)); + const objMD = metadata.keyMaps.get(mpuBucket).get(mpuOverviewKey); + // remove location constraints to mimic legacy behavior + bucketMD.setLocationConstraint(undefined); + objMD.controllingLocationConstraint = undefined; + objMD.dataStoreName = undefined; + objMD[constants.objectLocationConstraintHeader] = undefined; + metadata.buckets.set(bucketName, bucketMD); + metadata.keyMaps.get(mpuBucket).set(mpuOverviewKey, objMD); next(null, uploadId); - }); - }, - (uploadId, next) => { - const delParams = getDeleteParams(objectKey, uploadId); - multipartDelete(authInfo, delParams, log, - err => next(err, uploadId)); - }, - ], err => { - assert.ifError(err); - done(); - }); + }, + (uploadId, next) => { + const listParams = getListParams(objectKey, uploadId); + listParts(authInfo, listParams, log, (err, res) => { + assert.ifError(err); + assertListResults(res, null, uploadId, objectKey); + next(null, uploadId); + }); + }, + (uploadId, next) => { + const delParams = getDeleteParams(objectKey, uploadId); + multipartDelete(authInfo, delParams, log, err => next(err, uploadId)); + }, + ], + err => { + assert.ifError(err); + done(); + } + ); }); }); @@ -904,70 +883,58 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() { const objectKey = `testkey-${Date.now()}`; mpuSetup('scality-internal-mem', objectKey, uploadId => { const mpuOverviewKey = _getOverviewKey(objectKey, uploadId); - async.waterfall([ - next => { - const copyObjectKey = `copykey-${Date.now()}`; - putObject('scality-internal-mem', copyObjectKey, - () => next(null, copyObjectKey)); - }, - (copyObjectKey, next) => { - const bucketMD = BucketInfo.fromObj( - metadata.buckets.get(bucketName)); - const mpuMD = - metadata.keyMaps.get(mpuBucket).get(mpuOverviewKey); - const copyObjMD = - metadata.keyMaps.get(bucketName).get(copyObjectKey); - // remove location constraints to mimic legacy behavior - bucketMD.setLocationConstraint(undefined); - mpuMD.controllingLocationConstraint = undefined; - mpuMD.dataStoreName = undefined; - mpuMD[constants.objectLocationConstraintHeader] = - undefined; - copyObjMD.controllingLocationConstraint = undefined; - copyObjMD.dataStoreName = undefined; - copyObjMD[constants.objectLocationConstraintHeader] = - undefined; - metadata.buckets.set(bucketName, bucketMD); - metadata.keyMaps.get(mpuBucket). - set(mpuOverviewKey, mpuMD); - metadata.keyMaps.get(bucketName). - set(copyObjectKey, copyObjMD); - next(null, uploadId, copyObjectKey); - }, - (uploadId, copyObjectKey, next) => { - const copyParams = - getPartParams(objectKey, uploadId, 3); - objectPutCopyPart(authInfo, copyParams, bucketName, - copyObjectKey, undefined, log, err => { - next(err, uploadId); - }); - }, - (uploadId, next) => { - const listParams = getListParams(objectKey, uploadId); - listParts(authInfo, listParams, log, (err, listRes) => { - assert.ifError(err); - parseString(listRes, (err, json) => { - assert.equal(err, null, - `Error parsing list part results: ${err}`); - assert.strictEqual(json.ListPartsResult. - Part[2].PartNumber[0], '3'); - assert.strictEqual(json.ListPartsResult. 
- Part[2].ETag[0], `"${awsETag}"`); - assert.strictEqual(json.ListPartsResult. - Part[2].Size[0], '11'); - next(null, uploadId); + async.waterfall( + [ + next => { + const copyObjectKey = `copykey-${Date.now()}`; + putObject('scality-internal-mem', copyObjectKey, () => next(null, copyObjectKey)); + }, + (copyObjectKey, next) => { + const bucketMD = BucketInfo.fromObj(metadata.buckets.get(bucketName)); + const mpuMD = metadata.keyMaps.get(mpuBucket).get(mpuOverviewKey); + const copyObjMD = metadata.keyMaps.get(bucketName).get(copyObjectKey); + // remove location constraints to mimic legacy behavior + bucketMD.setLocationConstraint(undefined); + mpuMD.controllingLocationConstraint = undefined; + mpuMD.dataStoreName = undefined; + mpuMD[constants.objectLocationConstraintHeader] = undefined; + copyObjMD.controllingLocationConstraint = undefined; + copyObjMD.dataStoreName = undefined; + copyObjMD[constants.objectLocationConstraintHeader] = undefined; + metadata.buckets.set(bucketName, bucketMD); + metadata.keyMaps.get(mpuBucket).set(mpuOverviewKey, mpuMD); + metadata.keyMaps.get(bucketName).set(copyObjectKey, copyObjMD); + next(null, uploadId, copyObjectKey); + }, + (uploadId, copyObjectKey, next) => { + const copyParams = getPartParams(objectKey, uploadId, 3); + objectPutCopyPart(authInfo, copyParams, bucketName, copyObjectKey, undefined, log, err => { + next(err, uploadId); }); - }); - }, - (uploadId, next) => { - const delParams = getDeleteParams(objectKey, uploadId); - multipartDelete(authInfo, delParams, log, - err => next(err, uploadId)); - }, - ], err => { - assert.ifError(err); - done(); - }); + }, + (uploadId, next) => { + const listParams = getListParams(objectKey, uploadId); + listParts(authInfo, listParams, log, (err, listRes) => { + assert.ifError(err); + parseString(listRes, (err, json) => { + assert.equal(err, null, `Error parsing list part results: ${err}`); + assert.strictEqual(json.ListPartsResult.Part[2].PartNumber[0], '3'); + assert.strictEqual(json.ListPartsResult.Part[2].ETag[0], `"${awsETag}"`); + assert.strictEqual(json.ListPartsResult.Part[2].Size[0], '11'); + next(null, uploadId); + }); + }); + }, + (uploadId, next) => { + const delParams = getDeleteParams(objectKey, uploadId); + multipartDelete(authInfo, delParams, log, err => next(err, uploadId)); + }, + ], + err => { + assert.ifError(err); + done(); + } + ); }); }); }); diff --git a/tests/multipleBackend/objectCopy.js b/tests/multipleBackend/objectCopy.js index 8e59a1617f..e037e3a756 100644 --- a/tests/multipleBackend/objectCopy.js +++ b/tests/multipleBackend/objectCopy.js @@ -6,8 +6,7 @@ const objectPut = require('../../lib/api/objectPut'); const objectCopy = require('../../lib/api/objectCopy'); const { metadata } = require('arsenal').storage.metadata.inMemory.metadata; const DummyRequest = require('../unit/DummyRequest'); -const { cleanup, DummyRequestLogger, makeAuthInfo } - = require('../unit/helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../unit/helpers'); const log = new DummyRequestLogger(); const canonicalID = 'accessKey1'; @@ -19,11 +18,13 @@ const memLocation = 'scality-internal-mem'; const fileLocation = 'scality-internal-file'; function _createBucketPutRequest(bucketName, bucketLoc) { - const post = bucketLoc ? '' + - '' + - `${bucketLoc}` + - '' : ''; + const post = bucketLoc + ? 
'' + + '' + + `${bucketLoc}` + + '' + : ''; return new DummyRequest({ bucketName, namespace, @@ -56,20 +57,18 @@ function _createObjectPutRequest(bucketName, objectKey, body) { } function copySetup(params, cb) { - const { sourceBucket, sourceLocation, sourceKey, destBucket, - destLocation, body } = params; - const putDestBucketRequest = - _createBucketPutRequest(destBucket, destLocation); - const putSourceBucketRequest = - _createBucketPutRequest(sourceBucket, sourceLocation); - const putSourceObjRequest = _createObjectPutRequest(sourceBucket, - sourceKey, body); - async.series([ - callback => bucketPut(authInfo, putDestBucketRequest, log, callback), - callback => bucketPut(authInfo, putSourceBucketRequest, log, callback), - callback => objectPut(authInfo, putSourceObjRequest, undefined, log, - callback), - ], err => cb(err)); + const { sourceBucket, sourceLocation, sourceKey, destBucket, destLocation, body } = params; + const putDestBucketRequest = _createBucketPutRequest(destBucket, destLocation); + const putSourceBucketRequest = _createBucketPutRequest(sourceBucket, sourceLocation); + const putSourceObjRequest = _createObjectPutRequest(sourceBucket, sourceKey, body); + async.series( + [ + callback => bucketPut(authInfo, putDestBucketRequest, log, callback), + callback => bucketPut(authInfo, putSourceBucketRequest, log, callback), + callback => objectPut(authInfo, putSourceObjRequest, undefined, log, callback), + ], + err => cb(err) + ); } describe('ObjectCopy API with multiple backends', () => { @@ -79,29 +78,30 @@ describe('ObjectCopy API with multiple backends', () => { after(() => cleanup()); - it('object metadata for newly stored object should have dataStoreName ' + - 'if copying to mem based on bucket location', done => { - const params = { - sourceBucket: sourceBucketName, - sourceKey: `sourcekey-${Date.now()}`, - sourceLocation: fileLocation, - body: 'testbody', - destBucket: destBucketName, - destLocation: memLocation, - }; - const destKey = `destkey-${Date.now()}`; - const testObjectCopyRequest = - _createObjectCopyRequest(destBucketName, destKey); - copySetup(params, err => { - assert.strictEqual(err, null, `Error setting up copy: ${err}`); - objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, - params.sourceKey, undefined, log, err => { + it( + 'object metadata for newly stored object should have dataStoreName ' + + 'if copying to mem based on bucket location', + done => { + const params = { + sourceBucket: sourceBucketName, + sourceKey: `sourcekey-${Date.now()}`, + sourceLocation: fileLocation, + body: 'testbody', + destBucket: destBucketName, + destLocation: memLocation, + }; + const destKey = `destkey-${Date.now()}`; + const testObjectCopyRequest = _createObjectCopyRequest(destBucketName, destKey); + copySetup(params, err => { + assert.strictEqual(err, null, `Error setting up copy: ${err}`); + objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, params.sourceKey, undefined, log, err => { assert.strictEqual(err, null, `Error copying: ${err}`); const bucket = metadata.keyMaps.get(params.destBucket); const objMd = bucket.get(destKey); assert.strictEqual(objMd.dataStoreName, memLocation); done(); }); - }); - }); + }); + } + ); }); diff --git a/tests/multipleBackend/objectPut.js b/tests/multipleBackend/objectPut.js index 325662e419..b2e5e3664b 100644 --- a/tests/multipleBackend/objectPut.js +++ b/tests/multipleBackend/objectPut.js @@ -2,8 +2,7 @@ const assert = require('assert'); const async = require('async'); const { storage } = require('arsenal'); -const { 
cleanup, DummyRequestLogger, makeAuthInfo } - = require('../unit/helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../unit/helpers'); const { bucketPut } = require('../../lib/api/bucketPut'); const objectPut = require('../../lib/api/objectPut'); const DummyRequest = require('../unit/DummyRequest'); @@ -25,11 +24,13 @@ const isCEPH = process.env.CI_CEPH !== undefined; const describeSkipIfE2E = process.env.S3_END_TO_END ? describe.skip : describe; function put(bucketLoc, objLoc, requestHost, objectName, cb, errorDescription) { - const post = bucketLoc ? '' + - '' + - `${bucketLoc}` + - '' : ''; + const post = bucketLoc + ? '' + + '' + + `${bucketLoc}` + + '' + : ''; const bucketPutReq = new DummyRequest({ bucketName, namespace, @@ -58,8 +59,7 @@ function put(bucketLoc, objLoc, requestHost, objectName, cb, errorDescription) { testPutObjReq.parsedHost = requestHost; } bucketPut(authInfo, bucketPutReq, log, () => { - objectPut(authInfo, testPutObjReq, undefined, log, (err, - resHeaders) => { + objectPut(authInfo, testPutObjReq, undefined, log, (err, resHeaders) => { if (errorDescription) { assert.strictEqual(err.code, 400); assert(err.is.InvalidArgument); @@ -133,8 +133,7 @@ describeSkipIfE2E('objectPutAPI with multiple backends', function testSuite() { } function isDataStoredInMem(testCase) { - return testCase.objLoc === memLocation - || (testCase.objLoc === null && testCase.bucketLoc === memLocation); + return testCase.objLoc === memLocation || (testCase.objLoc === null && testCase.bucketLoc === memLocation); } function checkPut(testCase) { @@ -151,28 +150,31 @@ describeSkipIfE2E('objectPutAPI with multiple backends', function testSuite() { putCases.forEach(testCase => { it(`should put an object to ${testCase.name}`, done => { - async.series([ - next => put(testCase.bucketLoc, testCase.objLoc, 'localhost', 'obj1', next), - next => { - checkPut(testCase); - // Increase the probability of the first request having released - // the socket, so that it can be reused for the next request. - // This tests how HTTP connection reuse behaves. - setTimeout(next, 10); - }, - // Second put should work as well - next => put(testCase.bucketLoc, testCase.objLoc, 'localhost', 'obj2', next), - next => { - checkPut(testCase); - setTimeout(next, 10); - }, - // Overwriting PUT - next => put(testCase.bucketLoc, testCase.objLoc, 'localhost', 'obj2', next), - next => { - checkPut(testCase); - next(); - }, - ], done); + async.series( + [ + next => put(testCase.bucketLoc, testCase.objLoc, 'localhost', 'obj1', next), + next => { + checkPut(testCase); + // Increase the probability of the first request having released + // the socket, so that it can be reused for the next request. + // This tests how HTTP connection reuse behaves. 
+ setTimeout(next, 10); + }, + // Second put should work as well + next => put(testCase.bucketLoc, testCase.objLoc, 'localhost', 'obj2', next), + next => { + checkPut(testCase); + setTimeout(next, 10); + }, + // Overwriting PUT + next => put(testCase.bucketLoc, testCase.objLoc, 'localhost', 'obj2', next), + next => { + checkPut(testCase); + next(); + }, + ], + done + ); }); }); }); diff --git a/tests/multipleBackend/objectPutCopyPart.js b/tests/multipleBackend/objectPutCopyPart.js index a4dadfc8b3..cb2a559748 100644 --- a/tests/multipleBackend/objectPutCopyPart.js +++ b/tests/multipleBackend/objectPutCopyPart.js @@ -4,11 +4,9 @@ const { parseString } = require('xml2js'); const AWS = require('aws-sdk'); const { storage, errors } = require('arsenal'); -const { cleanup, DummyRequestLogger, makeAuthInfo } - = require('../unit/helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../unit/helpers'); const { bucketPut } = require('../../lib/api/bucketPut'); -const initiateMultipartUpload - = require('../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../lib/api/initiateMultipartUpload'); const objectPut = require('../../lib/api/objectPut'); const objectPutCopyPart = require('../../lib/api/objectPutCopyPart'); const DummyRequest = require('../unit/DummyRequest'); @@ -38,10 +36,9 @@ const partETag = 'be747eb4b75517bf6b3cf7c5fbb62f3a'; const describeSkipIfE2E = process.env.S3_END_TO_END ? describe.skip : describe; const { config } = require('../../lib/Config'); -const isCEPH = (config.locationConstraints[awsLocation] - .details.awsEndpoint !== undefined && - config.locationConstraints[awsLocation] - .details.awsEndpoint.indexOf('amazon') === -1); +const isCEPH = + config.locationConstraints[awsLocation].details.awsEndpoint !== undefined && + config.locationConstraints[awsLocation].details.awsEndpoint.indexOf('amazon') === -1; const itSkipCeph = isCEPH ? it.skip : it; function getSourceAndDestKeys() { @@ -62,15 +59,16 @@ function getAwsParamsBucketMismatch(destObjName, uploadId) { return params; } -function copyPutPart(bucketLoc, mpuLoc, srcObjLoc, requestHost, cb, -errorPutCopyPart) { +function copyPutPart(bucketLoc, mpuLoc, srcObjLoc, requestHost, cb, errorPutCopyPart) { const keys = getSourceAndDestKeys(); const { sourceObjName, destObjName } = keys; - const post = bucketLoc ? '' + - '' + - `${bucketLoc}` + - '' : ''; + const post = bucketLoc + ? 
'' + + '' + + `${bucketLoc}` + + '' + : ''; const bucketPutReq = new DummyRequest({ bucketName, namespace, @@ -90,8 +88,10 @@ errorPutCopyPart) { actionImplicitDenies: false, }; if (mpuLoc) { - initiateReq.headers = { 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-meta-scal-location-constraint': `${mpuLoc}` }; + initiateReq.headers = { + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-meta-scal-location-constraint': `${mpuLoc}`, + }; } if (requestHost) { initiateReq.parsedHost = requestHost; @@ -105,71 +105,78 @@ errorPutCopyPart) { actionImplicitDenies: false, }; if (srcObjLoc) { - sourceObjPutParams.headers = { 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-meta-scal-location-constraint': `${srcObjLoc}` }; + sourceObjPutParams.headers = { + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-meta-scal-location-constraint': `${srcObjLoc}`, + }; } const sourceObjPutReq = new DummyRequest(sourceObjPutParams, body); if (requestHost) { sourceObjPutReq.parsedHost = requestHost; } - async.waterfall([ - next => { - bucketPut(authInfo, bucketPutReq, log, err => { - assert.ifError(err, 'Error putting bucket'); - next(err); - }); - }, - next => { - objectPut(authInfo, sourceObjPutReq, undefined, log, err => - next(err)); - }, - next => { - initiateMultipartUpload(authInfo, initiateReq, log, next); - }, - (result, corsHeaders, next) => { - const mpuKeys = metadata.keyMaps.get(mpuBucket); - assert.strictEqual(mpuKeys.size, 1); - assert(mpuKeys.keys().next().value - .startsWith(`overview${splitter}${destObjName}`)); - parseString(result, next); - }, - ], - (err, json) => { - // Need to build request in here since do not have - // uploadId until here - assert.ifError(err, 'Error putting source object or initiate MPU'); - const testUploadId = json.InitiateMultipartUploadResult. 
- UploadId[0]; - const copyPartParams = { - bucketName, - namespace, - objectKey: destObjName, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${destObjName}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, + async.waterfall( + [ + next => { + bucketPut(authInfo, bucketPutReq, log, err => { + assert.ifError(err, 'Error putting bucket'); + next(err); + }); }, - }; - const copyPartReq = new DummyRequest(copyPartParams); - return objectPutCopyPart(authInfo, copyPartReq, - bucketName, sourceObjName, undefined, log, (err, copyResult) => { - if (errorPutCopyPart) { - assert.strictEqual(err.code, errorPutCopyPart.statusCode); - assert(err.is[errorPutCopyPart.code]); - return cb(); + next => { + objectPut(authInfo, sourceObjPutReq, undefined, log, err => next(err)); + }, + next => { + initiateMultipartUpload(authInfo, initiateReq, log, next); + }, + (result, corsHeaders, next) => { + const mpuKeys = metadata.keyMaps.get(mpuBucket); + assert.strictEqual(mpuKeys.size, 1); + assert(mpuKeys.keys().next().value.startsWith(`overview${splitter}${destObjName}`)); + parseString(result, next); + }, + ], + (err, json) => { + // Need to build request in here since do not have + // uploadId until here + assert.ifError(err, 'Error putting source object or initiate MPU'); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const copyPartParams = { + bucketName, + namespace, + objectKey: destObjName, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${destObjName}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + }; + const copyPartReq = new DummyRequest(copyPartParams); + return objectPutCopyPart( + authInfo, + copyPartReq, + bucketName, + sourceObjName, + undefined, + log, + (err, copyResult) => { + if (errorPutCopyPart) { + assert.strictEqual(err.code, errorPutCopyPart.statusCode); + assert(err.is[errorPutCopyPart.code]); + return cb(); + } + assert.strictEqual(err, null); + return parseString(copyResult, (err, json) => { + assert.equal(err, null, `Error parsing copy result ${err}`); + assert.strictEqual(json.CopyPartResult.ETag[0], `"${partETag}"`); + assert(json.CopyPartResult.LastModified); + return cb(keys, testUploadId); + }); } - assert.strictEqual(err, null); - return parseString(copyResult, (err, json) => { - assert.equal(err, null, `Error parsing copy result ${err}`); - assert.strictEqual(json.CopyPartResult.ETag[0], - `"${partETag}"`); - assert(json.CopyPartResult.LastModified); - return cb(keys, testUploadId); - }); - }); - }); + ); + } + ); } function assertPartList(partList, uploadId) { @@ -180,8 +187,7 @@ function assertPartList(partList, uploadId) { assert.strictEqual(partList.Parts[0].Size, 11); } -describeSkipIfE2E('ObjectCopyPutPart API with multiple backends', -function testSuite() { +describeSkipIfE2E('ObjectCopyPutPart API with multiple backends', function testSuite() { this.timeout(60000); beforeEach(() => { @@ -207,15 +213,17 @@ function testSuite() { }); itSkipCeph('should copy part to AWS based on mpu location', done => { - copyPutPart(memLocation, awsLocation, null, 'localhost', - (keys, uploadId) => { + copyPutPart(memLocation, awsLocation, null, 'localhost', (keys, uploadId) => { assert.strictEqual(ds.length, 2); const awsReq = getAwsParams(keys.destObjName, uploadId); s3.listParts(awsReq, (err, partList) => { assertPartList(partList, uploadId); s3.abortMultipartUpload(awsReq, err => { - assert.equal(err, null, `Error aborting MPU: 
${err}. ` + - `You must abort MPU with upload ID ${uploadId} manually.`); + assert.equal( + err, + null, + `Error aborting MPU: ${err}. ` + `You must abort MPU with upload ID ${uploadId} manually.` + ); done(); }); }); @@ -256,57 +264,68 @@ function testSuite() { s3.listParts(awsReq, (err, partList) => { assertPartList(partList, uploadId); s3.abortMultipartUpload(awsReq, err => { - assert.equal(err, null, `Error aborting MPU: ${err}. ` + - `You must abort MPU with upload ID ${uploadId} manually.`); + assert.equal( + err, + null, + `Error aborting MPU: ${err}. ` + `You must abort MPU with upload ID ${uploadId} manually.` + ); done(); }); }); }); }); - itSkipCeph('should copy part an object on AWS location that has ' + - 'bucketMatch equals false to a mpu with a different AWS location', done => { - copyPutPart(null, awsLocation, awsLocationMismatch, 'localhost', - (keys, uploadId) => { - assert.deepStrictEqual(ds, []); - const awsReq = getAwsParams(keys.destObjName, uploadId); - s3.listParts(awsReq, (err, partList) => { - assertPartList(partList, uploadId); - s3.abortMultipartUpload(awsReq, err => { - assert.equal(err, null, `Error aborting MPU: ${err}. ` + - `You must abort MPU with upload ID ${uploadId} manually.`); - done(); + itSkipCeph( + 'should copy part an object on AWS location that has ' + + 'bucketMatch equals false to a mpu with a different AWS location', + done => { + copyPutPart(null, awsLocation, awsLocationMismatch, 'localhost', (keys, uploadId) => { + assert.deepStrictEqual(ds, []); + const awsReq = getAwsParams(keys.destObjName, uploadId); + s3.listParts(awsReq, (err, partList) => { + assertPartList(partList, uploadId); + s3.abortMultipartUpload(awsReq, err => { + assert.equal( + err, + null, + `Error aborting MPU: ${err}. ` + `You must abort MPU with upload ID ${uploadId} manually.` + ); + done(); + }); }); }); - }); - }); + } + ); - itSkipCeph('should copy part an object on AWS to a mpu with a different ' + - 'AWS location that has bucketMatch equals false', done => { - copyPutPart(null, awsLocationMismatch, awsLocation, 'localhost', - (keys, uploadId) => { - assert.deepStrictEqual(ds, []); - const awsReq = getAwsParamsBucketMismatch(keys.destObjName, - uploadId); - s3.listParts(awsReq, (err, partList) => { - assertPartList(partList, uploadId); - s3.abortMultipartUpload(awsReq, err => { - assert.equal(err, null, `Error aborting MPU: ${err}. ` + - `You must abort MPU with upload ID ${uploadId} manually.`); - done(); + itSkipCeph( + 'should copy part an object on AWS to a mpu with a different ' + + 'AWS location that has bucketMatch equals false', + done => { + copyPutPart(null, awsLocationMismatch, awsLocation, 'localhost', (keys, uploadId) => { + assert.deepStrictEqual(ds, []); + const awsReq = getAwsParamsBucketMismatch(keys.destObjName, uploadId); + s3.listParts(awsReq, (err, partList) => { + assertPartList(partList, uploadId); + s3.abortMultipartUpload(awsReq, err => { + assert.equal( + err, + null, + `Error aborting MPU: ${err}. 
` + `You must abort MPU with upload ID ${uploadId} manually.` + ); + done(); + }); }); }); - }); - }); + } + ); // FIXME: does not pass, see CLDSRV-442 - it.skip('should return error 403 AccessDenied copying part to a ' + - 'different AWS location without object READ access', - done => { - copyPutPart(null, awsLocation, awsLocation2, 'localhost', done, - errors.AccessDenied); - }); - + it.skip( + 'should return error 403 AccessDenied copying part to a ' + 'different AWS location without object READ access', + done => { + copyPutPart(null, awsLocation, awsLocation2, 'localhost', done, errors.AccessDenied); + } + ); it('should copy part to file based on request endpoint', done => { copyPutPart(null, null, memLocation, 'localhost', () => { diff --git a/tests/multipleBackend/objectPutPart.js b/tests/multipleBackend/objectPutPart.js index 345266007d..387f588390 100644 --- a/tests/multipleBackend/objectPutPart.js +++ b/tests/multipleBackend/objectPutPart.js @@ -6,17 +6,14 @@ const AWS = require('aws-sdk'); const { storage } = require('arsenal'); const { config } = require('../../lib/Config'); -const { cleanup, DummyRequestLogger, makeAuthInfo } - = require('../unit/helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../unit/helpers'); const { bucketPut } = require('../../lib/api/bucketPut'); -const initiateMultipartUpload - = require('../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../lib/api/initiateMultipartUpload'); const objectPutPart = require('../../lib/api/objectPutPart'); const DummyRequest = require('../unit/DummyRequest'); const mdWrapper = require('../../lib/metadata/wrapper'); const constants = require('../../constants'); -const { getRealAwsConfig } = - require('../functional/aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../functional/aws-node-sdk/test/support/awsConfig'); const { metadata } = storage.metadata.inMemory.metadata; const { ds } = storage.data.inMemory.datastore; @@ -49,14 +46,15 @@ function _getOverviewKey(objectKey, uploadId) { return `overview${splitter}${objectKey}${splitter}${uploadId}`; } -function putPart(bucketLoc, mpuLoc, requestHost, cb, -errorDescription) { +function putPart(bucketLoc, mpuLoc, requestHost, cb, errorDescription) { const objectName = `objectName-${Date.now()}`; - const post = bucketLoc ? '' + - '' + - `${bucketLoc}` + - '' : ''; + const post = bucketLoc + ? 
'' + + '' + + `${bucketLoc}` + + '' + : ''; const bucketPutReq = { bucketName, namespace, @@ -76,84 +74,88 @@ errorDescription) { actionImplicitDenies: false, }; if (mpuLoc) { - initiateReq.headers = { 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-meta-scal-location-constraint': `${mpuLoc}` }; + initiateReq.headers = { + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-meta-scal-location-constraint': `${mpuLoc}`, + }; } if (requestHost) { initiateReq.parsedHost = requestHost; } - async.waterfall([ - next => { - bucketPut(authInfo, bucketPutReq, log, err => { - assert.ifError(err, 'Error putting bucket'); - next(err); - }); - }, - next => { - initiateMultipartUpload(authInfo, initiateReq, log, next); - }, - (result, corsHeaders, next) => { - const mpuKeys = metadata.keyMaps.get(mpuBucket); - assert.strictEqual(mpuKeys.size, 1); - assert(mpuKeys.keys().next().value - .startsWith(`overview${splitter}${objectName}`)); - parseString(result, next); - }, - ], - (err, json) => { - if (errorDescription) { - assert.strictEqual(err.code, 400); - assert(err.is.InvalidArgument); - assert(err.description.indexOf(errorDescription) > -1); - return cb(); - } - - assert.ifError(err, 'Error initiating MPU'); - - // Need to build request in here since do not have uploadId - // until here - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const partReqParams = { - bucketName, - namespace, - objectKey: objectName, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectName}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - }; - const partReq = new DummyRequest(partReqParams, body1); - return objectPutPart(authInfo, partReq, undefined, log, err => { - assert.strictEqual(err, null); - if (bucketLoc !== awsLocation && mpuLoc !== awsLocation && - bucketLoc !== awsLocationMismatch && - mpuLoc !== awsLocationMismatch) { - const keysInMPUkeyMap = []; - metadata.keyMaps.get(mpuBucket).forEach((val, key) => { - keysInMPUkeyMap.push(key); + async.waterfall( + [ + next => { + bucketPut(authInfo, bucketPutReq, log, err => { + assert.ifError(err, 'Error putting bucket'); + next(err); }); - const sortedKeyMap = keysInMPUkeyMap.sort(a => { - if (a.slice(0, 8) === 'overview') { - return -1; - } - return 0; - }); - const partKey = sortedKeyMap[1]; - const partETag = metadata.keyMaps.get(mpuBucket) - .get(partKey)['content-md5']; - assert.strictEqual(keysInMPUkeyMap.length, 2); - assert.strictEqual(partETag, calculatedHash1); + }, + next => { + initiateMultipartUpload(authInfo, initiateReq, log, next); + }, + (result, corsHeaders, next) => { + const mpuKeys = metadata.keyMaps.get(mpuBucket); + assert.strictEqual(mpuKeys.size, 1); + assert(mpuKeys.keys().next().value.startsWith(`overview${splitter}${objectName}`)); + parseString(result, next); + }, + ], + (err, json) => { + if (errorDescription) { + assert.strictEqual(err.code, 400); + assert(err.is.InvalidArgument); + assert(err.description.indexOf(errorDescription) > -1); + return cb(); } - cb(objectName, testUploadId); - }); - }); + + assert.ifError(err, 'Error initiating MPU'); + + // Need to build request in here since do not have uploadId + // until here + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partReqParams = { + bucketName, + namespace, + objectKey: objectName, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectName}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + 
}; + const partReq = new DummyRequest(partReqParams, body1); + return objectPutPart(authInfo, partReq, undefined, log, err => { + assert.strictEqual(err, null); + if ( + bucketLoc !== awsLocation && + mpuLoc !== awsLocation && + bucketLoc !== awsLocationMismatch && + mpuLoc !== awsLocationMismatch + ) { + const keysInMPUkeyMap = []; + metadata.keyMaps.get(mpuBucket).forEach((val, key) => { + keysInMPUkeyMap.push(key); + }); + const sortedKeyMap = keysInMPUkeyMap.sort(a => { + if (a.slice(0, 8) === 'overview') { + return -1; + } + return 0; + }); + const partKey = sortedKeyMap[1]; + const partETag = metadata.keyMaps.get(mpuBucket).get(partKey)['content-md5']; + assert.strictEqual(keysInMPUkeyMap.length, 2); + assert.strictEqual(partETag, calculatedHash1); + } + cb(objectName, testUploadId); + }); + } + ); } function listAndAbort(uploadId, calculatedHash2, objectName, location, done) { - const awsBucket = config.locationConstraints[location]. - details.bucketName; + const awsBucket = config.locationConstraints[location].details.bucketName; const params = { Bucket: awsBucket, Key: objectName, @@ -166,15 +168,17 @@ function listAndAbort(uploadId, calculatedHash2, objectName, location, done) { assert.strictEqual(`"${calculatedHash2}"`, data.Parts[0].ETag); } s3.abortMultipartUpload(params, err => { - assert.equal(err, null, `Error aborting MPU: ${err}. ` + - `You must abort MPU with upload ID ${uploadId} manually.`); + assert.equal( + err, + null, + `Error aborting MPU: ${err}. ` + `You must abort MPU with upload ID ${uploadId} manually.` + ); done(); }); }); } -describeSkipIfE2E('objectPutPart API with multiple backends', -function testSuite() { +describeSkipIfE2E('objectPutPart API with multiple backends', function testSuite() { this.timeout(50000); beforeEach(() => { @@ -199,40 +203,35 @@ function testSuite() { }); it('should put a part to AWS based on mpu location', done => { - putPart(fileLocation, awsLocation, 'localhost', - (objectName, uploadId) => { + putPart(fileLocation, awsLocation, 'localhost', (objectName, uploadId) => { assert.deepStrictEqual(ds, []); listAndAbort(uploadId, null, objectName, awsLocation, done); }); }); - it('should replace part if two parts uploaded with same part number to AWS', - done => { - putPart(fileLocation, awsLocation, 'localhost', - (objectName, uploadId) => { + it('should replace part if two parts uploaded with same part number to AWS', done => { + putPart(fileLocation, awsLocation, 'localhost', (objectName, uploadId) => { assert.deepStrictEqual(ds, []); const partReqParams = { bucketName, namespace, objectKey: objectName, - headers: { 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-meta-scal-location-constraint': awsLocation }, + headers: { host: `${bucketName}.s3.amazonaws.com`, 'x-amz-meta-scal-location-constraint': awsLocation }, url: `/${objectName}?partNumber=1&uploadId=${uploadId}`, query: { - partNumber: '1', uploadId, + partNumber: '1', + uploadId, }, }; const partReq = new DummyRequest(partReqParams, body2); objectPutPart(authInfo, partReq, undefined, log, err => { assert.equal(err, null, `Error putting second part: ${err}`); - listAndAbort(uploadId, calculatedHash2, - objectName, awsLocation, done); + listAndAbort(uploadId, calculatedHash2, objectName, awsLocation, done); }); }); }); - it('should upload part based on mpu location even if part ' + - 'location constraint is specified ', done => { + it('should upload part based on mpu location even if part ' + 'location constraint is specified ', done => { putPart(fileLocation, memLocation, 
'localhost', () => { assert.deepStrictEqual(ds[1].value, body1); done(); @@ -254,29 +253,23 @@ function testSuite() { }); it('should put a part to AWS based on bucket location', done => { - putPart(awsLocation, null, 'localhost', - (objectName, uploadId) => { + putPart(awsLocation, null, 'localhost', (objectName, uploadId) => { assert.deepStrictEqual(ds, []); listAndAbort(uploadId, null, objectName, awsLocation, done); }); }); - it('should put a part to AWS based on bucket location with bucketMatch ' + - 'set to true', done => { - putPart(null, awsLocation, 'localhost', - (objectName, uploadId) => { + it('should put a part to AWS based on bucket location with bucketMatch ' + 'set to true', done => { + putPart(null, awsLocation, 'localhost', (objectName, uploadId) => { assert.deepStrictEqual(ds, []); listAndAbort(uploadId, null, objectName, awsLocation, done); }); }); - it('should put a part to AWS based on bucket location with bucketMatch ' + - 'set to false', done => { - putPart(null, awsLocationMismatch, 'localhost', - (objectName, uploadId) => { + it('should put a part to AWS based on bucket location with bucketMatch ' + 'set to false', done => { + putPart(null, awsLocationMismatch, 'localhost', (objectName, uploadId) => { assert.deepStrictEqual(ds, []); - listAndAbort(uploadId, null, `${bucketName}/${objectName}`, - awsLocationMismatch, done); + listAndAbort(uploadId, null, `${bucketName}/${objectName}`, awsLocationMismatch, done); }); }); @@ -287,28 +280,28 @@ function testSuite() { }); }); - it('should store a part even if the MPU was initiated on legacy version', - done => { - putPart('scality-internal-mem', null, 'localhost', - (objectKey, uploadId) => { + it('should store a part even if the MPU was initiated on legacy version', done => { + putPart('scality-internal-mem', null, 'localhost', (objectKey, uploadId) => { const mputOverviewKey = _getOverviewKey(objectKey, uploadId); - mdWrapper.getObjectMD(mpuBucket, mputOverviewKey, {}, log, - (err, res) => { + mdWrapper.getObjectMD(mpuBucket, mputOverviewKey, {}, log, (err, res) => { // remove location constraint to mimic legacy behvior // eslint-disable-next-line no-param-reassign res.controllingLocationConstraint = undefined; const md5Hash = crypto.createHash('md5'); const bufferBody = Buffer.from(body1); const calculatedHash = md5Hash.update(bufferBody).digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, - query: { partNumber: '1', uploadId }, - calculatedHash, - }, body1); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, + query: { partNumber: '1', uploadId }, + calculatedHash, + }, + body1 + ); objectPutPart(authInfo, partRequest, undefined, log, err => { assert.strictEqual(err, null); const keysInMPUkeyMap = []; @@ -322,8 +315,7 @@ function testSuite() { return 0; }); const partKey = sortedKeyMap[1]; - const partETag = metadata.keyMaps.get(mpuBucket) - .get(partKey)['content-md5']; + const partETag = metadata.keyMaps.get(mpuBucket).get(partKey)['content-md5']; assert.strictEqual(keysInMPUkeyMap.length, 2); assert.strictEqual(partETag, calculatedHash); done(); diff --git a/tests/multipleBackend/routes/routeBackbeat.js b/tests/multipleBackend/routes/routeBackbeat.js index ff6a624254..23c3a1ee97 100644 --- 
a/tests/multipleBackend/routes/routeBackbeat.js +++ b/tests/multipleBackend/routes/routeBackbeat.js @@ -8,8 +8,7 @@ const versionIdUtils = versioning.VersionID; const { makeid } = require('../../unit/helpers'); const { makeRequest, makeBackbeatRequest } = require('../../functional/raw-node/utils/makeRequest'); -const BucketUtility = - require('../../functional/aws-node-sdk/lib/utility/bucket-util'); +const BucketUtility = require('../../functional/aws-node-sdk/lib/utility/bucket-util'); const { describeSkipIfNotMultipleOrCeph, itSkipCeph, @@ -18,8 +17,7 @@ const { getAzureContainerName, getAzureClient, } = require('../../functional/aws-node-sdk/test/multipleBackend/utils'); -const { getRealAwsConfig } = - require('../../functional/aws-node-sdk/test/support/awsConfig'); +const { getRealAwsConfig } = require('../../functional/aws-node-sdk/test/support/awsConfig'); const { getCredentials } = require('../../functional/aws-node-sdk/test/support/credentials'); const { config } = require('../../../lib/Config'); @@ -49,15 +47,12 @@ const testArn = 'aws::iam:123456789012:user/bart'; const testKey = 'testkey'; const testKeyUTF8 = '䆩鈁櫨㟔罳'; const testData = 'testkey data'; -const testDataMd5 = crypto.createHash('md5') - .update(testData, 'utf-8') - .digest('hex'); +const testDataMd5 = crypto.createHash('md5').update(testData, 'utf-8').digest('hex'); const emptyContentsMd5 = 'd41d8cd98f00b204e9800998ecf8427e'; const testMd = { 'md-model-version': 2, 'owner-display-name': 'Bart', - 'owner-id': ('79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be'), + 'owner-id': '79a59df900b949e55d96a1e698fbaced' + 'fd6e09d98eacf8f8d5218e7cd47ef2be', 'last-modified': '2017-05-15T20:32:40.032Z', 'content-length': testData.length, 'content-md5': testDataMd5, @@ -66,18 +61,18 @@ const testMd = { 'x-amz-server-side-encryption': '', 'x-amz-server-side-encryption-aws-kms-key-id': '', 'x-amz-server-side-encryption-customer-algorithm': '', - 'location': null, - 'acl': { + location: null, + acl: { Canned: 'private', FULL_CONTROL: [], WRITE_ACP: [], READ: [], READ_ACP: [], }, - 'nullVersionId': '99999999999999999999RG001 ', - 'isDeleteMarker': false, - 'versionId': '98505119639965999999RG001 ', - 'replicationInfo': { + nullVersionId: '99999999999999999999RG001 ', + isDeleteMarker: false, + versionId: '98505119639965999999RG001 ', + replicationInfo: { status: 'COMPLETED', backends: [{ site: 'zenko', status: 'PENDING' }], content: ['DATA', 'METADATA'], @@ -88,8 +83,7 @@ const testMd = { const nonVersionedTestMd = { 'owner-display-name': 'Bart', - 'owner-id': ('79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be'), + 'owner-id': '79a59df900b949e55d96a1e698fbaced' + 'fd6e09d98eacf8f8d5218e7cd47ef2be', 'content-length': testData.length, 'content-md5': testDataMd5, 'x-amz-version-id': 'null', @@ -98,19 +92,19 @@ const nonVersionedTestMd = { 'x-amz-server-side-encryption': '', 'x-amz-server-side-encryption-aws-kms-key-id': '', 'x-amz-server-side-encryption-customer-algorithm': '', - 'acl': { + acl: { Canned: 'private', FULL_CONTROL: [], WRITE_ACP: [], READ: [], READ_ACP: [], }, - 'location': null, - 'isNull': '', - 'nullVersionId': '', - 'isDeleteMarker': false, - 'tags': {}, - 'replicationInfo': { + location: null, + isNull: '', + nullVersionId: '', + isDeleteMarker: false, + tags: {}, + replicationInfo: { status: '', backends: [], content: [], @@ -121,32 +115,38 @@ const nonVersionedTestMd = { dataStoreVersionId: '', isNFS: null, }, - 'dataStoreName': 'us-east-1', + dataStoreName: 'us-east-1', 
'last-modified': '2018-12-18T01:22:15.986Z', 'md-model-version': 3, }; function checkObjectData(s3, bucket, objectKey, dataValue, done) { - s3.getObject({ - Bucket: bucket, - Key: objectKey, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Body.toString(), dataValue); - done(); - }); + s3.getObject( + { + Bucket: bucket, + Key: objectKey, + }, + (err, data) => { + assert.ifError(err); + assert.strictEqual(data.Body.toString(), dataValue); + done(); + } + ); } function checkVersionData(s3, bucket, objectKey, versionId, dataValue, done) { - return s3.getObject({ - Bucket: bucket, - Key: objectKey, - VersionId: versionId, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Body.toString(), dataValue); - return done(); - }); + return s3.getObject( + { + Bucket: bucket, + Key: objectKey, + VersionId: versionId, + }, + (err, data) => { + assert.ifError(err); + assert.strictEqual(data.Body.toString(), dataValue); + return done(); + } + ); } function updateStorageClass(data, storageClass) { @@ -167,46 +167,58 @@ function updateStorageClass(data, storageClass) { describeSkipIfNotMultipleOrCeph('backbeat DELETE routes', () => { it('abort MPU', done => { const awsKey = 'backbeat-mpu-test'; - async.waterfall([ - next => - awsClient.createMultipartUpload({ - Bucket: awsBucket, - Key: awsKey, - }, next), - (response, next) => { - const { UploadId } = response; - makeBackbeatRequest({ - method: 'DELETE', - bucket: awsBucket, - objectKey: awsKey, - resourceType: 'multiplebackenddata', - queryObj: { operation: 'abortmpu' }, - headers: { - 'x-scal-upload-id': UploadId, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-storage-class': awsLocation, - }, - authCredentials: backbeatAuthCredentials, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - assert.deepStrictEqual(JSON.parse(response.body), {}); - return next(null, UploadId); - }); - }, (UploadId, next) => - awsClient.listMultipartUploads({ - Bucket: awsBucket, - }, (err, response) => { - assert.ifError(err); - const hasOngoingUpload = - response.Uploads.some(upload => (upload === UploadId)); - assert(!hasOngoingUpload); - return next(); - }), - ], err => { - assert.ifError(err); - done(); - }); + async.waterfall( + [ + next => + awsClient.createMultipartUpload( + { + Bucket: awsBucket, + Key: awsKey, + }, + next + ), + (response, next) => { + const { UploadId } = response; + makeBackbeatRequest( + { + method: 'DELETE', + bucket: awsBucket, + objectKey: awsKey, + resourceType: 'multiplebackenddata', + queryObj: { operation: 'abortmpu' }, + headers: { + 'x-scal-upload-id': UploadId, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-storage-class': awsLocation, + }, + authCredentials: backbeatAuthCredentials, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + assert.deepStrictEqual(JSON.parse(response.body), {}); + return next(null, UploadId); + } + ); + }, + (UploadId, next) => + awsClient.listMultipartUploads( + { + Bucket: awsBucket, + }, + (err, response) => { + assert.ifError(err); + const hasOngoingUpload = response.Uploads.some(upload => upload === UploadId); + assert(!hasOngoingUpload); + return next(); + } + ), + ], + err => { + assert.ifError(err); + done(); + } + ); }); }); @@ -214,13 +226,15 @@ function getMetadataToPut(putDataResponse) { const mdToPut = Object.assign({}, testMd); // Reproduce what backbeat does to update target metadata mdToPut.location = JSON.parse(putDataResponse.body); - 
['x-amz-server-side-encryption', - 'x-amz-server-side-encryption-aws-kms-key-id', - 'x-amz-server-side-encryption-customer-algorithm'].forEach(headerName => { - if (putDataResponse.headers[headerName]) { - mdToPut[headerName] = putDataResponse.headers[headerName]; - } - }); + [ + 'x-amz-server-side-encryption', + 'x-amz-server-side-encryption-aws-kms-key-id', + 'x-amz-server-side-encryption-customer-algorithm', + ].forEach(headerName => { + if (putDataResponse.headers[headerName]) { + mdToPut[headerName] = putDataResponse.headers[headerName]; + } + }); return mdToPut; } @@ -229,45 +243,60 @@ describe('backbeat routes', () => { let s3; before(done => { - bucketUtil = new BucketUtility( - 'default', { signatureVersion: 'v4' }); + bucketUtil = new BucketUtility('default', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - bucketUtil.emptyManyIfExists([TEST_BUCKET, TEST_ENCRYPTED_BUCKET, NONVERSIONED_BUCKET, - VERSION_SUSPENDED_BUCKET]) + bucketUtil + .emptyManyIfExists([TEST_BUCKET, TEST_ENCRYPTED_BUCKET, NONVERSIONED_BUCKET, VERSION_SUSPENDED_BUCKET]) .then(() => s3.createBucket({ Bucket: TEST_BUCKET }).promise()) - .then(() => s3.putBucketVersioning( - { - Bucket: TEST_BUCKET, - VersioningConfiguration: { Status: 'Enabled' }, - }).promise()) - .then(() => s3.createBucket({ - Bucket: NONVERSIONED_BUCKET, - }).promise()) + .then(() => + s3 + .putBucketVersioning({ + Bucket: TEST_BUCKET, + VersioningConfiguration: { Status: 'Enabled' }, + }) + .promise() + ) + .then(() => + s3 + .createBucket({ + Bucket: NONVERSIONED_BUCKET, + }) + .promise() + ) .then(() => s3.createBucket({ Bucket: VERSION_SUSPENDED_BUCKET }).promise()) - .then(() => s3.putBucketVersioning( - { - Bucket: VERSION_SUSPENDED_BUCKET, - VersioningConfiguration: { Status: 'Suspended' }, - }).promise()) + .then(() => + s3 + .putBucketVersioning({ + Bucket: VERSION_SUSPENDED_BUCKET, + VersioningConfiguration: { Status: 'Suspended' }, + }) + .promise() + ) .then(() => s3.createBucket({ Bucket: TEST_ENCRYPTED_BUCKET }).promise()) - .then(() => s3.putBucketVersioning( - { - Bucket: TEST_ENCRYPTED_BUCKET, - VersioningConfiguration: { Status: 'Enabled' }, - }).promise()) - .then(() => s3.putBucketEncryption( - { - Bucket: TEST_ENCRYPTED_BUCKET, - ServerSideEncryptionConfiguration: { - Rules: [ - { - ApplyServerSideEncryptionByDefault: { - SSEAlgorithm: 'AES256', + .then(() => + s3 + .putBucketVersioning({ + Bucket: TEST_ENCRYPTED_BUCKET, + VersioningConfiguration: { Status: 'Enabled' }, + }) + .promise() + ) + .then(() => + s3 + .putBucketEncryption({ + Bucket: TEST_ENCRYPTED_BUCKET, + ServerSideEncryptionConfiguration: { + Rules: [ + { + ApplyServerSideEncryptionByDefault: { + SSEAlgorithm: 'AES256', + }, }, - }, - ], - }, - }).promise()) + ], + }, + }) + .promise() + ) .then(() => done()) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); @@ -276,16 +305,15 @@ describe('backbeat routes', () => { }); after(() => - bucketUtil.empty(TEST_BUCKET) + bucketUtil + .empty(TEST_BUCKET) .then(() => s3.deleteBucket({ Bucket: TEST_BUCKET }).promise()) .then(() => bucketUtil.empty(TEST_ENCRYPTED_BUCKET)) .then(() => s3.deleteBucket({ Bucket: TEST_ENCRYPTED_BUCKET }).promise()) .then(() => bucketUtil.empty(NONVERSIONED_BUCKET)) - .then(() => - s3.deleteBucket({ Bucket: NONVERSIONED_BUCKET }).promise()) + .then(() => s3.deleteBucket({ Bucket: NONVERSIONED_BUCKET }).promise()) .then(() => bucketUtil.empty(VERSION_SUSPENDED_BUCKET)) - .then(() => - s3.deleteBucket({ Bucket: VERSION_SUSPENDED_BUCKET }).promise()) + .then(() => 
s3.deleteBucket({ Bucket: VERSION_SUSPENDED_BUCKET }).promise()) ); describe('null version', () => { @@ -308,1384 +336,1744 @@ describe('backbeat routes', () => { } beforeEach(() => - bucketUtil.emptyIfExists(BUCKET_FOR_NULL_VERSION) + bucketUtil + .emptyIfExists(BUCKET_FOR_NULL_VERSION) .then(() => s3.createBucket({ Bucket: BUCKET_FOR_NULL_VERSION }).promise()) ); afterEach(() => - bucketUtil.empty(BUCKET_FOR_NULL_VERSION) + bucketUtil + .empty(BUCKET_FOR_NULL_VERSION) .then(() => s3.deleteBucket({ Bucket: BUCKET_FOR_NULL_VERSION }).promise()) ); it('should update metadata of a current null version', done => { let objMD; - return async.series({ - putObject: next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - enableVersioningSource: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + return async.series( + { + putObject: next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + enableVersioningSource: next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + putMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + getMetadataAfter: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + next + ), + listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - putMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - headObject: next => s3.headObject( - { Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - getMetadataAfter: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, next), - listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObject; - assert.strictEqual(headObjectRes.VersionId, 'null'); - 
assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = results.headObject; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const getMetadataAfterRes = results.getMetadataAfter; - const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body; - const expectedMd = JSON.parse(objMD); - expectedMd.isNull = true; // TODO remove the line once CLDSRV-509 is fixed - if (!isNullVersionCompatMode) { - expectedMd.isNull2 = true; // TODO remove the line once CLDSRV-509 is fixed - } - assert.deepStrictEqual(JSON.parse(objMDAfter), expectedMd); + const getMetadataAfterRes = results.getMetadataAfter; + const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body; + const expectedMd = JSON.parse(objMD); + expectedMd.isNull = true; // TODO remove the line once CLDSRV-509 is fixed + if (!isNullVersionCompatMode) { + expectedMd.isNull2 = true; // TODO remove the line once CLDSRV-509 is fixed + } + assert.deepStrictEqual(JSON.parse(objMDAfter), expectedMd); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); + assert.strictEqual(Versions.length, 1); - const [currentVersion] = Versions; - assertVersionIsNullAndUpdated(currentVersion); - return done(); - }); + const [currentVersion] = Versions; + assertVersionIsNullAndUpdated(currentVersion); + return done(); + } + ); }); it('should update metadata of a non-current null version', done => { let objMD; let expectedVersionId; - return async.series({ - putObjectInitial: next => s3.putObject( - { Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - enableVersioning: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectAgain: next => s3.putObject( - { Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); - }), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + return async.series( + { + putObjectInitial: next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + enableVersioning: next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObjectAgain: next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + putMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next 
+ ), + headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + getMetadataAfter: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + next + ), + listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - putMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - getMetadataAfter: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, next), - listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObject; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = results.headObject; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const getMetadataAfterRes = results.getMetadataAfter; - const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body; - assert.deepStrictEqual(JSON.parse(objMDAfter), JSON.parse(objMD)); + const getMetadataAfterRes = results.getMetadataAfter; + const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body; + assert.deepStrictEqual(JSON.parse(objMDAfter), JSON.parse(objMD)); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); - const currentVersion = Versions.find(v => v.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); + assert.strictEqual(Versions.length, 2); + const currentVersion = Versions.find(v => v.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); - const nonCurrentVersion = Versions.find(v => !v.IsLatest); - assertVersionIsNullAndUpdated(nonCurrentVersion); - return done(); - }); + const nonCurrentVersion = Versions.find(v => !v.IsLatest); + assertVersionIsNullAndUpdated(nonCurrentVersion); + return done(); + } + ); }); it('should update metadata of a suspended null version', done => { let objMD; - return async.series({ - suspendVersioning: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, next), - putObject: next => s3.putObject( - { Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioning: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, 
(err, data) => { + return async.series( + { + suspendVersioning: next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + putObject: next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), + enableVersioning: next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + putUpdatedMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + getMetadataAfter: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + next + ), + listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), + }, + (err, results) => { if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); + return done(err); } - objMD = result; - return next(); - }), - putUpdatedMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - getMetadataAfter: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, next), - listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObject; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = results.headObject; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const getMetadataAfterRes = results.getMetadataAfter; - const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body; - assert.deepStrictEqual(JSON.parse(objMDAfter), JSON.parse(objMD)); + const getMetadataAfterRes = results.getMetadataAfter; + const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body; + assert.deepStrictEqual(JSON.parse(objMDAfter), JSON.parse(objMD)); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); + assert.strictEqual(Versions.length, 1); - const [currentVersion] = Versions; - assertVersionIsNullAndUpdated(currentVersion); - return done(); 
- }); + const [currentVersion] = Versions; + assertVersionIsNullAndUpdated(currentVersion); + return done(); + } + ); }); it('should update metadata of a suspended null version with internal version id', done => { let objMD; - return async.series({ - suspendVersioning: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, next), - putObject: next => s3.putObject( - { Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioning: next => s3.putBucketVersioning( - { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectTagging: next => s3.putObjectTagging({ - Bucket: bucket, Key: keyName, VersionId: 'null', - Tagging: { TagSet: [{ Key: 'key1', Value: 'value1' }] }, - }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + return async.series( + { + suspendVersioning: next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + putObject: next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next), + enableVersioning: next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObjectTagging: next => + s3.putObjectTagging( + { + Bucket: bucket, + Key: keyName, + VersionId: 'null', + Tagging: { TagSet: [{ Key: 'key1', Value: 'value1' }] }, + }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + putUpdatedMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + getMetadataAfter: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + next + ), + listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - putUpdatedMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - getMetadataAfter: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, next), - listObjectVersions: next => s3.listObjectVersions({ 
Bucket: bucket }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObject; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = results.headObject; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const getMetadataAfterRes = results.getMetadataAfter; - const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body; - assert.deepStrictEqual(JSON.parse(objMDAfter), JSON.parse(objMD)); + const getMetadataAfterRes = results.getMetadataAfter; + const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body; + assert.deepStrictEqual(JSON.parse(objMDAfter), JSON.parse(objMD)); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); + assert.strictEqual(Versions.length, 1); - const [currentVersion] = Versions; - assertVersionIsNullAndUpdated(currentVersion); - return done(); - }); + const [currentVersion] = Versions; + assertVersionIsNullAndUpdated(currentVersion); + return done(); + } + ); }); it('should update metadata of a non-version object', done => { let objMD; - return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + return async.series( + [ + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => s3.headObject({ Bucket: bucket, Key: keyName }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { if (err) { - return next(err); + return done(err); } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[3]; - assert(!headObjectRes.VersionId); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = data[3]; + assert(!headObjectRes.VersionId); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = data[4]; - 
const { DeleteMarkers, Versions } = listObjectVersionsRes; + const listObjectVersionsRes = data[4]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 1); + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 1); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - assertVersionIsNullAndUpdated(currentVersion); - return done(); - }); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionIsNullAndUpdated(currentVersion); + return done(); + } + ); }); it('should create a new null version if versioning suspended and no version', done => { let objMD; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + return async.series( + [ + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => s3.headObject({ Bucket: bucket, Key: keyName }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); + return done(err); } - objMD = result; - return next(); - }), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[5]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = data[5]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = data[6]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; + const listObjectVersionsRes = data[6]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); 
- assert.strictEqual(Versions.length, 1); + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 1); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); - assertVersionIsNullAndUpdated(currentVersion); + assertVersionIsNullAndUpdated(currentVersion); - return done(); - }); + return done(); + } + ); }); it('should create a new null version if versioning suspended and delete marker null version', done => { let objMD; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + return async.series( + [ + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => s3.deleteObject({ Bucket: bucket, Key: keyName }, next), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => s3.headObject({ Bucket: bucket, Key: keyName }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { if (err) { - return next(err); + return done(err); } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - next => s3.deleteObject({ Bucket: bucket, Key: keyName }, next), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[5]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = data[5]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = data[6]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; + const listObjectVersionsRes = data[6]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 1); + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 1); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - 
assertVersionIsNullAndUpdated(currentVersion); - return done(); - }); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionIsNullAndUpdated(currentVersion); + return done(); + } + ); }); it('should create a new null version if versioning suspended and version has version id', done => { let expectedVersionId; let objMD; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + return async.series( + [ + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: null, + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => s3.headObject({ Bucket: bucket, Key: keyName }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { if (err) { - return next(err); + return done(err); } - expectedVersionId = data.VersionId; - return next(); - }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: null, - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[7]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = data[7]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - 
const listObjectVersionsRes = data[8]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; + const listObjectVersionsRes = data[8]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 2); + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 2); - const currentVersion = Versions.find(v => v.IsLatest); - assertVersionIsNullAndUpdated(currentVersion); + const currentVersion = Versions.find(v => v.IsLatest); + assertVersionIsNullAndUpdated(currentVersion); - const nonCurrentVersion = Versions.find(v => !v.IsLatest); - assertVersionHasNotBeenUpdated(nonCurrentVersion, expectedVersionId); + const nonCurrentVersion = Versions.find(v => !v.IsLatest); + assertVersionHasNotBeenUpdated(nonCurrentVersion, expectedVersionId); - // give some time for the async deletes to complete - return setTimeout(() => checkVersionData(s3, bucket, keyName, expectedVersionId, testData, done), - 1000); - }); + // give some time for the async deletes to complete + return setTimeout( + () => checkVersionData(s3, bucket, keyName, expectedVersionId, testData, done), + 1000 + ); + } + ); }); it('should update null version with no version id and versioning suspended', done => { let objMD; - return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + return async.series( + [ + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => s3.headObject({ Bucket: bucket, Key: keyName }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); + return done(err); } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[4]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + 
const headObjectRes = data[4]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = data[5]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 1); + const listObjectVersionsRes = data[5]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 1); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - assertVersionIsNullAndUpdated(currentVersion); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionIsNullAndUpdated(currentVersion); - return done(); - }); + return done(); + } + ); }); it('should update null version if versioning suspended and null version has a version id', done => { let objMD; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + return async.series( + [ + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { if (err) { - return next(err); + return done(err); } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - - const headObjectRes = data[4]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = data[5]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); - assert.strictEqual(DeleteMarkers.length, 0); + const headObjectRes = data[4]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, 
storageClass); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - assertVersionIsNullAndUpdated(currentVersion); - return done(); - }); - }); + const listObjectVersionsRes = data[5]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; + assert.strictEqual(Versions.length, 1); + assert.strictEqual(DeleteMarkers.length, 0); - it('should update null version if versioning suspended and null version has a version id and' + - 'put object afterward', done => { - let objMD; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionIsNullAndUpdated(currentVersion); + return done(); } + ); + }); - const headObjectRes = data[5]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert(!headObjectRes.StorageClass); + it( + 'should update null version if versioning suspended and null version has a version id and' + + 'put object afterward', + done => { + let objMD; + return async.series( + [ + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { + if (err) { + return done(err); + } - const listObjectVersionsRes = data[6]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 1); + const headObjectRes = data[5]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + 
assert(!headObjectRes.StorageClass); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, 'null'); - return done(); - }); - }); + const listObjectVersionsRes = data[6]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 1); - it('should update null version if versioning suspended and null version has a version id and' + - 'put version afterward', done => { - let objMD; - let expectedVersionId; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, 'null'); + return done(); } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); - }), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } + ); + } + ); - const headObjectRes = data[6]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + it( + 'should update null version if versioning suspended and null version has a version id and' + + 'put version afterward', + done => { + let objMD; + let expectedVersionId; + return async.series( + [ + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return 
next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { + if (err) { + return done(err); + } - const listObjectVersionsRes = data[7]; - const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); + const headObjectRes = data[6]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const [currentVersion] = Versions.filter(v => v.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); + const listObjectVersionsRes = data[7]; + const { Versions } = listObjectVersionsRes; + assert.strictEqual(Versions.length, 2); - const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); - assertVersionIsNullAndUpdated(nonCurrentVersion); - return done(); - }); - }); + const [currentVersion] = Versions.filter(v => v.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); + + const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); + assertVersionIsNullAndUpdated(nonCurrentVersion); + return done(); + } + ); + } + ); it('should update non-current null version if versioning suspended', done => { let expectedVersionId; let objMD; - return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); - }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + return async.series( + [ + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { if (err) { - return next(err); + return done(err); } - const { 
error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[6]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = data[6]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = data[7]; - const deleteMarkers = listObjectVersionsRes.DeleteMarkers; - assert.strictEqual(deleteMarkers.length, 0); - const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); + const listObjectVersionsRes = data[7]; + const deleteMarkers = listObjectVersionsRes.DeleteMarkers; + assert.strictEqual(deleteMarkers.length, 0); + const { Versions } = listObjectVersionsRes; + assert.strictEqual(Versions.length, 2); - const [currentVersion] = Versions.filter(v => v.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); + const [currentVersion] = Versions.filter(v => v.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); - const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); - assertVersionIsNullAndUpdated(nonCurrentVersion); + const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); + assertVersionIsNullAndUpdated(nonCurrentVersion); - return done(); - }); + return done(); + } + ); }); it('should update current null version if versioning suspended', done => { let objMD; let expectedVersionId; - return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); - }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: expectedVersionId }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + return async.series( + [ + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: expectedVersionId }, next), + next => + makeBackbeatRequest( + { + 
method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { if (err) { - return next(err); + return done(err); } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[7]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = data[7]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = data[8]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); - assert.strictEqual(DeleteMarkers.length, 0); + const listObjectVersionsRes = data[8]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; + assert.strictEqual(Versions.length, 1); + assert.strictEqual(DeleteMarkers.length, 0); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - assertVersionIsNullAndUpdated(currentVersion); - return done(); - }); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionIsNullAndUpdated(currentVersion); + return done(); + } + ); }); - it('should update current null version if versioning suspended and put a null version ' + - 'afterwards', done => { - let objMD; - let deletedVersionId; - return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - deletedVersionId = data.VersionId; - return next(); - }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: deletedVersionId }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = 
result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } + it( + 'should update current null version if versioning suspended and put a null version ' + 'afterwards', + done => { + let objMD; + let deletedVersionId; + return async.series( + [ + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + deletedVersionId = data.VersionId; + return next(); + }), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: deletedVersionId }, next), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { + if (err) { + return done(err); + } - const headObjectRes = data[8]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert(!headObjectRes.StorageClass); + const headObjectRes = data[8]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert(!headObjectRes.StorageClass); - const listObjectVersionsRes = data[9]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 1); + const listObjectVersionsRes = data[9]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 1); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, 'null'); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, 'null'); - return done(); - }); - }); + return done(); + } + ); + } + ); it('should update current null version if versioning suspended and put a version afterwards', done => { let objMD; let deletedVersionId; let expectedVersionId; - return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next 
=> s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - deletedVersionId = data.VersionId; - return next(); - }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: deletedVersionId }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + return async.series( + [ + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + deletedVersionId = data.VersionId; + return next(); + }), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: deletedVersionId }, next), + next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + } + ), + next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, + next + ), + next => + s3.putBucketVersioning( + { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + next => + s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], + (err, data) => { if (err) { - return next(err); + return done(err); } - expectedVersionId = data.VersionId; - return next(); - }), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[9]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - 
assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = data[9]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = data[10]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 2); + const listObjectVersionsRes = data[10]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 2); - const [currentVersion] = Versions.filter(v => v.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); + const [currentVersion] = Versions.filter(v => v.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); - const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); - assertVersionIsNullAndUpdated(nonCurrentVersion); + const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); + assertVersionIsNullAndUpdated(nonCurrentVersion); - return done(); - }); + return done(); + } + ); }); }); describe('backbeat PUT routes', () => { - describe('PUT data + metadata should create a new complete object', - () => { - [{ - caption: 'with ascii test key', - key: testKey, encodedKey: testKey, - }, - { - caption: 'with UTF8 key', - key: testKeyUTF8, encodedKey: encodeURI(testKeyUTF8), - }, - { - caption: 'with percents and spaces encoded as \'+\' in key', - key: '50% full or 50% empty', - encodedKey: '50%25+full+or+50%25+empty', - }, - { - caption: 'with legacy API v1', - key: testKey, encodedKey: testKey, - legacyAPI: true, - }, - { - caption: 'with encryption configuration', - key: testKey, encodedKey: testKey, - encryption: true, - }, - { - caption: 'with encryption configuration and legacy API v1', - key: testKey, encodedKey: testKey, - encryption: true, - legacyAPI: true, - }].concat([ - `${testKeyUTF8}/${testKeyUTF8}/%42/mykey`, - 'Pâtisserie=中文-español-English', - 'notes/spring/1.txt', - 'notes/spring/2.txt', - 'notes/spring/march/1.txt', - 'notes/summer/1.txt', - 'notes/summer/2.txt', - 'notes/summer/august/1.txt', - 'notes/year.txt', - 'notes/yore.rs', - 'notes/zaphod/Beeblebrox.txt', - ].map(key => ({ - key, encodedKey: encodeURI(key), - caption: `with key ${key}`, - }))) - .forEach(testCase => { - it(testCase.caption, done => { - async.waterfall([next => { - const queryObj = testCase.legacyAPI ? {} : { v2: '' }; - makeBackbeatRequest({ - method: 'PUT', bucket: testCase.encryption ? - TEST_ENCRYPTED_BUCKET : TEST_BUCKET, - objectKey: testCase.encodedKey, - resourceType: 'data', - queryObj, - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const newMd = getMetadataToPut(response); - if (testCase.encryption && !testCase.legacyAPI) { - assert.strictEqual(typeof newMd.location[0].cryptoScheme, 'number'); - assert.strictEqual(typeof newMd.location[0].cipheredDataKey, 'string'); - } else { - // if no encryption or legacy API, data should not be encrypted - assert.strictEqual(newMd.location[0].cryptoScheme, undefined); - assert.strictEqual(newMd.location[0].cipheredDataKey, undefined); - } - makeBackbeatRequest({ - method: 'PUT', bucket: testCase.encryption ? 
- TEST_ENCRYPTED_BUCKET : TEST_BUCKET, - objectKey: testCase.encodedKey, - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - checkObjectData( - s3, testCase.encryption ? TEST_ENCRYPTED_BUCKET : TEST_BUCKET, - testCase.key, testData, next); - }], err => { - assert.ifError(err); - done(); + describe('PUT data + metadata should create a new complete object', () => { + [ + { + caption: 'with ascii test key', + key: testKey, + encodedKey: testKey, + }, + { + caption: 'with UTF8 key', + key: testKeyUTF8, + encodedKey: encodeURI(testKeyUTF8), + }, + { + caption: "with percents and spaces encoded as '+' in key", + key: '50% full or 50% empty', + encodedKey: '50%25+full+or+50%25+empty', + }, + { + caption: 'with legacy API v1', + key: testKey, + encodedKey: testKey, + legacyAPI: true, + }, + { + caption: 'with encryption configuration', + key: testKey, + encodedKey: testKey, + encryption: true, + }, + { + caption: 'with encryption configuration and legacy API v1', + key: testKey, + encodedKey: testKey, + encryption: true, + legacyAPI: true, + }, + ] + .concat( + [ + `${testKeyUTF8}/${testKeyUTF8}/%42/mykey`, + 'Pâtisserie=中文-español-English', + 'notes/spring/1.txt', + 'notes/spring/2.txt', + 'notes/spring/march/1.txt', + 'notes/summer/1.txt', + 'notes/summer/2.txt', + 'notes/summer/august/1.txt', + 'notes/year.txt', + 'notes/yore.rs', + 'notes/zaphod/Beeblebrox.txt', + ].map(key => ({ + key, + encodedKey: encodeURI(key), + caption: `with key ${key}`, + })) + ) + .forEach(testCase => { + it(testCase.caption, done => { + async.waterfall( + [ + next => { + const queryObj = testCase.legacyAPI ? {} : { v2: '' }; + makeBackbeatRequest( + { + method: 'PUT', + bucket: testCase.encryption ? TEST_ENCRYPTED_BUCKET : TEST_BUCKET, + objectKey: testCase.encodedKey, + resourceType: 'data', + queryObj, + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + const newMd = getMetadataToPut(response); + if (testCase.encryption && !testCase.legacyAPI) { + assert.strictEqual(typeof newMd.location[0].cryptoScheme, 'number'); + assert.strictEqual(typeof newMd.location[0].cipheredDataKey, 'string'); + } else { + // if no encryption or legacy API, data should not be encrypted + assert.strictEqual(newMd.location[0].cryptoScheme, undefined); + assert.strictEqual(newMd.location[0].cipheredDataKey, undefined); + } + makeBackbeatRequest( + { + method: 'PUT', + bucket: testCase.encryption ? TEST_ENCRYPTED_BUCKET : TEST_BUCKET, + objectKey: testCase.encodedKey, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + checkObjectData( + s3, + testCase.encryption ? 
TEST_ENCRYPTED_BUCKET : TEST_BUCKET, + testCase.key, + testData, + next + ); + }, + ], + err => { + assert.ifError(err); + done(); + } + ); }); }); - }); }); it('should PUT metadata for a non-versioned bucket', done => { const bucket = NONVERSIONED_BUCKET; const objectKey = 'non-versioned-key'; - async.waterfall([ - next => - makeBackbeatRequest({ - method: 'PUT', - bucket, - objectKey, - resourceType: 'data', - queryObj: { v2: '' }, - headers: { - 'content-length': testData.length, - 'content-md5': testDataMd5, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData, - }, (err, response) => { - assert.ifError(err); - const metadata = Object.assign({}, nonVersionedTestMd, { - location: JSON.parse(response.body), - }); - return next(null, metadata); - }), - (metadata, next) => - makeBackbeatRequest({ - method: 'PUT', - bucket, - objectKey, - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(metadata), - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - next(); - }), - next => - s3.headObject({ - Bucket: bucket, - Key: objectKey, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.StorageClass, 'awsbackend'); - next(); - }), - next => checkObjectData(s3, bucket, objectKey, testData, next), - ], done); + async.waterfall( + [ + next => + makeBackbeatRequest( + { + method: 'PUT', + bucket, + objectKey, + resourceType: 'data', + queryObj: { v2: '' }, + headers: { + 'content-length': testData.length, + 'content-md5': testDataMd5, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + (err, response) => { + assert.ifError(err); + const metadata = Object.assign({}, nonVersionedTestMd, { + location: JSON.parse(response.body), + }); + return next(null, metadata); + } + ), + (metadata, next) => + makeBackbeatRequest( + { + method: 'PUT', + bucket, + objectKey, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(metadata), + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + next(); + } + ), + next => + s3.headObject( + { + Bucket: bucket, + Key: objectKey, + }, + (err, data) => { + assert.ifError(err); + assert.strictEqual(data.StorageClass, 'awsbackend'); + next(); + } + ), + next => checkObjectData(s3, bucket, objectKey, testData, next), + ], + done + ); }); - it('PUT metadata with "x-scal-replication-content: METADATA"' + - 'header should replicate metadata only', done => { - async.waterfall([next => { - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, - objectKey: 'test-updatemd-key', - resourceType: 'data', - queryObj: { v2: '' }, - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData, - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const newMd = getMetadataToPut(response); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, - objectKey: 'test-updatemd-key', - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // Don't update the sent metadata since it is sent by - // backbeat as received from the replication queue, - // without updated data location or 
encryption info - // (since that info is not known by backbeat) - const newMd = Object.assign({}, testMd); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, - objectKey: 'test-updatemd-key', - resourceType: 'metadata', - headers: { 'x-scal-replication-content': 'METADATA' }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - checkObjectData(s3, TEST_ENCRYPTED_BUCKET, 'test-updatemd-key', - testData, next); - }], err => { - assert.ifError(err); - done(); - }); - }); + it( + 'PUT metadata with "x-scal-replication-content: METADATA"' + 'header should replicate metadata only', + done => { + async.waterfall( + [ + next => { + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_ENCRYPTED_BUCKET, + objectKey: 'test-updatemd-key', + resourceType: 'data', + queryObj: { v2: '' }, + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + const newMd = getMetadataToPut(response); + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_ENCRYPTED_BUCKET, + objectKey: 'test-updatemd-key', + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // Don't update the sent metadata since it is sent by + // backbeat as received from the replication queue, + // without updated data location or encryption info + // (since that info is not known by backbeat) + const newMd = Object.assign({}, testMd); + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_ENCRYPTED_BUCKET, + objectKey: 'test-updatemd-key', + resourceType: 'metadata', + headers: { 'x-scal-replication-content': 'METADATA' }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + checkObjectData(s3, TEST_ENCRYPTED_BUCKET, 'test-updatemd-key', testData, next); + }, + ], + err => { + assert.ifError(err); + done(); + } + ); + } + ); itSkipCeph('should PUT tags for a non-versioned bucket', function test(done) { this.timeout(10000); const bucket = NONVERSIONED_BUCKET; const awsKey = uuidv4(); - async.waterfall([ - next => - makeBackbeatRequest({ - method: 'PUT', - bucket, - objectKey: awsKey, - resourceType: 'multiplebackenddata', - queryObj: { operation: 'putobject' }, - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-storage-class': awsLocation, - 'x-scal-tags': JSON.stringify({ Key1: 'Value1' }), - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - return next(); - }), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.deepStrictEqual(data.TagSet, [{ - Key: 'Key1', - Value: 'Value1' - }]); - next(); - }), - ], done); + async.waterfall( + [ + next => + makeBackbeatRequest( + { + method: 'PUT', + bucket, + objectKey: awsKey, + resourceType: 'multiplebackenddata', + queryObj: { operation: 'putobject' }, + headers: { + 'content-length': testData.length, + 
'x-scal-canonical-id': testArn, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-storage-class': awsLocation, + 'x-scal-tags': JSON.stringify({ Key1: 'Value1' }), + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + return next(); + } + ), + next => + awsClient.getObjectTagging( + { + Bucket: awsBucket, + Key: awsKey, + }, + (err, data) => { + assert.ifError(err); + assert.deepStrictEqual(data.TagSet, [ + { + Key: 'Key1', + Value: 'Value1', + }, + ]); + next(); + } + ), + ], + done + ); }); const testCases = [ @@ -1702,789 +2090,1033 @@ describe('backbeat routes', () => { testCases.forEach(({ description, bucket }) => { it(`should PUT metadata and data if ${description} and x-scal-versioning-required is not set`, done => { let objectMd; - async.waterfall([ - next => s3.putObject({ - Bucket: bucket, - Key: 'sourcekey', - Body: new Buffer(testData) }, - next), - (resp, next) => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: 'sourcekey', - authCredentials: backbeatAuthCredentials, - }, (err, resp) => { - objectMd = JSON.parse(resp.body).Body; - return next(); - }), - next => { - makeBackbeatRequest({ - method: 'PUT', bucket, - objectKey: 'destinationkey', - resourceType: 'data', - queryObj: { v2: '' }, - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData, - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - makeBackbeatRequest({ - method: 'PUT', bucket, - objectKey: 'destinationkey', - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: objectMd, - }, next); - }], + async.waterfall( + [ + next => + s3.putObject( + { + Bucket: bucket, + Key: 'sourcekey', + Body: new Buffer(testData), + }, + next + ), + (resp, next) => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: 'sourcekey', + authCredentials: backbeatAuthCredentials, + }, + (err, resp) => { + objectMd = JSON.parse(resp.body).Body; + return next(); + } + ), + next => { + makeBackbeatRequest( + { + method: 'PUT', + bucket, + objectKey: 'destinationkey', + resourceType: 'data', + queryObj: { v2: '' }, + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + makeBackbeatRequest( + { + method: 'PUT', + bucket, + objectKey: 'destinationkey', + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: objectMd, + }, + next + ); + }, + ], err => { assert.ifError(err); done(); - }); + } + ); }); }); testCases.forEach(({ description, bucket }) => { it(`should refuse PUT data if ${description} and x-scal-versioning-required is true`, done => { - makeBackbeatRequest({ - method: 'PUT', - bucket, - objectKey: testKey, - resourceType: 'data', - queryObj: { v2: '' }, - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - 'x-scal-versioning-required': 'true', + makeBackbeatRequest( + { + method: 'PUT', + bucket, + objectKey: testKey, + resourceType: 'data', + queryObj: { v2: '' }, + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + 'x-scal-versioning-required': 'true', + }, + authCredentials: 
backbeatAuthCredentials, + requestBody: testData, }, - authCredentials: backbeatAuthCredentials, - requestBody: testData, - }, err => { - assert.strictEqual(err.code, 'InvalidBucketState'); - done(); - }); + err => { + assert.strictEqual(err.code, 'InvalidBucketState'); + done(); + } + ); }); }); testCases.forEach(({ description, bucket }) => { it(`should refuse PUT metadata if ${description} and x-scal-versioning-required is true`, done => { - makeBackbeatRequest({ - method: 'PUT', - bucket, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - headers: { - 'x-scal-versioning-required': 'true', + makeBackbeatRequest( + { + method: 'PUT', + bucket, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + headers: { + 'x-scal-versioning-required': 'true', + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(testMd), }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(testMd), - }, err => { - assert.strictEqual(err.code, 'InvalidBucketState'); - done(); - }); - }); - }); - - it('should refuse PUT data if no x-scal-canonical-id header ' + - 'is provided', done => makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, resourceType: 'data', - queryObj: { v2: '' }, - headers: { - 'content-length': testData.length, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData, - }, - err => { - assert.strictEqual(err.code, 'BadRequest'); - done(); - })); - - it('should refuse PUT in metadata-only mode if object does not exist', - done => { - async.waterfall([next => { - const newMd = Object.assign({}, testMd); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: 'does-not-exist', - resourceType: 'metadata', - headers: { 'x-scal-replication-content': 'METADATA' }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }], err => { - assert.strictEqual(err.statusCode, 404); - done(); + err => { + assert.strictEqual(err.code, 'InvalidBucketState'); + done(); + } + ); }); }); - it('should remove old object data locations if version is overwritten ' + - 'with same contents', done => { - let oldLocation; - const testKeyOldData = `${testKey}-old-data`; - async.waterfall([next => { - // put object's data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put object metadata - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - oldLocation = newMd.location; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put another object which metadata reference the - // same data locations, we will attempt to retrieve - // this object at the end of the test to confirm that - // its locations have been deleted - const oldDataMd = Object.assign({}, testMd); - oldDataMd.location = 
oldLocation; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKeyOldData, - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(oldDataMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // create new data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, + it('should refuse PUT data if no x-scal-canonical-id header ' + 'is provided', done => + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, objectKey: testKey, resourceType: 'data', + queryObj: { v2: '' }, headers: { 'content-length': testData.length, - 'x-scal-canonical-id': testArn, }, authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // overwrite the original object version, now - // with references to the new data locations - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), + requestBody: testData, + }, + err => { + assert.strictEqual(err.code, 'BadRequest'); + done(); + } + ) + ); + + it('should refuse PUT in metadata-only mode if object does not exist', done => { + async.waterfall( + [ + next => { + const newMd = Object.assign({}, testMd); + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: 'does-not-exist', + resourceType: 'metadata', + headers: { 'x-scal-replication-content': 'METADATA' }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, + next + ); }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // give some time for the async deletes to complete - setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, testData, next), - 1000); - }, next => { - // check that the object copy referencing the old data - // locations is unreadable, confirming that the old - // data locations have been deleted - s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKeyOldData, - }, err => { - assert(err, 'expected error to get object with old data ' + - 'locations, got success'); - next(); - }); - }], err => { - assert.ifError(err); - done(); - }); + ], + err => { + assert.strictEqual(err.statusCode, 404); + done(); + } + ); }); - it('should remove old object data locations if version is overwritten ' + - 'with empty contents', done => { + it('should remove old object data locations if version is overwritten ' + 'with same contents', done => { let oldLocation; const testKeyOldData = `${testKey}-old-data`; - async.waterfall([next => { - // put object's data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, + async.waterfall( + [ + next => { + // put object's data locations + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + next + ); }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, 
(response, next) => { - assert.strictEqual(response.statusCode, 200); - // put object metadata - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - oldLocation = newMd.location; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put object metadata + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + oldLocation = newMd.location; + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, + next + ); }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put another object which metadata reference the - // same data locations, we will attempt to retrieve - // this object at the end of the test to confirm that - // its locations have been deleted - const oldDataMd = Object.assign({}, testMd); - oldDataMd.location = oldLocation; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKeyOldData, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put another object which metadata reference the + // same data locations, we will attempt to retrieve + // this object at the end of the test to confirm that + // its locations have been deleted + const oldDataMd = Object.assign({}, testMd); + oldDataMd.location = oldLocation; + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKeyOldData, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(oldDataMd), + }, + next + ); }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(oldDataMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // overwrite the original object version with an empty location - const newMd = Object.assign({}, testMd); - newMd['content-length'] = 0; - newMd['content-md5'] = emptyContentsMd5; - newMd.location = null; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // create new data locations + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + next + ); }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // give some time for the async deletes to complete - setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, '', next), - 1000); - }, next => { - // check that the object copy referencing the old data - // locations is unreadable, confirming that the old - // data locations have been deleted - 
s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKeyOldData, - }, err => { - assert(err, 'expected error to get object with old data ' + - 'locations, got success'); - next(); - }); - }], err => { - assert.ifError(err); - done(); - }); - }); - - it('should not remove data locations on replayed metadata PUT', - done => { - let serializedNewMd; - async.waterfall([next => { - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // overwrite the original object version, now + // with references to the new data locations + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, + next + ); }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - serializedNewMd = JSON.stringify(newMd); - async.timesSeries(2, (i, putDone) => makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // give some time for the async deletes to complete + setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, testData, next), 1000); }, - authCredentials: backbeatAuthCredentials, - requestBody: serializedNewMd, - }, (err, response) => { - assert.ifError(err); - assert.strictEqual(response.statusCode, 200); - putDone(err); - }), () => next()); - }, next => { - // check that the object is still readable to make - // sure we did not remove the data keys - s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKey, - }, (err, data) => { + next => { + // check that the object copy referencing the old data + // locations is unreadable, confirming that the old + // data locations have been deleted + s3.getObject( + { + Bucket: TEST_BUCKET, + Key: testKeyOldData, + }, + err => { + assert(err, 'expected error to get object with old data ' + 'locations, got success'); + next(); + } + ); + }, + ], + err => { assert.ifError(err); - assert.strictEqual(data.Body.toString(), testData); - next(); - }); - }], err => { - assert.ifError(err); - done(); - }); + done(); + } + ); }); - it('should create a new version when no versionId is passed in query string', done => { - let newVersion; - async.waterfall([next => { - // put object's data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, + it('should remove old object data locations if version is overwritten ' + 'with empty contents', done => { + let oldLocation; + const testKeyOldData = `${testKey}-old-data`; + async.waterfall( + [ + next => { + // put object's data locations + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 
'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + next + ); }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put object metadata - const oldMd = Object.assign({}, testMd); - oldMd.location = JSON.parse(response.body); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put object metadata + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + oldLocation = newMd.location; + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, + next + ); }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(oldMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const parsedResponse = JSON.parse(response.body); - assert.strictEqual(parsedResponse.versionId, testMd.versionId); - // create new data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put another object which metadata reference the + // same data locations, we will attempt to retrieve + // this object at the end of the test to confirm that + // its locations have been deleted + const oldDataMd = Object.assign({}, testMd); + oldDataMd.location = oldLocation; + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKeyOldData, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(oldDataMd), + }, + next + ); }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // create a new version with the new data locations, - // not passing 'versionId' in the query string - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const parsedResponse = JSON.parse(response.body); - newVersion = parsedResponse.versionId; - assert.notStrictEqual(newVersion, testMd.versionId); - // give some time for the async deletes to complete, - // then check that we can read the latest version - setTimeout(() => s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKey, - }, (err, data) => { + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // overwrite the original object version with an empty location + const newMd = Object.assign({}, testMd); + newMd['content-length'] = 0; + newMd['content-md5'] = emptyContentsMd5; + newMd.location = null; + makeBackbeatRequest( + { + method: 'PUT', + 
bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // give some time for the async deletes to complete + setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, '', next), 1000); + }, + next => { + // check that the object copy referencing the old data + // locations is unreadable, confirming that the old + // data locations have been deleted + s3.getObject( + { + Bucket: TEST_BUCKET, + Key: testKeyOldData, + }, + err => { + assert(err, 'expected error to get object with old data ' + 'locations, got success'); + next(); + } + ); + }, + ], + err => { assert.ifError(err); - assert.strictEqual(data.Body.toString(), testData); - next(); - }), 1000); - }, next => { - // check that the previous object version is still readable - s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKey, - VersionId: versionIdUtils.encode(testMd.versionId), - }, (err, data) => { + done(); + } + ); + }); + + it('should not remove data locations on replayed metadata PUT', done => { + let serializedNewMd; + async.waterfall( + [ + next => { + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + serializedNewMd = JSON.stringify(newMd); + async.timesSeries( + 2, + (i, putDone) => + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: serializedNewMd, + }, + (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + putDone(err); + } + ), + () => next() + ); + }, + next => { + // check that the object is still readable to make + // sure we did not remove the data keys + s3.getObject( + { + Bucket: TEST_BUCKET, + Key: testKey, + }, + (err, data) => { + assert.ifError(err); + assert.strictEqual(data.Body.toString(), testData); + next(); + } + ); + }, + ], + err => { assert.ifError(err); - assert.strictEqual(data.Body.toString(), testData); - next(); - }); - }], err => { - assert.ifError(err); - done(); - }); + done(); + } + ); + }); + + it('should create a new version when no versionId is passed in query string', done => { + let newVersion; + async.waterfall( + [ + next => { + // put object's data locations + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put object metadata + const oldMd = Object.assign({}, testMd); + oldMd.location = JSON.parse(response.body); + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: 
versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(oldMd), + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + const parsedResponse = JSON.parse(response.body); + assert.strictEqual(parsedResponse.versionId, testMd.versionId); + // create new data locations + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + // create a new version with the new data locations, + // not passing 'versionId' in the query string + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, + next + ); + }, + (response, next) => { + assert.strictEqual(response.statusCode, 200); + const parsedResponse = JSON.parse(response.body); + newVersion = parsedResponse.versionId; + assert.notStrictEqual(newVersion, testMd.versionId); + // give some time for the async deletes to complete, + // then check that we can read the latest version + setTimeout( + () => + s3.getObject( + { + Bucket: TEST_BUCKET, + Key: testKey, + }, + (err, data) => { + assert.ifError(err); + assert.strictEqual(data.Body.toString(), testData); + next(); + } + ), + 1000 + ); + }, + next => { + // check that the previous object version is still readable + s3.getObject( + { + Bucket: TEST_BUCKET, + Key: testKey, + VersionId: versionIdUtils.encode(testMd.versionId), + }, + (err, data) => { + assert.ifError(err); + assert.strictEqual(data.Body.toString(), testData); + next(); + } + ); + }, + ], + err => { + assert.ifError(err); + done(); + } + ); }); }); describe('backbeat authorization checks', () => { - [{ method: 'PUT', resourceType: 'metadata' }, - { method: 'PUT', resourceType: 'data' }].forEach(test => { - const queryObj = test.resourceType === 'data' ? { v2: '' } : {}; - it(`${test.method} ${test.resourceType} should respond with ` + - '403 Forbidden if no credentials are provided', - done => { - makeBackbeatRequest({ - method: test.method, bucket: TEST_BUCKET, - objectKey: TEST_KEY, resourceType: test.resourceType, - queryObj, - }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); - }); - it(`${test.method} ${test.resourceType} should respond with ` + - '403 Forbidden if wrong credentials are provided', + [ + { method: 'PUT', resourceType: 'metadata' }, + { method: 'PUT', resourceType: 'data' }, + ].forEach(test => { + const queryObj = test.resourceType === 'data' ? 
{ v2: '' } : {}; + it( + `${test.method} ${test.resourceType} should respond with ` + + '403 Forbidden if no credentials are provided', done => { - makeBackbeatRequest({ - method: test.method, bucket: TEST_BUCKET, - objectKey: TEST_KEY, resourceType: test.resourceType, - queryObj, - authCredentials: { - accessKey: 'wrong', - secretKey: 'still wrong', + makeBackbeatRequest( + { + method: test.method, + bucket: TEST_BUCKET, + objectKey: TEST_KEY, + resourceType: test.resourceType, + queryObj, }, - }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'InvalidAccessKeyId'); - done(); - }); - }); - it(`${test.method} ${test.resourceType} should respond with ` + - '403 Forbidden if the account does not match the ' + - 'backbeat user', + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'AccessDenied'); + done(); + } + ); + } + ); + it( + `${test.method} ${test.resourceType} should respond with ` + + '403 Forbidden if wrong credentials are provided', done => { - makeBackbeatRequest({ - method: test.method, bucket: TEST_BUCKET, - objectKey: TEST_KEY, resourceType: test.resourceType, - queryObj, - authCredentials: { - accessKey: 'accessKey2', - secretKey: 'verySecretKey2', + makeBackbeatRequest( + { + method: test.method, + bucket: TEST_BUCKET, + objectKey: TEST_KEY, + resourceType: test.resourceType, + queryObj, + authCredentials: { + accessKey: 'wrong', + secretKey: 'still wrong', + }, }, - }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); - }); - it(`${test.method} ${test.resourceType} should respond with ` + - '403 Forbidden if backbeat user has wrong secret key', + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'InvalidAccessKeyId'); + done(); + } + ); + } + ); + it( + `${test.method} ${test.resourceType} should respond with ` + + '403 Forbidden if the account does not match the ' + + 'backbeat user', + done => { + makeBackbeatRequest( + { + method: test.method, + bucket: TEST_BUCKET, + objectKey: TEST_KEY, + resourceType: test.resourceType, + queryObj, + authCredentials: { + accessKey: 'accessKey2', + secretKey: 'verySecretKey2', + }, + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'AccessDenied'); + done(); + } + ); + } + ); + it( + `${test.method} ${test.resourceType} should respond with ` + + '403 Forbidden if backbeat user has wrong secret key', done => { - makeBackbeatRequest({ - method: test.method, bucket: TEST_BUCKET, - objectKey: TEST_KEY, resourceType: test.resourceType, - queryObj, - authCredentials: { - accessKey: backbeatAuthCredentials.accessKey, - secretKey: 'hastalavista', + makeBackbeatRequest( + { + method: test.method, + bucket: TEST_BUCKET, + objectKey: TEST_KEY, + resourceType: test.resourceType, + queryObj, + authCredentials: { + accessKey: backbeatAuthCredentials.accessKey, + secretKey: 'hastalavista', + }, }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'SignatureDoesNotMatch'); + done(); + } + ); + } + ); + }); + it( + 'GET /_/backbeat/api/... 
should respond with ' + '503 on authenticated requests (API server down)', + done => { + const options = { + authCredentials: { + accessKey: 'accessKey2', + secretKey: 'verySecretKey2', }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'SignatureDoesNotMatch'); - done(); - }); + hostname: ipAddress, + port: 8000, + method: 'GET', + path: '/_/backbeat/api/crr/failed', + jsonResponse: true, + }; + makeRequest(options, err => { + assert(err); + assert.strictEqual(err.statusCode, 503); + assert.strictEqual(err.code, 'ServiceUnavailable'); + done(); + }); + } + ); + it( + 'GET /_/backbeat/api/... should respond with ' + '403 Forbidden if the request is unauthenticated', + done => { + const options = { + hostname: ipAddress, + port: 8000, + method: 'GET', + path: '/_/backbeat/api/crr/failed', + jsonResponse: true, + }; + makeRequest(options, err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'AccessDenied'); + done(); }); - }); - it('GET /_/backbeat/api/... should respond with ' + - '503 on authenticated requests (API server down)', - done => { - const options = { - authCredentials: { - accessKey: 'accessKey2', - secretKey: 'verySecretKey2', - }, - hostname: ipAddress, - port: 8000, - method: 'GET', - path: '/_/backbeat/api/crr/failed', - jsonResponse: true, - }; - makeRequest(options, err => { - assert(err); - assert.strictEqual(err.statusCode, 503); - assert.strictEqual(err.code, 'ServiceUnavailable'); - done(); - }); - }); - it('GET /_/backbeat/api/... should respond with ' + - '403 Forbidden if the request is unauthenticated', - done => { - const options = { - hostname: ipAddress, - port: 8000, - method: 'GET', - path: '/_/backbeat/api/crr/failed', - jsonResponse: true, - }; - makeRequest(options, err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); - }); + } + ); }); describe('GET Metadata route', () => { - beforeEach(done => makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: TEST_KEY, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(testMd), - }, done)); + beforeEach(done => + makeBackbeatRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: TEST_KEY, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(testMd), + }, + done + ) + ); it('should return metadata blob for a versionId', done => { - makeBackbeatRequest({ - method: 'GET', bucket: TEST_BUCKET, - objectKey: TEST_KEY, resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), + makeBackbeatRequest( + { + method: 'GET', + bucket: TEST_BUCKET, + objectKey: TEST_KEY, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, }, - }, (err, data) => { - const parsedBody = JSON.parse(JSON.parse(data.body).Body); - assert.strictEqual(data.statusCode, 200); - assert.deepStrictEqual(parsedBody, testMd); - done(); - }); + (err, data) => { + const parsedBody = JSON.parse(JSON.parse(data.body).Body); + assert.strictEqual(data.statusCode, 200); + assert.deepStrictEqual(parsedBody, testMd); + done(); + } + ); }); it('should return 
error if bucket does not exist', done => { - makeBackbeatRequest({ - method: 'GET', bucket: 'blah', - objectKey: TEST_KEY, resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), + makeBackbeatRequest( + { + method: 'GET', + bucket: 'blah', + objectKey: TEST_KEY, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, }, - }, (err, data) => { - assert.strictEqual(data.statusCode, 404); - assert.strictEqual(JSON.parse(data.body).code, 'NoSuchBucket'); - done(); - }); + (err, data) => { + assert.strictEqual(data.statusCode, 404); + assert.strictEqual(JSON.parse(data.body).code, 'NoSuchBucket'); + done(); + } + ); }); it('should return error if object does not exist', done => { - makeBackbeatRequest({ - method: 'GET', bucket: TEST_BUCKET, - objectKey: 'blah', resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), + makeBackbeatRequest( + { + method: 'GET', + bucket: TEST_BUCKET, + objectKey: 'blah', + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, }, - }, (err, data) => { - assert.strictEqual(data.statusCode, 404); - assert.strictEqual(JSON.parse(data.body).code, 'ObjNotFound'); - done(); - }); + (err, data) => { + assert.strictEqual(data.statusCode, 404); + assert.strictEqual(JSON.parse(data.body).code, 'ObjNotFound'); + done(); + } + ); }); }); describe('backbeat multipart upload operations', function test() { this.timeout(10000); // The ceph image does not support putting tags during initiate MPU. - itSkipCeph('should put tags if the source is AWS and tags are ' + - 'provided when initiating the multipart upload', done => { - const awsKey = uuidv4(); - const multipleBackendPath = - `/_/backbeat/multiplebackenddata/${awsBucket}/${awsKey}`; - let uploadId; - let partData; - async.series([ - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: multipleBackendPath, - queryObj: { operation: 'initiatempu' }, - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-tags': JSON.stringify({ 'key1': 'value1' }), - }, - jsonResponse: true, - }, (err, data) => { - if (err) { - return next(err); - } - uploadId = JSON.parse(data.body).uploadId; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'PUT', - path: multipleBackendPath, - queryObj: { operation: 'putpart' }, - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-upload-id': uploadId, - 'x-scal-part-number': '1', - 'content-length': testData.length, - }, - requestBody: testData, - jsonResponse: true, - }, (err, data) => { - if (err) { - return next(err); - } - const body = JSON.parse(data.body); - partData = [{ - PartNumber: [body.partNumber], - ETag: [body.ETag], - }]; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: multipleBackendPath, - queryObj: { operation: 'completempu' }, - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-upload-id': uploadId, - }, - requestBody: JSON.stringify(partData), - 
jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.deepStrictEqual(data.TagSet, [{ - Key: 'key1', - Value: 'value1', - }]); - next(); - }), - ], done); - }); - it('should put tags if the source is Azure and tags are provided ' + - 'when completing the multipart upload', done => { - const containerName = getAzureContainerName(azureLocation); - const blob = uuidv4(); - const multipleBackendPath = - `/_/backbeat/multiplebackenddata/${containerName}/${blob}`; - const uploadId = uuidv4().replace(/-/g, ''); - let partData; - async.series([ - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'PUT', - path: multipleBackendPath, - queryObj: { operation: 'putpart' }, - headers: { - 'x-scal-storage-class': azureLocation, - 'x-scal-storage-type': 'azure', - 'x-scal-upload-id': uploadId, - 'x-scal-part-number': '1', - 'content-length': testData.length, - }, - requestBody: testData, - jsonResponse: true, - }, (err, data) => { - if (err) { - return next(err); - } - const body = JSON.parse(data.body); - partData = [{ - PartNumber: [body.partNumber], - ETag: [body.ETag], - NumberSubParts: [body.numberSubParts], - }]; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: multipleBackendPath, - queryObj: { operation: 'completempu' }, - headers: { - 'x-scal-storage-class': azureLocation, - 'x-scal-storage-type': 'azure', - 'x-scal-upload-id': uploadId, - 'x-scal-tags': JSON.stringify({ 'key1': 'value1' }), - }, - requestBody: JSON.stringify(partData), - jsonResponse: true, - }, next), - next => - azureClient.getContainerClient(containerName).getBlobClient(blob).getProperties() - .then(result => { - const tags = JSON.parse(result.metadata.tags); - assert.deepStrictEqual(tags, { key1: 'value1' }); - return next(); - }, next), - ], done); - }); + itSkipCeph( + 'should put tags if the source is AWS and tags are ' + 'provided when initiating the multipart upload', + done => { + const awsKey = uuidv4(); + const multipleBackendPath = `/_/backbeat/multiplebackenddata/${awsBucket}/${awsKey}`; + let uploadId; + let partData; + async.series( + [ + next => + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: multipleBackendPath, + queryObj: { operation: 'initiatempu' }, + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-tags': JSON.stringify({ key1: 'value1' }), + }, + jsonResponse: true, + }, + (err, data) => { + if (err) { + return next(err); + } + uploadId = JSON.parse(data.body).uploadId; + return next(); + } + ), + next => + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'PUT', + path: multipleBackendPath, + queryObj: { operation: 'putpart' }, + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-upload-id': uploadId, + 'x-scal-part-number': '1', + 'content-length': testData.length, + }, + requestBody: testData, + jsonResponse: true, + }, + (err, data) => { + if (err) { + return next(err); + } + const body = JSON.parse(data.body); + partData = [ + { + PartNumber: [body.partNumber], + ETag: [body.ETag], + }, + ]; + return next(); + } + ), + next => + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: 
ipAddress, + port: 8000, + method: 'POST', + path: multipleBackendPath, + queryObj: { operation: 'completempu' }, + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-upload-id': uploadId, + }, + requestBody: JSON.stringify(partData), + jsonResponse: true, + }, + next + ), + next => + awsClient.getObjectTagging( + { + Bucket: awsBucket, + Key: awsKey, + }, + (err, data) => { + assert.ifError(err); + assert.deepStrictEqual(data.TagSet, [ + { + Key: 'key1', + Value: 'value1', + }, + ]); + next(); + } + ), + ], + done + ); + } + ); + it( + 'should put tags if the source is Azure and tags are provided ' + 'when completing the multipart upload', + done => { + const containerName = getAzureContainerName(azureLocation); + const blob = uuidv4(); + const multipleBackendPath = `/_/backbeat/multiplebackenddata/${containerName}/${blob}`; + const uploadId = uuidv4().replace(/-/g, ''); + let partData; + async.series( + [ + next => + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'PUT', + path: multipleBackendPath, + queryObj: { operation: 'putpart' }, + headers: { + 'x-scal-storage-class': azureLocation, + 'x-scal-storage-type': 'azure', + 'x-scal-upload-id': uploadId, + 'x-scal-part-number': '1', + 'content-length': testData.length, + }, + requestBody: testData, + jsonResponse: true, + }, + (err, data) => { + if (err) { + return next(err); + } + const body = JSON.parse(data.body); + partData = [ + { + PartNumber: [body.partNumber], + ETag: [body.ETag], + NumberSubParts: [body.numberSubParts], + }, + ]; + return next(); + } + ), + next => + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: multipleBackendPath, + queryObj: { operation: 'completempu' }, + headers: { + 'x-scal-storage-class': azureLocation, + 'x-scal-storage-type': 'azure', + 'x-scal-upload-id': uploadId, + 'x-scal-tags': JSON.stringify({ key1: 'value1' }), + }, + requestBody: JSON.stringify(partData), + jsonResponse: true, + }, + next + ), + next => + azureClient + .getContainerClient(containerName) + .getBlobClient(blob) + .getProperties() + .then(result => { + const tags = JSON.parse(result.metadata.tags); + assert.deepStrictEqual(tags, { key1: 'value1' }); + return next(); + }, next), + ], + done + ); + } + ); }); describe('Batch Delete Route', function test() { this.timeout(30000); @@ -2493,405 +3125,511 @@ describe('backbeat routes', () => { let location; const testKey = 'batch-delete-test-key'; - async.series([ - done => s3.putObject({ - Bucket: TEST_BUCKET, - Key: testKey, - Body: new Buffer('hello'), - }, (err, data) => { - assert.ifError(err); - versionId = data.VersionId; - done(); - }), - done => { - makeBackbeatRequest({ - method: 'GET', - bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - queryObj: { - versionId, - }, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.statusCode, 200); - const metadata = JSON.parse( - JSON.parse(data.body).Body); - location = metadata.location; - done(); - }); - }, - done => { - const options = { - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: `/_/backbeat/batchdelete/${TEST_BUCKET}/${testKey}`, - requestBody: - `{"Locations":${JSON.stringify(location)}}`, - jsonResponse: true, - }; - makeRequest(options, done); - }, - done => s3.getObject({ - Bucket: TEST_BUCKET, - Key: 
testKey, - }, err => { - // should error out as location shall no longer exist - assert(err); - done(); - }), - ], done); + async.series( + [ + done => + s3.putObject( + { + Bucket: TEST_BUCKET, + Key: testKey, + Body: new Buffer('hello'), + }, + (err, data) => { + assert.ifError(err); + versionId = data.VersionId; + done(); + } + ), + done => { + makeBackbeatRequest( + { + method: 'GET', + bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + queryObj: { + versionId, + }, + }, + (err, data) => { + assert.ifError(err); + assert.strictEqual(data.statusCode, 200); + const metadata = JSON.parse(JSON.parse(data.body).Body); + location = metadata.location; + done(); + } + ); + }, + done => { + const options = { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: `/_/backbeat/batchdelete/${TEST_BUCKET}/${testKey}`, + requestBody: `{"Locations":${JSON.stringify(location)}}`, + jsonResponse: true, + }; + makeRequest(options, done); + }, + done => + s3.getObject( + { + Bucket: TEST_BUCKET, + Key: testKey, + }, + err => { + // should error out as location shall no longer exist + assert(err); + done(); + } + ), + ], + done + ); }); itSkipCeph('should batch delete a versioned AWS location', done => { let versionId; const awsKey = `${TEST_BUCKET}/batch-delete-test-key-${makeid(8)}`; - async.series([ - done => awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - Body: new Buffer('hello'), - }, (err, data) => { - assert.ifError(err); - versionId = data.VersionId; - done(); - }), - done => { - const location = [{ - key: awsKey, - size: 5, - dataStoreName: awsLocation, - dataStoreVersionId: versionId, - }]; - const reqBody = `{"Locations":${JSON.stringify(location)}}`; - const options = { - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: '/_/backbeat/batchdelete', - requestBody: reqBody, - jsonResponse: true, - }; - makeRequest(options, done); - }, - done => awsClient.getObject({ - Bucket: awsBucket, - Key: awsKey, - }, err => { - // should error out as location shall no longer exist - assert(err); - done(); - }), - ], done); + async.series( + [ + done => + awsClient.putObject( + { + Bucket: awsBucket, + Key: awsKey, + Body: new Buffer('hello'), + }, + (err, data) => { + assert.ifError(err); + versionId = data.VersionId; + done(); + } + ), + done => { + const location = [ + { + key: awsKey, + size: 5, + dataStoreName: awsLocation, + dataStoreVersionId: versionId, + }, + ]; + const reqBody = `{"Locations":${JSON.stringify(location)}}`; + const options = { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: '/_/backbeat/batchdelete', + requestBody: reqBody, + jsonResponse: true, + }; + makeRequest(options, done); + }, + done => + awsClient.getObject( + { + Bucket: awsBucket, + Key: awsKey, + }, + err => { + // should error out as location shall no longer exist + assert(err); + done(); + } + ), + ], + done + ); }); it('should fail with error if given malformed JSON', done => { - async.series([ - done => { - const options = { - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: '/_/backbeat/batchdelete', - requestBody: 'NOTJSON', - jsonResponse: true, - }; - makeRequest(options, done); - }, - ], err => { - assert(err); - done(); - }); + async.series( + [ + done => { + const options = { + authCredentials: 
backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: '/_/backbeat/batchdelete', + requestBody: 'NOTJSON', + jsonResponse: true, + }; + makeRequest(options, done); + }, + ], + err => { + assert(err); + done(); + } + ); }); it('should skip batch delete of a non-existent location', done => { - async.series([ - done => { - const options = { - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: '/_/backbeat/batchdelete', - requestBody: - '{"Locations":' + - '[{"key":"abcdef","dataStoreName":"us-east-1"}]}', - jsonResponse: true, - }; - makeRequest(options, done); - }, - ], done); + async.series( + [ + done => { + const options = { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: '/_/backbeat/batchdelete', + requestBody: '{"Locations":' + '[{"key":"abcdef","dataStoreName":"us-east-1"}]}', + jsonResponse: true, + }; + makeRequest(options, done); + }, + ], + done + ); }); it('should skip batch delete of empty location array', done => { - async.series([ - done => { - const options = { - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: '/_/backbeat/batchdelete', - requestBody: '{"Locations":[]}', - jsonResponse: true, - }; - makeRequest(options, done); - }, - ], done); + async.series( + [ + done => { + const options = { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: '/_/backbeat/batchdelete', + requestBody: '{"Locations":[]}', + jsonResponse: true, + }; + makeRequest(options, done); + }, + ], + done + ); }); - it('should not put delete tags if the source is not Azure and ' + - 'if-unmodified-since header is not provided', done => { - const awsKey = uuidv4(); - async.series([ - next => - awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - }, next), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: '/_/backbeat/batchdelete', - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', - }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: awsKey, - dataStoreName: awsLocation, - }], - }), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.deepStrictEqual(data.TagSet, []); - next(); - }), - ], done); - }); + it( + 'should not put delete tags if the source is not Azure and ' + 'if-unmodified-since header is not provided', + done => { + const awsKey = uuidv4(); + async.series( + [ + next => + awsClient.putObject( + { + Bucket: awsBucket, + Key: awsKey, + }, + next + ), + next => + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: '/_/backbeat/batchdelete', + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [ + { + key: awsKey, + dataStoreName: awsLocation, + }, + ], + }), + jsonResponse: true, + }, + next + ), + next => + awsClient.getObjectTagging( + { + Bucket: awsBucket, + Key: awsKey, + }, + (err, data) => { + assert.ifError(err); + assert.deepStrictEqual(data.TagSet, []); + next(); + } 
+ ), + ], + done + ); + } + ); - itSkipCeph('should not put tags if the source is not Azure and ' + - 'if-unmodified-since condition is not met', done => { - const awsKey = uuidv4(); - async.series([ - next => - awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - }, next), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: '/_/backbeat/batchdelete', - headers: { - 'if-unmodified-since': - 'Sun, 31 Mar 2019 00:00:00 GMT', - 'x-scal-storage-class': awsLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', - }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: awsKey, - dataStoreName: awsLocation, - }], - }), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.deepStrictEqual(data.TagSet, []); - next(); - }), - ], done); - }); + itSkipCeph( + 'should not put tags if the source is not Azure and ' + 'if-unmodified-since condition is not met', + done => { + const awsKey = uuidv4(); + async.series( + [ + next => + awsClient.putObject( + { + Bucket: awsBucket, + Key: awsKey, + }, + next + ), + next => + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: '/_/backbeat/batchdelete', + headers: { + 'if-unmodified-since': 'Sun, 31 Mar 2019 00:00:00 GMT', + 'x-scal-storage-class': awsLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [ + { + key: awsKey, + dataStoreName: awsLocation, + }, + ], + }), + jsonResponse: true, + }, + next + ), + next => + awsClient.getObjectTagging( + { + Bucket: awsBucket, + Key: awsKey, + }, + (err, data) => { + assert.ifError(err); + assert.deepStrictEqual(data.TagSet, []); + next(); + } + ), + ], + done + ); + } + ); - itSkipCeph('should put tags if the source is not Azure and ' + - 'if-unmodified-since condition is met', done => { + itSkipCeph('should put tags if the source is not Azure and ' + 'if-unmodified-since condition is met', done => { const awsKey = uuidv4(); let lastModified; - async.series([ - next => - awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - }, next), - next => - awsClient.headObject({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - if (err) { - return next(err); - } - lastModified = data.LastModified; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: `/_/backbeat/batchdelete/${awsBucket}/${awsKey}`, - headers: { - 'if-unmodified-since': lastModified, - 'x-scal-storage-class': awsLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', - }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: awsKey, - dataStoreName: awsLocation, - }], - }), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.TagSet.length, 2); - data.TagSet.forEach(tag => { - const { Key, Value } = tag; - const isValidTag = - Key === 'scal-delete-marker' || - Key === 'scal-delete-service'; - assert(isValidTag); - if (Key === 'scal-delete-marker') { - assert.strictEqual(Value, 
'true'); + async.series( + [ + next => + awsClient.putObject( + { + Bucket: awsBucket, + Key: awsKey, + }, + next + ), + next => + awsClient.headObject( + { + Bucket: awsBucket, + Key: awsKey, + }, + (err, data) => { + if (err) { + return next(err); + } + lastModified = data.LastModified; + return next(); } - if (Key === 'scal-delete-service') { - assert.strictEqual( - Value, 'lifecycle-transition'); + ), + next => + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: `/_/backbeat/batchdelete/${awsBucket}/${awsKey}`, + headers: { + 'if-unmodified-since': lastModified, + 'x-scal-storage-class': awsLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [ + { + key: awsKey, + dataStoreName: awsLocation, + }, + ], + }), + jsonResponse: true, + }, + next + ), + next => + awsClient.getObjectTagging( + { + Bucket: awsBucket, + Key: awsKey, + }, + (err, data) => { + assert.ifError(err); + assert.strictEqual(data.TagSet.length, 2); + data.TagSet.forEach(tag => { + const { Key, Value } = tag; + const isValidTag = Key === 'scal-delete-marker' || Key === 'scal-delete-service'; + assert(isValidTag); + if (Key === 'scal-delete-marker') { + assert.strictEqual(Value, 'true'); + } + if (Key === 'scal-delete-service') { + assert.strictEqual(Value, 'lifecycle-transition'); + } + }); + next(); } - }); - next(); - }), - ], done); + ), + ], + done + ); }); - it('should not delete the object if the source is Azure and ' + - 'if-unmodified-since condition is not met', done => { - const blob = uuidv4(); - async.series([ - next => - azureClient.getContainerClient(containerName).uploadBlockBlob(blob, 'a', 1) - .then(() => next(), next), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: - `/_/backbeat/batchdelete/${containerName}/${blob}`, - headers: { - 'if-unmodified-since': - 'Sun, 31 Mar 2019 00:00:00 GMT', - 'x-scal-storage-class': azureLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', - }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: blob, - dataStoreName: azureLocation, - }], - }), - jsonResponse: true, - }, err => { - if (err && err.statusCode === 412) { - return next(); - } - return next(err); - }), - next => - azureClient.getContainerClient(containerName).getBlobClient(blob).getProperties() - .then(result => { - assert(result); - return next(); - }, next), - ], done); - }); + it( + 'should not delete the object if the source is Azure and ' + 'if-unmodified-since condition is not met', + done => { + const blob = uuidv4(); + async.series( + [ + next => + azureClient + .getContainerClient(containerName) + .uploadBlockBlob(blob, 'a', 1) + .then(() => next(), next), + next => + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: `/_/backbeat/batchdelete/${containerName}/${blob}`, + headers: { + 'if-unmodified-since': 'Sun, 31 Mar 2019 00:00:00 GMT', + 'x-scal-storage-class': azureLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [ + { + key: blob, + dataStoreName: azureLocation, + }, + ], + }), + jsonResponse: true, + }, + err => { + if (err && 
err.statusCode === 412) { + return next(); + } + return next(err); + } + ), + next => + azureClient + .getContainerClient(containerName) + .getBlobClient(blob) + .getProperties() + .then(result => { + assert(result); + return next(); + }, next), + ], + done + ); + } + ); - it('should delete the object if the source is Azure and ' + - 'if-unmodified-since condition is met', done => { + it('should delete the object if the source is Azure and ' + 'if-unmodified-since condition is met', done => { const blob = uuidv4(); let lastModified; - async.series([ - next => - azureClient.getContainerClient(containerName).uploadBlockBlob(blob, 'a', 1) - .then(() => next(), next), - next => - azureClient.getContainerClient(containerName).getBlobClient(blob).getProperties() - .then(result => { - lastModified = result.lastModified; - return next(); - }, next), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: - `/_/backbeat/batchdelete/${containerName}/${blob}`, - headers: { - 'if-unmodified-since': lastModified, - 'x-scal-storage-class': azureLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', - }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: blob, - dataStoreName: azureLocation, - }], - }), - jsonResponse: true, - }, next), - next => - azureClient.getContainerClient(containerName).getBlobClient(blob).getProperties() - .then(() => assert.fail('Expected error'), err => { - assert.strictEqual(err.statusCode, 404); - return next(); - }), - ], done); + async.series( + [ + next => + azureClient + .getContainerClient(containerName) + .uploadBlockBlob(blob, 'a', 1) + .then(() => next(), next), + next => + azureClient + .getContainerClient(containerName) + .getBlobClient(blob) + .getProperties() + .then(result => { + lastModified = result.lastModified; + return next(); + }, next), + next => + makeRequest( + { + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: `/_/backbeat/batchdelete/${containerName}/${blob}`, + headers: { + 'if-unmodified-since': lastModified, + 'x-scal-storage-class': azureLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [ + { + key: blob, + dataStoreName: azureLocation, + }, + ], + }), + jsonResponse: true, + }, + next + ), + next => + azureClient + .getContainerClient(containerName) + .getBlobClient(blob) + .getProperties() + .then( + () => assert.fail('Expected error'), + err => { + assert.strictEqual(err.statusCode, 404); + return next(); + } + ), + ], + done + ); }); }); }); diff --git a/tests/multipleBackend/routes/routeBackbeatForReplication.js b/tests/multipleBackend/routes/routeBackbeatForReplication.js index e4c717ed3f..b21a622ec8 100644 --- a/tests/multipleBackend/routes/routeBackbeatForReplication.js +++ b/tests/multipleBackend/routes/routeBackbeatForReplication.js @@ -20,8 +20,7 @@ const destinationAuthCredentials = { secretKey: destinationCreds.secretAccessKey, }; -const dstAccountInfo = require('../../../conf/authdata.json') - .accounts.find(acc => acc.name === 'Replication'); +const dstAccountInfo = require('../../../conf/authdata.json').accounts.find(acc => acc.name === 'Replication'); const testData = 'testkey data'; @@ -67,802 +66,1006 @@ describe('backbeat routes for replication', () => { let objMD; let versionId; - 
async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObject: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + }), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + headObject: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: versionId }, next), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - headObject: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: versionId }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObject; - assert.strictEqual(headObjectRes.VersionId, versionId); + const headObjectRes = results.headObject; + assert.strictEqual(headObjectRes.VersionId, versionId); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); - 
assert.strictEqual(Versions[0].IsLatest, true); - assert.strictEqual(Versions[0].VersionId, versionId); + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; + assert.strictEqual(Versions.length, 1); + assert.strictEqual(Versions[0].IsLatest, true); + assert.strictEqual(Versions[0].VersionId, versionId); - return done(); - }); + return done(); + } + ); }); it('should successfully replicate a version and update it', done => { let objMD; let versionId; - async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); + async.series( + { + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObject: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + }), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + updateMetadata: next => { + const { result, error } = ObjectMD.createFromBlob(objMD); + if (error) { + return next(error); } - versionId = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId, + result.setTags({ foo: 'bar' }); + return makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: destinationAuthCredentials, + requestBody: result.getSerialized(), + }, + next + ); }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + getObjectTagging: next => + dstS3.getObjectTagging({ Bucket: bucketDestination, Key: keyName, VersionId: versionId }, next), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); - } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - 
return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - updateMetadata: next => { - const { result, error } = ObjectMD.createFromBlob(objMD); - if (error) { - return next(error); + return done(err); } - result.setTags({ foo: 'bar' }); - return makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: destinationAuthCredentials, - requestBody: result.getSerialized(), - }, next); - }, - getObjectTagging: next => dstS3.getObjectTagging( - { Bucket: bucketDestination, Key: keyName, VersionId: versionId }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const getObjectTaggingRes = results.getObjectTagging; - assert.strictEqual(getObjectTaggingRes.VersionId, versionId); - assert.deepStrictEqual(getObjectTaggingRes.TagSet, [{ Key: 'foo', Value: 'bar' }]); + const getObjectTaggingRes = results.getObjectTagging; + assert.strictEqual(getObjectTaggingRes.VersionId, versionId); + assert.deepStrictEqual(getObjectTaggingRes.TagSet, [{ Key: 'foo', Value: 'bar' }]); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); - assert.strictEqual(Versions[0].IsLatest, true); - assert.strictEqual(Versions[0].VersionId, versionId); + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; + assert.strictEqual(Versions.length, 1); + assert.strictEqual(Versions[0].IsLatest, true); + assert.strictEqual(Versions[0].VersionId, versionId); - return done(); - }); + return done(); + } + ); }); it('should successfully replicate a version and update account info', done => { let objMD; let versionId; - async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - objMD = objectMDFromRequestBody(data) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId, - // Specifying the account id in the query string - // should make it update the account info in the - // metadata to the destination account info - accountId: dstAccountInfo.shortid, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - getDestinationMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketDestination, - 
objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: destinationAuthCredentials, - }, (err, data) => { + async.series( + { + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObject: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + }), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + objMD = objectMDFromRequestBody(data).getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + // Specifying the account id in the query string + // should make it update the account info in the + // metadata to the destination account info + accountId: dstAccountInfo.shortid, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + getDestinationMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: destinationAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + return next(null, objectMDFromRequestBody(data)); + } + ), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - return next(null, objectMDFromRequestBody(data)); - }), - }, (err, results) => { - if (err) { - return done(err); - } - const dstObjMD = results.getDestinationMetadata; - assert.strictEqual(dstObjMD.getOwnerDisplayName(), dstAccountInfo.name); - assert.strictEqual(dstObjMD.getOwnerId(), dstAccountInfo.canonicalID); + const dstObjMD = results.getDestinationMetadata; + assert.strictEqual(dstObjMD.getOwnerDisplayName(), dstAccountInfo.name); + assert.strictEqual(dstObjMD.getOwnerId(), dstAccountInfo.canonicalID); - return done(); - }); + return done(); + } + ); }); it('should fail to replicate a version if the provided account is invalid', done => { let objMD; let versionId; - async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - objMD = objectMDFromRequestBody(data) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - 
resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId, - accountId: 'invalid', - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - }, err => { - assert.strictEqual(err.code, 'AccountNotFound'); - return done(); - }); + async.series( + { + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObject: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + }), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + objMD = objectMDFromRequestBody(data).getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + accountId: 'invalid', + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + }, + err => { + assert.strictEqual(err.code, 'AccountNotFound'); + return done(); + } + ); }); it('should successfully replicate multiple versions and keep original order', done => { let objMDCurrent, objMDNonCurrent; let versionIdCurrent, versionIdNonCurrent; - async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectNonCurrent: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionIdNonCurrent = data.VersionId; - return next(); - }), - putObjectCurrent: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionIdCurrent = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadataNonCurrent: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: versionIdNonCurrent, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - // Backbeat updates account info in metadata - // to the destination account info - objMDNonCurrent = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - getMetadataCurrent: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: versionIdCurrent, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + 
putObjectNonCurrent: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + versionIdNonCurrent = data.VersionId; + return next(); + }), + putObjectCurrent: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + versionIdCurrent = data.VersionId; + return next(); + }), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadataNonCurrent: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: versionIdNonCurrent, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMDNonCurrent = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + getMetadataCurrent: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: versionIdCurrent, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMDCurrent = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + // replicating the objects in the reverse order + replicateMetadataCurrent: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: versionIdCurrent, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMDCurrent, + }, + next + ), + replicateMetadataNonCurrent: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: versionIdNonCurrent, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMDNonCurrent, + }, + next + ), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMDCurrent = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - // replicating the objects in the reverse order - replicateMetadataCurrent: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: versionIdCurrent, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMDCurrent, - }, next), - replicateMetadataNonCurrent: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: versionIdNonCurrent, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMDNonCurrent, - }, next), - listObjectVersions: next => dstS3.listObjectVersions({ 
Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; + assert.strictEqual(Versions.length, 2); - const [currentVersion, nonCurrentVersion] = Versions; + const [currentVersion, nonCurrentVersion] = Versions; - assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(currentVersion.VersionId, versionIdCurrent); + assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, versionIdCurrent); - assert.strictEqual(nonCurrentVersion.IsLatest, false); - assert.strictEqual(nonCurrentVersion.VersionId, versionIdNonCurrent); + assert.strictEqual(nonCurrentVersion.IsLatest, false); + assert.strictEqual(nonCurrentVersion.VersionId, versionIdNonCurrent); - return done(); - }); + return done(); + } + ); }); it('should successfully replicate a delete marker', done => { let objMDVersion, objMDDeleteMarker; let versionIdVersion, versionIdDeleteMarker; - async.series({ - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionIdVersion = data.VersionId; - return next(); - }), - deleteObject: next => srcS3.deleteObject( - { Bucket: bucketSource, Key: keyName }, (err, data) => { - if (err) { - return next(err); - } - versionIdDeleteMarker = data.VersionId; - return next(); - }), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadataVersion: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: versionIdVersion, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - // Backbeat updates account info in metadata - // to the destination account info - objMDVersion = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadataVersion: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: versionIdVersion, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMDVersion, - }, next), - getMetadataDeleteMarker: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: versionIdDeleteMarker, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObject: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + versionIdVersion = data.VersionId; + return next(); + }), + deleteObject: next => + srcS3.deleteObject({ Bucket: bucketSource, Key: keyName }, (err, data) => { + if 
(err) { + return next(err); + } + versionIdDeleteMarker = data.VersionId; + return next(); + }), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadataVersion: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: versionIdVersion, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMDVersion = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadataVersion: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: versionIdVersion, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMDVersion, + }, + next + ), + getMetadataDeleteMarker: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: versionIdDeleteMarker, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMDDeleteMarker = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadataDeleteMarker: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: versionIdDeleteMarker, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMDDeleteMarker, + }, + next + ), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMDDeleteMarker = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadataDeleteMarker: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: versionIdDeleteMarker, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMDDeleteMarker, - }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const listObjectVersionsRes = results.listObjectVersions; - const { Versions, DeleteMarkers } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions, DeleteMarkers } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); - assert.strictEqual(DeleteMarkers.length, 1); + assert.strictEqual(Versions.length, 1); + assert.strictEqual(DeleteMarkers.length, 1); - assert.strictEqual(Versions[0].IsLatest, false); - assert.strictEqual(Versions[0].VersionId, versionIdVersion); + assert.strictEqual(Versions[0].IsLatest, false); + assert.strictEqual(Versions[0].VersionId, 
versionIdVersion); - assert.strictEqual(DeleteMarkers[0].IsLatest, true); - assert.strictEqual(DeleteMarkers[0].VersionId, versionIdDeleteMarker); + assert.strictEqual(DeleteMarkers[0].IsLatest, true); + assert.strictEqual(DeleteMarkers[0].VersionId, versionIdDeleteMarker); - return done(); - }); + return done(); + } + ); }); it('should successfully replicate a null version', done => { let objMD; - async.series({ - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + putObject: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, next), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + headObject: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - headObject: next => dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObject; - assert.strictEqual(headObjectRes.VersionId, 'null'); + const headObjectRes = results.headObject; + 
assert.strictEqual(headObjectRes.VersionId, 'null'); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); + assert.strictEqual(Versions.length, 1); - const [currentVersion] = Versions; - assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(currentVersion.VersionId, 'null'); + const [currentVersion] = Versions; + assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, 'null'); - return done(); - }); + return done(); + } + ); }); it('should successfully replicate a suspended null version', done => { let objMD; - async.series({ - suspendVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Suspended' } }, next), - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + suspendVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + putObject: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, next), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + headObject: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - 
replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - headObject: next => dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObject; - assert.strictEqual(headObjectRes.VersionId, 'null'); + const headObjectRes = results.headObject; + assert.strictEqual(headObjectRes.VersionId, 'null'); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); + assert.strictEqual(Versions.length, 1); - const [currentVersion] = Versions; - assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(currentVersion.VersionId, 'null'); + const [currentVersion] = Versions; + assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, 'null'); - return done(); - }); + return done(); + } + ); }); it('should successfully replicate a null version and update it', done => { let objMD; - async.series({ - putObject: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: 'null', + async.series( + { + putObject: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + updateMetadata: next => { + const { result, error } = ObjectMD.createFromBlob(objMD); + if (error) { + return next(error); + } + result.setAmzStorageClass(storageClass); + return makeBackbeatRequest( + { + method: 'PUT', + 
resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: destinationAuthCredentials, + requestBody: result.getSerialized(), + }, + next + ); }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + headObject: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); - } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - updateMetadata: next => { - const { result, error } = ObjectMD.createFromBlob(objMD); - if (error) { - return next(error); + return done(err); } - result.setAmzStorageClass(storageClass); - return makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: destinationAuthCredentials, - requestBody: result.getSerialized(), - }, next); - }, - headObject: next => dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObject; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = results.headObject; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); + assert.strictEqual(Versions.length, 1); - const [currentVersion] = Versions; - assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(currentVersion.VersionId, 'null'); - assert.strictEqual(currentVersion.StorageClass, storageClass); + const [currentVersion] = Versions; + assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, 'null'); + assert.strictEqual(currentVersion.StorageClass, storageClass); - return done(); - }); + return done(); + } + ); }); it('should successfully put object after replicating a null version', done => { let objMD; let expectedVersionId; - async.series({ - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: 
bucketSource, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - putObjectDestination: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { + async.series( + { + putObjectSource: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + putObjectDestination: next => + dstS3.putObject( + { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, + (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + } + ), + headObject: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - expectedVersionId = data.VersionId; - return next(); - }), - headObject: next => dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObject; - assert.strictEqual(headObjectRes.VersionId, 'null'); + const headObjectRes = results.headObject; + assert.strictEqual(headObjectRes.VersionId, 'null'); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); + assert.strictEqual(Versions.length, 2); - const [currentVersion, nonCurrentVersion] = Versions; - 
assert.strictEqual(currentVersion.VersionId, expectedVersionId); - assert.strictEqual(nonCurrentVersion.VersionId, 'null'); + const [currentVersion, nonCurrentVersion] = Versions; + assert.strictEqual(currentVersion.VersionId, expectedVersionId); + assert.strictEqual(nonCurrentVersion.VersionId, 'null'); - return done(); - }); + return done(); + } + ); }); it('should replicate/put metadata to a destination that has a version', done => { @@ -870,248 +1073,313 @@ describe('backbeat routes for replication', () => { let firstVersionId; let secondVersionId; - async.series({ - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectDestination: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - firstVersionId = data.VersionId; - return next(); - }), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - secondVersionId = data.VersionId; - return next(); - }), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: secondVersionId, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObjectDestination: next => + dstS3.putObject( + { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, + (err, data) => { + if (err) { + return next(err); + } + firstVersionId = data.VersionId; + return next(); + } + ), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObjectSource: next => + srcS3.putObject( + { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, + (err, data) => { + if (err) { + return next(err); + } + secondVersionId = data.VersionId; + return next(); + } + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: secondVersionId, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: secondVersionId, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + headObjectFirstVersion: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: firstVersionId }, next), + headObjectSecondVersion: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: secondVersionId }, next), + listObjectVersions: next => 
dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: secondVersionId, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - headObjectFirstVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: firstVersionId }, next), - headObjectSecondVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: secondVersionId }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const firstHeadObjectRes = results.headObjectFirstVersion; - assert.strictEqual(firstHeadObjectRes.VersionId, firstVersionId); + const firstHeadObjectRes = results.headObjectFirstVersion; + assert.strictEqual(firstHeadObjectRes.VersionId, firstVersionId); - const secondHeadObjectRes = results.headObjectSecondVersion; - assert.strictEqual(secondHeadObjectRes.VersionId, secondVersionId); + const secondHeadObjectRes = results.headObjectSecondVersion; + assert.strictEqual(secondHeadObjectRes.VersionId, secondVersionId); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); - const [currentVersion, nonCurrentVersion] = Versions; + assert.strictEqual(Versions.length, 2); + const [currentVersion, nonCurrentVersion] = Versions; - assert.strictEqual(currentVersion.VersionId, secondVersionId); - assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, secondVersionId); + assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(nonCurrentVersion.VersionId, firstVersionId); - assert.strictEqual(nonCurrentVersion.IsLatest, false); + assert.strictEqual(nonCurrentVersion.VersionId, firstVersionId); + assert.strictEqual(nonCurrentVersion.IsLatest, false); - return done(); - }); + return done(); + } + ); }); it.skip('should replicate/put metadata to a destination that has a null version', done => { let objMD; let versionId; - async.series({ - putObjectDestinationInitial: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: 
sourceAuthCredentials, - }, (err, data) => { + async.series( + { + putObjectDestinationInitial: next => + dstS3.putObject({ Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObjectSource: next => + srcS3.putObject( + { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, + (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + } + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + headObjectNullVersion: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - headObjectNullVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions( - { Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObjectNullVersion; - assert.strictEqual(headObjectRes.VersionId, 'null'); + const headObjectRes = results.headObjectNullVersion; + assert.strictEqual(headObjectRes.VersionId, 'null'); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); - const [currentVersion, nonCurrentVersion] = Versions; + assert.strictEqual(Versions.length, 2); + const [currentVersion, nonCurrentVersion] = Versions; - assert.strictEqual(currentVersion.VersionId, versionId); - assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, versionId); + assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(nonCurrentVersion.VersionId, 'null'); - 
assert.strictEqual(nonCurrentVersion.IsLatest, false); + assert.strictEqual(nonCurrentVersion.VersionId, 'null'); + assert.strictEqual(nonCurrentVersion.IsLatest, false); - return done(); - }); + return done(); + } + ); }); it.skip('should replicate/put metadata to a destination that has a suspended null version', done => { let objMD; let versionId; - async.series({ - suspendVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Suspended' } }, next), - putObjectDestinationInitial: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + suspendVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + putObjectDestinationInitial: next => + dstS3.putObject({ Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObjectSource: next => + srcS3.putObject( + { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, + (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + } + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + headObjectNullVersion: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - 
.setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - headObjectNullVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObjectNullVersion; - assert.strictEqual(headObjectRes.VersionId, 'null'); + const headObjectRes = results.headObjectNullVersion; + assert.strictEqual(headObjectRes.VersionId, 'null'); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); - const [currentVersion, nonCurrentVersion] = Versions; + assert.strictEqual(Versions.length, 2); + const [currentVersion, nonCurrentVersion] = Versions; - assert.strictEqual(currentVersion.VersionId, versionId); - assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, versionId); + assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(nonCurrentVersion.VersionId, 'null'); - assert.strictEqual(nonCurrentVersion.IsLatest, false); + assert.strictEqual(nonCurrentVersion.VersionId, 'null'); + assert.strictEqual(nonCurrentVersion.IsLatest, false); - return done(); - }); + return done(); + } + ); }); it.skip('should replicate/put metadata to a destination that has a previously updated null version', done => { @@ -1119,109 +1387,136 @@ describe('backbeat routes for replication', () => { let objMDNull; let versionId; - async.series({ - putObjectDestinationInitial: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - getMetadataNullVersion: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: destinationAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - objMDNull = JSON.parse(data.body).Body; - return next(); - }), - updateMetadataNullVersion: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: destinationAuthCredentials, - requestBody: objMDNull, - }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + 
putObjectDestinationInitial: next => + dstS3.putObject({ Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + getMetadataNullVersion: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: destinationAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + objMDNull = JSON.parse(data.body).Body; + return next(); + } + ), + updateMetadataNullVersion: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: destinationAuthCredentials, + requestBody: objMDNull, + }, + next + ), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObjectSource: next => + srcS3.putObject( + { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, + (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + } + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + headObjectNullVersion: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - headObjectNullVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObjectNullVersion; - assert.strictEqual(headObjectRes.VersionId, 'null'); + const headObjectRes = results.headObjectNullVersion; + assert.strictEqual(headObjectRes.VersionId, 'null'); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = 
listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); - const [currentVersion, nonCurrentVersion] = Versions; + assert.strictEqual(Versions.length, 2); + const [currentVersion, nonCurrentVersion] = Versions; - assert.strictEqual(currentVersion.VersionId, versionId); - assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, versionId); + assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(nonCurrentVersion.VersionId, 'null'); - assert.strictEqual(nonCurrentVersion.IsLatest, false); + assert.strictEqual(nonCurrentVersion.VersionId, 'null'); + assert.strictEqual(nonCurrentVersion.IsLatest, false); - return done(); - }); + return done(); + } + ); }); - it.skip('should replicate/put metadata to a destination that has a suspended null version with internal version', - done => { + it.skip('should replicate/put metadata to a destination that has a suspended null version with internal version', done => { const tagSet = [ { Key: 'key1', @@ -1231,87 +1526,113 @@ describe('backbeat routes for replication', () => { let objMD; let versionId; - async.series({ - suspendVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Suspended' } }, next), - putObjectDestinationInitial: next => dstS3.putObject( - { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), - putObjectTagging: next => dstS3.putObjectTagging( - { Bucket: bucketDestination, Key: keyName, Tagging: { TagSet: tagSet } }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - putObjectSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + suspendVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Suspended' } }, + next + ), + putObjectDestinationInitial: next => + dstS3.putObject({ Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next), + putObjectTagging: next => + dstS3.putObjectTagging( + { Bucket: bucketDestination, Key: keyName, Tagging: { TagSet: tagSet } }, + next + ), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + putObjectSource: next => + srcS3.putObject( + { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, + (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + } + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + 
objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + headObjectNullVersion: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), + getObjectTaggingNullVersion: next => + dstS3.getObjectTagging({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), + listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - headObjectNullVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - getObjectTaggingNullVersion: next => dstS3.getObjectTagging( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - listObjectVersions: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObjectNullVersion; - assert.strictEqual(headObjectRes.VersionId, 'null'); + const headObjectRes = results.headObjectNullVersion; + assert.strictEqual(headObjectRes.VersionId, 'null'); - const getObjectTaggingRes = results.getObjectTaggingNullVersion; - assert.deepStrictEqual(getObjectTaggingRes.TagSet, tagSet); + const getObjectTaggingRes = results.getObjectTaggingNullVersion; + assert.deepStrictEqual(getObjectTaggingRes.TagSet, tagSet); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); - const [currentVersion, nonCurrentVersion] = Versions; + assert.strictEqual(Versions.length, 2); + const [currentVersion, nonCurrentVersion] = Versions; - assert.strictEqual(currentVersion.VersionId, versionId); - assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, versionId); + assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(nonCurrentVersion.VersionId, 'null'); - assert.strictEqual(nonCurrentVersion.IsLatest, false); + assert.strictEqual(nonCurrentVersion.VersionId, 'null'); + assert.strictEqual(nonCurrentVersion.IsLatest, false); - return done(); - }); + return done(); + } + ); }); it.skip('should mimic null version replication by crrExistingObjects, then replicate version', done => { @@ -1320,335 
+1641,454 @@ describe('backbeat routes for replication', () => { let objMDVersion; let versionId; - async.series({ - createNullSoloMasterKey: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next), - enableVersioningSource: next => srcS3.putBucketVersioning( - { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next), - simulateCrrExistingObjectsGetMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - objMDNull = JSON.parse(data.body).Body; - assert.strictEqual(JSON.parse(objMDNull).versionId, undefined); - return next(); - }), - simulateCrrExistingObjectsPutMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: sourceAuthCredentials, - requestBody: objMDNull, - }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning( - { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next), - replicateNullVersion: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - // Backbeat updates account info in metadata - // to the destination account info - objMDNullReplicated = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - putReplicatedNullVersion: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: destinationAuthCredentials, - requestBody: objMDNullReplicated, - }, next), - putNewVersionSource: next => srcS3.putObject( - { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - simulateMetadataReplicationVersion: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + createNullSoloMasterKey: next => + srcS3.putObject({ Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + simulateCrrExistingObjectsGetMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + objMDNull = JSON.parse(data.body).Body; + assert.strictEqual(JSON.parse(objMDNull).versionId, undefined); + return next(); + } + ), + simulateCrrExistingObjectsPutMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: 
sourceAuthCredentials, + requestBody: objMDNull, + }, + next + ), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, + next + ), + replicateNullVersion: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMDNullReplicated = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + putReplicatedNullVersion: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: destinationAuthCredentials, + requestBody: objMDNullReplicated, + }, + next + ), + putNewVersionSource: next => + srcS3.putObject( + { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, + (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + } + ), + simulateMetadataReplicationVersion: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMDVersion = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + listObjectVersionsBeforeReplicate: next => + dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + putReplicatedVersion: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { + versionId, + }, + authCredentials: destinationAuthCredentials, + requestBody: objMDVersion, + }, + next + ), + checkReplicatedNullVersion: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), + checkReplicatedVersion: next => + dstS3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: versionId }, next), + listObjectVersionsAfterReplicate: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMDVersion = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - listObjectVersionsBeforeReplicate: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - putReplicatedVersion: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { - versionId, - }, - authCredentials: destinationAuthCredentials, - requestBody: objMDVersion, - }, next), - checkReplicatedNullVersion: next => dstS3.headObject( - { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next), - checkReplicatedVersion: next => dstS3.headObject( - { Bucket: 
bucketDestination, Key: keyName, VersionId: versionId }, next), - listObjectVersionsAfterReplicate: next => dstS3.listObjectVersions({ Bucket: bucketDestination }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectNullVersionRes = results.checkReplicatedNullVersion; - assert.strictEqual(headObjectNullVersionRes.VersionId, 'null'); + const headObjectNullVersionRes = results.checkReplicatedNullVersion; + assert.strictEqual(headObjectNullVersionRes.VersionId, 'null'); - const headObjectVersionRes = results.checkReplicatedVersion; - assert.strictEqual(headObjectVersionRes.VersionId, versionId); + const headObjectVersionRes = results.checkReplicatedVersion; + assert.strictEqual(headObjectVersionRes.VersionId, versionId); - const listObjectVersionsRes = results.listObjectVersionsAfterReplicate; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersionsAfterReplicate; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); + assert.strictEqual(Versions.length, 2); - const [currentVersion, nonCurrentVersion] = Versions; + const [currentVersion, nonCurrentVersion] = Versions; - assert.strictEqual(currentVersion.VersionId, versionId); - assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, versionId); + assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(nonCurrentVersion.VersionId, 'null'); - assert.strictEqual(nonCurrentVersion.IsLatest, false); + assert.strictEqual(nonCurrentVersion.VersionId, 'null'); + assert.strictEqual(nonCurrentVersion.IsLatest, false); - return done(); - }); + return done(); + } + ); }); it('should replicate/put NULL metadata to a destination that has a version', done => { let objMD; let versionId; - async.series({ - enableVersioningDestination: next => dstS3.putBucketVersioning({ - Bucket: bucketDestination, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - putObjectDestination: next => dstS3.putObject({ - Bucket: bucketDestination, - Key: keyName, - Body: Buffer.from(testData), - }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - putObjectSource: next => srcS3.putObject({ - Bucket: bucketSource, - Key: keyName, - Body: Buffer.from(testData), - }, next), - enableVersioningSource: next => srcS3.putBucketVersioning({ - Bucket: bucketSource, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { versionId: 'null' }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + putObjectDestination: next => + dstS3.putObject( + { + Bucket: bucketDestination, + Key: keyName, + Body: Buffer.from(testData), + }, + (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + } + ), + putObjectSource: next => + srcS3.putObject( + { + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData), + }, + next + ), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + 
resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { versionId: 'null' }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { versionId: 'null' }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + headObjectByVersionId: next => + dstS3.headObject( + { + Bucket: bucketDestination, + Key: keyName, + VersionId: versionId, + }, + next + ), + headObjectByNullVersionId: next => + dstS3.headObject( + { + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null', + }, + next + ), + listObjectVersions: next => + dstS3.listObjectVersions( + { + Bucket: bucketDestination, + }, + next + ), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { versionId: 'null' }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - headObjectByVersionId: next => dstS3.headObject({ - Bucket: bucketDestination, - Key: keyName, - VersionId: versionId, - }, next), - headObjectByNullVersionId: next => dstS3.headObject({ - Bucket: bucketDestination, - Key: keyName, - VersionId: 'null', - }, next), - listObjectVersions: next => dstS3.listObjectVersions({ - Bucket: bucketDestination, - }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const firstHeadObjectRes = results.headObjectByVersionId; - assert.strictEqual(firstHeadObjectRes.VersionId, versionId); + const firstHeadObjectRes = results.headObjectByVersionId; + assert.strictEqual(firstHeadObjectRes.VersionId, versionId); - const secondHeadObjectRes = results.headObjectByNullVersionId; - assert.strictEqual(secondHeadObjectRes.VersionId, 'null'); + const secondHeadObjectRes = results.headObjectByNullVersionId; + assert.strictEqual(secondHeadObjectRes.VersionId, 'null'); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); - const [currentVersion, nonCurrentVersion] = Versions; + assert.strictEqual(Versions.length, 2); + const [currentVersion, nonCurrentVersion] = Versions; - assert.strictEqual(currentVersion.VersionId, 'null'); - assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, 'null'); + assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(nonCurrentVersion.VersionId, versionId); - assert.strictEqual(nonCurrentVersion.IsLatest, false); + assert.strictEqual(nonCurrentVersion.VersionId, versionId); + assert.strictEqual(nonCurrentVersion.IsLatest, false); - return done(); - }); + return 
done(); + } + ); }); it('should replicate/put NULL metadata to a destination that has a null version', done => { let objMD; - async.series({ - putObjectDestinationInitial: next => dstS3.putObject({ - Bucket: bucketDestination, - Key: keyName, - Body: Buffer.from(testData), - }, next), - enableVersioningDestination: next => dstS3.putBucketVersioning({ - Bucket: bucketDestination, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - putObjectSource: next => srcS3.putObject({ - Bucket: bucketSource, - Key: keyName, - Body: Buffer.from(testData), - }, next), - enableVersioningSource: next => srcS3.putBucketVersioning({ - Bucket: bucketSource, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - putObjectTaggingSource: next => srcS3.putObjectTagging({ - Bucket: bucketSource, - Key: keyName, - VersionId: 'null', - Tagging: { TagSet: [{ Key: 'key1', Value: 'value1' }] }, - }, next), - getMetadata: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { versionId: 'null' }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { + async.series( + { + putObjectDestinationInitial: next => + dstS3.putObject( + { + Bucket: bucketDestination, + Key: keyName, + Body: Buffer.from(testData), + }, + next + ), + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + putObjectSource: next => + srcS3.putObject( + { + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData), + }, + next + ), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + putObjectTaggingSource: next => + srcS3.putObjectTagging( + { + Bucket: bucketSource, + Key: keyName, + VersionId: 'null', + Tagging: { TagSet: [{ Key: 'key1', Value: 'value1' }] }, + }, + next + ), + getMetadata: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { versionId: 'null' }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMD = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + replicateMetadata: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { versionId: 'null' }, + authCredentials: destinationAuthCredentials, + requestBody: objMD, + }, + next + ), + headObjectNullVersion: next => + dstS3.headObject( + { + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null', + }, + next + ), + getObjectTaggingNullVersion: next => + dstS3.getObjectTagging( + { + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null', + }, + next + ), + listObjectVersions: next => + dstS3.listObjectVersions( + { + Bucket: bucketDestination, + }, + next + ), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMD = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - replicateMetadata: next 
=> makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { versionId: 'null' }, - authCredentials: destinationAuthCredentials, - requestBody: objMD, - }, next), - headObjectNullVersion: next => dstS3.headObject({ - Bucket: bucketDestination, - Key: keyName, - VersionId: 'null', - }, next), - getObjectTaggingNullVersion: next => dstS3.getObjectTagging({ - Bucket: bucketDestination, - Key: keyName, - VersionId: 'null', - }, next), - listObjectVersions: next => dstS3.listObjectVersions({ - Bucket: bucketDestination, - }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const headObjectRes = results.headObjectNullVersion; - assert.strictEqual(headObjectRes.VersionId, 'null'); + const headObjectRes = results.headObjectNullVersion; + assert.strictEqual(headObjectRes.VersionId, 'null'); - const getObjectTaggingRes = results.getObjectTaggingNullVersion; - assert.deepStrictEqual(getObjectTaggingRes.TagSet, [{ Key: 'key1', Value: 'value1' }]); + const getObjectTaggingRes = results.getObjectTaggingNullVersion; + assert.deepStrictEqual(getObjectTaggingRes.TagSet, [{ Key: 'key1', Value: 'value1' }]); - const listObjectVersionsRes = results.listObjectVersions; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersions; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 1); - const [currentVersion] = Versions; + assert.strictEqual(Versions.length, 1); + const [currentVersion] = Versions; - assert.strictEqual(currentVersion.VersionId, 'null'); - assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, 'null'); + assert.strictEqual(currentVersion.IsLatest, true); - return done(); - }); + return done(); + } + ); }); it('should replicate/put a lifecycled NULL metadata to a destination that has a version', done => { @@ -1656,119 +2096,166 @@ describe('backbeat routes for replication', () => { let objMDReplicated; let versionId; - async.series({ - enableVersioningDestination: next => dstS3.putBucketVersioning({ - Bucket: bucketDestination, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - putObjectDestination: next => dstS3.putObject({ - Bucket: bucketDestination, - Key: keyName, - Body: Buffer.from(testData), - }, (err, data) => { - if (err) { - return next(err); - } - versionId = data.VersionId; - return next(); - }), - putObjectSource: next => srcS3.putObject({ - Bucket: bucketSource, - Key: keyName, - Body: Buffer.from(testData), - }, next), - enableVersioningSource: next => srcS3.putBucketVersioning({ - Bucket: bucketSource, - VersioningConfiguration: { Status: 'Enabled' }, - }, next), - simulateLifecycleNullVersion: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { versionId: 'null' }, - authCredentials: sourceAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - objMDUpdated = JSON.parse(data.body).Body; - return next(); - }), - updateMetadataSource: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { versionId: 'null' }, - authCredentials: sourceAuthCredentials, - requestBody: objMDUpdated, - }, next), - getReplicatedNullVersion: next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket: bucketSource, - objectKey: keyName, - queryObj: { versionId: 'null' }, - 
authCredentials: destinationAuthCredentials, - }, (err, data) => { + async.series( + { + enableVersioningDestination: next => + dstS3.putBucketVersioning( + { + Bucket: bucketDestination, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + putObjectDestination: next => + dstS3.putObject( + { + Bucket: bucketDestination, + Key: keyName, + Body: Buffer.from(testData), + }, + (err, data) => { + if (err) { + return next(err); + } + versionId = data.VersionId; + return next(); + } + ), + putObjectSource: next => + srcS3.putObject( + { + Bucket: bucketSource, + Key: keyName, + Body: Buffer.from(testData), + }, + next + ), + enableVersioningSource: next => + srcS3.putBucketVersioning( + { + Bucket: bucketSource, + VersioningConfiguration: { Status: 'Enabled' }, + }, + next + ), + simulateLifecycleNullVersion: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { versionId: 'null' }, + authCredentials: sourceAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + objMDUpdated = JSON.parse(data.body).Body; + return next(); + } + ), + updateMetadataSource: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { versionId: 'null' }, + authCredentials: sourceAuthCredentials, + requestBody: objMDUpdated, + }, + next + ), + getReplicatedNullVersion: next => + makeBackbeatRequest( + { + method: 'GET', + resourceType: 'metadata', + bucket: bucketSource, + objectKey: keyName, + queryObj: { versionId: 'null' }, + authCredentials: destinationAuthCredentials, + }, + (err, data) => { + if (err) { + return next(err); + } + // Backbeat updates account info in metadata + // to the destination account info + objMDReplicated = objectMDFromRequestBody(data) + .setOwnerDisplayName(dstAccountInfo.name) + .setOwnerId(dstAccountInfo.canonicalID) + .getSerialized(); + return next(); + } + ), + putReplicatedNullVersion: next => + makeBackbeatRequest( + { + method: 'PUT', + resourceType: 'metadata', + bucket: bucketDestination, + objectKey: keyName, + queryObj: { versionId: 'null' }, + authCredentials: destinationAuthCredentials, + requestBody: objMDReplicated, + }, + next + ), + headObjectByVersionId: next => + dstS3.headObject( + { + Bucket: bucketDestination, + Key: keyName, + VersionId: versionId, + }, + next + ), + headObjectByNullVersion: next => + dstS3.headObject( + { + Bucket: bucketDestination, + Key: keyName, + VersionId: 'null', + }, + next + ), + listObjectVersionsDestination: next => + dstS3.listObjectVersions( + { + Bucket: bucketDestination, + }, + next + ), + }, + (err, results) => { if (err) { - return next(err); + return done(err); } - // Backbeat updates account info in metadata - // to the destination account info - objMDReplicated = objectMDFromRequestBody(data) - .setOwnerDisplayName(dstAccountInfo.name) - .setOwnerId(dstAccountInfo.canonicalID) - .getSerialized(); - return next(); - }), - putReplicatedNullVersion: next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket: bucketDestination, - objectKey: keyName, - queryObj: { versionId: 'null' }, - authCredentials: destinationAuthCredentials, - requestBody: objMDReplicated, - }, next), - headObjectByVersionId: next => dstS3.headObject({ - Bucket: bucketDestination, - Key: keyName, - VersionId: versionId, - }, next), - headObjectByNullVersion: next => dstS3.headObject({ - Bucket: bucketDestination, - Key: keyName, - 
VersionId: 'null', - }, next), - listObjectVersionsDestination: next => dstS3.listObjectVersions({ - Bucket: bucketDestination, - }, next), - }, (err, results) => { - if (err) { - return done(err); - } - const firstHeadObjectRes = results.headObjectByVersionId; - assert.strictEqual(firstHeadObjectRes.VersionId, versionId); + const firstHeadObjectRes = results.headObjectByVersionId; + assert.strictEqual(firstHeadObjectRes.VersionId, versionId); - const secondHeadObjectRes = results.headObjectByNullVersion; - assert.strictEqual(secondHeadObjectRes.VersionId, 'null'); + const secondHeadObjectRes = results.headObjectByNullVersion; + assert.strictEqual(secondHeadObjectRes.VersionId, 'null'); - const listObjectVersionsRes = results.listObjectVersionsDestination; - const { Versions } = listObjectVersionsRes; + const listObjectVersionsRes = results.listObjectVersionsDestination; + const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); - const [currentVersion, nonCurrentVersion] = Versions; + assert.strictEqual(Versions.length, 2); + const [currentVersion, nonCurrentVersion] = Versions; - assert.strictEqual(currentVersion.VersionId, 'null'); - assert.strictEqual(currentVersion.IsLatest, true); + assert.strictEqual(currentVersion.VersionId, 'null'); + assert.strictEqual(currentVersion.IsLatest, true); - assert.strictEqual(nonCurrentVersion.VersionId, versionId); - assert.strictEqual(nonCurrentVersion.IsLatest, false); + assert.strictEqual(nonCurrentVersion.VersionId, versionId); + assert.strictEqual(nonCurrentVersion.IsLatest, false); - return done(); - }); + return done(); + } + ); }); }); diff --git a/tests/multipleBackend/routes/routeVeeam.js b/tests/multipleBackend/routes/routeVeeam.js index f2520b2f6b..3a5fac05fe 100644 --- a/tests/multipleBackend/routes/routeVeeam.js +++ b/tests/multipleBackend/routes/routeVeeam.js @@ -3,8 +3,7 @@ const crypto = require('crypto'); const async = require('async'); const { makeRequest } = require('../../functional/raw-node/utils/makeRequest'); -const BucketUtility = - require('../../functional/aws-node-sdk/lib/utility/bucket-util'); +const BucketUtility = require('../../functional/aws-node-sdk/lib/utility/bucket-util'); const ipAddress = process.env.IP ? 
process.env.IP : '127.0.0.1'; @@ -28,9 +27,7 @@ const testCapacity = ` 0 \n`; -const testCapacityMd5 = crypto.createHash('md5') - .update(testCapacity, 'utf-8') - .digest('hex'); +const testCapacityMd5 = crypto.createHash('md5').update(testCapacity, 'utf-8').digest('hex'); const invalidTestCapacity = ` @@ -39,9 +36,7 @@ const invalidTestCapacity = ` 0 \n`; -const invalidTestCapacityMd5 = crypto.createHash('md5') - .update(invalidTestCapacity, 'utf-8') - .digest('hex'); +const invalidTestCapacityMd5 = crypto.createHash('md5').update(invalidTestCapacity, 'utf-8').digest('hex'); const testSystem = ` @@ -64,9 +59,7 @@ const testSystem = ` \n`; -const testSystemMd5 = crypto.createHash('md5') - .update(testSystem, 'utf-8') - .digest('hex'); +const testSystemMd5 = crypto.createHash('md5').update(testSystem, 'utf-8').digest('hex'); const invalidTestSystem = ` @@ -89,9 +82,7 @@ const invalidTestSystem = ` \n`; -const invalidTestSystemMd5 = crypto.createHash('md5') - .update(testSystem, 'utf-8') - .digest('hex'); +const invalidTestSystemMd5 = crypto.createHash('md5').update(testSystem, 'utf-8').digest('hex'); let bucketUtil; let s3; @@ -112,8 +103,7 @@ let s3; * @return {undefined} - and call callback */ function makeVeeamRequest(params, callback) { - const { method, headers, bucket, objectKey, - authCredentials, requestBody, queryObj } = params; + const { method, headers, bucket, objectKey, authCredentials, requestBody, queryObj } = params; const options = { authCredentials, hostname: ipAddress, @@ -131,10 +121,10 @@ function makeVeeamRequest(params, callback) { describe('veeam PUT routes:', () => { before(done => { - bucketUtil = new BucketUtility( - 'default', { signatureVersion: 'v4' }); + bucketUtil = new BucketUtility('default', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - s3.createBucket({ Bucket: TEST_BUCKET }).promise() + s3.createBucket({ Bucket: TEST_BUCKET }) + .promise() .then(() => done()) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); @@ -142,7 +132,8 @@ describe('veeam PUT routes:', () => { }); }); after(done => { - bucketUtil.empty(TEST_BUCKET) + bucketUtil + .empty(TEST_BUCKET) .then(() => s3.deleteBucket({ Bucket: TEST_BUCKET }).promise()) .then(() => done()) .catch(done); @@ -152,76 +143,88 @@ describe('veeam PUT routes:', () => { ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml', testSystem, testSystemMd5], ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml', testCapacity, testCapacityMd5], ].forEach(key => { - it(`PUT ${key[0]}`, done => makeVeeamRequest({ - method: 'PUT', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'content-length': key[1].length, - 'content-md5': key[2], - 'x-scal-canonical-id': testArn, + it(`PUT ${key[0]}`, done => + makeVeeamRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'content-length': key[1].length, + 'content-md5': key[2], + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + requestBody: key[1], }, - authCredentials: veeamAuthCredentials, - requestBody: key[1], - }, (err, response) => { - if (err) { - // Return the error, if any - return done(err); + (err, response) => { + if (err) { + // Return the error, if any + return done(err); + } + assert.strictEqual(response.statusCode, 200); + return done(); } - assert.strictEqual(response.statusCode, 200); - return done(); - })); + )); }); [ ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml', invalidTestSystem, invalidTestSystemMd5], 
['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml', invalidTestCapacity, invalidTestCapacityMd5], ].forEach(key => { - it(`PUT ${key[0]} should fail for invalid XML`, done => makeVeeamRequest({ - method: 'PUT', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'content-length': key[1].length + 3, - 'content-md5': key[2], - 'x-scal-canonical-id': testArn, + it(`PUT ${key[0]} should fail for invalid XML`, done => + makeVeeamRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'content-length': key[1].length + 3, + 'content-md5': key[2], + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + requestBody: `${key[1]}gff`, }, - authCredentials: veeamAuthCredentials, - requestBody: `${key[1]}gff`, - }, err => { - assert.strictEqual(err.code, 'MalformedXML'); - return done(); - })); + err => { + assert.strictEqual(err.code, 'MalformedXML'); + return done(); + } + )); }); [ ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml', testSystem, testSystemMd5], ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml', testCapacity, testCapacityMd5], ].forEach(key => { - it(`PUT ${key[0]} should fail if invalid credentials are sent`, done => makeVeeamRequest({ - method: 'PUT', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'content-length': key[1].length + 3, - 'content-md5': key[2], - 'x-scal-canonical-id': testArn, + it(`PUT ${key[0]} should fail if invalid credentials are sent`, done => + makeVeeamRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'content-length': key[1].length + 3, + 'content-md5': key[2], + 'x-scal-canonical-id': testArn, + }, + authCredentials: badVeeamAuthCredentials, + requestBody: `${key[1]}gff`, }, - authCredentials: badVeeamAuthCredentials, - requestBody: `${key[1]}gff`, - }, err => { - assert.strictEqual(err.code, 'InvalidAccessKeyId'); - return done(); - })); + err => { + assert.strictEqual(err.code, 'InvalidAccessKeyId'); + return done(); + } + )); }); }); describe('veeam GET routes:', () => { beforeEach(done => { - bucketUtil = new BucketUtility( - 'default', { signatureVersion: 'v4' }); + bucketUtil = new BucketUtility('default', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - s3.createBucket({ Bucket: TEST_BUCKET }).promise() + s3.createBucket({ Bucket: TEST_BUCKET }) + .promise() .then(() => done()) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); @@ -229,7 +232,8 @@ describe('veeam GET routes:', () => { }); }); afterEach(done => { - bucketUtil.empty(TEST_BUCKET) + bucketUtil + .empty(TEST_BUCKET) .then(() => s3.deleteBucket({ Bucket: TEST_BUCKET }).promise()) .then(() => done()) .catch(done); @@ -240,45 +244,56 @@ describe('veeam GET routes:', () => { ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml', testCapacity, testCapacityMd5], ].forEach(key => { it(`GET ${key[0]} should return the expected XML file`, done => { - async.waterfall([ - next => makeVeeamRequest({ - method: 'PUT', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'content-length': key[1].length, - 'content-md5': key[2], - 'x-scal-canonical-id': testArn, - }, - authCredentials: veeamAuthCredentials, - requestBody: key[1], - }, (err, response) => { - if (err) { - return done(err); - } - assert.strictEqual(response.statusCode, 200); - return next(); - }), - next => makeVeeamRequest({ - method: 'GET', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'x-scal-canonical-id': testArn, - }, - authCredentials: 
veeamAuthCredentials, - }, (err, response) => { - if (err) { - return done(err); - } - assert.strictEqual(response.statusCode, 200); - assert.strictEqual(response.body.replaceAll(' ', ''), key[1].replaceAll(' ', '')); - return next(); - }), - ], err => { - assert.ifError(err); - return done(); - }); + async.waterfall( + [ + next => + makeVeeamRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'content-length': key[1].length, + 'content-md5': key[2], + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + requestBody: key[1], + }, + (err, response) => { + if (err) { + return done(err); + } + assert.strictEqual(response.statusCode, 200); + return next(); + } + ), + next => + makeVeeamRequest( + { + method: 'GET', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + }, + (err, response) => { + if (err) { + return done(err); + } + assert.strictEqual(response.statusCode, 200); + assert.strictEqual(response.body.replaceAll(' ', ''), key[1].replaceAll(' ', '')); + return next(); + } + ), + ], + err => { + assert.ifError(err); + return done(); + } + ); }); }); @@ -287,76 +302,90 @@ describe('veeam GET routes:', () => { ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml', testCapacity, testCapacityMd5], ].forEach(key => { it(`GET ${key[0]} should return the expected XML file for cors requests`, done => { - async.waterfall([ - next => makeVeeamRequest({ - method: 'PUT', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'origin': 'http://localhost:8000', - 'content-length': key[1].length, - 'content-md5': key[2], - 'x-scal-canonical-id': testArn, - }, - authCredentials: veeamAuthCredentials, - requestBody: key[1], - }, (err, response) => { - if (err) { - return done(err); - } - assert.strictEqual(response.statusCode, 200); - return next(); - }), - next => makeVeeamRequest({ - method: 'GET', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'origin': 'http://localhost:8000', - 'x-scal-canonical-id': testArn, - }, - authCredentials: veeamAuthCredentials, - }, (err, response) => { - if (err) { - return done(err); - } - assert.strictEqual(response.statusCode, 200); - assert.strictEqual(response.body.replaceAll(' ', ''), key[1].replaceAll(' ', '')); - return next(); - }), - ], err => { - assert.ifError(err); - return done(); - }); + async.waterfall( + [ + next => + makeVeeamRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + origin: 'http://localhost:8000', + 'content-length': key[1].length, + 'content-md5': key[2], + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + requestBody: key[1], + }, + (err, response) => { + if (err) { + return done(err); + } + assert.strictEqual(response.statusCode, 200); + return next(); + } + ), + next => + makeVeeamRequest( + { + method: 'GET', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + origin: 'http://localhost:8000', + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + }, + (err, response) => { + if (err) { + return done(err); + } + assert.strictEqual(response.statusCode, 200); + assert.strictEqual(response.body.replaceAll(' ', ''), key[1].replaceAll(' ', '')); + return next(); + } + ), + ], + err => { + assert.ifError(err); + return done(); + } + ); }); }); - [ ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml', testSystem, testSystemMd5], 
['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml', testCapacity, testCapacityMd5], ].forEach(key => { - it(`GET ${key[0]} should fail if no data in bucket metadata`, done => makeVeeamRequest({ - method: 'GET', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'x-scal-canonical-id': testArn, + it(`GET ${key[0]} should fail if no data in bucket metadata`, done => + makeVeeamRequest( + { + method: 'GET', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, }, - authCredentials: veeamAuthCredentials, - }, err => { - assert.strictEqual(err.code, 'NoSuchKey'); - return done(); - })); + err => { + assert.strictEqual(err.code, 'NoSuchKey'); + return done(); + } + )); }); }); describe('veeam DELETE routes:', () => { beforeEach(done => { - bucketUtil = new BucketUtility( - 'default', { signatureVersion: 'v4' }); + bucketUtil = new BucketUtility('default', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - s3.createBucket({ Bucket: TEST_BUCKET }).promise() + s3.createBucket({ Bucket: TEST_BUCKET }) + .promise() .then(() => done()) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); @@ -364,7 +393,8 @@ describe('veeam DELETE routes:', () => { }); }); afterEach(done => { - bucketUtil.empty(TEST_BUCKET) + bucketUtil + .empty(TEST_BUCKET) .then(() => s3.deleteBucket({ Bucket: TEST_BUCKET }).promise()) .then(() => done()) .catch(done); @@ -375,72 +405,91 @@ describe('veeam DELETE routes:', () => { ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml', testCapacity, testCapacityMd5], ].forEach(key => { it(`DELETE ${key[0]} should delete the XML file`, done => { - async.waterfall([ - next => makeVeeamRequest({ - method: 'PUT', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'content-length': key[1].length, - 'content-md5': key[2], - 'x-scal-canonical-id': testArn, - }, - authCredentials: veeamAuthCredentials, - requestBody: key[1], - }, (err, response) => { - if (err) { - return done(err); - } - assert.strictEqual(response.statusCode, 200); - return next(); - }), - next => makeVeeamRequest({ - method: 'GET', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'x-scal-canonical-id': testArn, - }, - authCredentials: veeamAuthCredentials, - }, (err, response) => { - if (err) { - return done(err); - } - assert.strictEqual(response.statusCode, 200); - assert.strictEqual(response.body.replaceAll(' ', ''), key[1].replaceAll(' ', '')); - return next(); - }), - next => makeVeeamRequest({ - method: 'DELETE', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'x-scal-canonical-id': testArn, - }, - authCredentials: veeamAuthCredentials, - }, (err, response) => { - if (err) { - return done(err); - } - assert.strictEqual(response.statusCode, 204); - return next(); - }), - next => makeVeeamRequest({ - method: 'GET', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'x-scal-canonical-id': testArn, - }, - authCredentials: veeamAuthCredentials, - }, err => { - assert.strictEqual(err.code, 'NoSuchKey'); - return next(); - }), - ], err => { - assert.ifError(err); - return done(); - }); + async.waterfall( + [ + next => + makeVeeamRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'content-length': key[1].length, + 'content-md5': key[2], + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + requestBody: key[1], + }, + (err, response) => { + if (err) { + return done(err); + } + 
assert.strictEqual(response.statusCode, 200); + return next(); + } + ), + next => + makeVeeamRequest( + { + method: 'GET', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + }, + (err, response) => { + if (err) { + return done(err); + } + assert.strictEqual(response.statusCode, 200); + assert.strictEqual(response.body.replaceAll(' ', ''), key[1].replaceAll(' ', '')); + return next(); + } + ), + next => + makeVeeamRequest( + { + method: 'DELETE', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + }, + (err, response) => { + if (err) { + return done(err); + } + assert.strictEqual(response.statusCode, 204); + return next(); + } + ), + next => + makeVeeamRequest( + { + method: 'GET', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + }, + err => { + assert.strictEqual(err.code, 'NoSuchKey'); + return next(); + } + ), + ], + err => { + assert.ifError(err); + return done(); + } + ); }); }); @@ -448,27 +497,31 @@ describe('veeam DELETE routes:', () => { ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml', testSystem, testSystemMd5], ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml', testCapacity, testCapacityMd5], ].forEach(key => { - it(`DELETE ${key[0]} should fail if XML doesn't exist yet`, done => makeVeeamRequest({ - method: 'DELETE', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'x-scal-canonical-id': testArn, + it(`DELETE ${key[0]} should fail if XML doesn't exist yet`, done => + makeVeeamRequest( + { + method: 'DELETE', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, }, - authCredentials: veeamAuthCredentials, - }, err => { - assert.strictEqual(err.code, 'NoSuchKey'); - return done(); - })); + err => { + assert.strictEqual(err.code, 'NoSuchKey'); + return done(); + } + )); }); }); describe('veeam HEAD routes:', () => { beforeEach(done => { - bucketUtil = new BucketUtility( - 'default', { signatureVersion: 'v4' }); + bucketUtil = new BucketUtility('default', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - s3.createBucket({ Bucket: TEST_BUCKET }).promise() + s3.createBucket({ Bucket: TEST_BUCKET }) + .promise() .then(() => done()) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); @@ -476,7 +529,8 @@ describe('veeam HEAD routes:', () => { }); }); afterEach(done => { - bucketUtil.empty(TEST_BUCKET) + bucketUtil + .empty(TEST_BUCKET) .then(() => s3.deleteBucket({ Bucket: TEST_BUCKET }).promise()) .then(() => done()) .catch(done); @@ -487,44 +541,55 @@ describe('veeam HEAD routes:', () => { ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml', testCapacity, testCapacityMd5], ].forEach(key => { it(`HEAD ${key[0]} should return the existing XML file metadata`, done => { - async.waterfall([ - next => makeVeeamRequest({ - method: 'PUT', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'content-length': key[1].length, - 'content-md5': key[2], - 'x-scal-canonical-id': testArn, - }, - authCredentials: veeamAuthCredentials, - requestBody: key[1], - }, (err, response) => { - if (err) { - return done(err); - } - assert.strictEqual(response.statusCode, 200); - return next(); - }), - next => makeVeeamRequest({ - method: 'HEAD', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 
'x-scal-canonical-id': testArn, - }, - authCredentials: veeamAuthCredentials, - }, (err, response) => { - if (err) { - return done(err); - } - assert.strictEqual(response.statusCode, 200); - return next(); - }), - ], err => { - assert.ifError(err); - return done(); - }); + async.waterfall( + [ + next => + makeVeeamRequest( + { + method: 'PUT', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'content-length': key[1].length, + 'content-md5': key[2], + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + requestBody: key[1], + }, + (err, response) => { + if (err) { + return done(err); + } + assert.strictEqual(response.statusCode, 200); + return next(); + } + ), + next => + makeVeeamRequest( + { + method: 'HEAD', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, + }, + (err, response) => { + if (err) { + return done(err); + } + assert.strictEqual(response.statusCode, 200); + return next(); + } + ), + ], + err => { + assert.ifError(err); + return done(); + } + ); }); }); @@ -532,29 +597,32 @@ describe('veeam HEAD routes:', () => { ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml', testSystem, testSystemMd5], ['.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml', testCapacity, testCapacityMd5], ].forEach(key => { - it(`HEAD ${key[0]} should fail if no data in bucket metadata`, done => makeVeeamRequest({ - method: 'HEAD', - bucket: TEST_BUCKET, - objectKey: key[0], - headers: { - 'x-scal-canonical-id': testArn, + it(`HEAD ${key[0]} should fail if no data in bucket metadata`, done => + makeVeeamRequest( + { + method: 'HEAD', + bucket: TEST_BUCKET, + objectKey: key[0], + headers: { + 'x-scal-canonical-id': testArn, + }, + authCredentials: veeamAuthCredentials, }, - authCredentials: veeamAuthCredentials, - }, (err, res) => { - assert.strictEqual(res.statusCode, 404); - return done(); - })); + (err, res) => { + assert.strictEqual(res.statusCode, 404); + return done(); + } + )); }); }); - // TODO {test_debt} handle query params tests with signature (happy path) describe.skip('veeam LIST routes:', () => { beforeEach(done => { - bucketUtil = new BucketUtility( - 'default', { signatureVersion: 'v4' }); + bucketUtil = new BucketUtility('default', { signatureVersion: 'v4' }); s3 = bucketUtil.s3; - s3.createBucket({ Bucket: TEST_BUCKET }).promise() + s3.createBucket({ Bucket: TEST_BUCKET }) + .promise() .then(() => done()) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); @@ -562,7 +630,8 @@ describe.skip('veeam LIST routes:', () => { }); }); afterEach(done => { - bucketUtil.empty(TEST_BUCKET) + bucketUtil + .empty(TEST_BUCKET) .then(() => s3.deleteBucket({ Bucket: TEST_BUCKET }).promise()) .then(() => done()) .catch(done); diff --git a/tests/quota/awsNodeSdk.js b/tests/quota/awsNodeSdk.js index eff35613bd..9f2b7d026d 100644 --- a/tests/quota/awsNodeSdk.js +++ b/tests/quota/awsNodeSdk.js @@ -34,55 +34,67 @@ function createBucket(bucket, locked, cb) { } function configureBucketVersioning(bucket, cb) { - return s3Client.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: { - Status: 'Enabled', + return s3Client.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: { + Status: 'Enabled', + }, }, - }, (err, data) => { - assert.ifError(err); - return cb(err, data); - }); + (err, data) => { + assert.ifError(err); + return cb(err, data); + } + ); } function putObjectLockConfiguration(bucket, cb) { - return 
s3Client.putObjectLockConfiguration({ - Bucket: bucket, - ObjectLockConfiguration: { - ObjectLockEnabled: 'Enabled', - Rule: { - DefaultRetention: { - Mode: 'GOVERNANCE', - Days: 1, + return s3Client.putObjectLockConfiguration( + { + Bucket: bucket, + ObjectLockConfiguration: { + ObjectLockEnabled: 'Enabled', + Rule: { + DefaultRetention: { + Mode: 'GOVERNANCE', + Days: 1, + }, }, }, }, - }, (err, data) => { - assert.ifError(err); - return cb(err, data); - }); + (err, data) => { + assert.ifError(err); + return cb(err, data); + } + ); } function deleteBucket(bucket, cb) { - return s3Client.deleteBucket({ - Bucket: bucket, - }, err => { - assert.ifError(err); - return cb(err); - }); + return s3Client.deleteBucket( + { + Bucket: bucket, + }, + err => { + assert.ifError(err); + return cb(err); + } + ); } function putObject(bucket, key, size, cb) { - return s3Client.putObject({ - Bucket: bucket, - Key: key, - Body: Buffer.alloc(size), - }, (err, data) => { - if (!err && !s3Config.isQuotaInflightEnabled()) { - mockScuba.incrementBytesForBucket(bucket, size); + return s3Client.putObject( + { + Bucket: bucket, + Key: key, + Body: Buffer.alloc(size), + }, + (err, data) => { + if (!err && !s3Config.isQuotaInflightEnabled()) { + mockScuba.incrementBytesForBucket(bucket, size); + } + return cb(err, data); } - return cb(err, data); - }); + ); } function putObjectWithCustomHeader(bucket, key, size, vID, cb) { @@ -105,42 +117,51 @@ function putObjectWithCustomHeader(bucket, key, size, vID, cb) { } function copyObject(bucket, key, sourceSize, cb) { - return s3Client.copyObject({ - Bucket: bucket, - CopySource: `/${bucket}/${key}`, - Key: `${key}-copy`, - }, (err, data) => { - if (!err && !s3Config.isQuotaInflightEnabled()) { - mockScuba.incrementBytesForBucket(bucket, sourceSize); + return s3Client.copyObject( + { + Bucket: bucket, + CopySource: `/${bucket}/${key}`, + Key: `${key}-copy`, + }, + (err, data) => { + if (!err && !s3Config.isQuotaInflightEnabled()) { + mockScuba.incrementBytesForBucket(bucket, sourceSize); + } + return cb(err, data); } - return cb(err, data); - }); + ); } function deleteObject(bucket, key, size, cb) { - return s3Client.deleteObject({ - Bucket: bucket, - Key: key, - }, err => { - if (!err && !s3Config.isQuotaInflightEnabled()) { - mockScuba.incrementBytesForBucket(bucket, -size); + return s3Client.deleteObject( + { + Bucket: bucket, + Key: key, + }, + err => { + if (!err && !s3Config.isQuotaInflightEnabled()) { + mockScuba.incrementBytesForBucket(bucket, -size); + } + assert.ifError(err); + return cb(err); } - assert.ifError(err); - return cb(err); - }); + ); } function deleteVersionID(bucket, key, versionId, size, cb) { - return s3Client.deleteObject({ - Bucket: bucket, - Key: key, - VersionId: versionId, - }, (err, data) => { - if (!err && !s3Config.isQuotaInflightEnabled()) { - mockScuba.incrementBytesForBucket(bucket, -size); + return s3Client.deleteObject( + { + Bucket: bucket, + Key: key, + VersionId: versionId, + }, + (err, data) => { + if (!err && !s3Config.isQuotaInflightEnabled()) { + mockScuba.incrementBytesForBucket(bucket, -size); + } + return cb(err, data); } - return cb(err, data); - }); + ); } function objectMPU(bucket, key, parts, partSize, callback) { @@ -151,72 +172,82 @@ function objectMPU(bucket, key, parts, partSize, callback) { Bucket: bucket, Key: key, }; - return async.waterfall([ - next => s3Client.createMultipartUpload(initiateMPUParams, - (err, data) => { - if (err) { - return next(err); - } - uploadId = data.UploadId; - return next(); - }), - next 
=> - async.mapLimit(partNumbers, 1, (partNumber, callback) => { - const uploadPartParams = { + return async.waterfall( + [ + next => + s3Client.createMultipartUpload(initiateMPUParams, (err, data) => { + if (err) { + return next(err); + } + uploadId = data.UploadId; + return next(); + }), + next => + async.mapLimit( + partNumbers, + 1, + (partNumber, callback) => { + const uploadPartParams = { + Bucket: bucket, + Key: key, + PartNumber: partNumber + 1, + UploadId: uploadId, + Body: Buffer.alloc(partSize), + }; + + return s3Client.uploadPart(uploadPartParams, (err, data) => { + if (err) { + return callback(err); + } + return callback(null, data.ETag); + }); + }, + (err, results) => { + if (err) { + return next(err); + } + ETags = results; + return next(); + } + ), + next => { + const params = { Bucket: bucket, Key: key, - PartNumber: partNumber + 1, + MultipartUpload: { + Parts: partNumbers.map(n => ({ + ETag: ETags[n], + PartNumber: n + 1, + })), + }, UploadId: uploadId, - Body: Buffer.alloc(partSize), }; - - return s3Client.uploadPart(uploadPartParams, - (err, data) => { - if (err) { - return callback(err); - } - return callback(null, data.ETag); - }); - }, (err, results) => { - if (err) { - return next(err); - } - ETags = results; - return next(); - }), - next => { - const params = { - Bucket: bucket, - Key: key, - MultipartUpload: { - Parts: partNumbers.map(n => ({ - ETag: ETags[n], - PartNumber: n + 1, - })), - }, - UploadId: uploadId, - }; - return s3Client.completeMultipartUpload(params, next); - }, - ], err => { - if (!err && !s3Config.isQuotaInflightEnabled()) { - mockScuba.incrementBytesForBucket(bucket, parts * partSize); + return s3Client.completeMultipartUpload(params, next); + }, + ], + err => { + if (!err && !s3Config.isQuotaInflightEnabled()) { + mockScuba.incrementBytesForBucket(bucket, parts * partSize); + } + return callback(err, uploadId); } - return callback(err, uploadId); - }); + ); } function abortMPU(bucket, key, uploadId, size, callback) { - return s3Client.abortMultipartUpload({ - Bucket: bucket, - Key: key, - UploadId: uploadId, - }, (err, data) => { - if (!err && !s3Config.isQuotaInflightEnabled()) { - mockScuba.incrementBytesForBucket(bucket, -size); + return s3Client.abortMultipartUpload( + { + Bucket: bucket, + Key: key, + UploadId: uploadId, + }, + (err, data) => { + if (!err && !s3Config.isQuotaInflightEnabled()) { + mockScuba.incrementBytesForBucket(bucket, -size); + } + return callback(err, data); } - return callback(err, data); - }); + ); } function uploadPartCopy(bucket, key, partNumber, partSize, sleepDuration, keyToCopy, callback) { @@ -231,203 +262,234 @@ function uploadPartCopy(bucket, key, partNumber, partSize, sleepDuration, keyToC if (!s3Config.isQuotaInflightEnabled()) { mockScuba.incrementBytesForBucket(bucket, parts * partSize); } - return async.waterfall([ - next => s3Client.createMultipartUpload(initiateMPUParams, - (err, data) => { - if (err) { - return next(err); - } - uploadId = data.UploadId; - return next(); - }), - next => { - const uploadPartParams = { - Bucket: bucket, - Key: key, - PartNumber: partNumber + 1, - UploadId: uploadId, - Body: Buffer.alloc(partSize), - }; - return s3Client.uploadPart(uploadPartParams, (err, data) => { - if (err) { - return next(err); - } - ETags[partNumber] = data.ETag; - return next(); - }); - }, - next => wait(sleepDuration, next), - next => { - const copyPartParams = { - Bucket: bucket, - CopySource: `/${bucket}/${keyToCopy}`, - Key: `${key}-copy`, - PartNumber: partNumber + 1, - UploadId: uploadId, - 
}; - return s3Client.uploadPartCopy(copyPartParams, (err, data) => { - if (err) { - return next(err); - } - ETags[partNumber] = data.ETag; - return next(null, data.ETag); - }); - }, - next => { - const params = { - Bucket: bucket, - Key: key, - MultipartUpload: { - Parts: partNumbers.map(n => ({ - ETag: ETags[n], - PartNumber: n + 1, - })), - }, - UploadId: uploadId, - }; - return s3Client.completeMultipartUpload(params, next); - }, - ], err => { - if (err && !s3Config.isQuotaInflightEnabled()) { - mockScuba.incrementBytesForBucket(bucket, -(parts * partSize)); + return async.waterfall( + [ + next => + s3Client.createMultipartUpload(initiateMPUParams, (err, data) => { + if (err) { + return next(err); + } + uploadId = data.UploadId; + return next(); + }), + next => { + const uploadPartParams = { + Bucket: bucket, + Key: key, + PartNumber: partNumber + 1, + UploadId: uploadId, + Body: Buffer.alloc(partSize), + }; + return s3Client.uploadPart(uploadPartParams, (err, data) => { + if (err) { + return next(err); + } + ETags[partNumber] = data.ETag; + return next(); + }); + }, + next => wait(sleepDuration, next), + next => { + const copyPartParams = { + Bucket: bucket, + CopySource: `/${bucket}/${keyToCopy}`, + Key: `${key}-copy`, + PartNumber: partNumber + 1, + UploadId: uploadId, + }; + return s3Client.uploadPartCopy(copyPartParams, (err, data) => { + if (err) { + return next(err); + } + ETags[partNumber] = data.ETag; + return next(null, data.ETag); + }); + }, + next => { + const params = { + Bucket: bucket, + Key: key, + MultipartUpload: { + Parts: partNumbers.map(n => ({ + ETag: ETags[n], + PartNumber: n + 1, + })), + }, + UploadId: uploadId, + }; + return s3Client.completeMultipartUpload(params, next); + }, + ], + err => { + if (err && !s3Config.isQuotaInflightEnabled()) { + mockScuba.incrementBytesForBucket(bucket, -(parts * partSize)); + } + return callback(err, uploadId); } - return callback(err, uploadId); - }); + ); } function restoreObject(bucket, key, size, callback) { - return s3Client.restoreObject({ - Bucket: bucket, - Key: key, - RestoreRequest: { - Days: 1, + return s3Client.restoreObject( + { + Bucket: bucket, + Key: key, + RestoreRequest: { + Days: 1, + }, }, - }, (err, data) => { - if (!err && !s3Config.isQuotaInflightEnabled()) { - mockScuba.incrementBytesForBucket(bucket, size); + (err, data) => { + if (!err && !s3Config.isQuotaInflightEnabled()) { + mockScuba.incrementBytesForBucket(bucket, size); + } + return callback(err, data); } - return callback(err, data); - }); + ); } function multiObjectDelete(bucket, keys, size, callback) { if (!s3Config.isQuotaInflightEnabled()) { mockScuba.incrementBytesForBucket(bucket, -size); } - return s3Client.deleteObjects({ - Bucket: bucket, - Delete: { - Objects: keys.map(key => ({ Key: key })), + return s3Client.deleteObjects( + { + Bucket: bucket, + Delete: { + Objects: keys.map(key => ({ Key: key })), + }, }, - }, (err, data) => { - if (err && !s3Config.isQuotaInflightEnabled()) { - mockScuba.incrementBytesForBucket(bucket, size); + (err, data) => { + if (err && !s3Config.isQuotaInflightEnabled()) { + mockScuba.incrementBytesForBucket(bucket, size); + } + return callback(err, data); } - return callback(err, data); - }); + ); } -(process.env.S3METADATA === 'mongodb' ? 
describe : describe.skip)('quota evaluation with scuba metrics', - function t() { - this.timeout(30000); - const scuba = new MockScuba(); - const putQuotaVerb = 'PUT'; - const config = { - accessKey: memCredentials.default.accessKey, - secretKey: memCredentials.default.secretKey, - }; - mockScuba = scuba; - - before(done => { - const config = getConfig('default', { signatureVersion: 'v4', maxRetries: 0 }); - s3Client = new S3(config); - scuba.start(); - metadata.setup(err => wait(2000, () => done(err))); - }); - - afterEach(() => { - scuba.reset(); - }); - - after(() => { - scuba.stop(); - }); - - it('should return QuotaExceeded when trying to PutObject in a bucket with quota', done => { - const bucket = 'quota-test-bucket1'; - const key = 'quota-test-object'; - const size = 1024; - return async.series([ +(process.env.S3METADATA === 'mongodb' ? describe : describe.skip)('quota evaluation with scuba metrics', function t() { + this.timeout(30000); + const scuba = new MockScuba(); + const putQuotaVerb = 'PUT'; + const config = { + accessKey: memCredentials.default.accessKey, + secretKey: memCredentials.default.secretKey, + }; + mockScuba = scuba; + + before(done => { + const config = getConfig('default', { signatureVersion: 'v4', maxRetries: 0 }); + s3Client = new S3(config); + scuba.start(); + metadata.setup(err => wait(2000, () => done(err))); + }); + + afterEach(() => { + scuba.reset(); + }); + + after(() => { + scuba.stop(); + }); + + it('should return QuotaExceeded when trying to PutObject in a bucket with quota', done => { + const bucket = 'quota-test-bucket1'; + const key = 'quota-test-object'; + const size = 1024; + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, key, size, err => { - assert.strictEqual(err.code, 'QuotaExceeded'); - return next(); - }), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, key, size, err => { + assert.strictEqual(err.code, 'QuotaExceeded'); + return next(); + }), next => deleteBucket(bucket, next), - ], done); - }); - - it('should return QuotaExceeded when trying to copyObject in a versioned bucket with quota', done => { - const bucket = 'quota-test-bucket12'; - const key = 'quota-test-object'; - const size = 900; - let vID = null; - return async.series([ + ], + done + ); + }); + + it('should return QuotaExceeded when trying to copyObject in a versioned bucket with quota', done => { + const bucket = 'quota-test-bucket12'; + const key = 'quota-test-object'; + const size = 900; + let vID = null; + return async.series( + [ next => createBucket(bucket, false, next), next => configureBucketVersioning(bucket, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, key, size, (err, data) => { - assert.ifError(err); - vID = data.VersionId; - return next(); - }), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, key, size, (err, data) => { + assert.ifError(err); + vID = data.VersionId; + return next(); + }), next => wait(inflightFlushFrequencyMS * 
2, next), - next => copyObject(bucket, key, size, err => { - assert.strictEqual(err.code, 'QuotaExceeded'); - return next(); - }), + next => + copyObject(bucket, key, size, err => { + assert.strictEqual(err.code, 'QuotaExceeded'); + return next(); + }), next => deleteVersionID(bucket, key, vID, size, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should return QuotaExceeded when trying to CopyObject in a bucket with quota', done => { - const bucket = 'quota-test-bucket2'; - const key = 'quota-test-object'; - const size = 900; - return async.series([ + ], + done + ); + }); + + it('should return QuotaExceeded when trying to CopyObject in a bucket with quota', done => { + const bucket = 'quota-test-bucket2'; + const key = 'quota-test-object'; + const size = 900; + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), next => putObject(bucket, key, size, next), next => wait(inflightFlushFrequencyMS * 2, next), - next => copyObject(bucket, key, size, err => { - assert.strictEqual(err.code, 'QuotaExceeded'); - return next(); - }), + next => + copyObject(bucket, key, size, err => { + assert.strictEqual(err.code, 'QuotaExceeded'); + return next(); + }), next => deleteObject(bucket, key, size, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should return QuotaExceeded when trying to complete MPU in a bucket with quota', done => { - const bucket = 'quota-test-bucket3'; - const key = 'quota-test-object'; - const parts = 5; - const partSize = 1024 * 1024 * 6; - let uploadId = null; - return async.series([ + ], + done + ); + }); + + it('should return QuotaExceeded when trying to complete MPU in a bucket with quota', done => { + const bucket = 'quota-test-bucket3'; + const key = 'quota-test-object'; + const parts = 5; + const partSize = 1024 * 1024 * 6; + let uploadId = null; + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => objectMPU(bucket, key, parts, partSize, (err, _uploadId) => { - uploadId = _uploadId; - assert.strictEqual(err.code, 'QuotaExceeded'); - return next(); - }), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + objectMPU(bucket, key, parts, partSize, (err, _uploadId) => { + uploadId = _uploadId; + assert.strictEqual(err.code, 'QuotaExceeded'); + return next(); + }), next => abortMPU(bucket, key, uploadId, 0, next), next => wait(inflightFlushFrequencyMS * 2, next), next => { @@ -435,122 +497,179 @@ function multiObjectDelete(bucket, keys, size, callback) { return next(); }, next => deleteBucket(bucket, next), - ], done); - }); - - it('should not return QuotaExceeded if the quota is not exceeded', done => { - const bucket = 'quota-test-bucket4'; - const key = 'quota-test-object'; - const size = 300; - return async.series([ + ], + done + ); + }); + + it('should not return QuotaExceeded if the quota is not exceeded', done => { + const bucket = 'quota-test-bucket4'; + const key = 'quota-test-object'; + const size = 
300; + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, key, size, err => { - assert.ifError(err); - return next(); - }), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, key, size, err => { + assert.ifError(err); + return next(); + }), next => deleteObject(bucket, key, size, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should not evaluate quotas if the backend is not available', done => { - scuba.stop(); - const bucket = 'quota-test-bucket5'; - const key = 'quota-test-object'; - const size = 1024; - return async.series([ + ], + done + ); + }); + + it('should not evaluate quotas if the backend is not available', done => { + scuba.stop(); + const bucket = 'quota-test-bucket5'; + const key = 'quota-test-object'; + const size = 1024; + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, key, size, err => { - assert.ifError(err); - return next(); - }), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, key, size, err => { + assert.ifError(err); + return next(); + }), next => deleteObject(bucket, key, size, next), next => deleteBucket(bucket, next), - ], err => { + ], + err => { assert.ifError(err); scuba.start(); return wait(2000, done); - }); - }); - - it('should return QuotaExceeded when trying to copy a part in a bucket with quota', done => { - const bucket = 'quota-test-bucket6'; - const key = 'quota-test-object-copy'; - const keyToCopy = 'quota-test-existing'; - const parts = 5; - const partSize = 1024 * 1024 * 6; - let uploadId = null; - return async.series([ + } + ); + }); + + it('should return QuotaExceeded when trying to copy a part in a bucket with quota', done => { + const bucket = 'quota-test-bucket6'; + const key = 'quota-test-object-copy'; + const keyToCopy = 'quota-test-existing'; + const parts = 5; + const partSize = 1024 * 1024 * 6; + let uploadId = null; + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify({ quota: Math.round(partSize * 2.5) }), config) - .then(() => next()).catch(err => next(err)), + next => + sendRequest( + putQuotaVerb, + '127.0.0.1:8000', + `/${bucket}/?quota=true`, + JSON.stringify({ quota: Math.round(partSize * 2.5) }), + config + ) + .then(() => next()) + .catch(err => next(err)), next => putObject(bucket, keyToCopy, partSize, next), - next => uploadPartCopy(bucket, key, parts, partSize, inflightFlushFrequencyMS * 2, keyToCopy, - (err, _uploadId) => { - uploadId = _uploadId; - assert.strictEqual(err.code, 'QuotaExceeded'); - return next(); - }), + next => + uploadPartCopy( + bucket, + key, + parts, + partSize, + inflightFlushFrequencyMS * 2, + keyToCopy, + (err, _uploadId) => { + uploadId = _uploadId; + assert.strictEqual(err.code, 'QuotaExceeded'); + return next(); + } + ), next => abortMPU(bucket, key, uploadId, parts * partSize, next), 
next => deleteObject(bucket, keyToCopy, partSize, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should return QuotaExceeded when trying to restore an object in a bucket with quota', done => { - const bucket = 'quota-test-bucket7'; - const key = 'quota-test-object'; - const size = 900; - let vID = null; - return async.series([ + ], + done + ); + }); + + it('should return QuotaExceeded when trying to restore an object in a bucket with quota', done => { + const bucket = 'quota-test-bucket7'; + const key = 'quota-test-object'; + const size = 900; + let vID = null; + return async.series( + [ next => createBucket(bucket, false, next), next => configureBucketVersioning(bucket, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, key, size, (err, data) => { - assert.ifError(err); - vID = data.VersionId; - return next(); - }), - next => fakeMetadataArchive(bucket, key, vID, { - archiveInfo: {}, - }, next), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, key, size, (err, data) => { + assert.ifError(err); + vID = data.VersionId; + return next(); + }), + next => + fakeMetadataArchive( + bucket, + key, + vID, + { + archiveInfo: {}, + }, + next + ), next => wait(inflightFlushFrequencyMS * 2, next), - next => restoreObject(bucket, key, size, err => { - assert.strictEqual(err.code, 'QuotaExceeded'); - return next(); - }), + next => + restoreObject(bucket, key, size, err => { + assert.strictEqual(err.code, 'QuotaExceeded'); + return next(); + }), next => deleteVersionID(bucket, key, vID, size, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should not update the inflights if the quota check is passing but the object is already restored', done => { - const bucket = 'quota-test-bucket14'; - const key = 'quota-test-object'; - const size = 100; - let vID = null; - return async.series([ + ], + done + ); + }); + + it('should not update the inflights if the quota check is passing but the object is already restored', done => { + const bucket = 'quota-test-bucket14'; + const key = 'quota-test-object'; + const size = 100; + let vID = null; + return async.series( + [ next => createBucket(bucket, false, next), next => configureBucketVersioning(bucket, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, key, size, (err, data) => { - assert.ifError(err); - vID = data.VersionId; - return next(); - }), - next => fakeMetadataArchive(bucket, key, vID, { - archiveInfo: {}, - restoreRequestedAt: new Date(0).toString(), - restoreCompletedAt: new Date(0).toString() + 1, - restoreRequestedDays: 5, - }, next), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, key, size, (err, data) => { + assert.ifError(err); + vID = data.VersionId; + return next(); + }), + next => + fakeMetadataArchive( + bucket, + key, + vID, + { + archiveInfo: {}, + restoreRequestedAt: new Date(0).toString(), + restoreCompletedAt: new Date(0).toString() + 1, + restoreRequestedDays: 5, + }, + next + ), next => wait(inflightFlushFrequencyMS * 2, next), next => { 
assert.strictEqual(scuba.getInflightsForBucket(bucket), size); @@ -564,30 +683,38 @@ function multiObjectDelete(bucket, keys, size, callback) { }, next => deleteVersionID(bucket, key, vID, size, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should allow writes after deleting data with quotas', done => { - const bucket = 'quota-test-bucket8'; - const key = 'quota-test-object'; - const size = 400; - return async.series([ + ], + done + ); + }); + + it('should allow writes after deleting data with quotas', done => { + const bucket = 'quota-test-bucket8'; + const key = 'quota-test-object'; + const size = 400; + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, `${key}1`, size, err => { - assert.ifError(err); - return next(); - }), - next => putObject(bucket, `${key}2`, size, err => { - assert.ifError(err); - return next(); - }), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, `${key}1`, size, err => { + assert.ifError(err); + return next(); + }), + next => + putObject(bucket, `${key}2`, size, err => { + assert.ifError(err); + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), - next => putObject(bucket, `${key}3`, size, err => { - assert.strictEqual(err.code, 'QuotaExceeded'); - return next(); - }), + next => + putObject(bucket, `${key}3`, size, err => { + assert.strictEqual(err.code, 'QuotaExceeded'); + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), next => { assert.strictEqual(scuba.getInflightsForBucket(bucket), size * 2); @@ -596,38 +723,52 @@ function multiObjectDelete(bucket, keys, size, callback) { next => wait(inflightFlushFrequencyMS * 2, next), next => deleteObject(bucket, `${key}2`, size, next), next => wait(inflightFlushFrequencyMS * 2, next), - next => putObject(bucket, `${key}4`, size, err => { - assert.ifError(err); - return next(); - }), + next => + putObject(bucket, `${key}4`, size, err => { + assert.ifError(err); + return next(); + }), next => deleteObject(bucket, `${key}1`, size, next), next => deleteObject(bucket, `${key}3`, size, next), next => deleteObject(bucket, `${key}4`, size, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should allow writes after deleting data with quotas below the current number of inflights', done => { - const bucket = 'quota-test-bucket8'; - const key = 'quota-test-object'; - const size = 400; - if (!s3Config.isQuotaInflightEnabled()) { - return done(); - } - return async.series([ + ], + done + ); + }); + + it('should allow writes after deleting data with quotas below the current number of inflights', done => { + const bucket = 'quota-test-bucket8'; + const key = 'quota-test-object'; + const size = 400; + if (!s3Config.isQuotaInflightEnabled()) { + return done(); + } + return async.series( + [ next => createBucket(bucket, false, next), // Set the quota to 10 * size (4000) - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify({ quota: 10 * size }), config).then(() => next()).catch(err => next(err)), + next => + sendRequest( + putQuotaVerb, + '127.0.0.1:8000', + `/${bucket}/?quota=true`, + JSON.stringify({ quota: 10 * size }), + config + ) + .then(() => next()) + .catch(err => 
next(err)), // Simulate previous operations since last metrics update (4000 bytes) - next => putObject(bucket, `${key}1`, 5 * size, err => { - assert.ifError(err); - return next(); - }), - next => putObject(bucket, `${key}2`, 5 * size, err => { - assert.ifError(err); - return next(); - }), + next => + putObject(bucket, `${key}1`, 5 * size, err => { + assert.ifError(err); + return next(); + }), + next => + putObject(bucket, `${key}2`, 5 * size, err => { + assert.ifError(err); + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), // After metrics update, set the inflights to 0 (simulate end of metrics update) next => { @@ -636,10 +777,11 @@ function multiObjectDelete(bucket, keys, size, callback) { }, // Here we have 0 inflight but the stored bytes are 4000 (equal to the quota) // Should reject new write with QuotaExceeded (4000 + 400) - next => putObject(bucket, `${key}3`, size, err => { - assert.strictEqual(err.code, 'QuotaExceeded'); - return next(); - }), + next => + putObject(bucket, `${key}3`, size, err => { + assert.strictEqual(err.code, 'QuotaExceeded'); + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), // Should still have 0 as inflight next => { @@ -650,37 +792,45 @@ function multiObjectDelete(bucket, keys, size, callback) { // Now delete one object (2000 bytes), it should let us write again next => deleteObject(bucket, `${key}1`, size, next), next => wait(inflightFlushFrequencyMS * 2, next), - next => putObject(bucket, `${key}4`, 5 * size, err => { - assert.ifError(err); - return next(); - }), + next => + putObject(bucket, `${key}4`, 5 * size, err => { + assert.ifError(err); + return next(); + }), // Cleanup next => deleteObject(bucket, `${key}2`, size, next), next => deleteObject(bucket, `${key}4`, size, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should not increase the inflights when the object is being rewritten with a smaller object', done => { - const bucket = 'quota-test-bucket9'; - const key = 'quota-test-object'; - const size = 400; - return async.series([ + ], + done + ); + }); + + it('should not increase the inflights when the object is being rewritten with a smaller object', done => { + const bucket = 'quota-test-bucket9'; + const key = 'quota-test-object'; + const size = 400; + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, key, size, err => { - assert.ifError(err); - return next(); - }), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, key, size, err => { + assert.ifError(err); + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), - next => putObject(bucket, key, size - 100, err => { - assert.ifError(err); - if (!s3Config.isQuotaInflightEnabled()) { - mockScuba.incrementBytesForBucket(bucket, -size); - } - return next(); - }), + next => + putObject(bucket, key, size - 100, err => { + assert.ifError(err); + if (!s3Config.isQuotaInflightEnabled()) { + mockScuba.incrementBytesForBucket(bucket, -size); + } + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), next => { assert.strictEqual(scuba.getInflightsForBucket(bucket), size - 100); @@ -688,247 +838,323 @@ function multiObjectDelete(bucket, keys, size, callback) { }, 
next => deleteObject(bucket, key, size, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should decrease the inflights when performing multi object delete', done => { - const bucket = 'quota-test-bucket10'; - const key = 'quota-test-object'; - const size = 400; - return async.series([ + ], + done + ); + }); + + it('should decrease the inflights when performing multi object delete', done => { + const bucket = 'quota-test-bucket10'; + const key = 'quota-test-object'; + const size = 400; + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, `${key}1`, size, err => { - assert.ifError(err); - return next(); - } - ), - next => putObject(bucket, `${key}2`, size, err => { - assert.ifError(err); - return next(); - }), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, `${key}1`, size, err => { + assert.ifError(err); + return next(); + }), + next => + putObject(bucket, `${key}2`, size, err => { + assert.ifError(err); + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), - next => multiObjectDelete(bucket, [`${key}1`, `${key}2`], size * 2, err => { - assert.ifError(err); - return next(); - }), + next => + multiObjectDelete(bucket, [`${key}1`, `${key}2`], size * 2, err => { + assert.ifError(err); + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), next => { assert.strictEqual(scuba.getInflightsForBucket(bucket), 0); return next(); }, next => deleteBucket(bucket, next), - ], done); - }); - - it('should allow writes after multi-deleting data with quotas below the current number of inflights', done => { - const bucket = 'quota-test-bucket10'; - const key = 'quota-test-object'; - const size = 400; - if (!s3Config.isQuotaInflightEnabled()) { - return done(); - } - return async.series([ + ], + done + ); + }); + + it('should allow writes after multi-deleting data with quotas below the current number of inflights', done => { + const bucket = 'quota-test-bucket10'; + const key = 'quota-test-object'; + const size = 400; + if (!s3Config.isQuotaInflightEnabled()) { + return done(); + } + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify({ quota: size * 10 }), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, `${key}1`, size * 5, err => { - assert.ifError(err); - return next(); - }), - next => putObject(bucket, `${key}2`, size * 5, err => { - assert.ifError(err); - return next(); - }), + next => + sendRequest( + putQuotaVerb, + '127.0.0.1:8000', + `/${bucket}/?quota=true`, + JSON.stringify({ quota: size * 10 }), + config + ) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, `${key}1`, size * 5, err => { + assert.ifError(err); + return next(); + }), + next => + putObject(bucket, `${key}2`, size * 5, err => { + assert.ifError(err); + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), next => { scuba.setInflightAsCapacity(bucket); return next(); }, - next => putObject(bucket, `${key}3`, size, err => { - assert.strictEqual(err.code, 'QuotaExceeded'); - return next(); - }), + next => + putObject(bucket, `${key}3`, size, err => { + 
assert.strictEqual(err.code, 'QuotaExceeded'); + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), next => { assert.strictEqual(scuba.getInflightsForBucket(bucket), 0); return next(); }, - next => multiObjectDelete(bucket, [`${key}1`, `${key}2`], size * 10, err => { - assert.ifError(err); - return next(); - }), + next => + multiObjectDelete(bucket, [`${key}1`, `${key}2`], size * 10, err => { + assert.ifError(err); + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), - next => putObject(bucket, `${key}4`, size * 5, err => { - assert.ifError(err); - return next(); - }), + next => + putObject(bucket, `${key}4`, size * 5, err => { + assert.ifError(err); + return next(); + }), next => deleteObject(bucket, `${key}4`, size * 5, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should not update the inflights if the API errored after evaluating quotas (deletion)', done => { - const bucket = 'quota-test-bucket11'; - const key = 'quota-test-object'; - const size = 100; - let vID = null; - return async.series([ + ], + done + ); + }); + + it('should not update the inflights if the API errored after evaluating quotas (deletion)', done => { + const bucket = 'quota-test-bucket11'; + const key = 'quota-test-object'; + const size = 100; + let vID = null; + return async.series( + [ next => createBucket(bucket, true, next), next => putObjectLockConfiguration(bucket, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, key, size, (err, val) => { - assert.ifError(err); - vID = val.VersionId; - return next(); - }), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, key, size, (err, val) => { + assert.ifError(err); + vID = val.VersionId; + return next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), next => { assert.strictEqual(scuba.getInflightsForBucket(bucket), size); return next(); }, - next => deleteVersionID(bucket, key, vID, size, err => { - assert.strictEqual(err.code, 'AccessDenied'); - next(); - }), + next => + deleteVersionID(bucket, key, vID, size, err => { + assert.strictEqual(err.code, 'AccessDenied'); + next(); + }), next => wait(inflightFlushFrequencyMS * 2, next), next => { assert.strictEqual(scuba.getInflightsForBucket(bucket), size); return next(); }, - ], done); - }); - - it('should only evaluate quota and not update inflights for PutObject with the x-scal-s3-version-id header', - done => { - const bucket = 'quota-test-bucket13'; - const key = 'quota-test-object'; - const size = 100; - let vID = null; - return async.series([ - next => createBucket(bucket, true, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, key, size, (err, val) => { + ], + done + ); + }); + + it('should only evaluate quota and not update inflights for PutObject with the x-scal-s3-version-id header', done => { + const bucket = 'quota-test-bucket13'; + const key = 'quota-test-object'; + const size = 100; + let vID = null; + return async.series( + [ + next => createBucket(bucket, true, next), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => 
next(err)), + next => + putObject(bucket, key, size, (err, val) => { assert.ifError(err); vID = val.VersionId; return next(); }), - next => wait(inflightFlushFrequencyMS * 2, next), - next => { - assert.strictEqual(scuba.getInflightsForBucket(bucket), size); - return next(); - }, - next => fakeMetadataArchive(bucket, key, vID, { - archiveInfo: {}, - restoreRequestedAt: new Date(0).toISOString(), - restoreRequestedDays: 7, - }, next), - // Simulate the real restore - next => putObjectWithCustomHeader(bucket, key, size, vID, err => { + next => wait(inflightFlushFrequencyMS * 2, next), + next => { + assert.strictEqual(scuba.getInflightsForBucket(bucket), size); + return next(); + }, + next => + fakeMetadataArchive( + bucket, + key, + vID, + { + archiveInfo: {}, + restoreRequestedAt: new Date(0).toISOString(), + restoreRequestedDays: 7, + }, + next + ), + // Simulate the real restore + next => + putObjectWithCustomHeader(bucket, key, size, vID, err => { assert.ifError(err); return next(); }), - next => { - assert.strictEqual(scuba.getInflightsForBucket(bucket), size); - return next(); - }, - next => deleteVersionID(bucket, key, vID, size, next), - next => deleteBucket(bucket, next), - ], done); - }); - - it('should allow a restore if the quota is full but the objet fits with its reserved storage space', - done => { - const bucket = 'quota-test-bucket15'; - const key = 'quota-test-object'; - const size = 1000; - let vID = null; - return async.series([ - next => createBucket(bucket, true, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify(quota), config).then(() => next()).catch(err => next(err)), - next => putObject(bucket, key, size, (err, val) => { + next => { + assert.strictEqual(scuba.getInflightsForBucket(bucket), size); + return next(); + }, + next => deleteVersionID(bucket, key, vID, size, next), + next => deleteBucket(bucket, next), + ], + done + ); + }); + + it('should allow a restore if the quota is full but the objet fits with its reserved storage space', done => { + const bucket = 'quota-test-bucket15'; + const key = 'quota-test-object'; + const size = 1000; + let vID = null; + return async.series( + [ + next => createBucket(bucket, true, next), + next => + sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota), config) + .then(() => next()) + .catch(err => next(err)), + next => + putObject(bucket, key, size, (err, val) => { assert.ifError(err); vID = val.VersionId; return next(); }), - next => wait(inflightFlushFrequencyMS * 2, next), - next => { - assert.strictEqual(scuba.getInflightsForBucket(bucket), size); - return next(); - }, - next => fakeMetadataArchive(bucket, key, vID, { - archiveInfo: {}, - restoreRequestedAt: new Date(0).toISOString(), - restoreRequestedDays: 7, - }, next), - // Put an object, the quota should be exceeded - next => putObject(bucket, `${key}-2`, size, err => { + next => wait(inflightFlushFrequencyMS * 2, next), + next => { + assert.strictEqual(scuba.getInflightsForBucket(bucket), size); + return next(); + }, + next => + fakeMetadataArchive( + bucket, + key, + vID, + { + archiveInfo: {}, + restoreRequestedAt: new Date(0).toISOString(), + restoreRequestedDays: 7, + }, + next + ), + // Put an object, the quota should be exceeded + next => + putObject(bucket, `${key}-2`, size, err => { assert.strictEqual(err.code, 'QuotaExceeded'); return next(); }), - // Simulate the real restore - next => putObjectWithCustomHeader(bucket, key, size, vID, err => { + // Simulate the 
real restore + next => + putObjectWithCustomHeader(bucket, key, size, vID, err => { assert.ifError(err); return next(); }), - next => { - assert.strictEqual(scuba.getInflightsForBucket(bucket), size); - return next(); - }, - next => deleteVersionID(bucket, key, vID, size, next), - next => deleteBucket(bucket, next), - ], done); - }); - - it('should reduce inflights when completing MPU with fewer parts than uploaded', done => { - const bucket = 'quota-test-bucket-mpu1'; - const key = 'quota-test-object'; - const parts = 3; - const partSize = 5 * 1024 * 1024; - const totalSize = parts * partSize; - const usedParts = 2; - let uploadId = null; - const ETags = []; - - if (!s3Config.isQuotaInflightEnabled()) { - return done(); - } + next => { + assert.strictEqual(scuba.getInflightsForBucket(bucket), size); + return next(); + }, + next => deleteVersionID(bucket, key, vID, size, next), + next => deleteBucket(bucket, next), + ], + done + ); + }); + + it('should reduce inflights when completing MPU with fewer parts than uploaded', done => { + const bucket = 'quota-test-bucket-mpu1'; + const key = 'quota-test-object'; + const parts = 3; + const partSize = 5 * 1024 * 1024; + const totalSize = parts * partSize; + const usedParts = 2; + let uploadId = null; + const ETags = []; - return async.series([ + if (!s3Config.isQuotaInflightEnabled()) { + return done(); + } + + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify({ quota: totalSize * 2 }), config) - .then(() => next()).catch(err => next(err)), - next => s3Client.createMultipartUpload({ - Bucket: bucket, - Key: key, - }, (err, data) => { - if (err) { - return next(err); - } - uploadId = data.UploadId; - return next(); - }), - next => async.timesSeries(parts, (n, cb) => { - const uploadPartParams = { - Bucket: bucket, - Key: key, - PartNumber: n + 1, - UploadId: uploadId, - Body: Buffer.alloc(partSize), - }; - return s3Client.uploadPart(uploadPartParams, (err, data) => { - if (err) { - return cb(err); + next => + sendRequest( + putQuotaVerb, + '127.0.0.1:8000', + `/${bucket}/?quota=true`, + JSON.stringify({ quota: totalSize * 2 }), + config + ) + .then(() => next()) + .catch(err => next(err)), + next => + s3Client.createMultipartUpload( + { + Bucket: bucket, + Key: key, + }, + (err, data) => { + if (err) { + return next(err); + } + uploadId = data.UploadId; + return next(); } - ETags[n] = data.ETag; - return cb(); - }); - }, next), + ), + next => + async.timesSeries( + parts, + (n, cb) => { + const uploadPartParams = { + Bucket: bucket, + Key: key, + PartNumber: n + 1, + UploadId: uploadId, + Body: Buffer.alloc(partSize), + }; + return s3Client.uploadPart(uploadPartParams, (err, data) => { + if (err) { + return cb(err); + } + ETags[n] = data.ETag; + return cb(); + }); + }, + next + ), next => wait(inflightFlushFrequencyMS * 2, next), next => { // Verify all parts are counted in inflights @@ -959,46 +1185,65 @@ function multiObjectDelete(bucket, keys, size, callback) { }, next => deleteObject(bucket, key, usedParts * partSize, next), next => deleteBucket(bucket, next), - ], done); - }); - - it('should reduce inflights when aborting MPU', done => { - const bucket = 'quota-test-bucket-mpu2'; - const key = 'quota-test-object'; - const parts = 3; - const partSize = 5 * 1024 * 1024; - const totalSize = parts * partSize; - let uploadId = null; - - if (!s3Config.isQuotaInflightEnabled()) { - return done(); - } + ], + done + ); + }); - return 
async.series([ + it('should reduce inflights when aborting MPU', done => { + const bucket = 'quota-test-bucket-mpu2'; + const key = 'quota-test-object'; + const parts = 3; + const partSize = 5 * 1024 * 1024; + const totalSize = parts * partSize; + let uploadId = null; + + if (!s3Config.isQuotaInflightEnabled()) { + return done(); + } + + return async.series( + [ next => createBucket(bucket, false, next), - next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`, - JSON.stringify({ quota: totalSize * 2 }), config) - .then(() => next()).catch(err => next(err)), - next => s3Client.createMultipartUpload({ - Bucket: bucket, - Key: key, - }, (err, data) => { - if (err) { - return next(err); - } - uploadId = data.UploadId; - return next(); - }), - next => async.timesSeries(parts, (n, cb) => { - const uploadPartParams = { - Bucket: bucket, - Key: key, - PartNumber: n + 1, - UploadId: uploadId, - Body: Buffer.alloc(partSize), - }; - return s3Client.uploadPart(uploadPartParams, cb); - }, next), + next => + sendRequest( + putQuotaVerb, + '127.0.0.1:8000', + `/${bucket}/?quota=true`, + JSON.stringify({ quota: totalSize * 2 }), + config + ) + .then(() => next()) + .catch(err => next(err)), + next => + s3Client.createMultipartUpload( + { + Bucket: bucket, + Key: key, + }, + (err, data) => { + if (err) { + return next(err); + } + uploadId = data.UploadId; + return next(); + } + ), + next => + async.timesSeries( + parts, + (n, cb) => { + const uploadPartParams = { + Bucket: bucket, + Key: key, + PartNumber: n + 1, + UploadId: uploadId, + Body: Buffer.alloc(partSize), + }; + return s3Client.uploadPart(uploadPartParams, cb); + }, + next + ), next => wait(inflightFlushFrequencyMS * 2, next), next => { // Verify all parts are counted in inflights @@ -1013,6 +1258,8 @@ function multiObjectDelete(bucket, keys, size, callback) { return next(); }, next => deleteBucket(bucket, next), - ], done); - }); + ], + done + ); }); +}); diff --git a/tests/unit/Config.js b/tests/unit/Config.js index 969f0ed17e..8f95f58a91 100644 --- a/tests/unit/Config.js +++ b/tests/unit/Config.js @@ -19,15 +19,23 @@ describe('Config', () => { const setEnv = (key, value) => { if (key in process.env) { const v = process.env[key]; - envToRestore.push(() => { process.env[key] = v; }); + envToRestore.push(() => { + process.env[key] = v; + }); } else { - envToRestore.push(() => { delete process.env[key]; }); + envToRestore.push(() => { + delete process.env[key]; + }); } process.env[key] = value; }; - beforeEach(() => { envToRestore.length = 0; }); - afterEach(() => { envToRestore.reverse().forEach(cb => cb()); }); + beforeEach(() => { + envToRestore.length = 0; + }); + afterEach(() => { + envToRestore.reverse().forEach(cb => cb()); + }); it('should load default config.json without errors', done => { require('../../lib/Config'); @@ -50,7 +58,7 @@ describe('Config', () => { describe('azureGetStorageAccountName', () => { it('should return the azureStorageAccountName', done => { const accountName = azureGetStorageAccountName('us-west-1', { - azureStorageAccountName: 'someaccount' + azureStorageAccountName: 'someaccount', }); assert.deepStrictEqual(accountName, 'someaccount'); return done(); @@ -60,7 +68,7 @@ describe('Config', () => { setEnv('us-west-1_AZURE_STORAGE_ACCOUNT_NAME', 'other'); setEnv('fr-east-2_AZURE_STORAGE_ACCOUNT_NAME', 'wrong'); const accountName = azureGetStorageAccountName('us-west-1', { - azureStorageAccountName: 'someaccount' + azureStorageAccountName: 'someaccount', }); assert.deepStrictEqual(accountName, 
'other'); return done(); @@ -103,7 +111,7 @@ describe('Config', () => { it('should return shared-key credentials with authMethod from details', () => { const creds = azureGetLocationCredentials('us-west-1', { authMode: 'shared-key', - ...locationDetails + ...locationDetails, }); assert.deepStrictEqual(creds, { authMethod: 'shared-key', @@ -144,7 +152,7 @@ describe('Config', () => { it('should return shared-access-signature-token credentials with authMethod from details', () => { const creds = azureGetLocationCredentials('us-west-1', { authMethod: 'shared-access-signature', - ...locationDetails + ...locationDetails, }); assert.deepStrictEqual(creds, { authMethod: 'shared-access-signature', @@ -191,7 +199,7 @@ describe('Config', () => { it('should return client-secret credentials with authMethod from details', () => { const creds = azureGetLocationCredentials('us-west-1', { authMethod: 'client-secret', - ...locationDetails + ...locationDetails, }); assert.deepStrictEqual(creds, { authMethod: 'client-secret', @@ -217,69 +225,54 @@ describe('Config', () => { it('should return account name from config', () => { setEnv('azurebackend_AZURE_STORAGE_ACCOUNT_NAME', ''); const config = new ConfigObject(); - assert.deepStrictEqual( - config.getAzureStorageAccountName('azurebackend'), - 'fakeaccountname' - ); + assert.deepStrictEqual(config.getAzureStorageAccountName('azurebackend'), 'fakeaccountname'); }); it('should return account name from env', () => { setEnv('azurebackend_AZURE_STORAGE_ACCOUNT_NAME', 'foooo'); const config = new ConfigObject(); - assert.deepStrictEqual( - config.getAzureStorageAccountName('azurebackend'), - 'foooo' - ); + assert.deepStrictEqual(config.getAzureStorageAccountName('azurebackend'), 'foooo'); }); it('should return account name from shared-access-signature auth', () => { setEnv('S3_LOCATION_FILE', 'tests/locationConfig/locationConfigTests.json'); const config = new ConfigObject(); - assert.deepStrictEqual( - config.getAzureStorageAccountName('azurebackend3'), - 'fakeaccountname3' - ); + assert.deepStrictEqual(config.getAzureStorageAccountName('azurebackend3'), 'fakeaccountname3'); }); it('should return account name from client-secret auth', () => { setEnv('S3_LOCATION_FILE', 'tests/locationConfig/locationConfigTests.json'); const config = new ConfigObject(); - assert.deepStrictEqual( - config.getAzureStorageAccountName('azurebackend4'), - 'fakeaccountname4', - ); + assert.deepStrictEqual(config.getAzureStorageAccountName('azurebackend4'), 'fakeaccountname4'); }); it('should return account name from endpoint', () => { setEnv('S3_LOCATION_FILE', 'tests/locationConfig/locationConfigTests.json'); const config = new ConfigObject(); - assert.deepStrictEqual( - config.getAzureStorageAccountName('azuritebackend'), - 'myfakeaccount', - ); + assert.deepStrictEqual(config.getAzureStorageAccountName('azuritebackend'), 'myfakeaccount'); }); }); describe('locationConstraintAssert', () => { const memLocation = { - 'details': {}, - 'isCold': false, - 'isTransient': false, - 'legacyAwsBehavior': false, - 'locationType': 'location-mem-v1', - 'objectId': 'a9d9b632-5fa5-11ef-8715-b21941dbc3ea', - 'type': 'mem', + details: {}, + isCold: false, + isTransient: false, + legacyAwsBehavior: false, + locationType: 'location-mem-v1', + objectId: 'a9d9b632-5fa5-11ef-8715-b21941dbc3ea', + type: 'mem', }; it('should parse tlp location', () => { const locationConstraints = { 'dmf-1': { - 'details': {}, - 'isCold': true, - 'legacyAwsBehavior': false, - 'locationType': 'location-dmf-v1', - 'objectId': 
'b9d9b632-5fa5-11ef-8715-b21941dbc3ea', - 'type': 'tlp' + details: {}, + isCold: true, + legacyAwsBehavior: false, + locationType: 'location-dmf-v1', + objectId: 'b9d9b632-5fa5-11ef-8715-b21941dbc3ea', + type: 'tlp', }, 'us-east-1': memLocation, }; @@ -289,12 +282,12 @@ describe('Config', () => { it('should fail tlp location is not cold', () => { const locationConstraints = { 'dmf-1': { - 'details': {}, - 'isCold': false, - 'legacyAwsBehavior': false, - 'locationType': 'location-dmf-v1', - 'objectId': 'b9d9b632-5fa5-11ef-8715-b21941dbc3ea', - 'type': 'tlp' + details: {}, + isCold: false, + legacyAwsBehavior: false, + locationType: 'location-dmf-v1', + objectId: 'b9d9b632-5fa5-11ef-8715-b21941dbc3ea', + type: 'tlp', }, 'us-east-1': memLocation, }; @@ -304,14 +297,14 @@ describe('Config', () => { it('should fail if tlp location has details', () => { const locationConstraints = { 'dmf-1': { - 'details': { - 'endpoint': 'http://localhost:8000', + details: { + endpoint: 'http://localhost:8000', }, - 'isCold': true, - 'legacyAwsBehavior': false, - 'locationType': 'location-dmf-v1', - 'objectId': 'b9d9b632-5fa5-11ef-8715-b21941dbc3ea', - 'type': 'tlp' + isCold: true, + legacyAwsBehavior: false, + locationType: 'location-dmf-v1', + objectId: 'b9d9b632-5fa5-11ef-8715-b21941dbc3ea', + type: 'tlp', }, 'us-east-1': memLocation, }; @@ -507,8 +500,7 @@ describe('Config', () => { before(() => { oldConfig = process.env.S3_CONFIG_FILE; - process.env.S3_CONFIG_FILE = - 'tests/unit/testConfigs/allOptsConfig/config.json'; + process.env.S3_CONFIG_FILE = 'tests/unit/testConfigs/allOptsConfig/config.json'; }); after(() => { @@ -518,13 +510,10 @@ describe('Config', () => { it('should set up scuba', () => { const config = new ConfigObject(); - assert.deepStrictEqual( - config.scuba, - { - host: 'localhost', - port: 8100, - }, - ); + assert.deepStrictEqual(config.scuba, { + host: 'localhost', + port: 8100, + }); }); it('should use environment variables for scuba', () => { @@ -533,13 +522,10 @@ describe('Config', () => { const config = new ConfigObject(); - assert.deepStrictEqual( - config.scuba, - { - host: 'scubahost', - port: 1234, - }, - ); + assert.deepStrictEqual(config.scuba, { + host: 'scubahost', + port: 1234, + }); }); }); @@ -548,8 +534,7 @@ describe('Config', () => { before(() => { oldConfig = process.env.S3_CONFIG_FILE; - process.env.S3_CONFIG_FILE = - 'tests/unit/testConfigs/allOptsConfig/config.json'; + process.env.S3_CONFIG_FILE = 'tests/unit/testConfigs/allOptsConfig/config.json'; }); after(() => { @@ -559,13 +544,10 @@ describe('Config', () => { it('should set up quota', () => { const config = new ConfigObject(); - assert.deepStrictEqual( - config.quota, - { - maxStaleness: 24 * 60 * 60 * 1000, - enableInflights: false, - }, - ); + assert.deepStrictEqual(config.quota, { + maxStaleness: 24 * 60 * 60 * 1000, + enableInflights: false, + }); }); it('should use environment variables for scuba', () => { @@ -574,13 +556,10 @@ describe('Config', () => { const config = new ConfigObject(); - assert.deepStrictEqual( - config.quota, - { - maxStaleness: 1234, - enableInflights: true, - }, - ); + assert.deepStrictEqual(config.quota, { + maxStaleness: 1234, + enableInflights: true, + }); }); it('should use the default if the maxStaleness is not a number', () => { @@ -589,13 +568,10 @@ describe('Config', () => { const config = new ConfigObject(); - assert.deepStrictEqual( - config.quota, - { - maxStaleness: 24 * 60 * 60 * 1000, - enableInflights: true, - }, - ); + assert.deepStrictEqual(config.quota, { + 
maxStaleness: 24 * 60 * 60 * 1000, + enableInflights: true, + }); }); }); @@ -604,8 +580,7 @@ describe('Config', () => { before(() => { oldConfig = process.env.S3_CONFIG_FILE; - process.env.S3_CONFIG_FILE = - 'tests/unit/testConfigs/allOptsConfig/config.json'; + process.env.S3_CONFIG_FILE = 'tests/unit/testConfigs/allOptsConfig/config.json'; }); after(() => { @@ -615,39 +590,30 @@ describe('Config', () => { it('should set up utapi local cache', () => { const config = new ConfigObject(); - assert.deepStrictEqual( - config.localCache, - { name: 'zenko', sentinels: [{ host: 'localhost', port: 6379 }] }, - ); - assert.deepStrictEqual( - config.utapi.localCache, - config.localCache, - ); + assert.deepStrictEqual(config.localCache, { + name: 'zenko', + sentinels: [{ host: 'localhost', port: 6379 }], + }); + assert.deepStrictEqual(config.utapi.localCache, config.localCache); }); it('should set up utapi redis', () => { const config = new ConfigObject(); - assert.deepStrictEqual( - config.redis, - { name: 'zenko', sentinels: [{ host: 'localhost', port: 6379 }] }, - ); - assert.deepStrictEqual( - config.utapi.redis, - { - host: 'localhost', - port: 6379, - retry: { - connectBackoff: { - min: 10, - max: 1000, - factor: 1.5, - jitter: 0.1, - deadline: 10000, - }, + assert.deepStrictEqual(config.redis, { name: 'zenko', sentinels: [{ host: 'localhost', port: 6379 }] }); + assert.deepStrictEqual(config.utapi.redis, { + host: 'localhost', + port: 6379, + retry: { + connectBackoff: { + min: 10, + max: 1000, + factor: 1.5, + jitter: 0.1, + deadline: 10000, }, }, - ); + }); }); }); @@ -672,11 +638,7 @@ describe('Config', () => { }); it('should return the rules provided when they are valid', () => { - const rules = [ - 'Expiration', - 'NoncurrentVersionExpiration', - 'AbortIncompleteMultipartUpload', - ]; + const rules = ['Expiration', 'NoncurrentVersionExpiration', 'AbortIncompleteMultipartUpload']; const parsedRules = parseSupportedLifecycleRules(rules); assert.deepStrictEqual(parsedRules, rules); }); @@ -835,8 +797,7 @@ describe('Config', () => { .withArgs(sinon.match(/\/config\.json$/)) .returns(JSON.stringify({ ...defaultConfig, instanceId: 'test' })); // For all other files, use the original readFileSync - readFileSyncStub - .callsFake((filePath, ...args) => originalReadFileSync(filePath, ...args)); + readFileSyncStub.callsFake((filePath, ...args) => originalReadFileSync(filePath, ...args)); // Create a new ConfigObject instance const config = new ConfigObject(); assert.strictEqual(config.instanceId, 'test'); @@ -851,8 +812,7 @@ describe('Config', () => { .withArgs(sinon.match(/\/config\.json$/)) .returns(JSON.stringify({ ...defaultConfig, instanceId: 1234 })); // For all other files, use the original readFileSync - readFileSyncStub - .callsFake((filePath, ...args) => originalReadFileSync(filePath, ...args)); + readFileSyncStub.callsFake((filePath, ...args) => originalReadFileSync(filePath, ...args)); // Create a new ConfigObject instance assert.throws(() => new ConfigObject()); }); diff --git a/tests/unit/api/api.js b/tests/unit/api/api.js index b9d860ef0d..90788c9f38 100644 --- a/tests/unit/api/api.js +++ b/tests/unit/api/api.js @@ -23,23 +23,32 @@ describe('api.callApiMethod', () => { response = { write: sandbox.stub(), - end: sandbox.stub() + end: sandbox.stub(), }; log = { addDefaultFields: sandbox.stub(), trace: sandbox.stub(), error: sandbox.stub(), - debug: sandbox.stub() + debug: sandbox.stub(), }; authServer = { - doAuth: sandbox.stub().callsArgWith(2, null, new AuthInfo({}), [{ - isAllowed: 
true, - isImplicit: false, - }], null, { - accountQuota: 5000, - }), + doAuth: sandbox.stub().callsArgWith( + 2, + null, + new AuthInfo({}), + [ + { + isAllowed: true, + isImplicit: false, + }, + ], + null, + { + accountQuota: 5000, + } + ), }; sandbox.stub(auth, 'server').value(authServer); @@ -98,8 +107,7 @@ describe('api.callApiMethod', () => { assert.strictEqual(requestContexts[0]._needQuota, true); done(); }); - sandbox.stub(api, 'completeMultipartUpload').callsFake( - (userInfo, _request, streamingV4Params, log, cb) => cb); + sandbox.stub(api, 'completeMultipartUpload').callsFake((userInfo, _request, streamingV4Params, log, cb) => cb); api.callApiMethod('completeMultipartUpload', request, response, log); }); @@ -108,8 +116,7 @@ describe('api.callApiMethod', () => { assert.strictEqual(requestContexts[0]._needQuota, true); done(); }); - sandbox.stub(api, 'multipartDelete').callsFake( - (userInfo, _request, streamingV4Params, log, cb) => cb); + sandbox.stub(api, 'multipartDelete').callsFake((userInfo, _request, streamingV4Params, log, cb) => cb); api.callApiMethod('multipartDelete', request, response, log); }); }); diff --git a/tests/unit/api/apiUtils/authorization/aclChecks.js b/tests/unit/api/apiUtils/authorization/aclChecks.js index 6bf34850b6..0821d5acc4 100644 --- a/tests/unit/api/apiUtils/authorization/aclChecks.js +++ b/tests/unit/api/apiUtils/authorization/aclChecks.js @@ -1,7 +1,9 @@ const assert = require('assert'); -const { isServiceAccount, getServiceAccountProperties } = - require('../../../../../lib/api/apiUtils/authorization/permissionChecks'); +const { + isServiceAccount, + getServiceAccountProperties, +} = require('../../../../../lib/api/apiUtils/authorization/permissionChecks'); describe('aclChecks', () => { it('should return whether a canonical ID is a service account', () => { @@ -12,15 +14,11 @@ describe('aclChecks', () => { }); it('should return properties of a service account by canonical ID', () => { - assert.strictEqual( - getServiceAccountProperties('abcdefghijkl'), undefined); - assert.strictEqual( - getServiceAccountProperties('abcdefghijkl/notaservice'), undefined); - assert.deepStrictEqual( - getServiceAccountProperties('abcdefghijkl/lifecycle'), {}); - assert.deepStrictEqual( - getServiceAccountProperties('abcdefghijkl/md-ingestion'), { - canReplicate: true, - }); + assert.strictEqual(getServiceAccountProperties('abcdefghijkl'), undefined); + assert.strictEqual(getServiceAccountProperties('abcdefghijkl/notaservice'), undefined); + assert.deepStrictEqual(getServiceAccountProperties('abcdefghijkl/lifecycle'), {}); + assert.deepStrictEqual(getServiceAccountProperties('abcdefghijkl/md-ingestion'), { + canReplicate: true, + }); }); }); diff --git a/tests/unit/api/apiUtils/authorization/prepareRequestContexts.js b/tests/unit/api/apiUtils/authorization/prepareRequestContexts.js index 6cf2e3c591..6537be087f 100644 --- a/tests/unit/api/apiUtils/authorization/prepareRequestContexts.js +++ b/tests/unit/api/apiUtils/authorization/prepareRequestContexts.js @@ -1,15 +1,15 @@ const assert = require('assert'); const DummyRequest = require('../../../DummyRequest'); -const prepareRequestContexts = - require('../../../../../lib/api/apiUtils/authorization/prepareRequestContexts.js'); - -const makeRequest = (headers, query) => new DummyRequest({ - headers, - url: '/', - parsedHost: 'localhost', - socket: {}, - query, -}); +const prepareRequestContexts = require('../../../../../lib/api/apiUtils/authorization/prepareRequestContexts.js'); + +const makeRequest = (headers, query) 
=> + new DummyRequest({ + headers, + url: '/', + parsedHost: 'localhost', + socket: {}, + query, + }); const sourceBucket = 'bucketsource'; const sourceObject = 'objectsource'; const sourceVersionId = 'vid1'; @@ -18,76 +18,79 @@ describe('prepareRequestContexts', () => { it('should return s3:DeleteObject if multiObjectDelete method', () => { const apiMethod = 'multiObjectDelete'; const request = makeRequest(); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); assert.strictEqual(results.length, 1); const expectedAction = 's3:DeleteObject'; assert.strictEqual(results[0].getAction(), expectedAction); }); - it('should return s3:PutObjectVersion request context action for objectPut method with x-scal-s3-version-id' + - ' header', () => { - const apiMethod = 'objectPut'; - const request = makeRequest({ - 'x-scal-s3-version-id': 'vid', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); - - assert.strictEqual(results.length, 1); - const expectedAction = 's3:PutObjectVersion'; - assert.strictEqual(results[0].getAction(), expectedAction); - }); + it( + 'should return s3:PutObjectVersion request context action for objectPut method with x-scal-s3-version-id' + + ' header', + () => { + const apiMethod = 'objectPut'; + const request = makeRequest({ + 'x-scal-s3-version-id': 'vid', + }); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); - it('should return s3:PutObjectVersion request context action for objectPut method with empty x-scal-s3-version-id' + - ' header', () => { - const apiMethod = 'objectPut'; - const request = makeRequest({ - 'x-scal-s3-version-id': '', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + assert.strictEqual(results.length, 1); + const expectedAction = 's3:PutObjectVersion'; + assert.strictEqual(results[0].getAction(), expectedAction); + } + ); + + it( + 'should return s3:PutObjectVersion request context action for objectPut method with empty x-scal-s3-version-id' + + ' header', + () => { + const apiMethod = 'objectPut'; + const request = makeRequest({ + 'x-scal-s3-version-id': '', + }); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); - assert.strictEqual(results.length, 1); - const expectedAction = 's3:PutObjectVersion'; - assert.strictEqual(results[0].getAction(), expectedAction); - }); + assert.strictEqual(results.length, 1); + const expectedAction = 's3:PutObjectVersion'; + assert.strictEqual(results[0].getAction(), expectedAction); + } + ); it('should return s3:PutObject request context action for objectPut method and no header', () => { const apiMethod = 'objectPut'; const request = makeRequest({}); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); assert.strictEqual(results.length, 1); const expectedAction = 's3:PutObject'; assert.strictEqual(results[0].getAction(), expectedAction); }); - it('should return s3:PutObject and s3:PutObjectTagging actions for objectPut method with' + - ' x-amz-tagging header', () => { - const apiMethod = 'objectPut'; - const request = makeRequest({ - 'x-amz-tagging': 
'key1=value1&key2=value2', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + it( + 'should return s3:PutObject and s3:PutObjectTagging actions for objectPut method with' + + ' x-amz-tagging header', + () => { + const apiMethod = 'objectPut'; + const request = makeRequest({ + 'x-amz-tagging': 'key1=value1&key2=value2', + }); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); - assert.strictEqual(results.length, 2); - const expectedAction1 = 's3:PutObject'; - const expectedAction2 = 's3:PutObjectTagging'; - assert.strictEqual(results[0].getAction(), expectedAction1); - assert.strictEqual(results[1].getAction(), expectedAction2); - }); + assert.strictEqual(results.length, 2); + const expectedAction1 = 's3:PutObject'; + const expectedAction2 = 's3:PutObjectTagging'; + assert.strictEqual(results[0].getAction(), expectedAction1); + assert.strictEqual(results[1].getAction(), expectedAction2); + } + ); it('should return s3:PutObject and s3:PutObjectAcl actions for objectPut method with ACL header', () => { const apiMethod = 'objectPut'; const request = makeRequest({ 'x-amz-acl': 'private', }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); assert.strictEqual(results.length, 2); const expectedAction1 = 's3:PutObject'; @@ -98,10 +101,8 @@ describe('prepareRequestContexts', () => { it('should return s3:GetObject for headObject', () => { const apiMethod = 'objectHead'; - const request = makeRequest({ - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + const request = makeRequest({}); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); assert.strictEqual(results.length, 1); assert.strictEqual(results[0].getAction(), 's3:GetObject'); @@ -112,43 +113,46 @@ describe('prepareRequestContexts', () => { const request = makeRequest({ 'x-amz-version-id': '0987654323456789', }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); assert.strictEqual(results.length, 2); assert.strictEqual(results[0].getAction(), 's3:GetObject'); assert.strictEqual(results[1].getAction(), 's3:GetObjectVersion'); }); - it('should return s3:GetObject and scality:GetObjectArchiveInfo for headObject ' + - 'with x-amz-scal-archive-info header', () => { - const apiMethod = 'objectHead'; - const request = makeRequest({ - 'x-amz-scal-archive-info': 'true', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); - - assert.strictEqual(results.length, 2); - assert.strictEqual(results[0].getAction(), 's3:GetObject'); - assert.strictEqual(results[1].getAction(), 'scality:GetObjectArchiveInfo'); - }); - - it('should return s3:GetObject, s3:GetObjectVersion and scality:GetObjectArchiveInfo ' + - ' for headObject with x-amz-scal-archive-info header', () => { - const apiMethod = 'objectHead'; - const request = makeRequest({ - 'x-amz-version-id': '0987654323456789', - 'x-amz-scal-archive-info': 'true', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + it( + 
'should return s3:GetObject and scality:GetObjectArchiveInfo for headObject ' + + 'with x-amz-scal-archive-info header', + () => { + const apiMethod = 'objectHead'; + const request = makeRequest({ + 'x-amz-scal-archive-info': 'true', + }); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); + + assert.strictEqual(results.length, 2); + assert.strictEqual(results[0].getAction(), 's3:GetObject'); + assert.strictEqual(results[1].getAction(), 'scality:GetObjectArchiveInfo'); + } + ); + + it( + 'should return s3:GetObject, s3:GetObjectVersion and scality:GetObjectArchiveInfo ' + + ' for headObject with x-amz-scal-archive-info header', + () => { + const apiMethod = 'objectHead'; + const request = makeRequest({ + 'x-amz-version-id': '0987654323456789', + 'x-amz-scal-archive-info': 'true', + }); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); - assert.strictEqual(results.length, 3); - assert.strictEqual(results[0].getAction(), 's3:GetObject'); - assert.strictEqual(results[1].getAction(), 's3:GetObjectVersion'); - assert.strictEqual(results[2].getAction(), 'scality:GetObjectArchiveInfo'); - }); + assert.strictEqual(results.length, 3); + assert.strictEqual(results[0].getAction(), 's3:GetObject'); + assert.strictEqual(results[1].getAction(), 's3:GetObjectVersion'); + assert.strictEqual(results[2].getAction(), 'scality:GetObjectArchiveInfo'); + } + ); it('should return s3:PutObjectRetention with header x-amz-object-lock-mode', () => { const apiMethod = 'objectPut'; @@ -165,45 +169,54 @@ describe('prepareRequestContexts', () => { assert.strictEqual(results[1].getAction(), expectedAction2); }); - it('should return s3:PutObjectRetention and s3:BypassGovernanceRetention for objectPut ' + - 'with header x-amz-bypass-governance-retention', () => { - const apiMethod = 'objectPut'; - const request = makeRequest({ - 'x-amz-object-lock-mode': 'GOVERNANCE', - 'x-amz-object-lock-retain-until-date': '2021-12-31T23:59:59.000Z', - 'x-amz-bypass-governance-retention': 'true', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); - - assert.strictEqual(results.length, 3); - const expectedAction1 = 's3:PutObject'; - const expectedAction2 = 's3:PutObjectRetention'; - const expectedAction3 = 's3:BypassGovernanceRetention'; - assert.strictEqual(results[0].getAction(), expectedAction1); - assert.strictEqual(results[1].getAction(), expectedAction2); - assert.strictEqual(results[2].getAction(), expectedAction3); - }); - - it('should return s3:PutObjectRetention and s3:BypassGovernanceRetention for objectPut ' + - 'with header x-amz-bypass-governance-retention with version id specified', () => { - const apiMethod = 'objectPut'; - const request = makeRequest({ - 'x-amz-object-lock-mode': 'GOVERNANCE', - 'x-amz-object-lock-retain-until-date': '2021-12-31T23:59:59.000Z', - 'x-amz-bypass-governance-retention': 'true', - }, { - versionId: 'vid1', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); - - assert.strictEqual(results.length, 3); - const expectedAction1 = 's3:PutObject'; - const expectedAction2 = 's3:PutObjectRetention'; - const expectedAction3 = 's3:BypassGovernanceRetention'; - assert.strictEqual(results[0].getAction(), expectedAction1); - assert.strictEqual(results[1].getAction(), expectedAction2); - assert.strictEqual(results[2].getAction(), expectedAction3); - }); + it( + 'should return 
s3:PutObjectRetention and s3:BypassGovernanceRetention for objectPut ' + + 'with header x-amz-bypass-governance-retention', + () => { + const apiMethod = 'objectPut'; + const request = makeRequest({ + 'x-amz-object-lock-mode': 'GOVERNANCE', + 'x-amz-object-lock-retain-until-date': '2021-12-31T23:59:59.000Z', + 'x-amz-bypass-governance-retention': 'true', + }); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); + + assert.strictEqual(results.length, 3); + const expectedAction1 = 's3:PutObject'; + const expectedAction2 = 's3:PutObjectRetention'; + const expectedAction3 = 's3:BypassGovernanceRetention'; + assert.strictEqual(results[0].getAction(), expectedAction1); + assert.strictEqual(results[1].getAction(), expectedAction2); + assert.strictEqual(results[2].getAction(), expectedAction3); + } + ); + + it( + 'should return s3:PutObjectRetention and s3:BypassGovernanceRetention for objectPut ' + + 'with header x-amz-bypass-governance-retention with version id specified', + () => { + const apiMethod = 'objectPut'; + const request = makeRequest( + { + 'x-amz-object-lock-mode': 'GOVERNANCE', + 'x-amz-object-lock-retain-until-date': '2021-12-31T23:59:59.000Z', + 'x-amz-bypass-governance-retention': 'true', + }, + { + versionId: 'vid1', + } + ); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); + + assert.strictEqual(results.length, 3); + const expectedAction1 = 's3:PutObject'; + const expectedAction2 = 's3:PutObjectRetention'; + const expectedAction3 = 's3:BypassGovernanceRetention'; + assert.strictEqual(results[0].getAction(), expectedAction1); + assert.strictEqual(results[1].getAction(), expectedAction2); + assert.strictEqual(results[2].getAction(), expectedAction3); + } + ); it('should return s3:PutObjectRetention with header x-amz-object-lock-mode for objectPutRetention action', () => { const apiMethod = 'objectPutRetention'; @@ -218,47 +231,55 @@ describe('prepareRequestContexts', () => { assert.strictEqual(results[0].getAction(), expectedAction); }); - it('should return s3:PutObjectRetention and s3:BypassGovernanceRetention for objectPutRetention ' + - 'with header x-amz-bypass-governance-retention', () => { - const apiMethod = 'objectPutRetention'; - const request = makeRequest({ - 'x-amz-object-lock-mode': 'GOVERNANCE', - 'x-amz-object-lock-retain-until-date': '2021-12-31T23:59:59.000Z', - 'x-amz-bypass-governance-retention': 'true', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); - - assert.strictEqual(results.length, 2); - const expectedAction1 = 's3:PutObjectRetention'; - const expectedAction2 = 's3:BypassGovernanceRetention'; - assert.strictEqual(results[0].getAction(), expectedAction1); - assert.strictEqual(results[1].getAction(), expectedAction2); - }); - - it('should return s3:PutObjectRetention and s3:BypassGovernanceRetention for objectPutRetention ' + - 'with header x-amz-bypass-governance-retention with version id specified', () => { - const apiMethod = 'objectPutRetention'; - const request = makeRequest({ - 'x-amz-object-lock-mode': 'GOVERNANCE', - 'x-amz-object-lock-retain-until-date': '2021-12-31T23:59:59.000Z', - 'x-amz-bypass-governance-retention': 'true', - }, { - versionId: 'vid1', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); - - assert.strictEqual(results.length, 2); - const expectedAction1 = 's3:PutObjectRetention'; - const 
expectedAction2 = 's3:BypassGovernanceRetention'; - assert.strictEqual(results[0].getAction(), expectedAction1); - assert.strictEqual(results[1].getAction(), expectedAction2); - }); + it( + 'should return s3:PutObjectRetention and s3:BypassGovernanceRetention for objectPutRetention ' + + 'with header x-amz-bypass-governance-retention', + () => { + const apiMethod = 'objectPutRetention'; + const request = makeRequest({ + 'x-amz-object-lock-mode': 'GOVERNANCE', + 'x-amz-object-lock-retain-until-date': '2021-12-31T23:59:59.000Z', + 'x-amz-bypass-governance-retention': 'true', + }); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); + + assert.strictEqual(results.length, 2); + const expectedAction1 = 's3:PutObjectRetention'; + const expectedAction2 = 's3:BypassGovernanceRetention'; + assert.strictEqual(results[0].getAction(), expectedAction1); + assert.strictEqual(results[1].getAction(), expectedAction2); + } + ); + + it( + 'should return s3:PutObjectRetention and s3:BypassGovernanceRetention for objectPutRetention ' + + 'with header x-amz-bypass-governance-retention with version id specified', + () => { + const apiMethod = 'objectPutRetention'; + const request = makeRequest( + { + 'x-amz-object-lock-mode': 'GOVERNANCE', + 'x-amz-object-lock-retain-until-date': '2021-12-31T23:59:59.000Z', + 'x-amz-bypass-governance-retention': 'true', + }, + { + versionId: 'vid1', + } + ); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); + + assert.strictEqual(results.length, 2); + const expectedAction1 = 's3:PutObjectRetention'; + const expectedAction2 = 's3:BypassGovernanceRetention'; + assert.strictEqual(results[0].getAction(), expectedAction1); + assert.strictEqual(results[1].getAction(), expectedAction2); + } + ); it('should return s3:DeleteObject for objectDelete method', () => { const apiMethod = 'objectDelete'; const request = makeRequest(); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); assert.strictEqual(results.length, 1); assert.strictEqual(results[0].getAction(), 's3:DeleteObject'); @@ -266,82 +287,94 @@ describe('prepareRequestContexts', () => { it('should return s3:DeleteObjectVersion for objectDelete method with version id specified', () => { const apiMethod = 'objectDelete'; - const request = makeRequest({}, { - versionId: 'vid1', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + const request = makeRequest( + {}, + { + versionId: 'vid1', + } + ); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); assert.strictEqual(results.length, 1); assert.strictEqual(results[0].getAction(), 's3:DeleteObjectVersion'); }); // Now it shuld include the bypass header if set - it('should return s3:DeleteObjectVersion and s3:BypassGovernanceRetention for objectDelete method ' + - 'with version id specified and x-amz-bypass-governance-retention header', () => { - const apiMethod = 'objectDelete'; - const request = makeRequest({ - 'x-amz-bypass-governance-retention': 'true', - }, { - versionId: 'vid1', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); - - assert.strictEqual(results.length, 2); - const expectedAction1 = 's3:DeleteObjectVersion'; - const 
expectedAction2 = 's3:BypassGovernanceRetention'; - assert.strictEqual(results[0].getAction(), expectedAction1); - assert.strictEqual(results[1].getAction(), expectedAction2); - }); + it( + 'should return s3:DeleteObjectVersion and s3:BypassGovernanceRetention for objectDelete method ' + + 'with version id specified and x-amz-bypass-governance-retention header', + () => { + const apiMethod = 'objectDelete'; + const request = makeRequest( + { + 'x-amz-bypass-governance-retention': 'true', + }, + { + versionId: 'vid1', + } + ); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); + + assert.strictEqual(results.length, 2); + const expectedAction1 = 's3:DeleteObjectVersion'; + const expectedAction2 = 's3:BypassGovernanceRetention'; + assert.strictEqual(results[0].getAction(), expectedAction1); + assert.strictEqual(results[1].getAction(), expectedAction2); + } + ); // When there is no version ID, AWS does not return any error if the object // is locked, but creates a delete marker - it('should only return s3:DeleteObject for objectDelete method ' + - 'with x-amz-bypass-governance-retention header and no version id', () => { - const apiMethod = 'objectDelete'; - const request = makeRequest({ - 'x-amz-bypass-governance-retention': 'true', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); - - assert.strictEqual(results.length, 1); - const expectedAction = 's3:DeleteObject'; - assert.strictEqual(results[0].getAction(), expectedAction); - }); - - ['initiateMultipartUpload', 'objectPutPart', 'completeMultipartUpload'].forEach(apiMethod => { - it(`should return s3:PutObjectVersion request context action for ${apiMethod} method ` + - 'with x-scal-s3-version-id header', () => { + it( + 'should only return s3:DeleteObject for objectDelete method ' + + 'with x-amz-bypass-governance-retention header and no version id', + () => { + const apiMethod = 'objectDelete'; const request = makeRequest({ - 'x-scal-s3-version-id': '', + 'x-amz-bypass-governance-retention': 'true', }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); assert.strictEqual(results.length, 1); - const expectedAction = 's3:PutObjectVersion'; + const expectedAction = 's3:DeleteObject'; assert.strictEqual(results[0].getAction(), expectedAction); - }); + } + ); - it(`should return s3:PutObjectVersion request context action for ${apiMethod} method` + - 'with empty x-scal-s3-version-id header', () => { - const request = makeRequest({ - 'x-scal-s3-version-id': '', - }); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); - - assert.strictEqual(results.length, 1); - const expectedAction = 's3:PutObjectVersion'; - assert.strictEqual(results[0].getAction(), expectedAction); - }); + ['initiateMultipartUpload', 'objectPutPart', 'completeMultipartUpload'].forEach(apiMethod => { + it( + `should return s3:PutObjectVersion request context action for ${apiMethod} method ` + + 'with x-scal-s3-version-id header', + () => { + const request = makeRequest({ + 'x-scal-s3-version-id': '', + }); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); + + assert.strictEqual(results.length, 1); + const expectedAction = 's3:PutObjectVersion'; + 
assert.strictEqual(results[0].getAction(), expectedAction); + } + ); + + it( + `should return s3:PutObjectVersion request context action for ${apiMethod} method` + + 'with empty x-scal-s3-version-id header', + () => { + const request = makeRequest({ + 'x-scal-s3-version-id': '', + }); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); + + assert.strictEqual(results.length, 1); + const expectedAction = 's3:PutObjectVersion'; + assert.strictEqual(results[0].getAction(), expectedAction); + } + ); it(`should return s3:PutObject request context action for ${apiMethod} method and no header`, () => { const request = makeRequest({}); - const results = prepareRequestContexts(apiMethod, request, sourceBucket, - sourceObject, sourceVersionId); + const results = prepareRequestContexts(apiMethod, request, sourceBucket, sourceObject, sourceVersionId); assert.strictEqual(results.length, 1); const expectedAction = 's3:PutObject'; diff --git a/tests/unit/api/apiUtils/coldStorage.js b/tests/unit/api/apiUtils/coldStorage.js index 2d436caa32..75625acd82 100644 --- a/tests/unit/api/apiUtils/coldStorage.js +++ b/tests/unit/api/apiUtils/coldStorage.js @@ -4,7 +4,7 @@ const { errors } = require('arsenal'); const { startRestore, validatePutVersionId, - verifyColdObjectAvailable + verifyColdObjectAvailable, } = require('../../../../lib/api/apiUtils/object/coldStorage'); const { DummyRequestLogger } = require('../../helpers'); const { ObjectMD, ObjectMDArchive } = require('arsenal/build/lib/models'); @@ -70,29 +70,36 @@ describe('cold storage', () => { }, expectedRes: undefined, }, - ].forEach(testCase => it(testCase.description, () => { - const res = validatePutVersionId(testCase.objMD, testCase.versionId, log); - assert.deepStrictEqual(res, testCase.expectedRes); - })); + ].forEach(testCase => + it(testCase.description, () => { + const res = validatePutVersionId(testCase.objMD, testCase.versionId, log); + assert.deepStrictEqual(res, testCase.expectedRes); + }) + ); }); describe('verifyColdObjectAvailable', () => { [ { description: 'should return error if object is in a cold location', - objectMd: new ObjectMD() - .setArchive(new ObjectMDArchive({ + objectMd: new ObjectMD().setArchive( + new ObjectMDArchive({ archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', - archiveVersion: 5577006791947779 - })) + archiveVersion: 5577006791947779, + }) + ), }, { description: 'should return error if object is restoring', - objectMd: new ObjectMD() - .setArchive(new ObjectMDArchive({ - archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', - archiveVersion: 5577006791947779, - }, Date.now())) + objectMd: new ObjectMD().setArchive( + new ObjectMDArchive( + { + archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', + archiveVersion: 5577006791947779, + }, + Date.now() + ) + ), }, ].forEach(params => { it(`${params.description}`, () => { @@ -114,16 +121,18 @@ describe('cold storage', () => { }); it('should return null if object is restored', () => { - const objectMd = new ObjectMD().setArchive(new ObjectMDArchive( - { - archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', - archiveVersion: 5577006791947779, - }, - /*restoreRequestedAt*/ new Date(0), - /*restoreRequestedDays*/ 5, - /*restoreCompletedAt*/ new Date(1000), - /*restoreWillExpireAt*/ new Date(1000 + 5 * oneDay), - )); + const objectMd = new ObjectMD().setArchive( + new ObjectMDArchive( + { + archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', + archiveVersion: 5577006791947779, + }, + /*restoreRequestedAt*/ new Date(0), + 
/*restoreRequestedDays*/ 5, + /*restoreCompletedAt*/ new Date(1000), + /*restoreWillExpireAt*/ new Date(1000 + 5 * oneDay) + ) + ); const err = verifyColdObjectAvailable(objectMd.getValue()); assert.ifError(err); }); @@ -140,16 +149,19 @@ describe('cold storage', () => { }); it('should fail when object is being restored', done => { - const objectMd = new ObjectMD().setDataStoreName( - 'location-dmf-v1' - ).setArchive(new ObjectMDArchive( - { - archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', - archiveVersion: 5577006791947779, - }, - /*restoreRequestedAt*/ new Date(0), - /*restoreRequestedDays*/ 5, - )).getValue(); + const objectMd = new ObjectMD() + .setDataStoreName('location-dmf-v1') + .setArchive( + new ObjectMDArchive( + { + archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', + archiveVersion: 5577006791947779, + }, + /*restoreRequestedAt*/ new Date(0), + /*restoreRequestedDays*/ 5 + ) + ) + .getValue(); startRestore(objectMd, { days: 5 }, log, err => { assert.deepStrictEqual(err, errors.RestoreAlreadyInProgress); @@ -158,18 +170,21 @@ describe('cold storage', () => { }); it('should fail when restored object is expired', done => { - const objectMd = new ObjectMD().setDataStoreName( - 'location-dmf-v1' - ).setArchive(new ObjectMDArchive( - { - archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', - archiveVersion: 5577006791947779, - }, - /*restoreRequestedAt*/ new Date(0), - /*restoreRequestedDays*/ 5, - /*restoreCompletedAt*/ new Date(Date.now() - 6 * oneDay), - /*restoreWillExpireAt*/ new Date(Date.now() - 1 * oneDay), - )).getValue(); + const objectMd = new ObjectMD() + .setDataStoreName('location-dmf-v1') + .setArchive( + new ObjectMDArchive( + { + archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', + archiveVersion: 5577006791947779, + }, + /*restoreRequestedAt*/ new Date(0), + /*restoreRequestedDays*/ 5, + /*restoreCompletedAt*/ new Date(Date.now() - 6 * oneDay), + /*restoreWillExpireAt*/ new Date(Date.now() - 1 * oneDay) + ) + ) + .getValue(); startRestore(objectMd, { days: 5 }, log, err => { assert.deepStrictEqual(err, errors.InvalidObjectState); @@ -178,12 +193,15 @@ describe('cold storage', () => { }); it('should succeed for cold object', done => { - const objectMd = new ObjectMD().setDataStoreName( - 'location-dmf-v1' - ).setArchive(new ObjectMDArchive({ - archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', - archiveVersion: 5577006791947779, - })).getValue(); + const objectMd = new ObjectMD() + .setDataStoreName('location-dmf-v1') + .setArchive( + new ObjectMDArchive({ + archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', + archiveVersion: 5577006791947779, + }) + ) + .getValue(); const t = new Date(); startRestore(objectMd, { days: 7 }, log, (err, isObjectAlreadyRestored) => { @@ -203,22 +221,26 @@ describe('cold storage', () => { }); it('should succeed for restored object', done => { - const objectMd = new ObjectMD().setDataStoreName( - 'location-dmf-v1' - ).setArchive(new ObjectMDArchive( - { - archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', - archiveVersion: 5577006791947779, - }, - /*restoreRequestedAt*/ new Date(0), - /*restoreRequestedDays*/ 2, - /*restoreCompletedAt*/ new Date(Date.now() - 1 * oneDay), - /*restoreWillExpireAt*/ new Date(Date.now() + 1 * oneDay), - )).setAmzRestore({ - 'ongoing-request': false, - 'expiry-date': new Date(Date.now() + 1 * oneDay), - 'content-md5': '12345' - }).getValue(); + const objectMd = new ObjectMD() + .setDataStoreName('location-dmf-v1') + .setArchive( + new ObjectMDArchive( + { + archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322', 
+ archiveVersion: 5577006791947779, + }, + /*restoreRequestedAt*/ new Date(0), + /*restoreRequestedDays*/ 2, + /*restoreCompletedAt*/ new Date(Date.now() - 1 * oneDay), + /*restoreWillExpireAt*/ new Date(Date.now() + 1 * oneDay) + ) + ) + .setAmzRestore({ + 'ongoing-request': false, + 'expiry-date': new Date(Date.now() + 1 * oneDay), + 'content-md5': '12345', + }) + .getValue(); const restoreCompletedAt = objectMd.archive.restoreCompletedAt; const t = new Date(); @@ -232,12 +254,14 @@ describe('cold storage', () => { assert.ok(objectMd.archive.restoreRequestedAt.getTime() <= new Date()); assert.strictEqual(objectMd.archive.restoreCompletedAt, restoreCompletedAt); - assert.strictEqual(objectMd.archive.restoreWillExpireAt.getTime(), - objectMd.archive.restoreRequestedAt.getTime() + (5 * scaledMsPerDay)); + assert.strictEqual( + objectMd.archive.restoreWillExpireAt.getTime(), + objectMd.archive.restoreRequestedAt.getTime() + 5 * scaledMsPerDay + ); assert.deepEqual(objectMd['x-amz-restore'], { 'ongoing-request': false, 'expiry-date': objectMd.archive.restoreWillExpireAt, - 'content-md5': '12345' + 'content-md5': '12345', }); done(); @@ -245,9 +269,7 @@ describe('cold storage', () => { }); it('should fail if _updateRestoreInfo fails', done => { - const objectMd = new ObjectMD().setDataStoreName( - 'location-dmf-v1' - ).setArchive(false).getValue(); + const objectMd = new ObjectMD().setDataStoreName('location-dmf-v1').setArchive(false).getValue(); startRestore(objectMd, { days: 7 }, log, err => { assert.deepStrictEqual(err, errors.InternalError); diff --git a/tests/unit/api/apiUtils/expirationHeaders.js b/tests/unit/api/apiUtils/expirationHeaders.js index 41fa1af48c..b5ad46fd2a 100644 --- a/tests/unit/api/apiUtils/expirationHeaders.js +++ b/tests/unit/api/apiUtils/expirationHeaders.js @@ -1,25 +1,19 @@ const assert = require('assert'); const { LifecycleDateTime } = require('arsenal').s3middleware.lifecycleHelpers; - -const { - generateExpirationHeaders, -} = require('../../../../lib/api/apiUtils/object/expirationHeaders'); +const { generateExpirationHeaders } = require('../../../../lib/api/apiUtils/object/expirationHeaders'); const datetime = new LifecycleDateTime(); const objectDate = 'Fri, 21 Dec 2012 00:00:00 GMT'; const expectedDaysExpiryDate = 'Sat, 22 Dec 2012 00:00:00 GMT'; const expectedDateExpiryDate = 'Mon, 24 Dec 2012 00:00:00 GMT'; - const lifecycleExpirationDays = { rules: [ { ruleID: 'test-days', ruleStatus: 'Enabled', - actions: [ - { actionName: 'Expiration', days: 1 }, - ], + actions: [{ actionName: 'Expiration', days: 1 }], prefix: '', }, ], @@ -31,13 +25,9 @@ const lifecycleExpirationTags = { ruleID: 'test-tags', ruleStatus: 'Enabled', filters: { - tags: [ - { key: 'key1', val: 'val1' }, - ], + tags: [{ key: 'key1', val: 'val1' }], }, - actions: [ - { actionName: 'Expiration', days: 1 }, - ], + actions: [{ actionName: 'Expiration', days: 1 }], }, ], }; @@ -47,9 +37,7 @@ const lifecycleExpirationDate = { { ruleID: 'test-date', ruleStatus: 'Enabled', - actions: [ - { actionName: 'Expiration', date: 'Mon, 24 Dec 2012 00:00:00 GMT' }, - ], + actions: [{ actionName: 'Expiration', date: 'Mon, 24 Dec 2012 00:00:00 GMT' }], prefix: '', }, ], @@ -60,9 +48,7 @@ const lifecycleExpirationMPU = { { ruleID: 'test-mpu', ruleStatus: 'Enabled', - actions: [ - { actionName: 'AbortIncompleteMultipartUpload', days: 1 }, - ], + actions: [{ actionName: 'AbortIncompleteMultipartUpload', days: 1 }], prefix: '', }, ], @@ -172,7 +158,9 @@ describe('generateExpirationHeaders', () => { ], ]; - 
tests.forEach(([msg, params, expected]) => it(msg, () => { - assert.deepStrictEqual(generateExpirationHeaders(params, datetime), expected); - })); + tests.forEach(([msg, params, expected]) => + it(msg, () => { + assert.deepStrictEqual(generateExpirationHeaders(params, datetime), expected); + }) + ); }); diff --git a/tests/unit/api/apiUtils/getNotificationConfiguration.js b/tests/unit/api/apiUtils/getNotificationConfiguration.js index d5a531421c..8d226edb2e 100644 --- a/tests/unit/api/apiUtils/getNotificationConfiguration.js +++ b/tests/unit/api/apiUtils/getNotificationConfiguration.js @@ -3,8 +3,7 @@ const sinon = require('sinon'); const { config } = require('../../../../lib/Config'); const errors = require('arsenal').errors; -const getNotificationConfiguration = - require('../../../../lib/api/apiUtils/bucket/getNotificationConfiguration'); +const getNotificationConfiguration = require('../../../../lib/api/apiUtils/bucket/getNotificationConfiguration'); const parsedXml = { NotificationConfiguration: { @@ -15,18 +14,18 @@ const parsedXml = { Queue: ['arn:scality:bucketnotif:::target1'], }, ], - } + }, }; const expectedConfig = { queueConfig: [ { - events: ['s3:ObjectCreated:*'], - queueArn: 'arn:scality:bucketnotif:::target1', - id: 'notification-id', - filterRules: undefined - } - ] + events: ['s3:ObjectCreated:*'], + queueArn: 'arn:scality:bucketnotif:::target1', + id: 'notification-id', + filterRules: undefined, + }, + ], }; const destination1 = [ @@ -34,7 +33,7 @@ const destination1 = [ resource: 'target1', type: 'dummy', host: 'localhost:6000', - } + }, ]; const destinations2 = [ @@ -42,7 +41,7 @@ const destinations2 = [ resource: 'target2', type: 'dummy', host: 'localhost:6000', - } + }, ]; describe('getNotificationConfiguration', () => { @@ -58,7 +57,7 @@ describe('getNotificationConfiguration', () => { it('should return empty notification configuration', done => { sinon.stub(config, 'bucketNotificationDestinations').value(destination1); const notifConfig = getNotificationConfiguration({ - NotificationConfiguration: {} + NotificationConfiguration: {}, }); assert.deepEqual(notifConfig, {}); return done(); @@ -76,10 +75,12 @@ describe('getNotificationConfiguration', () => { const notifConfig = getNotificationConfiguration(parsedXml); assert.deepEqual(notifConfig.error, errors.InvalidArgument); const invalidArguments = notifConfig.error.metadata.get('invalidArguments'); - assert.deepEqual(invalidArguments, [{ - ArgumentName: 'arn:scality:bucketnotif:::target1', - ArgumentValue: 'The destination queue does not exist', - }]); + assert.deepEqual(invalidArguments, [ + { + ArgumentName: 'arn:scality:bucketnotif:::target1', + ArgumentValue: 'The destination queue does not exist', + }, + ]); return done(); }); }); diff --git a/tests/unit/api/apiUtils/getReplicationInfo.js b/tests/unit/api/apiUtils/getReplicationInfo.js index d8bec4c1e4..d8f5f340e2 100644 --- a/tests/unit/api/apiUtils/getReplicationInfo.js +++ b/tests/unit/api/apiUtils/getReplicationInfo.js @@ -2,15 +2,25 @@ const assert = require('assert'); const BucketInfo = require('arsenal').models.BucketInfo; const AuthInfo = require('arsenal').auth.AuthInfo; -const getReplicationInfo = - require('../../../../lib/api/apiUtils/object/getReplicationInfo'); +const getReplicationInfo = require('../../../../lib/api/apiUtils/object/getReplicationInfo'); function _getObjectReplicationInfo(s3config, replicationConfig) { const bucketInfo = new BucketInfo( - 'testbucket', 'someCanonicalId', 'accountDisplayName', + 'testbucket', + 'someCanonicalId', 
+ 'accountDisplayName', new Date().toJSON(), - null, null, null, null, null, null, null, null, null, - replicationConfig); + null, + null, + null, + null, + null, + null, + null, + null, + null, + replicationConfig + ); return getReplicationInfo(s3config, 'fookey', bucketInfo, true, 123, null, null); } @@ -36,39 +46,46 @@ const TEST_CONFIG = { azureStorageAccountName: 'fakeaccountname', azureStorageAccessKey: 'Fake00Key001', bucketMatch: true, - azureContainerName: 's3test' - } + azureContainerName: 's3test', + }, }, }, - replicationEndpoints: [{ - site: 'zenko', - servers: ['127.0.0.1:8000'], - default: true, - }, { - site: 'us-east-2', - type: 'aws_s3', - }], + replicationEndpoints: [ + { + site: 'zenko', + servers: ['127.0.0.1:8000'], + default: true, + }, + { + site: 'us-east-2', + type: 'aws_s3', + }, + ], }; describe('getReplicationInfo helper', () => { it('should get replication info when rules are enabled', () => { const replicationConfig = { role: 'arn:aws:iam::root:role/s3-replication-role', - rules: [{ - prefix: '', - enabled: true, - storageClass: 'awsbackend', - }], + rules: [ + { + prefix: '', + enabled: true, + storageClass: 'awsbackend', + }, + ], destination: 'tosomewhere', }; const replicationInfo = _getObjectReplicationInfo(TEST_CONFIG, replicationConfig); assert.deepStrictEqual(replicationInfo, { status: 'PENDING', - backends: [{ - site: 'awsbackend', - status: 'PENDING', - dataStoreVersionId: '', - }], + backends: [ + { + site: 'awsbackend', + status: 'PENDING', + dataStoreVersionId: '', + }, + ], content: ['METADATA'], destination: 'tosomewhere', storageClass: 'awsbackend', @@ -81,11 +98,13 @@ describe('getReplicationInfo helper', () => { it('should not get replication info when rules are disabled', () => { const replicationConfig = { role: 'arn:aws:iam::root:role/s3-replication-role', - rules: [{ - prefix: '', - enabled: false, - storageClass: 'awsbackend', - }], + rules: [ + { + prefix: '', + enabled: false, + storageClass: 'awsbackend', + }, + ], destination: 'tosomewhere', }; const replicationInfo = _getObjectReplicationInfo(TEST_CONFIG, replicationConfig); @@ -95,21 +114,25 @@ describe('getReplicationInfo helper', () => { it('should get replication info with single cloud target', () => { const replicationConfig = { role: 'arn:aws:iam::root:role/s3-replication-role', - rules: [{ - prefix: '', - enabled: true, - storageClass: 'awsbackend', - }], + rules: [ + { + prefix: '', + enabled: true, + storageClass: 'awsbackend', + }, + ], destination: 'tosomewhere', }; const replicationInfo = _getObjectReplicationInfo(TEST_CONFIG, replicationConfig); assert.deepStrictEqual(replicationInfo, { status: 'PENDING', - backends: [{ - site: 'awsbackend', - status: 'PENDING', - dataStoreVersionId: '', - }], + backends: [ + { + site: 'awsbackend', + status: 'PENDING', + dataStoreVersionId: '', + }, + ], content: ['METADATA'], destination: 'tosomewhere', storageClass: 'awsbackend', @@ -122,25 +145,30 @@ describe('getReplicationInfo helper', () => { it('should get replication info with multiple cloud targets', () => { const replicationConfig = { role: 'arn:aws:iam::root:role/s3-replication-role', - rules: [{ - prefix: '', - enabled: true, - storageClass: 'awsbackend,azurebackend', - }], + rules: [ + { + prefix: '', + enabled: true, + storageClass: 'awsbackend,azurebackend', + }, + ], destination: 'tosomewhere', }; const replicationInfo = _getObjectReplicationInfo(TEST_CONFIG, replicationConfig); assert.deepStrictEqual(replicationInfo, { status: 'PENDING', - backends: [{ - site: 
'awsbackend', - status: 'PENDING', - dataStoreVersionId: '', - }, { - site: 'azurebackend', - status: 'PENDING', - dataStoreVersionId: '', - }], + backends: [ + { + site: 'awsbackend', + status: 'PENDING', + dataStoreVersionId: '', + }, + { + site: 'azurebackend', + status: 'PENDING', + dataStoreVersionId: '', + }, + ], content: ['METADATA'], destination: 'tosomewhere', storageClass: 'awsbackend,azurebackend', @@ -150,30 +178,34 @@ describe('getReplicationInfo helper', () => { }); }); - it('should get replication info with multiple cloud targets and ' + - 'preferred read location', () => { + it('should get replication info with multiple cloud targets and ' + 'preferred read location', () => { const replicationConfig = { role: 'arn:aws:iam::root:role/s3-replication-role', - rules: [{ - prefix: '', - enabled: true, - storageClass: 'awsbackend:preferred_read,azurebackend', - }], + rules: [ + { + prefix: '', + enabled: true, + storageClass: 'awsbackend:preferred_read,azurebackend', + }, + ], destination: 'tosomewhere', preferredReadLocation: 'awsbackend', }; const replicationInfo = _getObjectReplicationInfo(TEST_CONFIG, replicationConfig); assert.deepStrictEqual(replicationInfo, { status: 'PENDING', - backends: [{ - site: 'awsbackend', - status: 'PENDING', - dataStoreVersionId: '', - }, { - site: 'azurebackend', - status: 'PENDING', - dataStoreVersionId: '', - }], + backends: [ + { + site: 'awsbackend', + status: 'PENDING', + dataStoreVersionId: '', + }, + { + site: 'azurebackend', + status: 'PENDING', + dataStoreVersionId: '', + }, + ], content: ['METADATA'], destination: 'tosomewhere', storageClass: 'awsbackend:preferred_read,azurebackend', @@ -183,61 +215,84 @@ describe('getReplicationInfo helper', () => { }); }); - it('should not get replication info when service account type ' + - 'cannot trigger replication', () => { + it('should not get replication info when service account type ' + 'cannot trigger replication', () => { const replicationConfig = { role: 'arn:aws:iam::root:role/s3-replication-role', - rules: [{ - prefix: '', - enabled: true, - storageClass: 'awsbackend', - }], + rules: [ + { + prefix: '', + enabled: true, + storageClass: 'awsbackend', + }, + ], destination: 'tosomewhere', }; const bucketInfo = new BucketInfo( - 'testbucket', 'abcdef/lifecycle', 'Lifecycle Service Account', + 'testbucket', + 'abcdef/lifecycle', + 'Lifecycle Service Account', new Date().toJSON(), - null, null, null, null, null, null, null, null, null, - replicationConfig); + null, + null, + null, + null, + null, + null, + null, + null, + null, + replicationConfig + ); const authInfo = new AuthInfo({ canonicalID: 'abcdef/lifecycle', accountDisplayName: 'Lifecycle Service Account', }); - const replicationInfo = getReplicationInfo(TEST_CONFIG, - 'fookey', bucketInfo, true, 123, null, null, authInfo); + const replicationInfo = getReplicationInfo(TEST_CONFIG, 'fookey', bucketInfo, true, 123, null, null, authInfo); assert.deepStrictEqual(replicationInfo, undefined); }); - it('should get replication info when service account type can ' + - 'trigger replication', () => { + it('should get replication info when service account type can ' + 'trigger replication', () => { const replicationConfig = { role: 'arn:aws:iam::root:role/s3-replication-role', - rules: [{ - prefix: '', - enabled: true, - storageClass: 'awsbackend', - }], + rules: [ + { + prefix: '', + enabled: true, + storageClass: 'awsbackend', + }, + ], destination: 'tosomewhere', }; const bucketInfo = new BucketInfo( - 'testbucket', 'abcdef/md-ingestion', + 
'testbucket', + 'abcdef/md-ingestion', 'Metadata Ingestion Service Account', new Date().toJSON(), - null, null, null, null, null, null, null, null, null, - replicationConfig); + null, + null, + null, + null, + null, + null, + null, + null, + null, + replicationConfig + ); const authInfo = new AuthInfo({ canonicalID: 'abcdef/md-ingestion', accountDisplayName: 'Metadata Ingestion Service Account', }); - const replicationInfo = getReplicationInfo(TEST_CONFIG, - 'fookey', bucketInfo, true, 123, null, null, authInfo); + const replicationInfo = getReplicationInfo(TEST_CONFIG, 'fookey', bucketInfo, true, 123, null, null, authInfo); assert.deepStrictEqual(replicationInfo, { status: 'PENDING', - backends: [{ - site: 'awsbackend', - status: 'PENDING', - dataStoreVersionId: '', - }], + backends: [ + { + site: 'awsbackend', + status: 'PENDING', + dataStoreVersionId: '', + }, + ], content: ['METADATA'], destination: 'tosomewhere', storageClass: 'awsbackend', @@ -250,20 +305,24 @@ describe('getReplicationInfo helper', () => { it('should get replication info with default StorageClass when rules are enabled', () => { const replicationConfig = { role: 'arn:aws:iam::root:role/s3-replication-role-1,arn:aws:iam::root:role/s3-replication-role-2', - rules: [{ - prefix: '', - enabled: true, - }], + rules: [ + { + prefix: '', + enabled: true, + }, + ], destination: 'tosomewhere', }; const replicationInfo = _getObjectReplicationInfo(TEST_CONFIG, replicationConfig); assert.deepStrictEqual(replicationInfo, { status: 'PENDING', - backends: [{ - site: 'zenko', - status: 'PENDING', - dataStoreVersionId: '', - }], + backends: [ + { + site: 'zenko', + status: 'PENDING', + dataStoreVersionId: '', + }, + ], content: ['METADATA'], destination: 'tosomewhere', storageClass: 'zenko', @@ -276,26 +335,29 @@ describe('getReplicationInfo helper', () => { it('should return undefined with specified StorageClass mode if no replication endpoint is configured', () => { const replicationConfig = { role: 'arn:aws:iam::root:role/s3-replication-role', - rules: [{ - prefix: '', - enabled: true, - storageClass: 'awsbackend', - }], + rules: [ + { + prefix: '', + enabled: true, + storageClass: 'awsbackend', + }, + ], destination: 'tosomewhere', }; const configWithNoReplicationEndpoint = { locationConstraints: TEST_CONFIG.locationConstraints, replicationEndpoints: [], }; - const replicationInfo = _getObjectReplicationInfo(configWithNoReplicationEndpoint, - replicationConfig); + const replicationInfo = _getObjectReplicationInfo(configWithNoReplicationEndpoint, replicationConfig); assert.deepStrictEqual(replicationInfo, { status: 'PENDING', - backends: [{ - site: 'awsbackend', - status: 'PENDING', - dataStoreVersionId: '', - }], + backends: [ + { + site: 'awsbackend', + status: 'PENDING', + dataStoreVersionId: '', + }, + ], content: ['METADATA'], destination: 'tosomewhere', storageClass: 'awsbackend', @@ -308,18 +370,19 @@ describe('getReplicationInfo helper', () => { it('should return undefined with default StorageClass if no replication endpoint is configured', () => { const replicationConfig = { role: 'arn:aws:iam::root:role/s3-replication-role-1,arn:aws:iam::root:role/s3-replication-role-2', - rules: [{ - prefix: '', - enabled: true, - }], + rules: [ + { + prefix: '', + enabled: true, + }, + ], destination: 'tosomewhere', }; const configWithNoReplicationEndpoint = { locationConstraints: TEST_CONFIG.locationConstraints, replicationEndpoints: [], }; - const replicationInfo = _getObjectReplicationInfo(configWithNoReplicationEndpoint, - 
replicationConfig); + const replicationInfo = _getObjectReplicationInfo(configWithNoReplicationEndpoint, replicationConfig); assert.deepStrictEqual(replicationInfo, undefined); }); }); diff --git a/tests/unit/api/apiUtils/lifecycle.js b/tests/unit/api/apiUtils/lifecycle.js index 209aaf4403..831db6c8a1 100644 --- a/tests/unit/api/apiUtils/lifecycle.js +++ b/tests/unit/api/apiUtils/lifecycle.js @@ -1,6 +1,5 @@ const assert = require('assert'); -const { validateMaxScannedEntries } = - require('../../../../lib/api/apiUtils/object/lifecycle'); +const { validateMaxScannedEntries } = require('../../../../lib/api/apiUtils/object/lifecycle'); const tests = [ { diff --git a/tests/unit/api/apiUtils/locationKeysHaveChanged.js b/tests/unit/api/apiUtils/locationKeysHaveChanged.js index 39d16712e9..7c0575ed56 100644 --- a/tests/unit/api/apiUtils/locationKeysHaveChanged.js +++ b/tests/unit/api/apiUtils/locationKeysHaveChanged.js @@ -1,6 +1,5 @@ const assert = require('assert'); -const locationKeysHaveChanged = - require('../../../../lib/api/apiUtils/object/locationKeysHaveChanged'); +const locationKeysHaveChanged = require('../../../../lib/api/apiUtils/object/locationKeysHaveChanged'); describe('Check if location keys have changed between object locations', () => { it('should return true for no match ', () => { diff --git a/tests/unit/api/apiUtils/objectLockHelpers.js b/tests/unit/api/apiUtils/objectLockHelpers.js index 912e42f171..0501535fc8 100644 --- a/tests/unit/api/apiUtils/objectLockHelpers.js +++ b/tests/unit/api/apiUtils/objectLockHelpers.js @@ -16,14 +16,54 @@ const mockOwnerDisplayName = 'accountDisplayName'; const mockCreationDate = new Date().toJSON(); const bucketInfo = new BucketInfo( - mockName, mockOwner, mockOwnerDisplayName, mockCreationDate, - null, null, null, null, null, null, null, null, null, null, - null, null, null, null, null, null, null, true); + mockName, + mockOwner, + mockOwnerDisplayName, + mockCreationDate, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + true +); const objLockDisabledBucketInfo = new BucketInfo( - mockName, mockOwner, mockOwnerDisplayName, mockCreationDate, - null, null, null, null, null, null, null, null, null, null, - null, null, null, null, null, null, null, false); + mockName, + mockOwner, + mockOwnerDisplayName, + mockCreationDate, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + false +); const log = new DummyRequestLogger(); @@ -33,13 +73,12 @@ describe('objectLockHelpers: validateHeaders', () => { 'x-amz-object-lock-retain-until-date': '2050-10-12', 'x-amz-object-lock-mode': 'COMPLIANCE', }; - const objectLockValidationError - = validateHeaders(objLockDisabledBucketInfo, headers, log); + const objectLockValidationError = validateHeaders(objLockDisabledBucketInfo, headers, log); const expectedError = errorInstances.InvalidRequest.customizeDescription( - 'Bucket is missing ObjectLockConfiguration'); + 'Bucket is missing ObjectLockConfiguration' + ); assert.strictEqual(objectLockValidationError.is.InvalidRequest, true); - assert.strictEqual(objectLockValidationError.description, - expectedError.description); + assert.strictEqual(objectLockValidationError.description, expectedError.description); }); it('should pass with valid retention headers', () => { @@ -47,8 +86,7 @@ describe('objectLockHelpers: validateHeaders', () => { 'x-amz-object-lock-retain-until-date': '2050-10-12', 
'x-amz-object-lock-mode': 'COMPLIANCE', }; - const objectLockValidationError - = validateHeaders(bucketInfo, headers, log); + const objectLockValidationError = validateHeaders(bucketInfo, headers, log); assert.strictEqual(objectLockValidationError, null); }); @@ -56,8 +94,7 @@ describe('objectLockHelpers: validateHeaders', () => { const headers = { 'x-amz-object-lock-legal-hold': 'ON', }; - const objectLockValidationError - = validateHeaders(bucketInfo, headers, log); + const objectLockValidationError = validateHeaders(bucketInfo, headers, log); assert.strictEqual(objectLockValidationError, null); }); @@ -65,8 +102,7 @@ describe('objectLockHelpers: validateHeaders', () => { const headers = { 'x-amz-object-lock-legal-hold': 'OFF', }; - const objectLockValidationError - = validateHeaders(bucketInfo, headers, log); + const objectLockValidationError = validateHeaders(bucketInfo, headers, log); assert.strictEqual(objectLockValidationError, null); }); @@ -76,8 +112,7 @@ describe('objectLockHelpers: validateHeaders', () => { 'x-amz-object-lock-mode': 'GOVERNANCE', 'x-amz-object-lock-legal-hold': 'ON', }; - const objectLockValidationError - = validateHeaders(bucketInfo, headers, log); + const objectLockValidationError = validateHeaders(bucketInfo, headers, log); assert.strictEqual(objectLockValidationError, null); }); @@ -85,28 +120,24 @@ describe('objectLockHelpers: validateHeaders', () => { const headers = { 'x-amz-object-lock-retain-until-date': '2005-10-12', }; - const objectLockValidationError - = validateHeaders(bucketInfo, headers, log); + const objectLockValidationError = validateHeaders(bucketInfo, headers, log); const expectedError = errorInstances.InvalidArgument.customizeDescription( - 'x-amz-object-lock-retain-until-date and x-amz-object-lock-mode ' + - 'must both be supplied'); + 'x-amz-object-lock-retain-until-date and x-amz-object-lock-mode ' + 'must both be supplied' + ); assert.strictEqual(objectLockValidationError.is.InvalidArgument, true); - assert.strictEqual(objectLockValidationError.description, - expectedError.description); + assert.strictEqual(objectLockValidationError.description, expectedError.description); }); it('should fail with missing object-lock-retain-until-date header', () => { const headers = { 'x-amz-object-lock-mode': 'GOVERNANCE', }; - const objectLockValidationError - = validateHeaders(bucketInfo, headers, log); + const objectLockValidationError = validateHeaders(bucketInfo, headers, log); const expectedError = errorInstances.InvalidArgument.customizeDescription( - 'x-amz-object-lock-retain-until-date and x-amz-object-lock-mode ' + - 'must both be supplied'); + 'x-amz-object-lock-retain-until-date and x-amz-object-lock-mode ' + 'must both be supplied' + ); assert.strictEqual(objectLockValidationError.is.InvalidArgument, true); - assert.strictEqual(objectLockValidationError.description, - expectedError.description); + assert.strictEqual(objectLockValidationError.description, expectedError.description); }); it('should fail with past retention date header', () => { @@ -115,25 +146,23 @@ describe('objectLockHelpers: validateHeaders', () => { 'x-amz-object-lock-mode': 'COMPLIANCE', }; const expectedError = errorInstances.InvalidArgument.customizeDescription( - 'The retain until date must be in the future!'); - const objectLockValidationError - = validateHeaders(bucketInfo, headers, log); + 'The retain until date must be in the future!' 
+ ); + const objectLockValidationError = validateHeaders(bucketInfo, headers, log); assert.strictEqual(objectLockValidationError.is.InvalidArgument, true); - assert.strictEqual(objectLockValidationError.description, - expectedError.description); + assert.strictEqual(objectLockValidationError.description, expectedError.description); }); it('should fail with invalid legal hold header', () => { const headers = { 'x-amz-object-lock-legal-hold': 'on', }; - const objectLockValidationError - = validateHeaders(bucketInfo, headers, log); + const objectLockValidationError = validateHeaders(bucketInfo, headers, log); const expectedError = errorInstances.InvalidArgument.customizeDescription( - 'Legal hold status must be one of "ON", "OFF"'); + 'Legal hold status must be one of "ON", "OFF"' + ); assert.strictEqual(objectLockValidationError.is.InvalidArgument, true); - assert.strictEqual(objectLockValidationError.description, - expectedError.description); + assert.strictEqual(objectLockValidationError.description, expectedError.description); }); it('should fail with invalid retention period header', () => { @@ -141,13 +170,10 @@ describe('objectLockHelpers: validateHeaders', () => { 'x-amz-object-lock-retain-until-date': '2050-10-12', 'x-amz-object-lock-mode': 'Governance', }; - const objectLockValidationError - = validateHeaders(bucketInfo, headers, log); - const expectedError = errorInstances.InvalidArgument.customizeDescription( - 'Unknown wormMode directive'); + const objectLockValidationError = validateHeaders(bucketInfo, headers, log); + const expectedError = errorInstances.InvalidArgument.customizeDescription('Unknown wormMode directive'); assert.strictEqual(objectLockValidationError.is.InvalidArgument, true); - assert.strictEqual(objectLockValidationError.description, - expectedError.description); + assert.strictEqual(objectLockValidationError.description, expectedError.description); }); }); @@ -158,11 +184,9 @@ describe('objectLockHelpers: calculateRetainUntilDate', () => { days: 90, }; const date = moment(); - const expectedRetainUntilDate - = date.add(mockConfigWithDays.days, 'days'); + const expectedRetainUntilDate = date.add(mockConfigWithDays.days, 'days'); const retainUntilDate = calculateRetainUntilDate(mockConfigWithDays); - assert.strictEqual(retainUntilDate.slice(0, 16), - expectedRetainUntilDate.toISOString().slice(0, 16)); + assert.strictEqual(retainUntilDate.slice(0, 16), expectedRetainUntilDate.toISOString().slice(0, 16)); }); it('should calculate retainUntilDate for config with years', () => { @@ -171,11 +195,9 @@ describe('objectLockHelpers: calculateRetainUntilDate', () => { years: 3, }; const date = moment(); - const expectedRetainUntilDate - = date.add(mockConfigWithYears.years * 365, 'days'); + const expectedRetainUntilDate = date.add(mockConfigWithYears.years * 365, 'days'); const retainUntilDate = calculateRetainUntilDate(mockConfigWithYears); - assert.strictEqual(retainUntilDate.slice(0, 16), - expectedRetainUntilDate.toISOString().slice(0, 16)); + assert.strictEqual(retainUntilDate.slice(0, 16), expectedRetainUntilDate.toISOString().slice(0, 16)); }); }); @@ -269,7 +291,6 @@ describe('objectLockHelpers: compareObjectLockInformation', () => { }); }); - const pastDate = moment().subtract(1, 'days'); const futureDate = moment().add(100, 'days'); @@ -608,46 +629,50 @@ describe('objectLockHelpers: ObjectLockInfo', () => { }); }); - describe('isExpired: ', () => isExpiredTestCases.forEach(testCase => { - const objLockInfo = new ObjectLockInfo({ date: testCase.date }); - 
it(testCase.desc, () => assert.strictEqual(objLockInfo.isExpired(), testCase.expected)); - })); - - describe('isLocked: ', () => isLockedTestCases.forEach(testCase => { - describe(`${testCase.desc}`, () => { - it(`should show policy as ${testCase.expected ? '' : 'not'} locked without legal hold`, () => { - const objLockInfo = new ObjectLockInfo(testCase.policy); - assert.strictEqual(objLockInfo.isLocked(), testCase.expected); - }); - - // legal hold should show as locked regardless of policy - it('should show policy as locked with legal hold', () => { - const policy = Object.assign({}, testCase.policy, { legalHold: true }); - const objLockInfo = new ObjectLockInfo(policy); - assert.strictEqual(objLockInfo.isLocked(), true); + describe('isExpired: ', () => + isExpiredTestCases.forEach(testCase => { + const objLockInfo = new ObjectLockInfo({ date: testCase.date }); + it(testCase.desc, () => assert.strictEqual(objLockInfo.isExpired(), testCase.expected)); + })); + + describe('isLocked: ', () => + isLockedTestCases.forEach(testCase => { + describe(`${testCase.desc}`, () => { + it(`should show policy as ${testCase.expected ? '' : 'not'} locked without legal hold`, () => { + const objLockInfo = new ObjectLockInfo(testCase.policy); + assert.strictEqual(objLockInfo.isLocked(), testCase.expected); + }); + + // legal hold should show as locked regardless of policy + it('should show policy as locked with legal hold', () => { + const policy = Object.assign({}, testCase.policy, { legalHold: true }); + const objLockInfo = new ObjectLockInfo(policy); + assert.strictEqual(objLockInfo.isLocked(), true); + }); }); - }); - })); + })); - describe('canModifyPolicy: ', () => policyChangeTestCases.forEach(testCase => { - describe(testCase.desc, () => { - const objLockInfo = new ObjectLockInfo(testCase.from); - it(`should ${testCase.allowed ? 'allow' : 'deny'} modifying the policy without bypass`, - () => assert.strictEqual(objLockInfo.canModifyPolicy(testCase.to), testCase.allowed)); + describe('canModifyPolicy: ', () => + policyChangeTestCases.forEach(testCase => { + describe(testCase.desc, () => { + const objLockInfo = new ObjectLockInfo(testCase.from); + it(`should ${testCase.allowed ? 'allow' : 'deny'} modifying the policy without bypass`, () => + assert.strictEqual(objLockInfo.canModifyPolicy(testCase.to), testCase.allowed)); - it(`should ${testCase.allowedWithBypass ? 'allow' : 'deny'} modifying the policy with bypass`, - () => assert.strictEqual(objLockInfo.canModifyPolicy(testCase.to, true), testCase.allowedWithBypass)); - }); - })); + it(`should ${testCase.allowedWithBypass ? 'allow' : 'deny'} modifying the policy with bypass`, () => + assert.strictEqual(objLockInfo.canModifyPolicy(testCase.to, true), testCase.allowedWithBypass)); + }); + })); - describe('canModifyObject: ', () => canModifyObjectTestCases.forEach(testCase => { - describe(testCase.desc, () => { - const objLockInfo = new ObjectLockInfo(testCase.policy); - it(`should ${testCase.allowed ? 'allow' : 'deny'} modifying object without bypass`, - () => assert.strictEqual(objLockInfo.canModifyObject(), testCase.allowed)); + describe('canModifyObject: ', () => + canModifyObjectTestCases.forEach(testCase => { + describe(testCase.desc, () => { + const objLockInfo = new ObjectLockInfo(testCase.policy); + it(`should ${testCase.allowed ? 'allow' : 'deny'} modifying object without bypass`, () => + assert.strictEqual(objLockInfo.canModifyObject(), testCase.allowed)); - it(`should ${testCase.allowedWithBypass ? 
'allow' : 'deny'} modifying object with bypass`, - () => assert.strictEqual(objLockInfo.canModifyObject(true), testCase.allowedWithBypass)); - }); - })); + it(`should ${testCase.allowedWithBypass ? 'allow' : 'deny'} modifying object with bypass`, () => + assert.strictEqual(objLockInfo.canModifyObject(true), testCase.allowedWithBypass)); + }); + })); }); diff --git a/tests/unit/api/apiUtils/quotas/quotaUtils.js b/tests/unit/api/apiUtils/quotas/quotaUtils.js index 9d0db41f07..a154706758 100644 --- a/tests/unit/api/apiUtils/quotas/quotaUtils.js +++ b/tests/unit/api/apiUtils/quotas/quotaUtils.js @@ -75,15 +75,13 @@ describe('validateQuotas (buckets)', () => { validateQuotas(request, mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => { assert.ifError(err); assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { action: 'objectPut', inflight: 1, - } - ), true); + }), + true + ); done(); }); }); @@ -102,15 +100,13 @@ describe('validateQuotas (buckets)', () => { assert.strictEqual(err.is.QuotaExceeded, true); assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1); assert.strictEqual(request.finalizerHooks.length, 1); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { action: 'objectPut', inflight: 1, - } - ), true); + }), + true + ); done(); }); }); @@ -128,15 +124,13 @@ describe('validateQuotas (buckets)', () => { validateQuotas(request, mockBucket, {}, ['objectDelete'], 'objectDelete', 0, false, mockLog, err => { assert.ifError(err); assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { action: 'objectDelete', inflight: 0, - } - ), true); + }), + true + ); done(); }); }); @@ -154,15 +148,13 @@ describe('validateQuotas (buckets)', () => { validateQuotas(request, mockBucket, {}, ['objectDelete'], 'objectDelete', -50, false, mockLog, err => { assert.ifError(err); assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { action: 'objectDelete', inflight: -50, - } - ), true); + }), + true + ); done(); }); }); @@ -180,15 +172,13 @@ describe('validateQuotas (buckets)', () => { validateQuotas(request, mockBucket, {}, ['objectDelete'], 'objectDeleteVersion', -50, false, mockLog, err => { assert.ifError(err); assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { action: 'objectDelete', inflight: -50, - } - ), true); 
+ }), + true + ); done(); }); }); @@ -206,15 +196,13 @@ describe('validateQuotas (buckets)', () => { validateQuotas(request, mockBucket, {}, ['objectDelete'], 'objectDelete', -5000, false, mockLog, err => { assert.ifError(err); assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { action: 'objectDelete', inflight: -5000, - } - ), true); + }), + true + ); done(); }); }); @@ -229,21 +217,28 @@ describe('validateQuotas (buckets)', () => { QuotaService._getLatestMetricsCallback.yields(null, result1); QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2); - validateQuotas(request, mockBucket, {}, ['objectRestore', 'objectPut'], 'objectRestore', - true, false, mockLog, err => { + validateQuotas( + request, + mockBucket, + {}, + ['objectRestore', 'objectPut'], + 'objectRestore', + true, + false, + mockLog, + err => { assert.ifError(err); assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { action: 'objectRestore', inflight: true, - } - ), true); + }), + true + ); done(); - }); + } + ); }); it('should not include the inflights in the request if they are disabled', done => { @@ -257,21 +252,28 @@ describe('validateQuotas (buckets)', () => { QuotaService._getLatestMetricsCallback.yields(null, result1); QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2); - validateQuotas(request, mockBucket, {}, ['objectRestore', 'objectPut'], 'objectRestore', - true, false, mockLog, err => { + validateQuotas( + request, + mockBucket, + {}, + ['objectRestore', 'objectPut'], + 'objectRestore', + true, + false, + mockLog, + err => { assert.ifError(err); assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { action: 'objectRestore', inflight: undefined, - } - ), true); - done(); - }); + }), + true + ); + done(); + } + ); }); it('should evaluate the quotas and not update the inflights when isStorageReserved is true', done => { @@ -284,21 +286,18 @@ describe('validateQuotas (buckets)', () => { QuotaService._getLatestMetricsCallback.yields(null, result1); QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2); - validateQuotas(request, mockBucket, {}, ['objectPut'], 'objectPut', - true, true, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { - action: 'objectPut', - inflight: 0, - } - ), true); - done(); - }); + validateQuotas(request, mockBucket, {}, ['objectPut'], 'objectPut', true, true, mockLog, err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); + assert.strictEqual( + 
QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { + action: 'objectPut', + inflight: 0, + }), + true + ); + done(); + }); }); it('should handle numbers above MAX_SAFE_INTEGER when quota is not exceeded', done => { @@ -308,23 +307,31 @@ describe('validateQuotas (buckets)', () => { }; QuotaService._getLatestMetricsCallback.yields(null, result1); - validateQuotas(request, { - ...mockBucket, - getQuota: () => 9007199254740993n, - }, {}, ['objectPut'], 'objectPut', 1, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { - action: 'objectPut', - inflight: 1, - }, - ), true); - done(); - }); + validateQuotas( + request, + { + ...mockBucket, + getQuota: () => 9007199254740993n, + }, + {}, + ['objectPut'], + 'objectPut', + 1, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { + action: 'objectPut', + inflight: 1, + }), + true + ); + done(); + } + ); }); it('should handle numbers above MAX_SAFE_INTEGER when quota is exceeded', done => { @@ -334,15 +341,25 @@ describe('validateQuotas (buckets)', () => { }; QuotaService._getLatestMetricsCallback.yields(null, result1); - validateQuotas(request, { - ...mockBucket, - getQuota: () => 9007199254740991n, - }, {}, ['objectPut'], 'objectPut', 1, false, mockLog, err => { - assert.strictEqual(err.is.QuotaExceeded, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(request.finalizerHooks.length, 1); - done(); - }); + validateQuotas( + request, + { + ...mockBucket, + getQuota: () => 9007199254740991n, + }, + {}, + ['objectPut'], + 'objectPut', + 1, + false, + mockLog, + err => { + assert.strictEqual(err.is.QuotaExceeded, true); + assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); + assert.strictEqual(request.finalizerHooks.length, 1); + done(); + } + ); }); it('should handle numbers above MAX_SAFE_INTEGER with disabled inflights when quota is not exceeded', done => { @@ -353,23 +370,31 @@ describe('validateQuotas (buckets)', () => { }; QuotaService._getLatestMetricsCallback.yields(null, result1); - validateQuotas(request, { - ...mockBucket, - getQuota: () => 9007199254740993n, - }, {}, ['objectPut'], 'objectPut', 1, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { - action: 'objectPut', - inflight: undefined, - }, - ), true); - done(); - }); + validateQuotas( + request, + { + ...mockBucket, + getQuota: () => 9007199254740993n, + }, + {}, + ['objectPut'], + 'objectPut', + 1, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { + action: 'objectPut', + inflight: undefined, + }), + true + ); + done(); + } + ); }); }); @@ -398,37 +423,67 @@ describe('validateQuotas (with accounts)', () => { }); it('should return null if quota is <= 0', done => { - 
validateQuotas(request, mockBucketNoQuota, { - account: 'test_1', - quota: 0n, - }, [], '', false, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false); - done(); - }); + validateQuotas( + request, + mockBucketNoQuota, + { + account: 'test_1', + quota: 0n, + }, + [], + '', + false, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false); + done(); + } + ); }); it('should not return null if bucket quota is <= 0 but account quota is > 0', done => { - validateQuotas(request, mockBucketNoQuota, { - account: 'test_1', - quota: 1000n, - }, [], '', false, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false); - done(); - }); + validateQuotas( + request, + mockBucketNoQuota, + { + account: 'test_1', + quota: 1000n, + }, + [], + '', + false, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false); + done(); + } + ); }); it('should return null if scuba is disabled', done => { QuotaService.enabled = false; - validateQuotas(request, mockBucket, { - account: 'test_1', - quota: 1000n, - }, [], '', false, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false); - done(); - }); + validateQuotas( + request, + mockBucket, + { + account: 'test_1', + quota: 1000n, + }, + [], + '', + false, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false); + done(); + } + ); }); it('should return null if metrics retrieval fails', done => { @@ -436,23 +491,31 @@ describe('validateQuotas (with accounts)', () => { const error = new Error('Failed to get metrics'); QuotaService._getLatestMetricsCallback.yields(error); - validateQuotas(request, mockBucket, { - account: 'test_1', - quota: 1000n, - }, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'bucket', - 'bucketName_1640995200000', - null, - { - action: 'objectPut', - inflight: 1, - } - ), true); - done(); - }); + validateQuotas( + request, + mockBucket, + { + account: 'test_1', + quota: 1000n, + }, + ['objectPut', 'getObject'], + 'objectPut', + 1, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('bucket', 'bucketName_1640995200000', null, { + action: 'objectPut', + inflight: 1, + }), + true + ); + done(); + } + ); }); it('should return errors.QuotaExceeded if quota is exceeded', done => { @@ -465,24 +528,32 @@ describe('validateQuotas (with accounts)', () => { QuotaService._getLatestMetricsCallback.yields(null, result1); QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2); - validateQuotas(request, mockBucketNoQuota, { - account: 'test_1', - quota: 100n, - }, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => { - assert.strictEqual(err.is.QuotaExceeded, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1); - assert.strictEqual(request.finalizerHooks.length, 1); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'account', 
- 'test_1', - null, - { - action: 'objectPut', - inflight: 1, - } - ), true); - done(); - }); + validateQuotas( + request, + mockBucketNoQuota, + { + account: 'test_1', + quota: 100n, + }, + ['objectPut', 'getObject'], + 'objectPut', + 1, + false, + mockLog, + err => { + assert.strictEqual(err.is.QuotaExceeded, true); + assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1); + assert.strictEqual(request.finalizerHooks.length, 1); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('account', 'test_1', null, { + action: 'objectPut', + inflight: 1, + }), + true + ); + done(); + } + ); }); it('should not return QuotaExceeded if the quotas are exceeded but operation is a delete', done => { @@ -495,23 +566,31 @@ describe('validateQuotas (with accounts)', () => { QuotaService._getLatestMetricsCallback.yields(null, result1); QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2); - validateQuotas(request, mockBucketNoQuota, { - account: 'test_1', - quota: 1000n, - }, ['objectDelete'], 'objectDelete', -50, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'account', - 'test_1', - null, - { - action: 'objectDelete', - inflight: -50, - } - ), true); - done(); - }); + validateQuotas( + request, + mockBucketNoQuota, + { + account: 'test_1', + quota: 1000n, + }, + ['objectDelete'], + 'objectDelete', + -50, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('account', 'test_1', null, { + action: 'objectDelete', + inflight: -50, + }), + true + ); + done(); + } + ); }); it('should decrease the inflights by deleting data, and go below 0 to unblock operations', done => { @@ -524,23 +603,31 @@ describe('validateQuotas (with accounts)', () => { QuotaService._getLatestMetricsCallback.yields(null, result1); QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2); - validateQuotas(request, mockBucketNoQuota, { - account: 'test_1', - quota: 1000n, - }, ['objectDelete'], 'objectDelete', -5000, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'account', - 'test_1', - null, - { - action: 'objectDelete', - inflight: -5000, - } - ), true); - done(); - }); + validateQuotas( + request, + mockBucketNoQuota, + { + account: 'test_1', + quota: 1000n, + }, + ['objectDelete'], + 'objectDelete', + -5000, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('account', 'test_1', null, { + action: 'objectDelete', + inflight: -5000, + }), + true + ); + done(); + } + ); }); it('should return null if quota is not exceeded', done => { @@ -553,23 +640,31 @@ describe('validateQuotas (with accounts)', () => { QuotaService._getLatestMetricsCallback.yields(null, result1); QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2); - validateQuotas(request, mockBucket, { - account: 'test_1', - quota: 1000n, - }, ['objectRestore', 'objectPut'], 'objectRestore', true, false, mockLog, err => { - assert.ifError(err); - 
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 4); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'account', - 'test_1', - null, - { - action: 'objectRestore', - inflight: true, - } - ), true); - done(); - }); + validateQuotas( + request, + mockBucket, + { + account: 'test_1', + quota: 1000n, + }, + ['objectRestore', 'objectPut'], + 'objectRestore', + true, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 4); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('account', 'test_1', null, { + action: 'objectRestore', + inflight: true, + }), + true + ); + done(); + } + ); }); it('should return quota exceeded if account and bucket quotas are different', done => { @@ -582,15 +677,25 @@ describe('validateQuotas (with accounts)', () => { QuotaService._getLatestMetricsCallback.yields(null, result1); QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2); - validateQuotas(request, mockBucket, { - account: 'test_1', - quota: 1000n, - }, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => { - assert.strictEqual(err.is.QuotaExceeded, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 2); - assert.strictEqual(request.finalizerHooks.length, 1); - done(); - }); + validateQuotas( + request, + mockBucket, + { + account: 'test_1', + quota: 1000n, + }, + ['objectPut', 'getObject'], + 'objectPut', + 1, + false, + mockLog, + err => { + assert.strictEqual(err.is.QuotaExceeded, true); + assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 2); + assert.strictEqual(request.finalizerHooks.length, 1); + done(); + } + ); }); it('should update the request with one function per action to clear quota updates', done => { @@ -603,23 +708,31 @@ describe('validateQuotas (with accounts)', () => { QuotaService._getLatestMetricsCallback.yields(null, result1); QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2); - validateQuotas(request, mockBucket, { - account: 'test_1', - quota: 1000n, - }, ['objectRestore', 'objectPut'], 'objectRestore', true, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 4); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'account', - 'test_1', - null, - { - action: 'objectRestore', - inflight: true, - } - ), true); - done(); - }); + validateQuotas( + request, + mockBucket, + { + account: 'test_1', + quota: 1000n, + }, + ['objectRestore', 'objectPut'], + 'objectRestore', + true, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 4); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('account', 'test_1', null, { + action: 'objectRestore', + inflight: true, + }), + true + ); + done(); + } + ); }); it('should evaluate the quotas and not update the inflights when isStorageReserved is true', done => { @@ -632,23 +745,31 @@ describe('validateQuotas (with accounts)', () => { QuotaService._getLatestMetricsCallback.yields(null, result1); QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2); - validateQuotas(request, mockBucket, { - account: 'test_1', - quota: 1000n, - }, ['objectPut'], 'objectPut', true, true, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true); - 
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'account', - 'test_1', - null, - { - action: 'objectPut', - inflight: 0, - } - ), true); - done(); - }); + validateQuotas( + request, + mockBucket, + { + account: 'test_1', + quota: 1000n, + }, + ['objectPut'], + 'objectPut', + true, + true, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('account', 'test_1', null, { + action: 'objectPut', + inflight: 0, + }), + true + ); + done(); + } + ); }); it('should handle account numbers above MAX_SAFE_INTEGER when quota is not exceeded', done => { @@ -658,23 +779,31 @@ describe('validateQuotas (with accounts)', () => { }; QuotaService._getLatestMetricsCallback.yields(null, result1); - validateQuotas(request, mockBucketNoQuota, { - account: 'test_1', - quota: 9007199254740993n, - }, ['objectPut'], 'objectPut', 1, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'account', - 'test_1', - null, - { - action: 'objectPut', - inflight: 1, - }, - ), true); - done(); - }); + validateQuotas( + request, + mockBucketNoQuota, + { + account: 'test_1', + quota: 9007199254740993n, + }, + ['objectPut'], + 'objectPut', + 1, + false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('account', 'test_1', null, { + action: 'objectPut', + inflight: 1, + }), + true + ); + done(); + } + ); }); it('should handle account numbers above MAX_SAFE_INTEGER when quota is exceeded', done => { @@ -684,15 +813,25 @@ describe('validateQuotas (with accounts)', () => { }; QuotaService._getLatestMetricsCallback.yields(null, result1); - validateQuotas(request, mockBucketNoQuota, { - account: 'test_1', - quota: 9007199254740991n, - }, ['objectPut'], 'objectPut', 1, false, mockLog, err => { - assert.strictEqual(err.is.QuotaExceeded, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(request.finalizerHooks.length, 1); - done(); - }); + validateQuotas( + request, + mockBucketNoQuota, + { + account: 'test_1', + quota: 9007199254740991n, + }, + ['objectPut'], + 'objectPut', + 1, + false, + mockLog, + err => { + assert.strictEqual(err.is.QuotaExceeded, true); + assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); + assert.strictEqual(request.finalizerHooks.length, 1); + done(); + } + ); }); it('should handle account numbers above MAX_SAFE_INTEGER with disabled inflights', done => { @@ -703,23 +842,31 @@ describe('validateQuotas (with accounts)', () => { }; QuotaService._getLatestMetricsCallback.yields(null, result1); - validateQuotas(request, mockBucketNoQuota, { - account: 'test_1', - quota: 9007199254740993n, - }, ['objectPut'], 'objectPut', 1, false, mockLog, err => { - assert.ifError(err); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); - assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith( - 'account', - 'test_1', - null, - { - action: 'objectPut', - inflight: undefined, - }, - ), true); - done(); - }); + validateQuotas( + request, + mockBucketNoQuota, + { + account: 'test_1', + quota: 9007199254740993n, + }, + ['objectPut'], + 'objectPut', + 1, + 
false, + mockLog, + err => { + assert.ifError(err); + assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true); + assert.strictEqual( + QuotaService._getLatestMetricsCallback.calledWith('account', 'test_1', null, { + action: 'objectPut', + inflight: undefined, + }), + true + ); + done(); + } + ); }); }); @@ -746,7 +893,7 @@ describe('processBytesToWrite', () => { ...hotObject, dataStoreName: 'glacier', archive: { - archiveInfo: '{archiveID,archiveVersion}' + archiveInfo: '{archiveID,archiveVersion}', }, }; const restoringObject = { diff --git a/tests/unit/api/apiUtils/tagConditionKeys.js b/tests/unit/api/apiUtils/tagConditionKeys.js index 3a86628a7d..57205707b5 100644 --- a/tests/unit/api/apiUtils/tagConditionKeys.js +++ b/tests/unit/api/apiUtils/tagConditionKeys.js @@ -8,8 +8,11 @@ const { TaggingConfigTester, createRequestContext, } = require('../../helpers'); -const { tagConditionKeyAuth, updateRequestContextsWithTags, makeTagQuery } = - require('../../../../lib/api/apiUtils/authorization/tagConditionKeys'); +const { + tagConditionKeyAuth, + updateRequestContextsWithTags, + makeTagQuery, +} = require('../../../../lib/api/apiUtils/authorization/tagConditionKeys'); const { bucketPut } = require('../../../../lib/api/bucketPut'); const objectPut = require('../../../../lib/api/objectPut'); @@ -29,21 +32,22 @@ const bucketPutReq = { const taggingUtil = new TaggingConfigTester(); -const objectPutReq = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-tagging': makeTagQuery(taggingUtil.getTags()), +const objectPutReq = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-tagging': makeTagQuery(taggingUtil.getTags()), + }, + url: `/${bucketName}/${objectKey}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', }, - url: `/${bucketName}/${objectKey}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', -}, postBody); + postBody +); -const objectPutRequestContexts = [ - createRequestContext('objectPut', objectPutReq), -]; +const objectPutRequestContexts = [createRequestContext('objectPut', objectPutReq)]; const objectGetReq = { bucketName, @@ -87,8 +91,7 @@ describe('updateRequestContextsWithTags', () => { updateRequestContextsWithTags(objectPutReq, objectPutRequestContexts, 'objectPut', log, err => { assert.ifError(err); assert(objectPutRequestContexts[0].getNeedTagEval()); - assert.strictEqual(objectPutRequestContexts[0].getRequestObjTags(), - makeTagQuery(taggingUtil.getTags())); + assert.strictEqual(objectPutRequestContexts[0].getRequestObjTags(), makeTagQuery(taggingUtil.getTags())); assert.strictEqual(objectPutRequestContexts[0].getExistingObjTag(), null); done(); }); @@ -97,14 +100,12 @@ describe('updateRequestContextsWithTags', () => { it('should update multiple request contexts with existing object tags', done => { objectPut(authInfo, objectPutReq, 'foobar', log, err => { assert.ifError(err); - updateRequestContextsWithTags(objectGetReq, objectGetRequestContexts, 'objectGet', log, - err => { + updateRequestContextsWithTags(objectGetReq, objectGetRequestContexts, 'objectGet', log, err => { assert.ifError(err); // FIXME introduced by CLDSRV-256, this syntax should be allowed by the linter for (const requestContext of objectGetRequestContexts) { assert(requestContext.getNeedTagEval()); - assert.strictEqual(requestContext.getExistingObjTag(), - makeTagQuery(taggingUtil.getTags())); + assert.strictEqual(requestContext.getExistingObjTag(), 
makeTagQuery(taggingUtil.getTags())); assert.strictEqual(requestContext.getRequestObjTags(), null); } done(); diff --git a/tests/unit/api/apiUtils/validateChecksumHeaders.js b/tests/unit/api/apiUtils/validateChecksumHeaders.js index 6b1f7dbbf6..b1abd6709c 100644 --- a/tests/unit/api/apiUtils/validateChecksumHeaders.js +++ b/tests/unit/api/apiUtils/validateChecksumHeaders.js @@ -55,7 +55,6 @@ unsupportedSignatureChecksums.forEach(checksum => { }); }); - describe('validateChecksumHeaders', () => { passingCases.forEach(testCase => { it(testCase.description, () => { diff --git a/tests/unit/api/apiUtils/versioning.js b/tests/unit/api/apiUtils/versioning.js index 1edea6d0b0..177d3f6e52 100644 --- a/tests/unit/api/apiUtils/versioning.js +++ b/tests/unit/api/apiUtils/versioning.js @@ -6,11 +6,13 @@ const INF_VID = versioning.VersionID.getInfVid(config.replicationGroupId); const { scaledMsPerDay } = config.getTimeOptions(); const sinon = require('sinon'); -const { processVersioningState, getMasterState, - getVersionSpecificMetadataOptions, - preprocessingVersioningDelete, - overwritingVersioning } = - require('../../../../lib/api/apiUtils/object/versioning'); +const { + processVersioningState, + getMasterState, + getVersionSpecificMetadataOptions, + preprocessingVersioningDelete, + overwritingVersioning, +} = require('../../../../lib/api/apiUtils/object/versioning'); describe('versioning helpers', () => { describe('getMasterState+processVersioningState', () => { @@ -518,17 +520,22 @@ describe('versioning helpers', () => { }, ].forEach(testCase => [false, true].forEach(nullVersionCompatMode => - ['Enabled', 'Suspended'].forEach(versioningStatus => it( - `${testCase.description}${nullVersionCompatMode ? ' (null compat)' : ''}` + - `, versioning Status=${versioningStatus}`, - () => { - const mst = getMasterState(testCase.objMD); - const res = processVersioningState(mst, versioningStatus, nullVersionCompatMode); - const resultName = `versioning${versioningStatus}` + - `${nullVersionCompatMode ? 'Compat' : ''}ExpectedRes`; - const expectedRes = testCase[resultName]; - assert.deepStrictEqual(res, expectedRes); - })))); + ['Enabled', 'Suspended'].forEach(versioningStatus => + it( + `${testCase.description}${nullVersionCompatMode ? ' (null compat)' : ''}` + + `, versioning Status=${versioningStatus}`, + () => { + const mst = getMasterState(testCase.objMD); + const res = processVersioningState(mst, versioningStatus, nullVersionCompatMode); + const resultName = + `versioning${versioningStatus}` + `${nullVersionCompatMode ? 'Compat' : ''}ExpectedRes`; + const expectedRes = testCase[resultName]; + assert.deepStrictEqual(res, expectedRes); + } + ) + ) + ) + ); }); describe('getVersionSpecificMetadataOptions', () => { @@ -583,14 +590,13 @@ describe('versioning helpers', () => { }, ].forEach(testCase => [false, true].forEach(nullVersionCompatMode => - it(`${testCase.description}${nullVersionCompatMode ? ' (null compat)' : ''}`, - () => { - const options = getVersionSpecificMetadataOptions( - testCase.objMD, nullVersionCompatMode); - const expectedResAttr = nullVersionCompatMode ? - 'expectedResCompat' : 'expectedRes'; + it(`${testCase.description}${nullVersionCompatMode ? ' (null compat)' : ''}`, () => { + const options = getVersionSpecificMetadataOptions(testCase.objMD, nullVersionCompatMode); + const expectedResAttr = nullVersionCompatMode ? 
'expectedResCompat' : 'expectedRes'; assert.deepStrictEqual(options, testCase[expectedResAttr]); - }))); + }) + ) + ); }); describe('preprocessingVersioningDelete', () => { @@ -669,24 +675,28 @@ describe('versioning helpers', () => { }, ].forEach(testCase => [false, true].forEach(nullVersionCompatMode => - it(`${testCase.description}${nullVersionCompatMode ? ' (null compat)' : ''}`, - () => { + it(`${testCase.description}${nullVersionCompatMode ? ' (null compat)' : ''}`, () => { const mockBucketMD = { getVersioningConfiguration: () => ({ Status: 'Enabled' }), }; const options = preprocessingVersioningDelete( - 'foobucket', mockBucketMD, testCase.objMD, testCase.reqVersionId, - nullVersionCompatMode); - const expectedResAttr = nullVersionCompatMode ? - 'expectedResCompat' : 'expectedRes'; + 'foobucket', + mockBucketMD, + testCase.objMD, + testCase.reqVersionId, + nullVersionCompatMode + ); + const expectedResAttr = nullVersionCompatMode ? 'expectedResCompat' : 'expectedRes'; assert.deepStrictEqual(options, testCase[expectedResAttr]); - }))); + }) + ) + ); }); describe('overwritingVersioning', () => { const days = 3; const archiveInfo = { - 'archiveID': '126783123678', + archiveID: '126783123678', }; const now = Date.now(); let clock; @@ -702,70 +712,70 @@ describe('versioning helpers', () => { [ { description: 'Should update archive with restore infos', - objMD: { - 'versionId': '2345678', + objMD: { + versionId: '2345678', 'creation-time': now, 'last-modified': now, - 'originOp': 's3:PutObject', + originOp: 's3:PutObject', 'x-amz-storage-class': 'cold-location', - 'archive': { - 'restoreRequestedDays': days, - 'restoreRequestedAt': now, - archiveInfo - } + archive: { + restoreRequestedDays: days, + restoreRequestedAt: now, + archiveInfo, + }, }, expectedRes: { - 'creationTime': now, - 'lastModifiedDate': now, - 'updateMicroVersionId': true, - 'originOp': 's3:ObjectRestore:Completed', - 'taggingCopy': undefined, - 'amzStorageClass': 'cold-location', - 'archive': { + creationTime: now, + lastModifiedDate: now, + updateMicroVersionId: true, + originOp: 's3:ObjectRestore:Completed', + taggingCopy: undefined, + amzStorageClass: 'cold-location', + archive: { archiveInfo, - 'restoreRequestedDays': 3, - 'restoreRequestedAt': now, - 'restoreCompletedAt': new Date(now), - 'restoreWillExpireAt': new Date(now + (days * scaledMsPerDay)), - } - } + restoreRequestedDays: 3, + restoreRequestedAt: now, + restoreCompletedAt: new Date(now), + restoreWillExpireAt: new Date(now + days * scaledMsPerDay), + }, + }, }, { description: 'Should keep user mds and tags', hasUserMD: true, objMD: { - 'versionId': '2345678', + versionId: '2345678', 'creation-time': now, 'last-modified': now, - 'originOp': 's3:PutObject', + originOp: 's3:PutObject', 'x-amz-meta-test': 'test', 'x-amz-meta-test2': 'test2', - 'tags': { 'testtag': 'testtag', 'testtag2': 'testtag2' }, + tags: { testtag: 'testtag', testtag2: 'testtag2' }, 'x-amz-storage-class': 'cold-location', - 'archive': { - 'restoreRequestedDays': days, - 'restoreRequestedAt': now, - archiveInfo - } + archive: { + restoreRequestedDays: days, + restoreRequestedAt: now, + archiveInfo, + }, }, expectedRes: { - 'creationTime': now, - 'lastModifiedDate': now, - 'updateMicroVersionId': true, - 'originOp': 's3:ObjectRestore:Completed', - 'metaHeaders': { + creationTime: now, + lastModifiedDate: now, + updateMicroVersionId: true, + originOp: 's3:ObjectRestore:Completed', + metaHeaders: { 'x-amz-meta-test': 'test', 'x-amz-meta-test2': 'test2', }, - 'taggingCopy': { 'testtag': 
'testtag', 'testtag2': 'testtag2' }, - 'amzStorageClass': 'cold-location', - 'archive': { + taggingCopy: { testtag: 'testtag', testtag2: 'testtag2' }, + amzStorageClass: 'cold-location', + archive: { archiveInfo, - 'restoreRequestedDays': days, - 'restoreRequestedAt': now, - 'restoreCompletedAt': new Date(now), - 'restoreWillExpireAt': new Date(now + (days * scaledMsPerDay)), - } + restoreRequestedDays: days, + restoreRequestedAt: now, + restoreCompletedAt: new Date(now), + restoreWillExpireAt: new Date(now + days * scaledMsPerDay), + }, }, }, { @@ -773,257 +783,243 @@ describe('versioning helpers', () => { objMD: { 'creation-time': now, 'last-modified': now, - 'originOp': 's3:PutObject', - 'nullVersionId': 'vnull', - 'isNull': true, + originOp: 's3:PutObject', + nullVersionId: 'vnull', + isNull: true, 'x-amz-storage-class': 'cold-location', - 'archive': { - 'restoreRequestedDays': days, - 'restoreRequestedAt': now, - archiveInfo - } + archive: { + restoreRequestedDays: days, + restoreRequestedAt: now, + archiveInfo, + }, }, expectedRes: { - 'creationTime': now, - 'lastModifiedDate': now, - 'updateMicroVersionId': true, - 'originOp': 's3:ObjectRestore:Completed', - 'amzStorageClass': 'cold-location', - 'taggingCopy': undefined, - 'archive': { + creationTime: now, + lastModifiedDate: now, + updateMicroVersionId: true, + originOp: 's3:ObjectRestore:Completed', + amzStorageClass: 'cold-location', + taggingCopy: undefined, + archive: { archiveInfo, - 'restoreRequestedDays': 3, - 'restoreRequestedAt': now, - 'restoreCompletedAt': new Date(now), - 'restoreWillExpireAt': new Date(now + (days * scaledMsPerDay)), - } - } + restoreRequestedDays: 3, + restoreRequestedAt: now, + restoreCompletedAt: new Date(now), + restoreWillExpireAt: new Date(now + days * scaledMsPerDay), + }, + }, }, { description: 'Should not keep x-amz-meta-scal-s3-restore-attempt user MD', hasUserMD: true, objMD: { - 'versionId': '2345678', + versionId: '2345678', 'creation-time': now, 'last-modified': now, - 'originOp': 's3:PutObject', + originOp: 's3:PutObject', 'x-amz-meta-test': 'test', 'x-amz-meta-scal-s3-restore-attempt': 14, 'x-amz-storage-class': 'cold-location', - 'archive': { - 'restoreRequestedDays': days, - 'restoreRequestedAt': now, - archiveInfo - } + archive: { + restoreRequestedDays: days, + restoreRequestedAt: now, + archiveInfo, + }, }, expectedRes: { - 'creationTime': now, - 'lastModifiedDate': now, - 'updateMicroVersionId': true, - 'originOp': 's3:ObjectRestore:Completed', - 'metaHeaders': { + creationTime: now, + lastModifiedDate: now, + updateMicroVersionId: true, + originOp: 's3:ObjectRestore:Completed', + metaHeaders: { 'x-amz-meta-test': 'test', }, - 'taggingCopy': undefined, - 'amzStorageClass': 'cold-location', - 'archive': { + taggingCopy: undefined, + amzStorageClass: 'cold-location', + archive: { archiveInfo, - 'restoreRequestedDays': 3, - 'restoreRequestedAt': now, - 'restoreCompletedAt': new Date(now), - 'restoreWillExpireAt': new Date(now + (days * scaledMsPerDay)), - } - } + restoreRequestedDays: 3, + restoreRequestedAt: now, + restoreCompletedAt: new Date(now), + restoreWillExpireAt: new Date(now + days * scaledMsPerDay), + }, + }, }, { description: 'Should keep replication infos', objMD: { - 'versionId': '2345678', - 'creation-time': now, - 'last-modified': now, - 'originOp': 's3:PutObject', - 'x-amz-storage-class': 'cold-location', - 'replicationInfo': { - 'status': 'COMPLETED', - 'backends': [ - { - 'site': 'azure-blob', - 'status': 'COMPLETED', - 'dataStoreVersionId': '' - } - ], - 
'content': [ - 'DATA', - 'METADATA' - ], - 'destination': 'arn:aws:s3:::replicate-cold', - 'storageClass': 'azure-blob', - 'role': 'arn:aws:iam::root:role/s3-replication-role', - 'storageType': 'azure', - 'dataStoreVersionId': '', - }, - archive: { - 'restoreRequestedDays': days, - 'restoreRequestedAt': now, - archiveInfo - } + versionId: '2345678', + 'creation-time': now, + 'last-modified': now, + originOp: 's3:PutObject', + 'x-amz-storage-class': 'cold-location', + replicationInfo: { + status: 'COMPLETED', + backends: [ + { + site: 'azure-blob', + status: 'COMPLETED', + dataStoreVersionId: '', + }, + ], + content: ['DATA', 'METADATA'], + destination: 'arn:aws:s3:::replicate-cold', + storageClass: 'azure-blob', + role: 'arn:aws:iam::root:role/s3-replication-role', + storageType: 'azure', + dataStoreVersionId: '', + }, + archive: { + restoreRequestedDays: days, + restoreRequestedAt: now, + archiveInfo, + }, }, expectedRes: { - 'creationTime': now, - 'lastModifiedDate': now, - 'updateMicroVersionId': true, - 'originOp': 's3:ObjectRestore:Completed', - 'amzStorageClass': 'cold-location', - 'replicationInfo': { - 'status': 'COMPLETED', - 'backends': [ + creationTime: now, + lastModifiedDate: now, + updateMicroVersionId: true, + originOp: 's3:ObjectRestore:Completed', + amzStorageClass: 'cold-location', + replicationInfo: { + status: 'COMPLETED', + backends: [ { - 'site': 'azure-blob', - 'status': 'COMPLETED', - 'dataStoreVersionId': '' - } + site: 'azure-blob', + status: 'COMPLETED', + dataStoreVersionId: '', + }, ], - 'content': [ - 'DATA', - 'METADATA' - ], - 'destination': 'arn:aws:s3:::replicate-cold', - 'storageClass': 'azure-blob', - 'role': 'arn:aws:iam::root:role/s3-replication-role', - 'storageType': 'azure', - 'dataStoreVersionId': '', - }, - 'taggingCopy': undefined, + content: ['DATA', 'METADATA'], + destination: 'arn:aws:s3:::replicate-cold', + storageClass: 'azure-blob', + role: 'arn:aws:iam::root:role/s3-replication-role', + storageType: 'azure', + dataStoreVersionId: '', + }, + taggingCopy: undefined, archive: { archiveInfo, - 'restoreRequestedDays': 3, - 'restoreRequestedAt': now, - 'restoreCompletedAt': new Date(now), - 'restoreWillExpireAt': new Date(now + (days * scaledMsPerDay)), - } - } + restoreRequestedDays: 3, + restoreRequestedAt: now, + restoreCompletedAt: new Date(now), + restoreWillExpireAt: new Date(now + days * scaledMsPerDay), + }, + }, }, { description: 'Should keep legalHold', objMD: { - 'versionId': '2345678', - 'creation-time': now, - 'last-modified': now, - 'originOp': 's3:PutObject', - 'legalHold': true, - 'x-amz-storage-class': 'cold-location', - 'archive': { - 'restoreRequestedDays': days, - 'restoreRequestedAt': now, - archiveInfo - } + versionId: '2345678', + 'creation-time': now, + 'last-modified': now, + originOp: 's3:PutObject', + legalHold: true, + 'x-amz-storage-class': 'cold-location', + archive: { + restoreRequestedDays: days, + restoreRequestedAt: now, + archiveInfo, + }, }, expectedRes: { - 'creationTime': now, - 'lastModifiedDate': now, - 'updateMicroVersionId': true, - 'originOp': 's3:ObjectRestore:Completed', - 'legalHold': true, - 'amzStorageClass': 'cold-location', - 'taggingCopy': undefined, - 'archive': { + creationTime: now, + lastModifiedDate: now, + updateMicroVersionId: true, + originOp: 's3:ObjectRestore:Completed', + legalHold: true, + amzStorageClass: 'cold-location', + taggingCopy: undefined, + archive: { archiveInfo, - 'restoreRequestedDays': 3, - 'restoreRequestedAt': now, - 'restoreCompletedAt': new Date(now), - 
'restoreWillExpireAt': new Date(now + (days * scaledMsPerDay)), - } - } + restoreRequestedDays: 3, + restoreRequestedAt: now, + restoreCompletedAt: new Date(now), + restoreWillExpireAt: new Date(now + days * scaledMsPerDay), + }, + }, }, { description: 'Should keep ACLs', objMD: { - 'versionId': '2345678', - 'creation-time': now, - 'last-modified': now, - 'originOp': 's3:PutObject', - 'x-amz-storage-class': 'cold-location', - 'acl': { - 'Canned': '', - 'FULL_CONTROL': [ - '872c04772893deae2b48365752362cd92672eb80eb3deea50d89e834a10ce185' - ], - 'WRITE_ACP': [], - 'READ': [ - 'http://acs.amazonaws.com/groups/global/AllUsers' - ], - 'READ_ACP': [] - }, - 'archive': { - 'restoreRequestedDays': days, - 'restoreRequestedAt': now, - archiveInfo - } + versionId: '2345678', + 'creation-time': now, + 'last-modified': now, + originOp: 's3:PutObject', + 'x-amz-storage-class': 'cold-location', + acl: { + Canned: '', + FULL_CONTROL: ['872c04772893deae2b48365752362cd92672eb80eb3deea50d89e834a10ce185'], + WRITE_ACP: [], + READ: ['http://acs.amazonaws.com/groups/global/AllUsers'], + READ_ACP: [], + }, + archive: { + restoreRequestedDays: days, + restoreRequestedAt: now, + archiveInfo, + }, }, expectedRes: { - 'creationTime': now, - 'lastModifiedDate': now, - 'updateMicroVersionId': true, - 'originOp': 's3:ObjectRestore:Completed', - 'acl': { - 'Canned': '', - 'FULL_CONTROL': [ - '872c04772893deae2b48365752362cd92672eb80eb3deea50d89e834a10ce185' - ], - 'WRITE_ACP': [], - 'READ': [ - 'http://acs.amazonaws.com/groups/global/AllUsers' - ], - 'READ_ACP': [] - }, - 'taggingCopy': undefined, - 'amzStorageClass': 'cold-location', - 'archive': { + creationTime: now, + lastModifiedDate: now, + updateMicroVersionId: true, + originOp: 's3:ObjectRestore:Completed', + acl: { + Canned: '', + FULL_CONTROL: ['872c04772893deae2b48365752362cd92672eb80eb3deea50d89e834a10ce185'], + WRITE_ACP: [], + READ: ['http://acs.amazonaws.com/groups/global/AllUsers'], + READ_ACP: [], + }, + taggingCopy: undefined, + amzStorageClass: 'cold-location', + archive: { archiveInfo, - 'restoreRequestedDays': 3, - 'restoreRequestedAt': now, - 'restoreCompletedAt': new Date(now), - 'restoreWillExpireAt': new Date(now + (days * scaledMsPerDay)), - } + restoreRequestedDays: 3, + restoreRequestedAt: now, + restoreCompletedAt: new Date(now), + restoreWillExpireAt: new Date(now + days * scaledMsPerDay), + }, }, }, - { - description: 'Should keep contentMD5 of the original object', - objMD: { - 'versionId': '2345678', + { + description: 'Should keep contentMD5 of the original object', + objMD: { + versionId: '2345678', 'creation-time': now, 'last-modified': now, - 'originOp': 's3:PutObject', + originOp: 's3:PutObject', 'x-amz-storage-class': 'cold-location', 'content-md5': '123456789-5', - 'acl': {}, - 'archive': { - 'restoreRequestedDays': days, - 'restoreRequestedAt': now, - archiveInfo - } + acl: {}, + archive: { + restoreRequestedDays: days, + restoreRequestedAt: now, + archiveInfo, }, - metadataStoreParams: { - 'contentMD5': '987654321-3', - }, - expectedRes: { - 'creationTime': now, - 'lastModifiedDate': now, - 'updateMicroVersionId': true, - 'originOp': 's3:ObjectRestore:Completed', - 'contentMD5': '123456789-5', - 'restoredEtag': '987654321-3', - 'acl': {}, - 'taggingCopy': undefined, - 'amzStorageClass': 'cold-location', - 'archive': { - archiveInfo, - 'restoreRequestedDays': 3, - 'restoreRequestedAt': now, - 'restoreCompletedAt': new Date(now), - 'restoreWillExpireAt': new Date(now + (days * scaledMsPerDay)), - } - } + }, + 
metadataStoreParams: { + contentMD5: '987654321-3', + }, + expectedRes: { + creationTime: now, + lastModifiedDate: now, + updateMicroVersionId: true, + originOp: 's3:ObjectRestore:Completed', + contentMD5: '123456789-5', + restoredEtag: '987654321-3', + acl: {}, + taggingCopy: undefined, + amzStorageClass: 'cold-location', + archive: { + archiveInfo, + restoreRequestedDays: 3, + restoreRequestedAt: now, + restoreCompletedAt: new Date(now), + restoreWillExpireAt: new Date(now + days * scaledMsPerDay), + }, + }, }, ].forEach(testCase => { it(testCase.description, () => { diff --git a/tests/unit/api/bucketACLauth.js b/tests/unit/api/bucketACLauth.js index 125fed71d5..7e206cee53 100644 --- a/tests/unit/api/bucketACLauth.js +++ b/tests/unit/api/bucketACLauth.js @@ -1,8 +1,7 @@ const assert = require('assert'); const BucketInfo = require('arsenal').models.BucketInfo; const constants = require('../../../constants'); -const { isBucketAuthorized } - = require('../../../lib/api/apiUtils/authorization/permissionChecks'); +const { isBucketAuthorized } = require('../../../lib/api/apiUtils/authorization/permissionChecks'); const { DummyRequestLogger, makeAuthInfo } = require('../helpers'); const lifecycleServiceAccountId = '0123456789abcdef/lifecycle'; @@ -16,12 +15,10 @@ const ownerCanonicalId = authInfo.getCanonicalID(); const altAcctAuthInfo = makeAuthInfo(altAccessKey); const accountToVet = altAcctAuthInfo.getCanonicalID(); -const bucket = new BucketInfo('niftyBucket', ownerCanonicalId, - authInfo.getAccountDisplayName(), creationDate); +const bucket = new BucketInfo('niftyBucket', ownerCanonicalId, authInfo.getAccountDisplayName(), creationDate); const log = new DummyRequestLogger(); -describe('bucket authorization for bucketGet, bucketHead, ' + - 'objectGet, and objectHead', () => { +describe('bucket authorization for bucketGet, bucketHead, ' + 'objectGet, and objectHead', () => { // Reset the bucket ACLs afterEach(() => { bucket.setFullAcl({ @@ -44,70 +41,97 @@ describe('bucket authorization for bucketGet, bucketHead, ' + const orders = [ { - it: 'should allow access to bucket owner', canned: '', - id: ownerCanonicalId, response: trueArray, auth: authInfo, + it: 'should allow access to bucket owner', + canned: '', + id: ownerCanonicalId, + response: trueArray, + auth: authInfo, }, { - it: 'should allow access to user in bucket owner account', canned: '', - id: ownerCanonicalId, response: trueArray, auth: userAuthInfo, + it: 'should allow access to user in bucket owner account', + canned: '', + id: ownerCanonicalId, + response: trueArray, + auth: userAuthInfo, }, { it: 'should allow access to lifecycle service account', - canned: '', id: lifecycleServiceAccountId, response: trueArray, + canned: '', + id: lifecycleServiceAccountId, + response: trueArray, }, { - it: 'should allow public-user access for unknown ' + - 'service account and private canned ACL', - canned: '', id: unknownServiceAccountId, + it: 'should allow public-user access for unknown ' + 'service account and private canned ACL', + canned: '', + id: unknownServiceAccountId, response: falseArrayBucketTrueArrayObject, }, { it: 'should allow access to anyone if canned public-read ACL', - canned: 'public-read', id: accountToVet, response: trueArray, + canned: 'public-read', + id: accountToVet, + response: trueArray, auth: altAcctAuthInfo, }, { it: 'should allow access to anyone if canned public-read-write ACL', - canned: 'public-read-write', id: accountToVet, response: trueArray, + canned: 'public-read-write', + id: accountToVet, + 
response: trueArray, auth: altAcctAuthInfo, }, { - it: 'should not allow request on the bucket (bucketGet, bucketHead)' - + ' but should allow request on the object (objectGet, objectHead)' - + ' to public user if authenticated-read ACL', - canned: 'authenticated-read', id: constants.publicId, - response: falseArrayBucketTrueArrayObject, auth: altAcctAuthInfo, + it: + 'should not allow request on the bucket (bucketGet, bucketHead)' + + ' but should allow request on the object (objectGet, objectHead)' + + ' to public user if authenticated-read ACL', + canned: 'authenticated-read', + id: constants.publicId, + response: falseArrayBucketTrueArrayObject, + auth: altAcctAuthInfo, }, { - it: 'should allow access to any authenticated user if authenticated' - + '-read ACL', canned: 'authenticated-read', id: accountToVet, - response: trueArray, auth: altAcctAuthInfo, + it: 'should allow access to any authenticated user if authenticated' + '-read ACL', + canned: 'authenticated-read', + id: accountToVet, + response: trueArray, + auth: altAcctAuthInfo, }, { - it: 'should not allow request on the bucket (bucketGet, bucketHead)' - + ' but should allow request on the object (objectGet, objectHead)' - + ' to public user if private canned ACL', - canned: '', id: accountToVet, - response: falseArrayBucketTrueArrayObject, auth: altAcctAuthInfo, + it: + 'should not allow request on the bucket (bucketGet, bucketHead)' + + ' but should allow request on the object (objectGet, objectHead)' + + ' to public user if private canned ACL', + canned: '', + id: accountToVet, + response: falseArrayBucketTrueArrayObject, + auth: altAcctAuthInfo, }, { - it: 'should not allow request on the bucket (bucketGet, bucketHead)' - + ' but should allow request on the object (objectGet, objectHead)' - + ' to just any user if private canned ACL', - canned: '', id: accountToVet, - response: falseArrayBucketTrueArrayObject, auth: altAcctAuthInfo, + it: + 'should not allow request on the bucket (bucketGet, bucketHead)' + + ' but should allow request on the object (objectGet, objectHead)' + + ' to just any user if private canned ACL', + canned: '', + id: accountToVet, + response: falseArrayBucketTrueArrayObject, + auth: altAcctAuthInfo, }, { - it: 'should allow access to user if account was granted' - + ' FULL_CONTROL', - canned: '', id: accountToVet, response: trueArray, - aclParam: ['FULL_CONTROL', accountToVet], auth: altAcctAuthInfo, + it: 'should allow access to user if account was granted' + ' FULL_CONTROL', + canned: '', + id: accountToVet, + response: trueArray, + aclParam: ['FULL_CONTROL', accountToVet], + auth: altAcctAuthInfo, }, { - it: 'should not allow access to just any user if private' - + ' canned ACL', - canned: '', id: accountToVet, response: trueArray, - aclParam: ['READ', accountToVet], auth: altAcctAuthInfo, + it: 'should not allow access to just any user if private' + ' canned ACL', + canned: '', + id: accountToVet, + response: trueArray, + aclParam: ['READ', accountToVet], + auth: altAcctAuthInfo, }, ]; @@ -117,8 +141,7 @@ describe('bucket authorization for bucketGet, bucketHead, ' + bucket.setSpecificAcl(value.aclParam[1], value.aclParam[0]); } bucket.setCannedAcl(value.canned); - const results = requestTypes.map(type => - isBucketAuthorized(bucket, type, value.id, value.auth, log)); + const results = requestTypes.map(type => isBucketAuthorized(bucket, type, value.id, value.auth, log)); assert.deepStrictEqual(results, value.response); done(); }); @@ -139,45 +162,45 @@ describe('bucket authorization for 
bucketGetACL', () => { }); it('should allow access to bucket owner', () => { - const result = isBucketAuthorized(bucket, 'bucketGetACL', - ownerCanonicalId, authInfo); + const result = isBucketAuthorized(bucket, 'bucketGetACL', ownerCanonicalId, authInfo); assert.strictEqual(result, true); }); it('should allow access to user in bucket owner account', () => { - const result = isBucketAuthorized(bucket, 'bucketGetACL', - ownerCanonicalId, userAuthInfo); + const result = isBucketAuthorized(bucket, 'bucketGetACL', ownerCanonicalId, userAuthInfo); assert.strictEqual(result, true); }); const orders = [ { it: 'log group only if canned log-delivery-write acl', - id: constants.logId, canned: 'log-delivery-write', auth: null, + id: constants.logId, + canned: 'log-delivery-write', + auth: null, }, { it: 'account only if account was granted FULL_CONTROL right', - id: accountToVet, aclParam: ['FULL_CONTROL', accountToVet], + id: accountToVet, + aclParam: ['FULL_CONTROL', accountToVet], auth: altAcctAuthInfo, }, { it: 'account only if account was granted READ_ACP right', - id: accountToVet, aclParam: ['READ_ACP', accountToVet], + id: accountToVet, + aclParam: ['READ_ACP', accountToVet], auth: altAcctAuthInfo, }, ]; orders.forEach(value => { it(`should allow access to ${value.it}`, done => { - const noAuthResult = isBucketAuthorized(bucket, 'bucketGetACL', - value.id); + const noAuthResult = isBucketAuthorized(bucket, 'bucketGetACL', value.id); assert.strictEqual(noAuthResult, false); if (value.aclParam) { bucket.setSpecificAcl(value.aclParam[1], value.aclParam[0]); } else if (value.canned) { bucket.setCannedAcl(value.canned); } - const authorizedResult = isBucketAuthorized(bucket, 'bucketGetACL', - value.id, value.auth); + const authorizedResult = isBucketAuthorized(bucket, 'bucketGetACL', value.id, value.auth); assert.strictEqual(authorizedResult, true); done(); }); @@ -198,27 +221,22 @@ describe('bucket authorization for bucketPutACL', () => { }); it('should allow access to bucket owner', () => { - const result = isBucketAuthorized(bucket, 'bucketPutACL', - ownerCanonicalId, authInfo); + const result = isBucketAuthorized(bucket, 'bucketPutACL', ownerCanonicalId, authInfo); assert.strictEqual(result, true); }); it('should allow access to user in bucket owner account', () => { - const result = isBucketAuthorized(bucket, 'bucketPutACL', - ownerCanonicalId, userAuthInfo); + const result = isBucketAuthorized(bucket, 'bucketPutACL', ownerCanonicalId, userAuthInfo); assert.strictEqual(result, true); }); const orders = ['FULL_CONTROL', 'WRITE_ACP']; orders.forEach(value => { - it('should allow access to account if ' + - `account was granted ${value} right`, done => { - const noAuthResult = isBucketAuthorized(bucket, 'bucketPutACL', - accountToVet, altAcctAuthInfo); + it('should allow access to account if ' + `account was granted ${value} right`, done => { + const noAuthResult = isBucketAuthorized(bucket, 'bucketPutACL', accountToVet, altAcctAuthInfo); assert.strictEqual(noAuthResult, false); bucket.setSpecificAcl(accountToVet, value); - const authorizedResult = isBucketAuthorized(bucket, 'bucketPutACL', - accountToVet, altAcctAuthInfo); + const authorizedResult = isBucketAuthorized(bucket, 'bucketPutACL', accountToVet, altAcctAuthInfo); assert.strictEqual(authorizedResult, true); done(); }); @@ -239,26 +257,28 @@ describe('bucket authorization for bucketOwnerAction', () => { }); it('should allow access to bucket owner', () => { - const result = isBucketAuthorized(bucket, 'bucketDeleteCors', - 
ownerCanonicalId, authInfo); + const result = isBucketAuthorized(bucket, 'bucketDeleteCors', ownerCanonicalId, authInfo); assert.strictEqual(result, true); }); it('should allow access to user in bucket owner account', () => { - const result = isBucketAuthorized(bucket, 'bucketDeleteCors', - ownerCanonicalId, userAuthInfo); + const result = isBucketAuthorized(bucket, 'bucketDeleteCors', ownerCanonicalId, userAuthInfo); assert.strictEqual(result, true); }); const orders = [ { - it: 'other account (even if other account has FULL_CONTROL rights' - + ' in bucket)', id: accountToVet, canned: '', - aclParam: ['FULL_CONTROL', accountToVet], auth: altAcctAuthInfo, + it: 'other account (even if other account has FULL_CONTROL rights' + ' in bucket)', + id: accountToVet, + canned: '', + aclParam: ['FULL_CONTROL', accountToVet], + auth: altAcctAuthInfo, }, { it: 'public user (even if bucket is public read write)', - id: constants.publicId, canned: 'public-read-write', auth: altAcctAuthInfo, + id: constants.publicId, + canned: 'public-read-write', + auth: altAcctAuthInfo, }, ]; orders.forEach(value => { @@ -267,8 +287,7 @@ describe('bucket authorization for bucketOwnerAction', () => { bucket.setSpecificAcl(value.aclParam[1], value.aclParam[0]); } bucket.setCannedAcl(value.canned); - const result = isBucketAuthorized(bucket, 'bucketDeleteCors', - value.id, value.auth); + const result = isBucketAuthorized(bucket, 'bucketDeleteCors', value.id, value.auth); assert.strictEqual(result, false); done(); }); @@ -289,26 +308,28 @@ describe('bucket authorization for bucketDelete', () => { }); it('should allow access to bucket owner', () => { - const result = isBucketAuthorized(bucket, 'bucketDelete', - ownerCanonicalId, authInfo); + const result = isBucketAuthorized(bucket, 'bucketDelete', ownerCanonicalId, authInfo); assert.strictEqual(result, true); }); it('should allow access to user in bucket owner account', () => { - const result = isBucketAuthorized(bucket, 'bucketDelete', - ownerCanonicalId, userAuthInfo); + const result = isBucketAuthorized(bucket, 'bucketDelete', ownerCanonicalId, userAuthInfo); assert.strictEqual(result, true); }); const orders = [ { - it: 'other account (even if other account has FULL_CONTROL rights ' - + 'in bucket)', id: accountToVet, canned: '', - aclParam: ['FULL_CONTROL', accountToVet], auth: altAcctAuthInfo, + it: 'other account (even if other account has FULL_CONTROL rights ' + 'in bucket)', + id: accountToVet, + canned: '', + aclParam: ['FULL_CONTROL', accountToVet], + auth: altAcctAuthInfo, }, { it: 'public user (even if bucket is public read write)', - id: constants.publicId, canned: 'public-read-write', auth: null, + id: constants.publicId, + canned: 'public-read-write', + auth: null, }, ]; orders.forEach(value => { @@ -340,14 +361,12 @@ describe('bucket authorization for objectDelete and objectPut', () => { const requestTypes = ['objectDelete', 'objectPut']; it('should allow access to bucket owner', () => { - const results = requestTypes.map(type => - isBucketAuthorized(bucket, type, ownerCanonicalId, authInfo)); + const results = requestTypes.map(type => isBucketAuthorized(bucket, type, ownerCanonicalId, authInfo)); assert.deepStrictEqual(results, [true, true]); }); it('should allow access to user in bucket owner account', () => { - const results = requestTypes.map(type => - isBucketAuthorized(bucket, type, ownerCanonicalId, userAuthInfo)); + const results = requestTypes.map(type => isBucketAuthorized(bucket, type, ownerCanonicalId, userAuthInfo)); 
assert.deepStrictEqual(results, [true, true]); }); @@ -355,45 +374,50 @@ describe('bucket authorization for objectDelete and objectPut', () => { // NOTE objectPut is not needed for lifecycle but still // allowed, we would want more fine-grained implementation of // ACLs for service accounts later. - const results = requestTypes.map(type => - isBucketAuthorized(bucket, type, lifecycleServiceAccountId)); + const results = requestTypes.map(type => isBucketAuthorized(bucket, type, lifecycleServiceAccountId)); assert.deepStrictEqual(results, [true, true]); }); it('should deny access to unknown service account', () => { - const results = requestTypes.map(type => - isBucketAuthorized(bucket, type, unknownServiceAccountId)); + const results = requestTypes.map(type => isBucketAuthorized(bucket, type, unknownServiceAccountId)); assert.deepStrictEqual(results, [false, false]); }); const orders = [ { it: 'anyone if canned public-read-write ACL', - canned: 'public-read-write', id: constants.publicId, + canned: 'public-read-write', + id: constants.publicId, response: [true, true], }, { - it: 'user if account was granted FULL_CONTROL', canned: '', - id: accountToVet, response: [false, false], - aclParam: ['FULL_CONTROL', accountToVet], auth: altAcctAuthInfo, + it: 'user if account was granted FULL_CONTROL', + canned: '', + id: accountToVet, + response: [false, false], + aclParam: ['FULL_CONTROL', accountToVet], + auth: altAcctAuthInfo, }, { - it: 'user if account was granted WRITE right', canned: '', - id: accountToVet, response: [false, false], - aclParam: ['WRITE', accountToVet], auth: altAcctAuthInfo, + it: 'user if account was granted WRITE right', + canned: '', + id: accountToVet, + response: [false, false], + aclParam: ['WRITE', accountToVet], + auth: altAcctAuthInfo, }, ]; orders.forEach(value => { it(`should allow access to ${value.it}`, done => { bucket.setCannedAcl(value.canned); - const noAuthResults = requestTypes.map(type => - isBucketAuthorized(bucket, type, value.id, value.auth)); + const noAuthResults = requestTypes.map(type => isBucketAuthorized(bucket, type, value.id, value.auth)); assert.deepStrictEqual(noAuthResults, value.response); if (value.aclParam) { bucket.setSpecificAcl(value.aclParam[1], value.aclParam[0]); } const authResults = requestTypes.map(type => - isBucketAuthorized(bucket, type, accountToVet, altAcctAuthInfo)); + isBucketAuthorized(bucket, type, accountToVet, altAcctAuthInfo) + ); assert.deepStrictEqual(authResults, [true, true]); done(); }); @@ -401,14 +425,11 @@ describe('bucket authorization for objectDelete and objectPut', () => { }); describe('bucket authorization for objectPutACL and objectGetACL', () => { - it('should allow access to anyone since checks ' + - 'are done at object level', done => { + it('should allow access to anyone since checks ' + 'are done at object level', done => { const requestTypes = ['objectPutACL', 'objectGetACL']; - const results = requestTypes.map(type => - isBucketAuthorized(bucket, type, accountToVet, altAcctAuthInfo)); + const results = requestTypes.map(type => isBucketAuthorized(bucket, type, accountToVet, altAcctAuthInfo)); assert.deepStrictEqual(results, [true, true]); - const publicUserResults = requestTypes.map(type => - isBucketAuthorized(bucket, type, constants.publicId)); + const publicUserResults = requestTypes.map(type => isBucketAuthorized(bucket, type, constants.publicId)); assert.deepStrictEqual(publicUserResults, [true, true]); done(); }); diff --git a/tests/unit/api/bucketDelete.js b/tests/unit/api/bucketDelete.js 
index 6cd7d580ba..0b53fceb8b 100644 --- a/tests/unit/api/bucketDelete.js +++ b/tests/unit/api/bucketDelete.js @@ -11,8 +11,7 @@ const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutEncryption = require('../../../lib/api/bucketPutEncryption'); const { templateSSEConfig, templateRequest } = require('../utils/bucketEncryption'); const constants = require('../../../constants'); -const initiateMultipartUpload - = require('../../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../../lib/api/initiateMultipartUpload'); const metadata = require('../metadataswitch'); const metadataMem = require('arsenal').storage.metadata.inMemory.metadata; const objectPut = require('../../../lib/api/objectPut'); @@ -20,7 +19,6 @@ const objectPutPart = require('../../../lib/api/objectPutPart'); const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const DummyRequest = require('../DummyRequest'); - const log = new DummyRequestLogger(); const canonicalID = 'accessKey1'; const authInfo = makeAuthInfo(canonicalID); @@ -32,55 +30,59 @@ const objectName = 'objectName'; const mpuBucket = `${constants.mpuBucketPrefix}${bucketName}`; function createMPU(testRequest, initiateRequest, deleteOverviewMPUObj, cb) { - async.waterfall([ - next => bucketPut(authInfo, testRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => { - parseString(result, next); - }, - (json, next) => { - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - objectKey: objectName, - namespace, - url: `/${objectName}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest, undefined, log, err => { - if (err) { - return next(err); - } - return next(null, testUploadId); + async.waterfall( + [ + next => bucketPut(authInfo, testRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => { + parseString(result, next); + }, + (json, next) => { + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + objectKey: objectName, + namespace, + url: `/${objectName}?partNumber=1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, err => { + if (err) { + return next(err); + } + return next(null, testUploadId); + }); + }, + ], + (err, testUploadId) => { + assert.strictEqual(err, null); + const mpuBucketKeyMap = metadataMem.metadata.keyMaps.get(mpuBucket); + assert.strictEqual(mpuBucketKeyMap.size, 2); + if (deleteOverviewMPUObj) { + const overviewKey = + `overview${constants.splitter}` + `${objectName}${constants.splitter}${testUploadId}`; + // remove overview key from in mem mpu bucket + 
mpuBucketKeyMap.delete(overviewKey); + assert.strictEqual(mpuBucketKeyMap.size, 1); + } + bucketDelete(authInfo, testRequest, log, err => { + assert.strictEqual(err, null); + cb(); }); - }, - ], (err, testUploadId) => { - assert.strictEqual(err, null); - const mpuBucketKeyMap = - metadataMem.metadata.keyMaps.get(mpuBucket); - assert.strictEqual(mpuBucketKeyMap.size, 2); - if (deleteOverviewMPUObj) { - const overviewKey = `overview${constants.splitter}` + - `${objectName}${constants.splitter}${testUploadId}`; - // remove overview key from in mem mpu bucket - mpuBucketKeyMap.delete(overviewKey); - assert.strictEqual(mpuBucketKeyMap.size, 1); } - bucketDelete(authInfo, testRequest, log, err => { - assert.strictEqual(err, null); - cb(); - }); - }); + ); } describe('bucketDelete API', () => { @@ -106,13 +108,16 @@ describe('bucketDelete API', () => { }; it('should return an error if the bucket is not empty', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - headers: {}, - url: `/${bucketName}/${objectName}`, - namespace, - objectKey: objectName, - }, postBody); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + headers: {}, + url: `/${bucketName}/${objectName}`, + namespace, + objectKey: objectName, + }, + postBody + ); bucketPut(authInfo, testRequest, log, err => { assert.strictEqual(err, null); @@ -122,21 +127,22 @@ describe('bucketDelete API', () => { assert.strictEqual(err.is.BucketNotEmpty, true); metadata.getBucket(bucketName, log, (err, md) => { assert.strictEqual(md.getName(), bucketName); - metadata.listObject(usersBucket, + metadata.listObject( + usersBucket, { prefix: authInfo.getCanonicalID() }, - log, (err, listResponse) => { - assert.strictEqual(listResponse.Contents.length, - 1); + log, + (err, listResponse) => { + assert.strictEqual(listResponse.Contents.length, 1); done(); - }); + } + ); }); }); }); }); }); - it('should not return an error if the bucket has an initiated mpu', - done => { + it('should not return an error if the bucket has an initiated mpu', done => { bucketPut(authInfo, testRequest, log, err => { assert.strictEqual(err, null); initiateMultipartUpload(authInfo, initiateRequest, log, err => { @@ -155,23 +161,21 @@ describe('bucketDelete API', () => { metadata.getBucket(bucketName, log, (err, md) => { assert.strictEqual(err.is.NoSuchBucket, true); assert.strictEqual(md, undefined); - metadata.listObject(usersBucket, { prefix: canonicalID }, - log, (err, listResponse) => { - assert.strictEqual(listResponse.Contents.length, 0); - done(); - }); + metadata.listObject(usersBucket, { prefix: canonicalID }, log, (err, listResponse) => { + assert.strictEqual(listResponse.Contents.length, 0); + done(); + }); }); }); }); }); - it('should delete a bucket even if the bucket has ongoing mpu', - done => createMPU(testRequest, initiateRequest, false, done)); + it('should delete a bucket even if the bucket has ongoing mpu', done => + createMPU(testRequest, initiateRequest, false, done)); // if only part object (and no overview objects) is in mpu shadow bucket - it('should delete a bucket even if the bucket has an orphan part', - done => createMPU(testRequest, initiateRequest, true, done)); - + it('should delete a bucket even if the bucket has an orphan part', done => + createMPU(testRequest, initiateRequest, true, done)); it('should prevent anonymous user delete bucket API access', done => { const publicAuthInfo = makeAuthInfo(constants.publicId); diff --git a/tests/unit/api/bucketDeleteCors.js b/tests/unit/api/bucketDeleteCors.js index 
981fd62a8d..4a4f63aac2 100644 --- a/tests/unit/api/bucketDeleteCors.js +++ b/tests/unit/api/bucketDeleteCors.js @@ -3,10 +3,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutCors = require('../../../lib/api/bucketPutCors'); const bucketDeleteCors = require('../../../lib/api/bucketDeleteCors'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - CorsConfigTester } = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, CorsConfigTester } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); const log = new DummyRequestLogger(); @@ -21,10 +18,8 @@ const testBucketPutRequest = { url: '/', actionImplicitDenies: false, }; -const testBucketPutCorsRequest = - corsUtil.createBucketCorsRequest('PUT', bucketName); -const testBucketDeleteCorsRequest = - corsUtil.createBucketCorsRequest('DELETE', bucketName); +const testBucketPutCorsRequest = corsUtil.createBucketCorsRequest('PUT', bucketName); +const testBucketDeleteCorsRequest = corsUtil.createBucketCorsRequest('DELETE', bucketName); describe('deleteBucketCors API', () => { beforeEach(done => { @@ -35,9 +30,8 @@ describe('deleteBucketCors API', () => { }); afterEach(() => cleanup()); - it('should delete a bucket\'s cors configuration in metadata', done => { - bucketDeleteCors(authInfo, testBucketDeleteCorsRequest, log, - err => { + it("should delete a bucket's cors configuration in metadata", done => { + bucketDeleteCors(authInfo, testBucketDeleteCorsRequest, log, err => { if (err) { process.stdout.write(`Unexpected err ${err}`); return done(err); diff --git a/tests/unit/api/bucketDeleteEncryption.js b/tests/unit/api/bucketDeleteEncryption.js index 13bf39bb6a..eb84985d98 100644 --- a/tests/unit/api/bucketDeleteEncryption.js +++ b/tests/unit/api/bucketDeleteEncryption.js @@ -51,7 +51,8 @@ describe('bucketDeleteEncryption API', () => { }); }); }); - })); + }) + ); it('should remove sse and clear key for aws:kms with a configured master key id', done => { const post = templateSSEConfig({ algorithm: 'aws:kms', keyId: '12345' }); diff --git a/tests/unit/api/bucketDeleteLifecycle.js b/tests/unit/api/bucketDeleteLifecycle.js index 7654018b32..45d0bf67eb 100644 --- a/tests/unit/api/bucketDeleteLifecycle.js +++ b/tests/unit/api/bucketDeleteLifecycle.js @@ -4,10 +4,7 @@ const async = require('async'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutLifecycle = require('../../../lib/api/bucketDeleteLifecycle'); const bucketDeleteLifecycle = require('../../../lib/api/bucketDeleteLifecycle'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); const log = new DummyRequestLogger(); @@ -22,12 +19,13 @@ function _makeRequest(includeXml) { actionImplicitDenies: false, }; if (includeXml) { - request.post = '' + - '' + - 'Enabled' + - '1' + - ''; + request.post = + '' + + '' + + 'Enabled' + + '1' + + ''; } return request; } @@ -44,16 +42,19 @@ describe('deleteBucketLifecycle API', () => { }); }); it('should delete bucket lifecycle', done => { - async.series([ - next => bucketPutLifecycle(authInfo, _makeRequest(true), log, next), - next => bucketDeleteLifecycle(authInfo, _makeRequest(), log, next), - next => metadata.getBucket(bucketName, log, next), - ], (err, results) => { - assert.equal(err, null, `Expected success, got error: ${err}`); - const bucket = 
results[2]; - const lifecycleConfig = bucket.getLifecycleConfiguration(); - assert.equal(lifecycleConfig, null); - done(); - }); + async.series( + [ + next => bucketPutLifecycle(authInfo, _makeRequest(true), log, next), + next => bucketDeleteLifecycle(authInfo, _makeRequest(), log, next), + next => metadata.getBucket(bucketName, log, next), + ], + (err, results) => { + assert.equal(err, null, `Expected success, got error: ${err}`); + const bucket = results[2]; + const lifecycleConfig = bucket.getLifecycleConfiguration(); + assert.equal(lifecycleConfig, null); + done(); + } + ); }); }); diff --git a/tests/unit/api/bucketDeletePolicy.js b/tests/unit/api/bucketDeletePolicy.js index 48d5a3346e..48053ccfa9 100644 --- a/tests/unit/api/bucketDeletePolicy.js +++ b/tests/unit/api/bucketDeletePolicy.js @@ -4,10 +4,7 @@ const async = require('async'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutPolicy = require('../../../lib/api/bucketPutPolicy'); const bucketDeletePolicy = require('../../../lib/api/bucketDeletePolicy'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); const log = new DummyRequestLogger(); @@ -50,16 +47,19 @@ describe('deleteBucketPolicy API', () => { }); }); it('should delete bucket policy', done => { - async.series([ - next => bucketPutPolicy(authInfo, _makeRequest(true), log, next), - next => bucketDeletePolicy(authInfo, _makeRequest(), log, next), - next => metadata.getBucket(bucketName, log, next), - ], (err, results) => { - assert.equal(err, null, `Expected success, got error: ${err}`); - const bucket = results[2]; - const bucketPolicy = bucket.getBucketPolicy(); - assert.equal(bucketPolicy, null); - done(); - }); + async.series( + [ + next => bucketPutPolicy(authInfo, _makeRequest(true), log, next), + next => bucketDeletePolicy(authInfo, _makeRequest(), log, next), + next => metadata.getBucket(bucketName, log, next), + ], + (err, results) => { + assert.equal(err, null, `Expected success, got error: ${err}`); + const bucket = results[2]; + const bucketPolicy = bucket.getBucketPolicy(); + assert.equal(bucketPolicy, null); + done(); + } + ); }); }); diff --git a/tests/unit/api/bucketDeleteTagging.js b/tests/unit/api/bucketDeleteTagging.js index 7eeb98f1f8..72ae95a6bf 100644 --- a/tests/unit/api/bucketDeleteTagging.js +++ b/tests/unit/api/bucketDeleteTagging.js @@ -1,11 +1,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - TaggingConfigTester } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, TaggingConfigTester } = require('../helpers'); const bucketPutTagging = require('../../../lib/api/bucketPutTagging'); const bucketGetTagging = require('../../../lib/api/bucketGetTagging'); const bucketDeleteTagging = require('../../../lib/api/bucketDeleteTagging'); @@ -30,45 +26,41 @@ describe('deleteBucketTagging API', () => { it('should delete tags resource', done => { const taggingUtil = new TaggingConfigTester(); - const testBucketPutTaggingRequest = taggingUtil - .createBucketTaggingRequest('PUT', bucketName); + const testBucketPutTaggingRequest = taggingUtil.createBucketTaggingRequest('PUT', bucketName); bucketPutTagging(authInfo, testBucketPutTaggingRequest, log, err => { assert.strictEqual(err, undefined); - const testBucketGetTaggingRequest = 
taggingUtil - .createBucketTaggingRequest('GET', bucketName); - return bucketGetTagging(authInfo, testBucketGetTaggingRequest, log, - (err, xml) => { + const testBucketGetTaggingRequest = taggingUtil.createBucketTaggingRequest('GET', bucketName); + return bucketGetTagging(authInfo, testBucketGetTaggingRequest, log, (err, xml) => { + assert.ifError(err); + assert.strictEqual(xml, taggingUtil.constructXml()); + const testBucketDeleteTaggingRequest = taggingUtil.createBucketTaggingRequest('DELETE', bucketName); + return bucketDeleteTagging(authInfo, testBucketDeleteTaggingRequest, log, err => { assert.ifError(err); - assert.strictEqual(xml, taggingUtil.constructXml()); - const testBucketDeleteTaggingRequest = taggingUtil - .createBucketTaggingRequest('DELETE', bucketName); - return bucketDeleteTagging(authInfo, testBucketDeleteTaggingRequest, - log, err => { - assert.ifError(err); - return bucketGetTagging(authInfo, testBucketGetTaggingRequest, - log, err => { - assert(err.NoSuchTagSet); - return done(); - }); - }); + return bucketGetTagging(authInfo, testBucketGetTaggingRequest, log, err => { + assert(err.NoSuchTagSet); + return done(); + }); }); + }); }); }); it('should return access denied if the authorization check fails', done => { const taggingUtil = new TaggingConfigTester(); - const testBucketPutTaggingRequest = taggingUtil - .createBucketTaggingRequest('PUT', bucketName); + const testBucketPutTaggingRequest = taggingUtil.createBucketTaggingRequest('PUT', bucketName); bucketPutTagging(authInfo, testBucketPutTaggingRequest, log, err => { assert.ifError(err); - const testBucketDeleteTaggingRequest = taggingUtil - .createBucketTaggingRequest('DELETE', bucketName, null, true); - return bucketDeleteTagging(authInfo, testBucketDeleteTaggingRequest, - log, err => { - assert(err.AccessDenied); - return done(); - }); + const testBucketDeleteTaggingRequest = taggingUtil.createBucketTaggingRequest( + 'DELETE', + bucketName, + null, + true + ); + return bucketDeleteTagging(authInfo, testBucketDeleteTaggingRequest, log, err => { + assert(err.AccessDenied); + return done(); + }); }); }); }); diff --git a/tests/unit/api/bucketDeleteWebsite.js b/tests/unit/api/bucketDeleteWebsite.js index 41bd286c87..4d1623e363 100644 --- a/tests/unit/api/bucketDeleteWebsite.js +++ b/tests/unit/api/bucketDeleteWebsite.js @@ -3,19 +3,14 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutWebsite = require('../../../lib/api/bucketPutWebsite'); const bucketDeleteWebsite = require('../../../lib/api/bucketDeleteWebsite'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - WebsiteConfig } -= require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, WebsiteConfig } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); const log = new DummyRequestLogger(); const authInfo = makeAuthInfo('accessKey1'); const bucketName = 'bucketname'; const config = new WebsiteConfig('index.html', 'error.html'); -config.addRoutingRule({ ReplaceKeyPrefixWith: 'documents/' }, -{ KeyPrefixEquals: 'docs/' }); +config.addRoutingRule({ ReplaceKeyPrefixWith: 'documents/' }, { KeyPrefixEquals: 'docs/' }); const testBucketPutRequest = { bucketName, headers: { host: `${bucketName}.s3.amazonaws.com` }, @@ -31,8 +26,7 @@ const testBucketDeleteWebsiteRequest = { query: { website: '' }, actionImplicitDenies: false, }; -const testBucketPutWebsiteRequest = Object.assign({ post: config.getXml() }, - testBucketDeleteWebsiteRequest); +const 
testBucketPutWebsiteRequest = Object.assign({ post: config.getXml() }, testBucketDeleteWebsiteRequest); describe('deleteBucketWebsite API', () => { beforeEach(done => { @@ -43,9 +37,8 @@ describe('deleteBucketWebsite API', () => { }); afterEach(() => cleanup()); - it('should delete a bucket\'s website configuration in metadata', done => { - bucketDeleteWebsite(authInfo, testBucketDeleteWebsiteRequest, log, - err => { + it("should delete a bucket's website configuration in metadata", done => { + bucketDeleteWebsite(authInfo, testBucketDeleteWebsiteRequest, log, err => { if (err) { process.stdout.write(`Unexpected err ${err}`); return done(err); @@ -55,8 +48,7 @@ describe('deleteBucketWebsite API', () => { process.stdout.write(`Err retrieving bucket MD ${err}`); return done(err); } - assert.strictEqual(bucket.getWebsiteConfiguration(), - null); + assert.strictEqual(bucket.getWebsiteConfiguration(), null); return done(); }); }); diff --git a/tests/unit/api/bucketGet.js b/tests/unit/api/bucketGet.js index ba166fd088..10629a6076 100644 --- a/tests/unit/api/bucketGet.js +++ b/tests/unit/api/bucketGet.js @@ -22,40 +22,55 @@ const objectName1 = `${prefix}${delimiter}objectName1`; const objectName2 = `${prefix}${delimiter}objectName2`; const objectName3 = 'invalidURI~~~b'; const objectName4 = `${objectName1}&><"\'`; -const testPutBucketRequest = new DummyRequest({ - bucketName, - headers: {}, - url: `/${bucketName}`, - namespace, -}, Buffer.alloc(0)); -const testPutObjectRequest1 = new DummyRequest({ - bucketName, - headers: {}, - url: `/${bucketName}/${objectName1}`, - namespace, - objectKey: objectName1, -}, postBody); -const testPutObjectRequest2 = new DummyRequest({ - bucketName, - headers: {}, - url: `/${bucketName}/${objectName2}`, - namespace, - objectKey: objectName2, -}, postBody); -const testPutObjectRequest3 = new DummyRequest({ - bucketName, - headers: {}, - url: `/${bucketName}/${objectName3}`, - namespace, - objectKey: objectName3, -}, postBody); -const testPutObjectRequest4 = new DummyRequest({ - bucketName, - headers: {}, - url: `/${bucketName}/${objectName3}`, - namespace, - objectKey: objectName4, -}, postBody); +const testPutBucketRequest = new DummyRequest( + { + bucketName, + headers: {}, + url: `/${bucketName}`, + namespace, + }, + Buffer.alloc(0) +); +const testPutObjectRequest1 = new DummyRequest( + { + bucketName, + headers: {}, + url: `/${bucketName}/${objectName1}`, + namespace, + objectKey: objectName1, + }, + postBody +); +const testPutObjectRequest2 = new DummyRequest( + { + bucketName, + headers: {}, + url: `/${bucketName}/${objectName2}`, + namespace, + objectKey: objectName2, + }, + postBody +); +const testPutObjectRequest3 = new DummyRequest( + { + bucketName, + headers: {}, + url: `/${bucketName}/${objectName3}`, + namespace, + objectKey: objectName3, + }, + postBody +); +const testPutObjectRequest4 = new DummyRequest( + { + bucketName, + headers: {}, + url: `/${bucketName}/${objectName3}`, + namespace, + objectKey: objectName4, + }, + postBody +); const baseGetRequest = { bucketName, @@ -70,39 +85,33 @@ const tests = [ name: 'list of all objects if no delimiter specified', request: Object.assign({ query: {}, url: baseUrl }, baseGetRequest), assertion: result => { - assert.strictEqual(result.ListBucketResult.Contents[1].Key[0], - objectName1); - assert.strictEqual(result.ListBucketResult.Contents[2].Key[0], - objectName2); - assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], - objectName3); + 
assert.strictEqual(result.ListBucketResult.Contents[1].Key[0], objectName1); + assert.strictEqual(result.ListBucketResult.Contents[2].Key[0], objectName2); + assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], objectName3); }, }, { - name: 'return name of common prefix of common prefix objects if ' + - 'delimiter and prefix specified', - request: Object.assign({ - url: `/${bucketName}?delimiter=${delimiter}&prefix=${prefix}`, - query: { delimiter, prefix }, - }, baseGetRequest), + name: 'return name of common prefix of common prefix objects if ' + 'delimiter and prefix specified', + request: Object.assign( + { + url: `/${bucketName}?delimiter=${delimiter}&prefix=${prefix}`, + query: { delimiter, prefix }, + }, + baseGetRequest + ), assertion: result => - assert.strictEqual(result.ListBucketResult - .CommonPrefixes[0].Prefix[0], `${prefix}${delimiter}`), + assert.strictEqual(result.ListBucketResult.CommonPrefixes[0].Prefix[0], `${prefix}${delimiter}`), }, { name: 'return empty list when max-keys is set to 0', - request: Object.assign({ query: { 'max-keys': '0' }, url: baseUrl }, - baseGetRequest), - assertion: result => - assert.strictEqual(result.ListBucketResult.Contents, undefined), + request: Object.assign({ query: { 'max-keys': '0' }, url: baseUrl }, baseGetRequest), + assertion: result => assert.strictEqual(result.ListBucketResult.Contents, undefined), }, { name: 'return no more keys than max-keys specified', - request: Object.assign({ query: { 'max-keys': '1' }, url: baseUrl }, - baseGetRequest), + request: Object.assign({ query: { 'max-keys': '1' }, url: baseUrl }, baseGetRequest), assertion: result => { - assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], - objectName3); + assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], objectName3); assert.strictEqual(result.ListBucketResult.Contents[1], undefined); }, }, @@ -116,13 +125,9 @@ const tests = [ baseGetRequest ), assertion: result => { - assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], - objectName3); + assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], objectName3); assert.strictEqual(result.ListBucketResult.Contents[1], undefined); - assert.strictEqual( - result.ListBucketResult.NextContinuationToken[0], - 'aW52YWxpZFVSSX5+fmI=' - ); + assert.strictEqual(result.ListBucketResult.NextContinuationToken[0], 'aW52YWxpZFVSSX5+fmI='); }, }, { @@ -135,40 +140,27 @@ const tests = [ baseGetRequest ), assertion: result => { - assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], - objectName3); + assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], objectName3); assert.strictEqual(result.ListBucketResult.Contents[1], undefined); - assert.strictEqual( - result.ListBucketResult.NextContinuationToken[0], - 'aW52YWxpZFVSSX5+fmI=' - ); + assert.strictEqual(result.ListBucketResult.NextContinuationToken[0], 'aW52YWxpZFVSSX5+fmI='); }, }, { - name: 'return max-keys number from request even if greater than ' + - 'actual keys returned', - request: Object.assign({ query: { 'max-keys': '99999' }, url: baseUrl }, - baseGetRequest), - assertion: result => - assert.strictEqual(result.ListBucketResult.MaxKeys[0], '99999'), + name: 'return max-keys number from request even if greater than ' + 'actual keys returned', + request: Object.assign({ query: { 'max-keys': '99999' }, url: baseUrl }, baseGetRequest), + assertion: result => assert.strictEqual(result.ListBucketResult.MaxKeys[0], '99999'), }, { name: 'return max-keys number from request even when value is 0', - request: 
Object.assign({ query: { 'max-keys': '0' }, url: baseUrl }, - baseGetRequest), - assertion: result => - assert.strictEqual(result.ListBucketResult.MaxKeys[0], '0'), + request: Object.assign({ query: { 'max-keys': '0' }, url: baseUrl }, baseGetRequest), + assertion: result => assert.strictEqual(result.ListBucketResult.MaxKeys[0], '0'), }, { name: 'url encode object key name if requested', - request: Object.assign( - { query: { 'encoding-type': 'url' }, url: baseUrl }, - baseGetRequest), + request: Object.assign({ query: { 'encoding-type': 'url' }, url: baseUrl }, baseGetRequest), assertion: result => { - assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], - querystring.escape(objectName3)); - assert.strictEqual(result.ListBucketResult.Contents[1].Key[0], - querystring.escape(objectName1)); + assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], querystring.escape(objectName3)); + assert.strictEqual(result.ListBucketResult.Contents[1].Key[0], querystring.escape(objectName1)); }, }, ]; @@ -182,28 +174,25 @@ describe('bucketGet API', () => { it(`should ${test.name}`, done => { const testGetRequest = test.request; - async.waterfall([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - (corsHeaders, next) => objectPut(authInfo, - testPutObjectRequest1, undefined, log, next), - (resHeaders, next) => objectPut(authInfo, - testPutObjectRequest2, undefined, log, next), - (resHeaders, next) => objectPut(authInfo, - testPutObjectRequest3, undefined, log, next), - (resHeaders, next) => - bucketGet(authInfo, testGetRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, result) => { - test.assertion(result); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest1, undefined, log, next), + (resHeaders, next) => objectPut(authInfo, testPutObjectRequest2, undefined, log, next), + (resHeaders, next) => objectPut(authInfo, testPutObjectRequest3, undefined, log, next), + (resHeaders, next) => bucketGet(authInfo, testGetRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + test.assertion(result); + done(); + } + ); }); }); it('should return an InvalidArgument error if max-keys == -1', done => { - const testGetRequest = Object.assign({ query: { 'max-keys': '-1' } }, - baseGetRequest); + const testGetRequest = Object.assign({ query: { 'max-keys': '-1' } }, baseGetRequest); bucketGet(authInfo, testGetRequest, log, err => { assert.strictEqual(err.is.InvalidArgument, true); done(); @@ -211,81 +200,73 @@ describe('bucketGet API', () => { }); it('should escape invalid xml characters in object key names', done => { - const testGetRequest = Object.assign({ query: {}, url: baseUrl }, - baseGetRequest); + const testGetRequest = Object.assign({ query: {}, url: baseUrl }, baseGetRequest); - async.waterfall([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest4, - undefined, log, next), - (resHeaders, next) => bucketGet(authInfo, testGetRequest, - log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, result) => { - assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], - testPutObjectRequest4.objectKey); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest4, 
undefined, log, next), + (resHeaders, next) => bucketGet(authInfo, testGetRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual(result.ListBucketResult.Contents[0].Key[0], testPutObjectRequest4.objectKey); + done(); + } + ); }); it('should return xml that refers to the s3 docs for xml specs', done => { - const testGetRequest = Object.assign({ query: {}, url: baseUrl }, - baseGetRequest); + const testGetRequest = Object.assign({ query: {}, url: baseUrl }, baseGetRequest); - async.waterfall([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - (corsHeaders, next) => - bucketGet(authInfo, testGetRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, result) => { - assert.strictEqual(result.ListBucketResult.$.xmlns, - 'http://s3.amazonaws.com/doc/2006-03-01/'); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + (corsHeaders, next) => bucketGet(authInfo, testGetRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual(result.ListBucketResult.$.xmlns, 'http://s3.amazonaws.com/doc/2006-03-01/'); + done(); + } + ); }); }); -const testsForV2 = [...tests, +const testsForV2 = [ + ...tests, { name: 'return no owner info when --fetch-owner option is not used', request: Object.assign({ query: {}, url: baseUrl }, baseGetRequest), assertion: result => { - const owners - = result.ListBucketResult.Contents.filter(c => c.Owner); + const owners = result.ListBucketResult.Contents.filter(c => c.Owner); assert.strictEqual(owners.length, 0); }, }, { name: 'return owner info when --fetch-owner option is used', - request: Object.assign({ query: { 'fetch-owner': 'true' }, - url: baseUrl }, baseGetRequest), + request: Object.assign({ query: { 'fetch-owner': 'true' }, url: baseUrl }, baseGetRequest), assertion: result => { - const owners - = result.ListBucketResult.Contents.filter(c => + const owners = result.ListBucketResult.Contents.filter( + c => c.Owner[0].ID[0] === authInfo.canonicalID && - c.Owner[0].DisplayName[0] === authInfo.accountDisplayName); - assert.strictEqual(owners.length, - result.ListBucketResult.Contents.length); + c.Owner[0].DisplayName[0] === authInfo.accountDisplayName + ); + assert.strictEqual(owners.length, result.ListBucketResult.Contents.length); }, }, { name: 'return no owner info when --no-fetch-owner option is used', - request: Object.assign({ query: { 'fetch-owner': 'false' }, - url: baseUrl }, baseGetRequest), + request: Object.assign({ query: { 'fetch-owner': 'false' }, url: baseUrl }, baseGetRequest), assertion: result => { - const owners - = result.ListBucketResult.Contents.filter(c => c.Owner); + const owners = result.ListBucketResult.Contents.filter(c => c.Owner); assert.strictEqual(owners.length, 0); }, }, { name: 'return max-keys number from request even when value is 0', - request: Object.assign({ query: { 'max-keys': '0' }, url: baseUrl }, - baseGetRequest), - assertion: result => - assert.strictEqual(result.ListBucketResult.MaxKeys[0], '0'), + request: Object.assign({ query: { 'max-keys': '0' }, url: baseUrl }, baseGetRequest), + assertion: result => assert.strictEqual(result.ListBucketResult.MaxKeys[0], '0'), }, ]; @@ -297,40 +278,35 @@ describe('bucketGet API V2', () => { testsForV2.forEach(test => { /* eslint-disable no-param-reassign */ test.request.query['list-type'] = 2; - test.request.url = test.request.url.indexOf('?') > 
-1 ? - `${test.request.url}&list-type=2` : - `${test.request.url}?list-type=2`; + test.request.url = + test.request.url.indexOf('?') > -1 ? `${test.request.url}&list-type=2` : `${test.request.url}?list-type=2`; /* eslint-enable no-param-reassign */ it(`should ${test.name}`, done => { const testGetRequest = test.request; - async.waterfall([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - (corsHeaders, next) => objectPut(authInfo, - testPutObjectRequest1, undefined, log, next), - (resHeaders, next) => objectPut(authInfo, - testPutObjectRequest2, undefined, log, next), - (resHeaders, next) => objectPut(authInfo, - testPutObjectRequest3, undefined, log, next), - (resHeaders, next) => - bucketGet(authInfo, testGetRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, result) => { - // v2 requests should return 'KeyCount' in response - const keyCount = - Number.parseInt(result.ListBucketResult.KeyCount[0], 10); - const keysReturned = result.ListBucketResult.Contents ? - result.ListBucketResult.Contents.length : 0; - assert.strictEqual(keyCount, keysReturned); - // assert the results from tests - test.assertion(result); - if (result.ListBucketResult.IsTruncated && result.ListBucketResult.IsTruncated[0] === 'false') { - assert.strictEqual(result.ListBucketResult.NextContinuationToken, undefined); + async.waterfall( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest1, undefined, log, next), + (resHeaders, next) => objectPut(authInfo, testPutObjectRequest2, undefined, log, next), + (resHeaders, next) => objectPut(authInfo, testPutObjectRequest3, undefined, log, next), + (resHeaders, next) => bucketGet(authInfo, testGetRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + // v2 requests should return 'KeyCount' in response + const keyCount = Number.parseInt(result.ListBucketResult.KeyCount[0], 10); + const keysReturned = result.ListBucketResult.Contents ? result.ListBucketResult.Contents.length : 0; + assert.strictEqual(keyCount, keysReturned); + // assert the results from tests + test.assertion(result); + if (result.ListBucketResult.IsTruncated && result.ListBucketResult.IsTruncated[0] === 'false') { + assert.strictEqual(result.ListBucketResult.NextContinuationToken, undefined); + } + done(); } - done(); - }); + ); }); }); }); diff --git a/tests/unit/api/bucketGetACL.js b/tests/unit/api/bucketGetACL.js index 5a4327a9d7..8b576a1d0a 100644 --- a/tests/unit/api/bucketGetACL.js +++ b/tests/unit/api/bucketGetACL.js @@ -41,7 +41,7 @@ describe('bucketGetACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'private', }, url: '/?acl', @@ -49,24 +49,26 @@ describe('bucketGetACL API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => - bucketPutACL(authInfo, testPutACLRequest, log, next), - (corsHeaders, next) => bucketGetACL(authInfo, - testGetACLRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[1], undefined); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => bucketPutACL(authInfo, testPutACLRequest, log, next), + (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[1], undefined); + done(); + } + ); }); it('should get a canned public-read-write ACL', done => { @@ -74,7 +76,7 @@ describe('bucketGetACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'public-read-write', }, url: '/?acl', @@ -82,35 +84,36 @@ describe('bucketGetACL API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => - bucketPutACL(authInfo, testPutACLRequest, log, next), - (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, - log, next), - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0].URI[0], - constants.publicId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1] - .Permission[0], 'READ'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2].Grantee[0].URI[0], - constants.publicId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2] - .Permission[0], 'WRITE'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[3], undefined); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => bucketPutACL(authInfo, testPutACLRequest, log, next), + (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].URI[0], + constants.publicId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], 'READ'); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[2].Grantee[0].URI[0], + constants.publicId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2].Permission[0], 'WRITE'); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[3], undefined); + done(); + } + ); }); it('should get a canned public-read ACL', done => { @@ -118,7 +121,7 @@ describe('bucketGetACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'public-read', }, url: '/?acl', @@ -126,29 +129,31 @@ describe('bucketGetACL API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => - bucketPutACL(authInfo, testPutACLRequest, log, next), - (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, - log, next), - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0].URI[0], - constants.publicId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1] - .Permission[0], 'READ'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[2], undefined); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => bucketPutACL(authInfo, testPutACLRequest, log, next), + (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].URI[0], + constants.publicId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], 'READ'); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2], undefined); + done(); + } + ); }); it('should get a canned authenticated-read ACL', done => { @@ -156,7 +161,7 @@ describe('bucketGetACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'authenticated-read', }, url: '/?acl', @@ -164,30 +169,31 @@ describe('bucketGetACL API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => - bucketPutACL(authInfo, testPutACLRequest, log, next), - (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, - log, next), - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0] - .URI[0], constants.allAuthedUsersId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1] - .Permission[0], 'READ'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[2], undefined); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => bucketPutACL(authInfo, testPutACLRequest, log, next), + (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].URI[0], + constants.allAuthedUsersId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], 'READ'); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2], undefined); + done(); + } + ); }); it('should get a canned log-delivery-write ACL', done => { @@ -195,7 +201,7 @@ describe('bucketGetACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'log-delivery-write', }, url: '/?acl', @@ -203,36 +209,36 @@ describe('bucketGetACL API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => - bucketPutACL(authInfo, testPutACLRequest, log, next), - (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, - log, next), - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0] - .URI[0], constants.logId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1] - .Permission[0], 'WRITE'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2].Grantee[0] - .URI[0], constants.logId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2] - .Permission[0], 'READ_ACP'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[3], undefined); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => bucketPutACL(authInfo, testPutACLRequest, log, next), + (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].URI[0], + constants.logId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], 'WRITE'); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[2].Grantee[0].URI[0], + constants.logId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2].Permission[0], 'READ_ACP'); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[3], undefined); + done(); + } + ); }); it('should get specifically set ACLs', done => { @@ -240,104 +246,96 @@ describe('bucketGetACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-grant-full-control': - 'emailaddress="sampleaccount1@sampling.com"' + - ',emailaddress="sampleaccount2@sampling.com"', + 'emailaddress="sampleaccount1@sampling.com"' + ',emailaddress="sampleaccount2@sampling.com"', 'x-amz-grant-read': `uri=${constants.logId}`, 'x-amz-grant-write': `uri=${constants.publicId}`, - 'x-amz-grant-read-acp': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + - 'f8f8d5218e7cd47ef2be', - 'x-amz-grant-write-acp': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + - 'f8f8d5218e7cd47ef2bf', + 'x-amz-grant-read-acp': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + 'f8f8d5218e7cd47ef2be', + 'x-amz-grant-write-acp': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + 'f8f8d5218e7cd47ef2bf', }, url: '/?acl', query: { acl: '' }, actionImplicitDenies: false, }; - const canonicalIDforSample1 = - '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be'; - const canonicalIDforSample2 = - '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf'; + const canonicalIDforSample1 = '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be'; + const canonicalIDforSample2 = '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf'; - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => - bucketPutACL(authInfo, testPutACLRequest, log, next), - (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, - log, next), - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalIDforSample1); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .DisplayName[0], 'sampleaccount1@sampling.com'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0] - .ID[0], canonicalIDforSample2); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[1].Grantee[0] - .DisplayName[0], 'sampleaccount2@sampling.com'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2].Grantee[0] - .ID[0], canonicalIDforSample2); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2].Grantee[0] - .DisplayName[0], 'sampleaccount2@sampling.com'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2].Permission[0], - 'WRITE_ACP'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[3].Grantee[0] - .ID[0], canonicalIDforSample1); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[3].Grantee[0] - .DisplayName[0], 'sampleaccount1@sampling.com'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[3].Permission[0], - 'READ_ACP'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[4].Grantee[0] - .URI[0], constants.publicId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[4] - .Permission[0], 'WRITE'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[5].Grantee[0] - .URI[0], constants.logId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[5] - .Permission[0], 'READ'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[6], undefined); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => bucketPutACL(authInfo, testPutACLRequest, log, next), + (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalIDforSample1 + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].DisplayName[0], + 'sampleaccount1@sampling.com' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].ID[0], + canonicalIDforSample2 + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].DisplayName[0], + 'sampleaccount2@sampling.com' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[2].Grantee[0].ID[0], + canonicalIDforSample2 + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[2].Grantee[0].DisplayName[0], + 'sampleaccount2@sampling.com' + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2].Permission[0], 'WRITE_ACP'); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[3].Grantee[0].ID[0], + canonicalIDforSample1 + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[3].Grantee[0].DisplayName[0], + 'sampleaccount1@sampling.com' + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[3].Permission[0], 'READ_ACP'); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[4].Grantee[0].URI[0], + constants.publicId + ); + 
assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[4].Permission[0], 'WRITE'); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[5].Grantee[0].URI[0], + constants.logId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[5].Permission[0], 'READ'); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[6], undefined); + done(); + } + ); }); - const grantsByURI = [ - constants.publicId, - constants.allAuthedUsersId, - constants.logId, - ]; + const grantsByURI = [constants.publicId, constants.allAuthedUsersId, constants.logId]; grantsByURI.forEach(uri => { - it('should get all ACLs when predefined group - ' + - `${uri} is used for multiple grants`, done => { + it('should get all ACLs when predefined group - ' + `${uri} is used for multiple grants`, done => { const testPutACLRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-grant-full-control': `uri = ${uri}`, 'x-amz-grant-read': `uri = ${uri}`, 'x-amz-grant-write': `uri = ${uri}`, @@ -349,36 +347,35 @@ describe('bucketGetACL API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, - log, next), (corsHeaders, next) => - bucketPutACL(authInfo, testPutACLRequest, log, next), - (corsHeaders, next) => bucketGetACL(authInfo, - testGetACLRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.ifError(err); - const grants = - result.AccessControlPolicy.AccessControlList[0].Grant; - grants.forEach(grant => { - assert.strictEqual(grant.Permission.length, 1); - assert.strictEqual(grant.Grantee.length, 1); - assert.strictEqual(grant.Grantee[0].URI.length, 1); - assert.strictEqual(grant.Grantee[0].URI[0], `${uri}`); - }); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => bucketPutACL(authInfo, testPutACLRequest, log, next), + (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.ifError(err); + const grants = result.AccessControlPolicy.AccessControlList[0].Grant; + grants.forEach(grant => { + assert.strictEqual(grant.Permission.length, 1); + assert.strictEqual(grant.Grantee.length, 1); + assert.strictEqual(grant.Grantee[0].URI.length, 1); + assert.strictEqual(grant.Grantee[0].URI[0], `${uri}`); + }); + done(); + } + ); }); }); - it('should get all ACLs when predefined groups are used for ' + - 'more than one grant', done => { + it('should get all ACLs when predefined groups are used for ' + 'more than one grant', done => { const { allAuthedUsersId, publicId } = constants; const testPutACLRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-grant-write': `uri = ${allAuthedUsersId} `, 'x-amz-grant-write-acp': `uri = ${allAuthedUsersId} `, 'x-amz-grant-read': `uri = ${publicId} `, @@ -389,33 +386,33 @@ describe('bucketGetACL API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => - bucketPutACL(authInfo, testPutACLRequest, log, next), - (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, - log, next), - (result, corsHeaders, next) => parseString(result, next), - ], (err, 
result) => { - assert.ifError(err); - const grants = - result.AccessControlPolicy.AccessControlList[0].Grant; - grants.forEach(grant => { - const permissions = grant.Permission; - assert.strictEqual(permissions.length, 1); - const permission = permissions[0]; - assert.strictEqual(grant.Grantee.length, 1); - const grantees = grant.Grantee[0].URI; - assert.strictEqual(grantees.length, 1); - const grantee = grantees[0]; - if (['WRITE', 'WRITE_ACP'].includes(permission)) { - assert.strictEqual(grantee, constants.allAuthedUsersId); - } - if (['READ', 'READ_ACP'].includes(permission)) { - assert.strictEqual(grantee, constants.publicId); - } - }); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => bucketPutACL(authInfo, testPutACLRequest, log, next), + (corsHeaders, next) => bucketGetACL(authInfo, testGetACLRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.ifError(err); + const grants = result.AccessControlPolicy.AccessControlList[0].Grant; + grants.forEach(grant => { + const permissions = grant.Permission; + assert.strictEqual(permissions.length, 1); + const permission = permissions[0]; + assert.strictEqual(grant.Grantee.length, 1); + const grantees = grant.Grantee[0].URI; + assert.strictEqual(grantees.length, 1); + const grantee = grantees[0]; + if (['WRITE', 'WRITE_ACP'].includes(permission)) { + assert.strictEqual(grantee, constants.allAuthedUsersId); + } + if (['READ', 'READ_ACP'].includes(permission)) { + assert.strictEqual(grantee, constants.publicId); + } + }); + done(); + } + ); }); }); diff --git a/tests/unit/api/bucketGetCors.js b/tests/unit/api/bucketGetCors.js index 01c8bf1839..675125f45c 100644 --- a/tests/unit/api/bucketGetCors.js +++ b/tests/unit/api/bucketGetCors.js @@ -4,10 +4,7 @@ const crypto = require('crypto'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutCors = require('../../../lib/api/bucketPutCors'); const bucketGetCors = require('../../../lib/api/bucketGetCors'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } -= require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const log = new DummyRequestLogger(); const authInfo = makeAuthInfo('accessKey1'); @@ -32,25 +29,24 @@ function _makeCorsRequest(xml) { if (xml) { request.post = xml; - request.headers['content-md5'] = crypto.createHash('md5') - .update(request.post, 'utf8').digest('base64'); + request.headers['content-md5'] = crypto.createHash('md5').update(request.post, 'utf8').digest('base64'); } return request; } const testGetCorsRequest = _makeCorsRequest(); function _comparePutGetXml(sampleXml, done) { - const fullXml = '' + - `${sampleXml}`; + const fullXml = + '' + + `${sampleXml}`; const testPutCorsRequest = _makeCorsRequest(fullXml); bucketPutCors(authInfo, testPutCorsRequest, log, err => { if (err) { process.stdout.write(`Err putting cors config ${err}`); return done(err); } - return bucketGetCors(authInfo, testGetCorsRequest, log, - (err, res) => { + return bucketGetCors(authInfo, testGetCorsRequest, log, (err, res) => { assert.strictEqual(err, null, `Unexpected err ${err}`); assert.strictEqual(res, fullXml); done(); @@ -65,8 +61,7 @@ describe('getBucketCors API', () => { }); afterEach(() => cleanup()); - it('should return same XML as uploaded for AllowedMethod and ' + - 'AllowedOrigin', done => { + it('should return same XML as uploaded for AllowedMethod and ' + 'AllowedOrigin', done => 
{ const sampleXml = '' + 'PUT' + @@ -91,7 +86,7 @@ describe('getBucketCors API', () => { _comparePutGetXml(sampleXml, done); }); - it('should return same XML as uploaded for AllowedHeader\'s', done => { + it("should return same XML as uploaded for AllowedHeader's", done => { const sampleXml = '' + 'PUT' + @@ -103,7 +98,7 @@ describe('getBucketCors API', () => { _comparePutGetXml(sampleXml, done); }); - it('should return same XML as uploaded for ExposedHeader\'s', done => { + it("should return same XML as uploaded for ExposedHeader's", done => { const sampleXml = '' + 'PUT' + diff --git a/tests/unit/api/bucketGetLifecycle.js b/tests/unit/api/bucketGetLifecycle.js index 95025d74a7..d80d336b72 100644 --- a/tests/unit/api/bucketGetLifecycle.js +++ b/tests/unit/api/bucketGetLifecycle.js @@ -3,12 +3,8 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketGetLifecycle = require('../../../lib/api/bucketGetLifecycle'); const bucketPutLifecycle = require('../../../lib/api/bucketPutLifecycle'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } - = require('../helpers'); -const { getLifecycleRequest, getLifecycleXml } = - require('../utils/lifecycleHelpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); +const { getLifecycleRequest, getLifecycleXml } = require('../utils/lifecycleHelpers'); const log = new DummyRequestLogger(); const authInfo = makeAuthInfo('accessKey1'); @@ -25,8 +21,7 @@ describe('getBucketLifecycle API', () => { beforeEach(done => bucketPut(authInfo, testBucketPutRequest, log, done)); afterEach(() => cleanup()); - it('should return NoSuchLifecycleConfiguration error if ' + - 'bucket has no lifecycle', done => { + it('should return NoSuchLifecycleConfiguration error if ' + 'bucket has no lifecycle', done => { const lifecycleRequest = getLifecycleRequest(bucketName); bucketGetLifecycle(authInfo, lifecycleRequest, log, err => { assert.strictEqual(err.is.NoSuchLifecycleConfiguration, true); @@ -36,8 +31,7 @@ describe('getBucketLifecycle API', () => { describe('after bucket lifecycle has been put', () => { beforeEach(done => { - const putRequest = - getLifecycleRequest(bucketName, getLifecycleXml()); + const putRequest = getLifecycleRequest(bucketName, getLifecycleXml()); bucketPutLifecycle(authInfo, putRequest, log, err => { assert.equal(err, null); done(); @@ -48,8 +42,7 @@ describe('getBucketLifecycle API', () => { const getRequest = getLifecycleRequest(bucketName); bucketGetLifecycle(authInfo, getRequest, log, (err, res) => { assert.equal(err, null); - const expectedXML = '' + - `${getLifecycleXml()}`; + const expectedXML = '' + `${getLifecycleXml()}`; assert.deepStrictEqual(expectedXML, res); done(); }); diff --git a/tests/unit/api/bucketGetLocation.js b/tests/unit/api/bucketGetLocation.js index 73b52835eb..6017af16dc 100644 --- a/tests/unit/api/bucketGetLocation.js +++ b/tests/unit/api/bucketGetLocation.js @@ -2,10 +2,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketGetLocation = require('../../../lib/api/bucketGetLocation'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } -= require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const { config } = require('../../../lib/Config'); const log = new DummyRequestLogger(); @@ -32,11 +29,13 @@ const testGetLocationRequest = { const locationConstraints = config.locationConstraints; function getBucketRequestObject(location) { - 
const post = location ? '' + - '' + - `${location}` + - '' : undefined; + const post = location + ? '' + + '' + + `${location}` + + '' + : undefined; return Object.assign({ post }, testBucketPutRequest); } @@ -59,13 +58,11 @@ describe('getBucketLocation API', () => { }); afterEach(() => cleanup()); it(`should return ${location} LocationConstraint xml`, done => { - bucketGetLocation(authInfo, testGetLocationRequest, log, - (err, res) => { - assert.strictEqual(err, null, - `Unexpected ${err} getting location constraint`); - const xml = ` - ` + - `${location}`; + bucketGetLocation(authInfo, testGetLocationRequest, log, (err, res) => { + assert.strictEqual(err, null, `Unexpected ${err} getting location constraint`); + const xml = + ` + ` + `${location}`; assert.deepStrictEqual(res, xml); return done(); }); @@ -81,13 +78,11 @@ describe('getBucketLocation API', () => { }); afterEach(() => cleanup()); it('should return empty string LocationConstraint xml', done => { - bucketGetLocation(authInfo, testGetLocationRequest, log, - (err, res) => { - assert.strictEqual(err, null, - `Unexpected ${err} getting location constraint`); - const xml = ` - ` + - ''; + bucketGetLocation(authInfo, testGetLocationRequest, log, (err, res) => { + assert.strictEqual(err, null, `Unexpected ${err} getting location constraint`); + const xml = + ` + ` + ''; assert.deepStrictEqual(res, xml); return done(); }); diff --git a/tests/unit/api/bucketGetNotification.js b/tests/unit/api/bucketGetNotification.js index 5091b45ef9..f48606d411 100644 --- a/tests/unit/api/bucketGetNotification.js +++ b/tests/unit/api/bucketGetNotification.js @@ -3,10 +3,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketGetNotification = require('../../../lib/api/bucketGetNotification'); const bucketPutNotification = require('../../../lib/api/bucketPutNotification'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const log = new DummyRequestLogger(); const authInfo = makeAuthInfo('accessKey1'); @@ -40,7 +37,8 @@ function getNotificationXml() { const filterName = 'Prefix'; const filterValue = 'logs/'; - return '' + + return ( + '' + '' + `${id}` + `${queueArn}` + @@ -51,10 +49,10 @@ function getNotificationXml() { `${filterValue}` + '' + '' + - ''; + '' + ); } - describe('getBucketNotification API', () => { before(cleanup); beforeEach(done => bucketPut(authInfo, testBucketPutRequest, log, done)); @@ -70,8 +68,7 @@ describe('getBucketNotification API', () => { describe('after bucket notification has been put', () => { beforeEach(done => { - const putRequest = - getNotificationRequest(bucketName, getNotificationXml()); + const putRequest = getNotificationRequest(bucketName, getNotificationXml()); bucketPutNotification(authInfo, putRequest, log, err => { assert.ifError(err); done(); @@ -82,8 +79,7 @@ describe('getBucketNotification API', () => { const getRequest = getNotificationRequest(bucketName); bucketGetNotification(authInfo, getRequest, log, (err, res) => { assert.ifError(err); - const expectedXML = '' + - `${getNotificationXml()}`; + const expectedXML = '' + `${getNotificationXml()}`; assert.deepStrictEqual(expectedXML, res); done(); }); diff --git a/tests/unit/api/bucketGetObjectLock.js b/tests/unit/api/bucketGetObjectLock.js index 39c55574e7..d40d32a119 100644 --- a/tests/unit/api/bucketGetObjectLock.js +++ b/tests/unit/api/bucketGetObjectLock.js @@ -20,7 +20,7 @@ const 
bucketPutReq = { const testBucketPutReqWithObjLock = { bucketName, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-bucket-object-lock-enabled': 'True', }, url: '/', @@ -31,7 +31,7 @@ function getObjectLockConfigRequest(bucketName, xml) { const request = { bucketName, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-bucket-object-lock-enabled': 'true', }, url: '/?object-lock', @@ -59,10 +59,7 @@ function getObjectLockXml(mode, type, time) { // object lock is enabled and object lock configuration is set if (arguments.length === 3) { - xmlStr += xml.ruleOpen + - retentionMode + - retentionTime + - xml.ruleClose; + xmlStr += xml.ruleOpen + retentionMode + retentionTime + xml.ruleClose; } xmlStr += xml.objLockConfigClose; return xmlStr; @@ -72,14 +69,16 @@ describe('bucketGetObjectLock API', () => { before(done => bucketPut(authInfo, bucketPutReq, log, done)); after(cleanup); - it('should return ObjectLockConfigurationNotFoundError error if ' + - 'object lock is not enabled on the bucket', done => { - const objectLockRequest = getObjectLockConfigRequest(bucketName); - bucketGetObjectLock(authInfo, objectLockRequest, log, err => { - assert.strictEqual(err.is.ObjectLockConfigurationNotFoundError, true); - done(); - }); - }); + it( + 'should return ObjectLockConfigurationNotFoundError error if ' + 'object lock is not enabled on the bucket', + done => { + const objectLockRequest = getObjectLockConfigRequest(bucketName); + bucketGetObjectLock(authInfo, objectLockRequest, log, err => { + assert.strictEqual(err.is.ObjectLockConfigurationNotFoundError, true); + done(); + }); + } + ); }); describe('bucketGetObjectLock API', () => { @@ -87,8 +86,7 @@ describe('bucketGetObjectLock API', () => { beforeEach(done => bucketPut(authInfo, testBucketPutReqWithObjLock, log, done)); afterEach(cleanup); - it('should return config without \'rule\' if object lock configuration ' + - 'not set on the bucket', done => { + it("should return config without 'rule' if object lock configuration " + 'not set on the bucket', done => { const objectLockRequest = getObjectLockConfigRequest(bucketName); bucketGetObjectLock(authInfo, objectLockRequest, log, (err, res) => { assert.ifError(err); diff --git a/tests/unit/api/bucketGetPolicy.js b/tests/unit/api/bucketGetPolicy.js index 504f702c92..b51e378390 100644 --- a/tests/unit/api/bucketGetPolicy.js +++ b/tests/unit/api/bucketGetPolicy.js @@ -3,10 +3,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketGetPolicy = require('../../../lib/api/bucketGetPolicy'); const bucketPutPolicy = require('../../../lib/api/bucketPutPolicy'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const log = new DummyRequestLogger(); const authInfo = makeAuthInfo('accessKey1'); @@ -43,8 +40,7 @@ describe('getBucketPolicy API', () => { beforeEach(done => bucketPut(authInfo, testBasicRequest, log, done)); afterEach(() => cleanup()); - it('should return NoSuchBucketPolicy error if ' + - 'bucket has no policy', done => { + it('should return NoSuchBucketPolicy error if ' + 'bucket has no policy', done => { bucketGetPolicy(authInfo, testBasicRequest, log, err => { assert.strictEqual(err.is.NoSuchBucketPolicy, true); done(); diff --git a/tests/unit/api/bucketGetReplication.js b/tests/unit/api/bucketGetReplication.js index 
268c902025..be29801cf2 100644 --- a/tests/unit/api/bucketGetReplication.js +++ b/tests/unit/api/bucketGetReplication.js @@ -2,8 +2,7 @@ const assert = require('assert'); const { parseString } = require('xml2js'); const { DummyRequestLogger } = require('../helpers'); -const { getReplicationConfigurationXML } = - require('../../../lib/api/apiUtils/bucket/getReplicationConfiguration'); +const { getReplicationConfigurationXML } = require('../../../lib/api/apiUtils/bucket/getReplicationConfiguration'); // Compare the values from the parsedXML with the original configuration values. function checkXML(parsedXML, config) { @@ -58,29 +57,25 @@ describe("'getReplicationConfigurationXML' function", () => { it('should return XML from the bucket replication configuration', done => getAndCheckXML(getReplicationConfig(), done)); - it('should not return XML with StorageClass tag if `storageClass` ' + - 'property is omitted', done => { + it('should not return XML with StorageClass tag if `storageClass` ' + 'property is omitted', done => { const config = getReplicationConfig(); delete config.rules[0].storageClass; return getAndCheckXML(config, done); }); - it("should return XML with StorageClass tag set to 'Disabled' if " + - '`enabled` property is false', done => { + it("should return XML with StorageClass tag set to 'Disabled' if " + '`enabled` property is false', done => { const config = getReplicationConfig(); config.rules[0].enabled = false; return getAndCheckXML(config, done); }); - it('should return XML with a self-closing Prefix tag if `prefix` ' + - "property is ''", done => { + it('should return XML with a self-closing Prefix tag if `prefix` ' + "property is ''", done => { const config = getReplicationConfig(); config.rules[0].prefix = ''; return getAndCheckXML(config, done); }); - it('should return XML from the bucket replication configuration with ' + - 'multiple rules', done => { + it('should return XML from the bucket replication configuration with ' + 'multiple rules', done => { const config = getReplicationConfig(); config.rules.push({ id: 'test-id-2', diff --git a/tests/unit/api/bucketGetTagging.js b/tests/unit/api/bucketGetTagging.js index a1c5f502db..686d92d243 100644 --- a/tests/unit/api/bucketGetTagging.js +++ b/tests/unit/api/bucketGetTagging.js @@ -1,11 +1,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - TaggingConfigTester, -} = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, TaggingConfigTester } = require('../helpers'); const bucketPutTagging = require('../../../lib/api/bucketPutTagging'); const bucketGetTagging = require('../../../lib/api/bucketGetTagging'); const log = new DummyRequestLogger(); @@ -29,14 +25,11 @@ describe('getBucketTagging API', () => { it('should return tags resource', done => { const taggingUtil = new TaggingConfigTester(); - const testBucketPutTaggingRequest = taggingUtil - .createBucketTaggingRequest('PUT', bucketName); + const testBucketPutTaggingRequest = taggingUtil.createBucketTaggingRequest('PUT', bucketName); bucketPutTagging(authInfo, testBucketPutTaggingRequest, log, err => { assert.strictEqual(err, undefined); - const testBucketGetTaggingRequest = taggingUtil - .createBucketTaggingRequest('GET', bucketName); - return bucketGetTagging(authInfo, testBucketGetTaggingRequest, log, - (err, xml) => { + const testBucketGetTaggingRequest = taggingUtil.createBucketTaggingRequest('GET', bucketName); + return 
bucketGetTagging(authInfo, testBucketGetTaggingRequest, log, (err, xml) => { if (err) { process.stdout.write(`Err getting object tagging ${err}`); return done(err); @@ -49,19 +42,15 @@ describe('getBucketTagging API', () => { it('should return access denied if the authorization check fails', done => { const taggingUtil = new TaggingConfigTester(); - const testBucketPutTaggingRequest = taggingUtil - .createBucketTaggingRequest('PUT', bucketName); + const testBucketPutTaggingRequest = taggingUtil.createBucketTaggingRequest('PUT', bucketName); bucketPutTagging(authInfo, testBucketPutTaggingRequest, log, err => { assert.strictEqual(err, undefined); - const testBucketGetTaggingRequest = taggingUtil - .createBucketTaggingRequest('GET', bucketName, true); + const testBucketGetTaggingRequest = taggingUtil.createBucketTaggingRequest('GET', bucketName, true); const badAuthInfo = makeAuthInfo('accessKey2'); - return bucketGetTagging(badAuthInfo, testBucketGetTaggingRequest, log, - err => { + return bucketGetTagging(badAuthInfo, testBucketGetTaggingRequest, log, err => { assert.strictEqual(err.AccessDenied, true); return done(); }); }); }); }); - diff --git a/tests/unit/api/bucketGetWebsite.js b/tests/unit/api/bucketGetWebsite.js index 75caf14129..eb3c9fc999 100644 --- a/tests/unit/api/bucketGetWebsite.js +++ b/tests/unit/api/bucketGetWebsite.js @@ -3,10 +3,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutWebsite = require('../../../lib/api/bucketPutWebsite'); const bucketGetWebsite = require('../../../lib/api/bucketGetWebsite'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } -= require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const log = new DummyRequestLogger(); const authInfo = makeAuthInfo('accessKey1'); @@ -37,18 +34,18 @@ function _makeWebsiteRequest(xml) { const testGetWebsiteRequest = _makeWebsiteRequest(); function _comparePutGetXml(sampleXml, done) { - const fullXml = '' + - `${sampleXml}`; + const fullXml = + '' + + `${sampleXml}`; const testPutWebsiteRequest = _makeWebsiteRequest(fullXml); bucketPutWebsite(authInfo, testPutWebsiteRequest, log, err => { if (err) { process.stdout.write(`Err putting website config ${err}`); return done(err); } - return bucketGetWebsite(authInfo, testGetWebsiteRequest, log, - (err, res) => { + return bucketGetWebsite(authInfo, testGetWebsiteRequest, log, (err, res) => { assert.strictEqual(err, null, `Unexpected err ${err}`); assert.strictEqual(res, fullXml); done(); @@ -64,8 +61,7 @@ describe('getBucketWebsite API', () => { afterEach(() => cleanup()); it('should return same IndexDocument XML as uploaded', done => { - const sampleXml = - 'index.html'; + const sampleXml = 'index.html'; _comparePutGetXml(sampleXml, done); }); it('should return same ErrorDocument XML as uploaded', done => { diff --git a/tests/unit/api/bucketHead.js b/tests/unit/api/bucketHead.js index 3c006a549c..b2fa311b30 100644 --- a/tests/unit/api/bucketHead.js +++ b/tests/unit/api/bucketHead.js @@ -37,8 +37,7 @@ describe('bucketHead API', () => { }); }); - it('should return no error if bucket exists and user is authorized', - done => { + it('should return no error if bucket exists and user is authorized', done => { bucketPut(authInfo, testRequest, log, () => { bucketHead(authInfo, testRequest, log, err => { assert.strictEqual(err, null); diff --git a/tests/unit/api/bucketPolicyAuth.js b/tests/unit/api/bucketPolicyAuth.js index 6a6d654f65..b9f177f5bc 100644 --- 
a/tests/unit/api/bucketPolicyAuth.js +++ b/tests/unit/api/bucketPolicyAuth.js @@ -2,8 +2,11 @@ const assert = require('assert'); const { BucketInfo, BucketPolicy } = require('arsenal').models; const AuthInfo = require('arsenal').auth.AuthInfo; const constants = require('../../../constants'); -const { isBucketAuthorized, isObjAuthorized, validatePolicyResource } - = require('../../../lib/api/apiUtils/authorization/permissionChecks'); +const { + isBucketAuthorized, + isObjAuthorized, + validatePolicyResource, +} = require('../../../lib/api/apiUtils/authorization/permissionChecks'); const { DummyRequestLogger, makeAuthInfo } = require('../helpers'); const DummyRequest = require('../DummyRequest'); @@ -20,8 +23,12 @@ const altAcctCanonicalId = altAcctAuthInfo.getCanonicalID(); const accountId = authInfo.getShortid(); const altAcctId = altAcctAuthInfo.getShortid(); const creationDate = new Date().toJSON(); -const bucket = new BucketInfo('policyBucketAuthTester', bucketOwnerCanonicalId, - authInfo.getAccountDisplayName(), creationDate); +const bucket = new BucketInfo( + 'policyBucketAuthTester', + bucketOwnerCanonicalId, + authInfo.getAccountDisplayName(), + creationDate +); const object = { 'owner-id': objectOwnerCanonicalId }; const bucAction = 'bucketHead'; const objAction = 'objectPut'; @@ -90,8 +97,7 @@ const authTests = [ expected: true, }, { - name: 'should allow access if account id principal is contained in ' + - 'user arn of non-', + name: 'should allow access if account id principal is contained in ' + 'user arn of non-', bucketId: objectOwnerCanonicalId, bucketAuthInfo: user1AuthInfo, objectId: objectOwnerCanonicalId, @@ -103,8 +109,7 @@ const authTests = [ expected: true, }, { - name: 'should allow access if account id principal is contained in ' + - 'account arn of non-', + name: 'should allow access if account id principal is contained in ' + 'account arn of non-', bucketId: altAcctCanonicalId, bucketAuthInfo: altAcctAuthInfo, objectId: altAcctCanonicalId, @@ -116,8 +121,7 @@ const authTests = [ expected: true, }, { - name: 'should allow access if account arn principal is contained in ' + - 'user arn of non-', + name: 'should allow access if account arn principal is contained in ' + 'user arn of non-', bucketId: objectOwnerCanonicalId, bucketAuthInfo: user1AuthInfo, objectId: objectOwnerCanonicalId, @@ -129,8 +133,7 @@ const authTests = [ expected: true, }, { - name: 'should allow access even if user arn principal doesn\'t match ' + - 'user arn of user in account of ', + name: "should allow access even if user arn principal doesn't match " + 'user arn of user in account of ', bucketId: objectOwnerCanonicalId, bucketAuthInfo: user1AuthInfo, objectId: objectOwnerCanonicalId, @@ -142,8 +145,7 @@ const authTests = [ expected: true, }, { - name: 'should deny access if account arn principal doesn\'t match ' + - 'user arn of non-', + name: "should deny access if account arn principal doesn't match " + 'user arn of non-', bucketId: altAcctCanonicalId, bucketAuthInfo: altAcctUserAuthInfo, objectId: altAcctCanonicalId, @@ -155,8 +157,7 @@ const authTests = [ expected: false, }, { - name: 'should deny access if user arn principal doesn\'t match ' + - 'user arn of non-', + name: "should deny access if user arn principal doesn't match " + 'user arn of non-', bucketId: altAcctCanonicalId, bucketAuthInfo: altAcctUserAuthInfo, objectId: altAcctCanonicalId, @@ -168,7 +169,7 @@ const authTests = [ expected: false, }, { - name: 'should deny access if principal doesn\'t match non-', + name: "should 
deny access if principal doesn't match non-", bucketId: altAcctCanonicalId, bucketAuthInfo: altAcctAuthInfo, objectId: altAcctCanonicalId, @@ -180,8 +181,7 @@ const authTests = [ expected: false, }, { - name: 'should allow access if principal and action match policy for ' + - 'non-', + name: 'should allow access if principal and action match policy for ' + 'non-', bucketId: altAcctCanonicalId, bucketAuthInfo: altAcctAuthInfo, objectId: altAcctCanonicalId, @@ -193,8 +193,7 @@ const authTests = [ expected: true, }, { - name: 'should deny access if principal matches but action does not ' + - 'match policy for non-', + name: 'should deny access if principal matches but action does not ' + 'match policy for non-', bucketId: altAcctCanonicalId, bucketAuthInfo: altAcctAuthInfo, objectId: altAcctCanonicalId, @@ -253,8 +252,7 @@ const resourceTests = [ expected: false, }, { - name: 'false if policy resource is array and any elements do not ' + - 'match bucket arn', + name: 'false if policy resource is array and any elements do not ' + 'match bucket arn', rValue: [`arn:aws:s3:::${bucketName}`, 'arn:aws:s3:::nomatch'], expected: false, }, @@ -263,16 +261,13 @@ const resourceTests = [ describe('bucket policy authorization', () => { describe('isBucketAuthorized with no policy set', () => { it('should allow access to bucket owner', done => { - const allowed = isBucketAuthorized(bucket, 'bucketPut', - bucketOwnerCanonicalId, null, log); + const allowed = isBucketAuthorized(bucket, 'bucketPut', bucketOwnerCanonicalId, null, log); assert.equal(allowed, true); done(); }); - it('should deny access to non-bucket owner', - done => { - const allowed = isBucketAuthorized(bucket, 'bucketPut', - altAcctCanonicalId, null, log); + it('should deny access to non-bucket owner', done => { + const allowed = isBucketAuthorized(bucket, 'bucketPut', altAcctCanonicalId, null, log); assert.equal(allowed, false); done(); }); @@ -280,23 +275,18 @@ describe('bucket policy authorization', () => { describe('isBucketAuthorized with bucket policy set', () => { beforeEach(function beFn() { - this.currentTest.basePolicy = new BucketPolicy(JSON.stringify( - basePolicyObj)).getBucketPolicy(); + this.currentTest.basePolicy = new BucketPolicy(JSON.stringify(basePolicyObj)).getBucketPolicy(); bucket.setBucketPolicy(this.currentTest.basePolicy); }); - it('should allow access to non-bucket owner if principal is set to "*"', - done => { - const allowed = isBucketAuthorized(bucket, bucAction, - altAcctCanonicalId, null, log); + it('should allow access to non-bucket owner if principal is set to "*"', done => { + const allowed = isBucketAuthorized(bucket, bucAction, altAcctCanonicalId, null, log); assert.equal(allowed, true); done(); }); - it('should allow access to public user if principal is set to "*"', - done => { - const allowed = isBucketAuthorized(bucket, bucAction, - constants.publicId, publicUserAuthInfo, log); + it('should allow access to public user if principal is set to "*"', done => { + const allowed = isBucketAuthorized(bucket, bucAction, constants.publicId, publicUserAuthInfo, log); assert.equal(allowed, true); done(); }); @@ -305,8 +295,7 @@ describe('bucket policy authorization', () => { const newPolicy = this.test.basePolicy; newPolicy.Statement[0].Principal = { AWS: authInfo.getArn() }; bucket.setBucketPolicy(newPolicy); - const allowed = isBucketAuthorized(bucket, bucAction, - constants.publicId, publicUserAuthInfo, log); + const allowed = isBucketAuthorized(bucket, bucAction, constants.publicId, publicUserAuthInfo, log); 
assert.equal(allowed, false); done(); }); @@ -316,33 +305,31 @@ describe('bucket policy authorization', () => { const newPolicy = this.test.basePolicy; newPolicy.Statement[0][t.keyToChange] = t.bucketValue; bucket.setBucketPolicy(newPolicy); - const allowed = isBucketAuthorized(bucket, bucAction, - t.bucketId, t.bucketAuthInfo, log); + const allowed = isBucketAuthorized(bucket, bucAction, t.bucketId, t.bucketAuthInfo, log); assert.equal(allowed, t.expected); done(); }); }); - it('should deny access to non-bucket owner if two statements apply ' + - 'to principal but one denies access', function itFn(done) { - const newPolicy = this.test.basePolicy; - newPolicy.Statement[1] = { - Effect: 'Deny', - Principal: { CanonicalUser: [altAcctCanonicalId] }, - Resource: `arn:aws:s3:::${bucket.getName()}`, - Action: 's3:*', - }; - bucket.setBucketPolicy(newPolicy); - const allowed = isBucketAuthorized(bucket, bucAction, - altAcctCanonicalId, null, log); - assert.equal(allowed, false); - done(); - }); + it( + 'should deny access to non-bucket owner if two statements apply ' + 'to principal but one denies access', + function itFn(done) { + const newPolicy = this.test.basePolicy; + newPolicy.Statement[1] = { + Effect: 'Deny', + Principal: { CanonicalUser: [altAcctCanonicalId] }, + Resource: `arn:aws:s3:::${bucket.getName()}`, + Action: 's3:*', + }; + bucket.setBucketPolicy(newPolicy); + const allowed = isBucketAuthorized(bucket, bucAction, altAcctCanonicalId, null, log); + assert.equal(allowed, false); + done(); + } + ); - it('should deny access to non-bucket owner with an unsupported action type', - done => { - const allowed = isBucketAuthorized(bucket, 'unsupportedAction', - altAcctCanonicalId, null, log); + it('should deny access to non-bucket owner with an unsupported action type', done => { + const allowed = isBucketAuthorized(bucket, 'unsupportedAction', altAcctCanonicalId, null, log); assert.equal(allowed, false); done(); }); @@ -365,14 +352,12 @@ describe('bucket policy authorization', () => { bucket.setBucketPolicy(newPolicy); // Check that the policy denies access, as expected - assert.ok(!isBucketAuthorized(bucket, bucAction, - bucketOwnerCanonicalId, user1AuthInfo, log, request)); + assert.ok(!isBucketAuthorized(bucket, bucAction, bucketOwnerCanonicalId, user1AuthInfo, log, request)); // But with bypassUserBucketPolicies set to true, it should still be authorized // based on ACL permissions (which we mock to return true) request.bypassUserBucketPolicies = true; - assert.ok(isBucketAuthorized(bucket, bucAction, - bucketOwnerCanonicalId, user1AuthInfo, log, request)); + assert.ok(isBucketAuthorized(bucket, bucAction, bucketOwnerCanonicalId, user1AuthInfo, log, request)); }); }); @@ -382,16 +367,13 @@ describe('bucket policy authorization', () => { }); it('should allow access to object owner', done => { - const allowed = isObjAuthorized(bucket, object, objAction, - objectOwnerCanonicalId, null, log); + const allowed = isObjAuthorized(bucket, object, objAction, objectOwnerCanonicalId, null, log); assert.equal(allowed, true); done(); }); - it('should deny access to non-object owner', - done => { - const allowed = isObjAuthorized(bucket, object, objAction, - altAcctCanonicalId, null, log); + it('should deny access to non-object owner', done => { + const allowed = isObjAuthorized(bucket, object, objAction, altAcctCanonicalId, null, log); assert.equal(allowed, false); done(); }); @@ -400,25 +382,19 @@ describe('bucket policy authorization', () => { describe('isObjAuthorized with bucket policy set', () 
=> { beforeEach(function beFn() { const newPolicyObj = basePolicyObj; - newPolicyObj.Statement.Resource = - `arn:aws:s3:::${bucket.getName()}/*`; - this.currentTest.basePolicy = new BucketPolicy(JSON.stringify( - newPolicyObj)).getBucketPolicy(); + newPolicyObj.Statement.Resource = `arn:aws:s3:::${bucket.getName()}/*`; + this.currentTest.basePolicy = new BucketPolicy(JSON.stringify(newPolicyObj)).getBucketPolicy(); bucket.setBucketPolicy(this.currentTest.basePolicy); }); - it('should allow access to non-object owner if principal is set to "*"', - done => { - const allowed = isObjAuthorized(bucket, object, objAction, - altAcctCanonicalId, null, log); + it('should allow access to non-object owner if principal is set to "*"', done => { + const allowed = isObjAuthorized(bucket, object, objAction, altAcctCanonicalId, null, log); assert.equal(allowed, true); done(); }); - it('should allow access to public user if principal is set to "*"', - done => { - const allowed = isObjAuthorized(bucket, object, objAction, - constants.publicId, publicUserAuthInfo, log); + it('should allow access to public user if principal is set to "*"', done => { + const allowed = isObjAuthorized(bucket, object, objAction, constants.publicId, publicUserAuthInfo, log); assert.equal(allowed, true); done(); }); @@ -428,30 +404,34 @@ describe('bucket policy authorization', () => { const newPolicy = this.test.basePolicy; newPolicy.Statement[0][t.keyToChange] = t.objectValue; bucket.setBucketPolicy(newPolicy); - const allowed = isObjAuthorized(bucket, object, objAction, - t.objectId, t.objectAuthInfo, log, null, t.impDenies); + const allowed = isObjAuthorized( + bucket, + object, + objAction, + t.objectId, + t.objectAuthInfo, + log, + null, + t.impDenies + ); assert.equal(allowed, t.expected); done(); }); }); - it('should allow access to non-object owner for objectHead action with s3:GetObject permission', - function itFn(done) { + it('should allow access to non-object owner for objectHead action with s3:GetObject permission', function itFn(done) { const newPolicy = this.test.basePolicy; newPolicy.Statement[0].Action = ['s3:GetObject']; bucket.setBucketPolicy(newPolicy); - const allowed = isObjAuthorized(bucket, object, 'objectHead', - altAcctCanonicalId, altAcctAuthInfo, log); + const allowed = isObjAuthorized(bucket, object, 'objectHead', altAcctCanonicalId, altAcctAuthInfo, log); assert.equal(allowed, true); done(); }); - it('should deny access to non-object owner for objectHead action without s3:GetObject permission', - function itFn(done) { + it('should deny access to non-object owner for objectHead action without s3:GetObject permission', function itFn(done) { const newPolicy = this.test.basePolicy; newPolicy.Statement[0].Action = ['s3:PutObject']; bucket.setBucketPolicy(newPolicy); - const allowed = isObjAuthorized(bucket, object, 'objectHead', - altAcctCanonicalId, altAcctAuthInfo, log); + const allowed = isObjAuthorized(bucket, object, 'objectHead', altAcctCanonicalId, altAcctAuthInfo, log); assert.equal(allowed, false); done(); }); @@ -474,36 +454,37 @@ describe('bucket policy authorization', () => { bucket.setBucketPolicy(newPolicy); // Check that the policy denies access, as expected - assert.ok(!isObjAuthorized(bucket, object, 'objectGet', - bucketOwnerCanonicalId, user1AuthInfo, log, request)); + assert.ok( + !isObjAuthorized(bucket, object, 'objectGet', bucketOwnerCanonicalId, user1AuthInfo, log, request) + ); // But with bypassUserBucketPolicies set to true, it should still be authorized // based on ACL 
permissions (which we mock to return true) request.bypassUserBucketPolicies = true; - assert.ok(isObjAuthorized(bucket, object, 'objectGet', - bucketOwnerCanonicalId, user1AuthInfo, log, request)); + assert.ok( + isObjAuthorized(bucket, object, 'objectGet', bucketOwnerCanonicalId, user1AuthInfo, log, request) + ); }); - it('should deny access to non-object owner if two statements apply ' + - 'to principal but one denies access', function itFn(done) { - const newPolicy = this.test.basePolicy; - newPolicy.Statement[1] = { - Effect: 'Deny', - Principal: { CanonicalUser: [altAcctCanonicalId] }, - Resource: `arn:aws:s3:::${bucket.getName()}/*`, - Action: 's3:*', - }; - bucket.setBucketPolicy(newPolicy); - const allowed = isObjAuthorized(bucket, object, objAction, - altAcctCanonicalId, null, log); - assert.equal(allowed, false); - done(); - }); + it( + 'should deny access to non-object owner if two statements apply ' + 'to principal but one denies access', + function itFn(done) { + const newPolicy = this.test.basePolicy; + newPolicy.Statement[1] = { + Effect: 'Deny', + Principal: { CanonicalUser: [altAcctCanonicalId] }, + Resource: `arn:aws:s3:::${bucket.getName()}/*`, + Action: 's3:*', + }; + bucket.setBucketPolicy(newPolicy); + const allowed = isObjAuthorized(bucket, object, objAction, altAcctCanonicalId, null, log); + assert.equal(allowed, false); + done(); + } + ); - it('should deny access to non-object owner with an unsupported action type', - done => { - const allowed = isObjAuthorized(bucket, object, 'unsupportedAction', - altAcctCanonicalId, null, log); + it('should deny access to non-object owner with an unsupported action type', done => { + const allowed = isObjAuthorized(bucket, object, 'unsupportedAction', altAcctCanonicalId, null, log); assert.equal(allowed, false); done(); }); @@ -519,8 +500,16 @@ describe('bucket policy authorization', () => { bucket.setBucketPolicy(newPolicy); const results = requestTypes.map(type => { - const allowed = isObjAuthorized(bucket, object, type, - altAcctCanonicalId, altAcctAuthInfo, log, null, impDenies); + const allowed = isObjAuthorized( + bucket, + object, + type, + altAcctCanonicalId, + altAcctAuthInfo, + log, + null, + impDenies + ); return allowed; }); assert.deepStrictEqual(results, [true, true]); @@ -543,8 +532,16 @@ describe('bucket policy authorization', () => { bucket.setBucketPolicy(newPolicy); const results = requestTypes.map(type => { - const allowed = isObjAuthorized(bucket, object, type, - altAcctCanonicalId, altAcctAuthInfo, log, null, impDenies); + const allowed = isObjAuthorized( + bucket, + object, + type, + altAcctCanonicalId, + altAcctAuthInfo, + log, + null, + impDenies + ); return allowed; }); assert.deepStrictEqual(results, [false, false]); @@ -557,14 +554,12 @@ describe('bucket policy authorization', () => { const newPolicy = basePolicyObj; newPolicy.Statement.Resource = t.rValue; newPolicy.Statement = [newPolicy.Statement]; - assert.equal( - validatePolicyResource(bucketName, newPolicy), t.expected); + assert.equal(validatePolicyResource(bucketName, newPolicy), t.expected); done(); }); }); - it('should return false if any statement resource does not match ' + - 'bucket arn', done => { + it('should return false if any statement resource does not match ' + 'bucket arn', done => { const newPolicy = basePolicyObj; newPolicy.Statement = [newPolicy.Statement]; newPolicy.Statement[1] = basePolicyObj.Statement; @@ -584,23 +579,15 @@ describe('bucket policy authorization', () => { CanonicalUser: [altAcctCanonicalId], }, Action: 
's3:*', - Resource: [ - `arn:aws:s3:::${bucket.getName()}`, - `arn:aws:s3:::${bucket.getName()}/*`, - ], + Resource: [`arn:aws:s3:::${bucket.getName()}`, `arn:aws:s3:::${bucket.getName()}/*`], }, { Effect: 'Deny', Principal: { CanonicalUser: [altAcctCanonicalId], }, - Action: [ - 's3:PutObjectRetention', - ], - Resource: [ - `arn:aws:s3:::${bucket.getName()}`, - `arn:aws:s3:::${bucket.getName()}/*`, - ], + Action: ['s3:PutObjectRetention'], + Resource: [`arn:aws:s3:::${bucket.getName()}`, `arn:aws:s3:::${bucket.getName()}/*`], Condition: { NumericGreaterThan: { 's3:object-lock-remaining-retention-days': 10, @@ -619,10 +606,7 @@ describe('bucket policy authorization', () => { CanonicalUser: [altAcctCanonicalId], }, Action: 's3:*', - Resource: [ - `arn:aws:s3:::${bucket.getName()}`, - `arn:aws:s3:::${bucket.getName()}/*`, - ], + Resource: [`arn:aws:s3:::${bucket.getName()}`, `arn:aws:s3:::${bucket.getName()}/*`], Condition: { IpAddress: { 'aws:SourceIp': '123.123.123.123', @@ -685,11 +669,17 @@ describe('bucket policy authorization', () => { requestParams[t.requestConditionKey] = t.conditionValue; const request = new DummyRequest(requestParams); - const results = isObjAuthorized(bucket, object, t.requestType, - altAcctCanonicalId, altAcctAuthInfo, log, request); + const results = isObjAuthorized( + bucket, + object, + t.requestType, + altAcctCanonicalId, + altAcctAuthInfo, + log, + request + ); assert.strictEqual(results, t.expectedVerdict); - } - ); + }); }); }); }); diff --git a/tests/unit/api/bucketPut.js b/tests/unit/api/bucketPut.js index 7583396a10..0710cf39f8 100644 --- a/tests/unit/api/bucketPut.js +++ b/tests/unit/api/bucketPut.js @@ -87,27 +87,27 @@ describe('checkLocationConstraint function', () => { config.backends.data = initialConfigData; }); testChecks.forEach(testCheck => { - const returnText = testCheck.isError ? `${testCheck.expectedError} error` - : 'the appropriate location constraint'; - it(`with data backend: "${testCheck.data}", ` + - `location: "${testCheck.locationSent}",` + - ` and host: "${testCheck.parsedHost}", should return ${returnText} `, - done => { - config.backends.data = testCheck.data; - request.parsedHost = testCheck.parsedHost; - const checkLocation = checkLocationConstraint(request, - testCheck.locationSent, log); - if (testCheck.isError) { - assert.notEqual(checkLocation.error, null, - 'Expected failure but got success'); - assert(checkLocation.error.is[testCheck.expectedError]); - } else { - assert.ifError(checkLocation.error); - assert.strictEqual(checkLocation.locationConstraint, - testCheck.locationReturn); + const returnText = testCheck.isError + ? 
`${testCheck.expectedError} error` + : 'the appropriate location constraint'; + it( + `with data backend: "${testCheck.data}", ` + + `location: "${testCheck.locationSent}",` + + ` and host: "${testCheck.parsedHost}", should return ${returnText} `, + done => { + config.backends.data = testCheck.data; + request.parsedHost = testCheck.parsedHost; + const checkLocation = checkLocationConstraint(request, testCheck.locationSent, log); + if (testCheck.isError) { + assert.notEqual(checkLocation.error, null, 'Expected failure but got success'); + assert(checkLocation.error.is[testCheck.expectedError]); + } else { + assert.ifError(checkLocation.error); + assert.strictEqual(checkLocation.locationConstraint, testCheck.locationReturn); + } + done(); } - done(); - }); + ); }); }); @@ -119,11 +119,10 @@ describe('bucketPut API', () => { it('should return an error if bucket already exists', done => { const otherAuthInfo = makeAuthInfo('accessKey2'); bucketPut(authInfo, testRequest, log, () => { - bucketPut(otherAuthInfo, testRequest, - log, err => { - assert.strictEqual(err.is.BucketAlreadyExists, true); - done(); - }); + bucketPut(otherAuthInfo, testRequest, log, err => { + assert.strictEqual(err.is.BucketAlreadyExists, true); + done(); + }); }); }); @@ -136,12 +135,10 @@ describe('bucketPut API', () => { assert.strictEqual(md.getName(), bucketName); assert.strictEqual(md.getOwner(), canonicalID); const prefix = `${canonicalID}${splitter}`; - metadata.listObject(usersBucket, { prefix }, - log, (err, listResponse) => { - assert.strictEqual(listResponse.Contents[0].key, - `${canonicalID}${splitter}${bucketName}`); - done(); - }); + metadata.listObject(usersBucket, { prefix }, log, (err, listResponse) => { + assert.strictEqual(listResponse.Contents[0].key, `${canonicalID}${splitter}${bucketName}`); + done(); + }); }); }); }); @@ -152,7 +149,7 @@ describe('bucketPut API', () => { url: '/', post: '', headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-bucket-object-lock-enabled': `${status}`, }, }); @@ -189,16 +186,13 @@ describe('bucketPut API', () => { }); }); - it('should return an error if ACL set in header ' + - 'with an invalid group URI', done => { + it('should return an error if ACL set in header ' + 'with an invalid group URI', done => { const testRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-grant-full-control': - 'uri="http://acs.amazonaws.com/groups/' + - 'global/NOTAVALIDGROUP"', + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-grant-full-control': 'uri="http://acs.amazonaws.com/groups/' + 'global/NOTAVALIDGROUP"', }, url: '/', post: '', @@ -212,13 +206,12 @@ describe('bucketPut API', () => { }); }); - it('should return an error if ACL set in header ' + - 'with an invalid canned ACL', done => { + it('should return an error if ACL set in header ' + 'with an invalid canned ACL', done => { const testRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'not-valid-option', }, url: '/', @@ -233,15 +226,13 @@ describe('bucketPut API', () => { }); }); - it('should return an error if ACL set in header ' + - 'with an invalid email address', done => { + it('should return an error if ACL set in header ' + 'with an invalid email address', done => { const testRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-grant-read': - 'emailaddress="fake@faking.com"', + host: 
`${bucketName}.s3.amazonaws.com`, + 'x-amz-grant-read': 'emailaddress="fake@faking.com"', }, url: '/', post: '', @@ -255,15 +246,13 @@ describe('bucketPut API', () => { }); }); - it('should set a canned ACL while creating bucket' + - ' if option set out in header', done => { + it('should set a canned ACL while creating bucket' + ' if option set out in header', done => { const testRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-acl': - 'public-read', + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-acl': 'public-read', }, url: '/', post: '', @@ -278,45 +267,33 @@ describe('bucketPut API', () => { }); }); - it('should set specific ACL grants while creating bucket' + - ' if options set out in header', done => { + it('should set specific ACL grants while creating bucket' + ' if options set out in header', done => { const testRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-grant-full-control': - 'emailaddress="sampleaccount1@sampling.com"' + - ',emailaddress="sampleaccount2@sampling.com"', + 'emailaddress="sampleaccount1@sampling.com"' + ',emailaddress="sampleaccount2@sampling.com"', 'x-amz-grant-read': `uri=${constants.logId}`, 'x-amz-grant-write': `uri=${constants.publicId}`, - 'x-amz-grant-read-acp': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + - 'f8f8d5218e7cd47ef2be', - 'x-amz-grant-write-acp': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + - 'f8f8d5218e7cd47ef2bf', + 'x-amz-grant-read-acp': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + 'f8f8d5218e7cd47ef2be', + 'x-amz-grant-write-acp': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + 'f8f8d5218e7cd47ef2bf', }, url: '/', post: '', }; - const canonicalIDforSample1 = - '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be'; - const canonicalIDforSample2 = - '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf'; + const canonicalIDforSample1 = '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be'; + const canonicalIDforSample2 = '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf'; bucketPut(authInfo, testRequest, log, err => { assert.strictEqual(err, null, 'Error creating bucket'); metadata.getBucket(bucketName, log, (err, md) => { assert.strictEqual(md.getAcl().READ[0], constants.logId); assert.strictEqual(md.getAcl().WRITE[0], constants.publicId); - assert(md.getAcl() - .FULL_CONTROL.indexOf(canonicalIDforSample1) > -1); - assert(md.getAcl() - .FULL_CONTROL.indexOf(canonicalIDforSample2) > -1); - assert(md.getAcl() - .READ_ACP.indexOf(canonicalIDforSample1) > -1); - assert(md.getAcl() - .WRITE_ACP.indexOf(canonicalIDforSample2) > -1); + assert(md.getAcl().FULL_CONTROL.indexOf(canonicalIDforSample1) > -1); + assert(md.getAcl().FULL_CONTROL.indexOf(canonicalIDforSample2) > -1); + assert(md.getAcl().READ_ACP.indexOf(canonicalIDforSample1) > -1); + assert(md.getAcl().WRITE_ACP.indexOf(canonicalIDforSample2) > -1); done(); }); }); @@ -348,8 +325,7 @@ describe('bucketPut API', () => { assert.deepStrictEqual(err, null); metadata.getBucket(bucketName, log, (err, bucketInfo) => { assert.deepStrictEqual(err, null); - assert.deepStrictEqual(newLocation, - bucketInfo.getLocationConstraint()); + assert.deepStrictEqual(newLocation, bucketInfo.getLocationConstraint()); done(); }); }); @@ -393,21 +369,25 @@ describe('bucketPut API', () => { const newLCs = Object.assign({}, config.locationConstraints, newLC); const req = Object.assign({}, 
testRequest, { bucketName, - post: '' + + post: + '' + '' + - `${newLCKey}` + + `${newLCKey}` + '', }); afterEach(() => config.setLocationConstraints(originalLCs)); - it('should return error if location constraint config is not updated', - done => bucketPut(authInfo, req, log, err => { + it('should return error if location constraint config is not updated', done => + bucketPut(authInfo, req, log, err => { assert.strictEqual(err.is.InvalidLocationConstraint, true); - assert.strictEqual(err.description, 'value of the location you are ' + - `attempting to set - ${newLCKey} - is not listed in the ` + - 'locationConstraint config'); + assert.strictEqual( + err.description, + 'value of the location you are ' + + `attempting to set - ${newLCKey} - is not listed in the ` + + 'locationConstraint config' + ); done(); })); @@ -438,12 +418,7 @@ describe('bucketPut API', () => { { description: 'many allowed auth', error: undefined, - results: [ - { isAllowed: true }, - { isAllowed: true }, - { isAllowed: true }, - { isAllowed: true }, - ], + results: [{ isAllowed: true }, { isAllowed: true }, { isAllowed: true }, { isAllowed: true }], calledWith: [null, constraint], }, { @@ -477,20 +452,17 @@ describe('bucketPut API', () => { { description: 'one not allowed auth of many', error: undefined, - results: [ - { isAllowed: true }, - { isAllowed: true }, - { isAllowed: false }, - { isAllowed: true }, - ], + results: [{ isAllowed: true }, { isAllowed: true }, { isAllowed: false }, { isAllowed: true }], calledWith: [errors.AccessDenied], }, - ].forEach(tc => it(tc.description, () => { - const cb = sinon.fake(); - const handler = _handleAuthResults(constraint, log, cb); - handler(tc.error, tc.results); - assert.deepStrictEqual(cb.getCalls()[0].args, tc.calledWith); - })); + ].forEach(tc => + it(tc.description, () => { + const cb = sinon.fake(); + const handler = _handleAuthResults(constraint, log, cb); + handler(tc.error, tc.results); + assert.deepStrictEqual(cb.getCalls()[0].args, tc.calledWith); + }) + ); }); }); @@ -510,7 +482,7 @@ describe('bucketPut API with bucket-level encryption', () => { const testRequestWithEncryption = { ...testRequest, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-scal-server-side-encryption': 'AES256', }, }; @@ -534,7 +506,7 @@ describe('bucketPut API with bucket-level encryption', () => { const testRequestWithEncryption = { ...testRequest, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-scal-server-side-encryption': 'aws:kms', }, }; @@ -559,7 +531,7 @@ describe('bucketPut API with bucket-level encryption', () => { const testRequestWithEncryption = { ...testRequest, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-scal-server-side-encryption': 'aws:kms', 'x-amz-scal-server-side-encryption-aws-kms-key-id': keyId, }, @@ -588,7 +560,7 @@ describe('bucketPut API with bucket-level encryption', () => { const testRequestWithEncryption = { ...testRequest, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-scal-server-side-encryption': 'AES256', 'x-amz-scal-server-side-encryption-aws-kms-key-id': keyId, }, @@ -618,7 +590,7 @@ describe('bucketPut API with account level encryption', () => { const testRequestWithEncryption = { ...testRequest, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-scal-server-side-encryption': 
'AES256', }, }; @@ -643,7 +615,7 @@ describe('bucketPut API with account level encryption', () => { const testRequestWithEncryption = { ...testRequest, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-scal-server-side-encryption': 'aws:kms', }, }; @@ -669,7 +641,7 @@ describe('bucketPut API with account level encryption', () => { const testRequestWithEncryption = { ...testRequest, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-scal-server-side-encryption': 'aws:kms', 'x-amz-scal-server-side-encryption-aws-kms-key-id': keyId, }, @@ -705,7 +677,7 @@ describe('bucketPut API with failed encryption service', () => { const testRequestWithEncryption = { ...testRequest, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-scal-server-side-encryption': 'AES256', }, }; @@ -719,8 +691,9 @@ describe('bucketPut API with failed encryption service', () => { describe('bucketPut API with failed vault service', () => { beforeEach(() => { sinon.stub(inMemory, 'supportsDefaultKeyPerAccount').value(true); - sinon.stub(vault, 'getOrCreateEncryptionKeyId').callsFake((accountCanonicalId, log, cb) => - cb(errors.ServiceFailure)); + sinon + .stub(vault, 'getOrCreateEncryptionKeyId') + .callsFake((accountCanonicalId, log, cb) => cb(errors.ServiceFailure)); }); afterEach(() => { @@ -732,7 +705,7 @@ describe('bucketPut API with failed vault service', () => { const testRequestWithEncryption = { ...testRequest, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-scal-server-side-encryption': 'AES256', }, }; diff --git a/tests/unit/api/bucketPutACL.js b/tests/unit/api/bucketPutACL.js index dfc507c2bd..a6c8971ea0 100644 --- a/tests/unit/api/bucketPutACL.js +++ b/tests/unit/api/bucketPutACL.js @@ -18,21 +18,15 @@ const testBucketPutRequest = { headers: { host: `${bucketName}.s3.amazonaws.com` }, url: '/', }; -const canonicalIDforSample1 = - '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be'; -const canonicalIDforSample2 = - '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf'; +const canonicalIDforSample1 = '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be'; +const canonicalIDforSample2 = '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf'; const invalidIds = { 'too short': 'id="invalid_id"', - 'too long': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2aaa', - 'only numbers': - 'id=0000000000000000000000000000000000000000000000000000000000000000', - 'only letters': - 'id=abcdefabcdefabcdefabcdefabcdefacbdefabcdefabcdefabcdefabcdefabcd', - 'non-hex letters': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2ZZ', + 'too long': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2aaa', + 'only numbers': 'id=0000000000000000000000000000000000000000000000000000000000000000', + 'only letters': 'id=abcdefabcdefabcdefabcdefabcdefacbdefabcdefabcdefabcdefabcdefabcd', + 'non-hex letters': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2ZZ', }; describe('putBucketACL API', () => { @@ -66,7 +60,7 @@ describe('putBucketACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'not-a-valid-option', }, url: '/?acl', @@ -85,7 +79,7 @@ describe('putBucketACL API', () => { bucketName, namespace, headers: { - 
'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'public-read-write', }, url: '/?acl', @@ -101,13 +95,12 @@ describe('putBucketACL API', () => { }); }); - it('should set a canned public-read ACL followed by ' - + 'a canned authenticated-read ACL', done => { + it('should set a canned public-read ACL followed by ' + 'a canned authenticated-read ACL', done => { const testACLRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'public-read', }, url: '/?acl', @@ -118,7 +111,7 @@ describe('putBucketACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'authenticated-read', }, url: '/?acl', @@ -132,8 +125,7 @@ describe('putBucketACL API', () => { bucketPutACL(authInfo, testACLRequest2, log, err => { assert.strictEqual(err, undefined); metadata.getBucket(bucketName, log, (err, md) => { - assert.strictEqual(md.getAcl().Canned, - 'authenticated-read'); + assert.strictEqual(md.getAcl().Canned, 'authenticated-read'); done(); }); }); @@ -141,13 +133,12 @@ describe('putBucketACL API', () => { }); }); - it('should set a canned private ACL ' + - 'followed by a log-delivery-write ACL', done => { + it('should set a canned private ACL ' + 'followed by a log-delivery-write ACL', done => { const testACLRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'private', }, url: '/?acl', @@ -158,7 +149,7 @@ describe('putBucketACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'log-delivery-write', }, url: '/?acl', @@ -173,8 +164,7 @@ describe('putBucketACL API', () => { bucketPutACL(authInfo, testACLRequest2, log, err => { assert.strictEqual(err, undefined); metadata.getBucket(bucketName, log, (err, md) => { - assert.strictEqual(md.getAcl().Canned, - 'log-delivery-write'); + assert.strictEqual(md.getAcl().Canned, 'log-delivery-write'); done(); }); }); @@ -187,18 +177,13 @@ describe('putBucketACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-grant-full-control': - 'emailaddress="sampleaccount1@sampling.com"' + - ',emailaddress="sampleaccount2@sampling.com"', + 'emailaddress="sampleaccount1@sampling.com"' + ',emailaddress="sampleaccount2@sampling.com"', 'x-amz-grant-read': `uri=${constants.logId}`, 'x-amz-grant-write': `uri=${constants.publicId}`, - 'x-amz-grant-read-acp': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + - 'f8f8d5218e7cd47ef2be', - 'x-amz-grant-write-acp': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + - 'f8f8d5218e7cd47ef2bf', + 'x-amz-grant-read-acp': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + 'f8f8d5218e7cd47ef2be', + 'x-amz-grant-write-acp': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + 'f8f8d5218e7cd47ef2bf', }, url: '/?acl', query: { acl: '' }, @@ -208,95 +193,77 @@ describe('putBucketACL API', () => { assert.strictEqual(err, undefined); metadata.getBucket(bucketName, log, (err, md) => { assert.strictEqual(md.getAcl().WRITE[0], constants.publicId); - assert(md.getAcl().FULL_CONTROL - .indexOf(canonicalIDforSample1) > -1); - assert(md.getAcl().FULL_CONTROL - .indexOf(canonicalIDforSample2) > -1); - assert(md.getAcl().READ_ACP - 
.indexOf(canonicalIDforSample1) > -1); - assert(md.getAcl().WRITE_ACP - .indexOf(canonicalIDforSample2) > -1); + assert(md.getAcl().FULL_CONTROL.indexOf(canonicalIDforSample1) > -1); + assert(md.getAcl().FULL_CONTROL.indexOf(canonicalIDforSample2) > -1); + assert(md.getAcl().READ_ACP.indexOf(canonicalIDforSample1) > -1); + assert(md.getAcl().WRITE_ACP.indexOf(canonicalIDforSample2) > -1); done(); }); }); }); - it('should set all ACLs sharing the same email in request headers', - done => { - const testACLRequest = { - bucketName, - namespace, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-grant-full-control': - 'emailaddress="sampleaccount1@sampling.com"' + - ',emailaddress="sampleaccount2@sampling.com"', - 'x-amz-grant-read': - 'emailaddress="sampleaccount1@sampling.com"', - 'x-amz-grant-write': - 'emailaddress="sampleaccount1@sampling.com"', - 'x-amz-grant-read-acp': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + - 'f8f8d5218e7cd47ef2be', - 'x-amz-grant-write-acp': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + - 'f8f8d5218e7cd47ef2bf', - }, - url: '/?acl', - query: { acl: '' }, - actionImplicitDenies: false, - }; - bucketPutACL(authInfo, testACLRequest, log, err => { - assert.strictEqual(err, undefined); - metadata.getBucket(bucketName, log, (err, md) => { - assert(md.getAcl().WRITE.indexOf(canonicalIDforSample1) - > -1); - assert(md.getAcl().READ.indexOf(canonicalIDforSample1) - > -1); - assert(md.getAcl().FULL_CONTROL - .indexOf(canonicalIDforSample1) > -1); - assert(md.getAcl().FULL_CONTROL - .indexOf(canonicalIDforSample2) > -1); - assert(md.getAcl().READ_ACP - .indexOf(canonicalIDforSample1) > -1); - assert(md.getAcl().WRITE_ACP - .indexOf(canonicalIDforSample2) > -1); - done(); - }); + it('should set all ACLs sharing the same email in request headers', done => { + const testACLRequest = { + bucketName, + namespace, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-grant-full-control': + 'emailaddress="sampleaccount1@sampling.com"' + ',emailaddress="sampleaccount2@sampling.com"', + 'x-amz-grant-read': 'emailaddress="sampleaccount1@sampling.com"', + 'x-amz-grant-write': 'emailaddress="sampleaccount1@sampling.com"', + 'x-amz-grant-read-acp': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + 'f8f8d5218e7cd47ef2be', + 'x-amz-grant-write-acp': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + 'f8f8d5218e7cd47ef2bf', + }, + url: '/?acl', + query: { acl: '' }, + actionImplicitDenies: false, + }; + bucketPutACL(authInfo, testACLRequest, log, err => { + assert.strictEqual(err, undefined); + metadata.getBucket(bucketName, log, (err, md) => { + assert(md.getAcl().WRITE.indexOf(canonicalIDforSample1) > -1); + assert(md.getAcl().READ.indexOf(canonicalIDforSample1) > -1); + assert(md.getAcl().FULL_CONTROL.indexOf(canonicalIDforSample1) > -1); + assert(md.getAcl().FULL_CONTROL.indexOf(canonicalIDforSample2) > -1); + assert(md.getAcl().READ_ACP.indexOf(canonicalIDforSample1) > -1); + assert(md.getAcl().WRITE_ACP.indexOf(canonicalIDforSample2) > -1); + done(); }); }); + }); Object.keys(invalidIds).forEach(idType => { - it('should return an error if grantee canonical ID provided in ACL ' + - `request invalid because ${idType}`, done => { - const testACLRequest = { - bucketName, - namespace, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-grant-full-control': invalidIds[idType], - }, - url: '/?acl', - query: { acl: '' }, - actionImplicitDenies: false, - }; - return bucketPutACL(authInfo, testACLRequest, log, err => { - 
assert.strictEqual(err.is.InvalidArgument, true); - done(); - }); - }); + it( + 'should return an error if grantee canonical ID provided in ACL ' + `request invalid because ${idType}`, + done => { + const testACLRequest = { + bucketName, + namespace, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-grant-full-control': invalidIds[idType], + }, + url: '/?acl', + query: { acl: '' }, + actionImplicitDenies: false, + }; + return bucketPutACL(authInfo, testACLRequest, log, err => { + assert.strictEqual(err.is.InvalidArgument, true); + done(); + }); + } + ); }); - it('should return an error if invalid email ' + - 'provided in ACL header request', done => { + it('should return an error if invalid email ' + 'provided in ACL header request', done => { const testACLRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-grant-full-control': - 'emailaddress="sampleaccount1@sampling.com"' + - ',emailaddress="nonexistentEmail@sampling.com"', + 'emailaddress="sampleaccount1@sampling.com"' + ',emailaddress="nonexistentEmail@sampling.com"', }, url: '/?acl', query: { acl: '' }, @@ -314,49 +281,50 @@ describe('putBucketACL API', () => { bucketName, namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, - post: '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - '' + - '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - 'FULL_CONTROL' + - '' + - '' + - '' + - `${constants.publicId}` + - '' + - 'READ' + - '' + - '' + - '' + - `${constants.logId}` + - '' + - 'WRITE' + - '' + - '' + - '' + - 'sampleaccount1@sampling.com' + - '' + - '' + - 'WRITE_ACP' + - '' + - '' + - '' + - '79a59df900b949e55d96a1e698fbacedfd' + - '6e09d98eacf8f8d5218e7cd47ef2bf' + - '' + - 'READ_ACP' + - '' + - '' + + post: + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + '' + + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + 'FULL_CONTROL' + + '' + + '' + + '' + + `${constants.publicId}` + + '' + + 'READ' + + '' + + '' + + '' + + `${constants.logId}` + + '' + + 'WRITE' + + '' + + '' + + '' + + 'sampleaccount1@sampling.com' + + '' + + '' + + 'WRITE_ACP' + + '' + + '' + + '' + + '79a59df900b949e55d96a1e698fbacedfd' + + '6e09d98eacf8f8d5218e7cd47ef2bf' + + '' + + 'READ_ACP' + + '' + + '' + '', url: '/?acl', query: { acl: '' }, @@ -367,14 +335,11 @@ describe('putBucketACL API', () => { assert.strictEqual(err, undefined); metadata.getBucket(bucketName, log, (err, md) => { assert.strictEqual(md.getAcl().Canned, ''); - assert.strictEqual(md.getAcl().FULL_CONTROL[0], - canonicalIDforSample1); + assert.strictEqual(md.getAcl().FULL_CONTROL[0], canonicalIDforSample1); assert.strictEqual(md.getAcl().READ[0], constants.publicId); assert.strictEqual(md.getAcl().WRITE[0], constants.logId); - assert.strictEqual(md.getAcl().WRITE_ACP[0], - canonicalIDforSample1); - assert.strictEqual(md.getAcl().READ_ACP[0], - canonicalIDforSample2); + assert.strictEqual(md.getAcl().WRITE_ACP[0], canonicalIDforSample1); + assert.strictEqual(md.getAcl().READ_ACP[0], canonicalIDforSample2); done(); }); }); @@ -385,14 +350,15 @@ describe('putBucketACL API', () => { bucketName, namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, - post: '' + - '' + - 
'79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - '' + + post: + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + '' + '', url: '/?acl', query: { acl: '' }, @@ -413,19 +379,19 @@ describe('putBucketACL API', () => { }); }); - it('should not be able to set ACLs without AccessControlList section', - done => { + it('should not be able to set ACLs without AccessControlList section', done => { const testACLRequest = { bucketName, namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, - post: '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + + post: + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + '', url: '/?acl', query: { acl: '' }, @@ -443,33 +409,34 @@ describe('putBucketACL API', () => { bucketName, namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, - post: '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - '' + - '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - 'FULL_CONTROL' + - '' + - '' + - '' + - '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - 'READ' + - '' + - '' + + post: + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + '' + + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + 'FULL_CONTROL' + + '' + + '' + + '' + + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + 'READ' + + '' + + '' + '', url: '/?acl', query: { acl: '' }, @@ -482,27 +449,27 @@ describe('putBucketACL API', () => { }); }); - it('should return an error if invalid grantee user ID ' + - 'provided in ACL request body', done => { + it('should return an error if invalid grantee user ID ' + 'provided in ACL request body', done => { const testACLRequest = { bucketName, namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, - post: '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - '' + - '' + - '' + - 'invalid_id' + - '' + - 'READ_ACP' + - '' + - '' + + post: + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + '' + + '' + + '' + + 'invalid_id' + + '' + + 'READ_ACP' + + '' + + '' + '', url: '/?acl', query: { acl: '' }, @@ -515,27 +482,27 @@ describe('putBucketACL API', () => { }); }); - it('should return an error if invalid email ' + - 'address provided in ACLs set out in request body', done => { + it('should return an error if invalid email ' + 'address provided in ACLs set out in request body', done => { const testACLRequest = { bucketName, namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, - post: '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - '' + - '' + - '' + - 'xyz@amazon.com' + - '' + - 'WRITE_ACP' + - '' + - '' + + post: + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + '' + + '' + + '' + + 
'xyz@amazon.com' + + '' + + 'WRITE_ACP' + + '' + + '' + '', url: '/?acl', query: { acl: '' }, @@ -547,31 +514,31 @@ describe('putBucketACL API', () => { }); }); - it('should return an error if xml provided does not match s3 ' - + 'scheme for setting ACLs', done => { + it('should return an error if xml provided does not match s3 ' + 'scheme for setting ACLs', done => { const testACLRequest = { bucketName, namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, /** XML below uses the term "PowerGrant" instead of - * "Grant" which is part of the s3 xml scheme for ACLs - * so an error should be returned - */ - post: '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - '' + - '' + - '' + - 'xyz@amazon.com' + - '' + - 'WRITE_ACP' + - '' + - '' + + * "Grant" which is part of the s3 xml scheme for ACLs + * so an error should be returned + */ + post: + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + '' + + '' + + '' + + 'xyz@amazon.com' + + '' + + 'WRITE_ACP' + + '' + + '' + '', url: '/?acl', query: { acl: '' }, @@ -584,51 +551,54 @@ describe('putBucketACL API', () => { }); }); - - it('should return an error if xml provided does not match s3 ' - + 'scheme for setting ACLs using multiple Grant section', done => { - const testACLRequest = { - bucketName, - namespace, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - /** XML below uses the term "PowerGrant" instead of - * "Grant" which is part of the s3 xml scheme for ACLs - * so an error should be returned - */ - post: ' { + const testACLRequest = { + bucketName, + namespace, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + /** XML below uses the term "PowerGrant" instead of + * "Grant" which is part of the s3 xml scheme for ACLs + * so an error should be returned + */ + post: + '' + - '' + + '' + '79a59df900b949e55d96a1e698fbaced' + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + 'OwnerDisplayName' + - '' + - '' + + '' + + '' + '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - 'FULL_CONTROL' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + 'FULL_CONTROL' + '' + '' + - '' + - 'xyz@amazon.com' + - '' + - 'WRITE_ACP' + + '' + + 'xyz@amazon.com' + + '' + + 'WRITE_ACP' + '' + - '' + - '', - url: '/?acl', - query: { acl: '' }, - actionImplicitDenies: false, - }; + '' + + '', + url: '/?acl', + query: { acl: '' }, + actionImplicitDenies: false, + }; - bucketPutACL(authInfo, testACLRequest, log, err => { - assert.strictEqual(err.is.MalformedACLError, true); - done(); - }); - }); + bucketPutACL(authInfo, testACLRequest, log, err => { + assert.strictEqual(err.is.MalformedACLError, true); + done(); + }); + } + ); it('should return an error if malformed xml provided', done => { const testACLRequest = { @@ -640,20 +610,20 @@ describe('putBucketACL API', () => { post: { '' + - '' + + '' + '79a59df900b949e55d96a1e698fbaced' + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + 'OwnerDisplayName' + - '' + - '' + + '' + + '' + '' + - '' + - 'xyz@amazon.com' + - '' + - 'WRITE_ACP' + + '' + + 'xyz@amazon.com' + + '' + + 'WRITE_ACP' + '' + - '' + - '', + '' + + '', }, url: '/?acl', query: { acl: '' }, @@ -666,29 +636,29 @@ describe('putBucketACL API', () => { }); }); - it('should return an error if invalid group ' + - 'uri provided in ACLs set out in request body', done => { + it('should 
return an error if invalid group ' + 'uri provided in ACLs set out in request body', done => { const testACLRequest = { bucketName, namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, // URI in grant below is not valid group URI for s3 - post: '' + - '' + - '79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be' + - 'OwnerDisplayName' + - '' + - '' + - '' + - '' + - 'http://acs.amazonaws.com/groups/' + - 'global/NOTAVALIDGROUP' + - '' + - 'READ' + - '' + - '' + + post: + '' + + '' + + '79a59df900b949e55d96a1e698fbaced' + + 'fd6e09d98eacf8f8d5218e7cd47ef2be' + + 'OwnerDisplayName' + + '' + + '' + + '' + + '' + + 'http://acs.amazonaws.com/groups/' + + 'global/NOTAVALIDGROUP' + + '' + + 'READ' + + '' + + '' + '', url: '/?acl', query: { acl: '' }, @@ -701,16 +671,13 @@ describe('putBucketACL API', () => { }); }); - it('should return an error if invalid group uri' + - 'provided in ACL header request', done => { + it('should return an error if invalid group uri' + 'provided in ACL header request', done => { const testACLRequest = { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'x-amz-grant-full-control': - 'uri="http://acs.amazonaws.com/groups/' + - 'global/NOTAVALIDGROUP"', + host: `${bucketName}.s3.amazonaws.com`, + 'x-amz-grant-full-control': 'uri="http://acs.amazonaws.com/groups/' + 'global/NOTAVALIDGROUP"', }, url: '/?acl', query: { acl: '' }, diff --git a/tests/unit/api/bucketPutCors.js b/tests/unit/api/bucketPutCors.js index 343410f82b..250f532b61 100644 --- a/tests/unit/api/bucketPutCors.js +++ b/tests/unit/api/bucketPutCors.js @@ -3,13 +3,8 @@ const { errors } = require('arsenal'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutCors = require('../../../lib/api/bucketPutCors'); -const { _validator, parseCorsXml } - = require('../../../lib/api/apiUtils/bucket/bucketCors'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - CorsConfigTester } - = require('../helpers'); +const { _validator, parseCorsXml } = require('../../../lib/api/apiUtils/bucket/bucketCors'); +const { cleanup, DummyRequestLogger, makeAuthInfo, CorsConfigTester } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); const log = new DummyRequestLogger(); @@ -31,13 +26,14 @@ function _testPutBucketCors(authInfo, request, log, errCode, cb) { } function _generateSampleXml(value) { - const xml = '' + - '' + - 'PUT' + - 'www.example.com' + - `${value}` + - '' + - ''; + const xml = + '' + + '' + + 'PUT' + + 'www.example.com' + + `${value}` + + '' + + ''; return xml; } @@ -49,10 +45,9 @@ describe('putBucketCORS API', () => { }); afterEach(() => cleanup()); - it('should update a bucket\'s metadata with cors resource', done => { + it("should update a bucket's metadata with cors resource", done => { const corsUtil = new CorsConfigTester(); - const testBucketPutCorsRequest = corsUtil - .createBucketCorsRequest('PUT', bucketName); + const testBucketPutCorsRequest = corsUtil.createBucketCorsRequest('PUT', bucketName); bucketPutCors(authInfo, testBucketPutCorsRequest, log, err => { if (err) { process.stdout.write(`Err putting website config ${err}`); @@ -72,28 +67,22 @@ describe('putBucketCORS API', () => { it('should return BadDigest if md5 is omitted', done => { const corsUtil = new CorsConfigTester(); - const testBucketPutCorsRequest = corsUtil - .createBucketCorsRequest('PUT', bucketName); + const testBucketPutCorsRequest = corsUtil.createBucketCorsRequest('PUT', bucketName); 
testBucketPutCorsRequest.headers['content-md5'] = undefined; - _testPutBucketCors(authInfo, testBucketPutCorsRequest, - log, 'BadDigest', done); + _testPutBucketCors(authInfo, testBucketPutCorsRequest, log, 'BadDigest', done); }); it('should return MalformedXML if body greater than 64KB', done => { const corsUtil = new CorsConfigTester(); const body = Buffer.alloc(65537); // 64 * 1024 = 65536 bytes - const testBucketPutCorsRequest = corsUtil - .createBucketCorsRequest('PUT', bucketName, body); - _testPutBucketCors(authInfo, testBucketPutCorsRequest, - log, 'MalformedXML', done); + const testBucketPutCorsRequest = corsUtil.createBucketCorsRequest('PUT', bucketName, body); + _testPutBucketCors(authInfo, testBucketPutCorsRequest, log, 'MalformedXML', done); }); it('should return InvalidRequest if more than one MaxAgeSeconds', done => { const corsUtil = new CorsConfigTester({ maxAgeSeconds: [60, 6000] }); - const testBucketPutCorsRequest = corsUtil - .createBucketCorsRequest('PUT', bucketName); - _testPutBucketCors(authInfo, testBucketPutCorsRequest, - log, 'MalformedXML', done); + const testBucketPutCorsRequest = corsUtil.createBucketCorsRequest('PUT', bucketName); + _testPutBucketCors(authInfo, testBucketPutCorsRequest, log, 'MalformedXML', done); }); }); @@ -104,8 +93,7 @@ describe('PUT bucket cors :: helper validation functions ', () => { const expectedResults = [true, true, false]; for (let i = 0; i < testStrings.length; i++) { - const result = _validator - .validateNumberWildcards(testStrings[i]); + const result = _validator.validateNumberWildcards(testStrings[i]); assert.strictEqual(result, expectedResults[i]); } done(); @@ -126,8 +114,7 @@ describe('PUT bucket cors :: helper validation functions ', () => { it('should return MalformedXML if more than one ID per rule', done => { const testValue = 'testid'; - const xml = _generateSampleXml(`${testValue}` + - `${testValue}`); + const xml = _generateSampleXml(`${testValue}` + `${testValue}`); parseCorsXml(xml, log, err => { assert(err, 'Expected error but found none'); assert.strictEqual(err.is.MalformedXML, true); @@ -158,8 +145,7 @@ describe('PUT bucket cors :: helper validation functions ', () => { describe('validateMaxAgeSeconds ', () => { it('should validate successfully for valid value', done => { const testValue = 60; - const xml = _generateSampleXml(`${testValue}` + - ''); + const xml = _generateSampleXml(`${testValue}` + ''); parseCorsXml(xml, log, (err, result) => { assert.strictEqual(err, null, `Found unexpected err ${err}`); assert.strictEqual(typeof result[0].maxAgeSeconds, 'number'); @@ -168,12 +154,11 @@ describe('PUT bucket cors :: helper validation functions ', () => { }); }); - it('should return MalformedXML if more than one MaxAgeSeconds ' + - 'per rule', done => { + it('should return MalformedXML if more than one MaxAgeSeconds ' + 'per rule', done => { const testValue = '60'; const xml = _generateSampleXml( - `${testValue}` + - `${testValue}`); + `${testValue}` + `${testValue}` + ); parseCorsXml(xml, log, err => { assert(err, 'Expected error but found none'); assert.strictEqual(err.is.MalformedXML, true); @@ -183,8 +168,7 @@ describe('PUT bucket cors :: helper validation functions ', () => { it('should validate & return undefined if empty value', done => { const testValue = ''; - const xml = _generateSampleXml(`${testValue}` + - ''); + const xml = _generateSampleXml(`${testValue}` + ''); parseCorsXml(xml, log, (err, result) => { assert.strictEqual(err, null, `Found unexpected err ${err}`); 
assert.strictEqual(result[0].MaxAgeSeconds, undefined); diff --git a/tests/unit/api/bucketPutEncryption.js b/tests/unit/api/bucketPutEncryption.js index b635973345..ff76478750 100644 --- a/tests/unit/api/bucketPutEncryption.js +++ b/tests/unit/api/bucketPutEncryption.js @@ -9,7 +9,6 @@ const bucketPutEncryption = require('../../../lib/api/bucketPutEncryption'); const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const { templateSSEConfig, templateRequest, getSSEConfig } = require('../utils/bucketEncryption'); - const log = new DummyRequestLogger(); const authInfo = makeAuthInfo('accessKey1'); const bucketName = 'bucketname'; @@ -44,26 +43,36 @@ describe('bucketPutEncryption API', () => { }); it('should reject a config with no Rule', done => { - bucketPutEncryption(authInfo, templateRequest(bucketName, - { post: ` + bucketPutEncryption( + authInfo, + templateRequest(bucketName, { + post: ` `, - }), log, err => { - assert.strictEqual(err.is.MalformedXML, true); - done(); - }); + }), + log, + err => { + assert.strictEqual(err.is.MalformedXML, true); + done(); + } + ); }); it('should reject a config with no ApplyServerSideEncryptionByDefault section', done => { - bucketPutEncryption(authInfo, templateRequest(bucketName, - { post: ` + bucketPutEncryption( + authInfo, + templateRequest(bucketName, { + post: ` `, - }), log, err => { - assert.strictEqual(err.is.MalformedXML, true); - done(); - }); + }), + log, + err => { + assert.strictEqual(err.is.MalformedXML, true); + done(); + } + ); }); it('should reject a config with no SSEAlgorithm', done => { @@ -170,8 +179,9 @@ describe('bucketPutEncryption API', () => { }); }); - it('should update SSEAlgorithm if existing SSEAlgorithm is AES256, ' + - 'new SSEAlgorithm is aws:kms and no KMSMasterKeyID is provided', + it( + 'should update SSEAlgorithm if existing SSEAlgorithm is AES256, ' + + 'new SSEAlgorithm is aws:kms and no KMSMasterKeyID is provided', done => { const post = templateSSEConfig({ algorithm: 'AES256' }); bucketPutEncryption(authInfo, templateRequest(bucketName, { post }), log, err => { @@ -180,7 +190,10 @@ describe('bucketPutEncryption API', () => { assert.ifError(err); const { masterKeyId } = sseInfo; const newConf = templateSSEConfig({ algorithm: 'aws:kms' }); - return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log, + return bucketPutEncryption( + authInfo, + templateRequest(bucketName, { post: newConf }), + log, err => { assert.ifError(err); return getSSEConfig(bucketName, log, (err, updatedSSEInfo) => { @@ -196,7 +209,8 @@ describe('bucketPutEncryption API', () => { ); }); }); - }); + } + ); it('should update SSEAlgorithm to aws:kms and set KMSMasterKeyID', done => { const post = templateSSEConfig({ algorithm: 'AES256' }); @@ -379,15 +393,13 @@ describe('bucketPutEncryption API with account level encryption', () => { assert.ifError(err); assert.deepStrictEqual(sseInfo, expectedSseInfo); const newConf = templateSSEConfig({ algorithm: 'AES256' }); - return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log, - err => { - assert.ifError(err); - return getSSEConfig(bucketName, log, (err, updatedSSEInfo) => { - assert.deepStrictEqual(updatedSSEInfo, expectedSseInfo); - done(); - }); - } - ); + return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log, err => { + assert.ifError(err); + return getSSEConfig(bucketName, log, (err, updatedSSEInfo) => { + assert.deepStrictEqual(updatedSSEInfo, expectedSseInfo); + done(); + 
}); + }); }); }); }); @@ -407,22 +419,20 @@ describe('bucketPutEncryption API with account level encryption', () => { }); const keyId = '12345'; const newConf = templateSSEConfig({ algorithm: 'aws:kms', keyId }); - return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log, - err => { - assert.ifError(err); - return getSSEConfig(bucketName, log, (err, updatedSSEInfo) => { - assert.deepStrictEqual(updatedSSEInfo, { - cryptoScheme: 1, - algorithm: 'aws:kms', - mandatory: true, - masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, - configuredMasterKeyId: `${arnPrefix}${keyId}`, - isAccountEncryptionEnabled: true, - }); - done(); + return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log, err => { + assert.ifError(err); + return getSSEConfig(bucketName, log, (err, updatedSSEInfo) => { + assert.deepStrictEqual(updatedSSEInfo, { + cryptoScheme: 1, + algorithm: 'aws:kms', + mandatory: true, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, + configuredMasterKeyId: `${arnPrefix}${keyId}`, + isAccountEncryptionEnabled: true, }); - } - ); + done(); + }); + }); }); }); }); @@ -441,21 +451,19 @@ describe('bucketPutEncryption API with account level encryption', () => { isAccountEncryptionEnabled: true, }); const newConf = templateSSEConfig({ algorithm: 'AES256' }); - return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log, - err => { - assert.ifError(err); - return getSSEConfig(bucketName, log, (err, updatedSSEInfo) => { - assert.deepStrictEqual(updatedSSEInfo, { - cryptoScheme: 1, - algorithm: 'AES256', - mandatory: true, - masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, - isAccountEncryptionEnabled: true, - }); - done(); + return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log, err => { + assert.ifError(err); + return getSSEConfig(bucketName, log, (err, updatedSSEInfo) => { + assert.deepStrictEqual(updatedSSEInfo, { + cryptoScheme: 1, + algorithm: 'AES256', + mandatory: true, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, + isAccountEncryptionEnabled: true, }); - } - ); + done(); + }); + }); }); }); }); @@ -474,21 +482,19 @@ describe('bucketPutEncryption API with account level encryption', () => { configuredMasterKeyId: `${arnPrefix}${keyId}`, }); const newConf = templateSSEConfig({ algorithm: 'AES256' }); - return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log, - err => { - assert.ifError(err); - return getSSEConfig(bucketName, log, (err, updatedSSEInfo) => { - assert.deepStrictEqual(updatedSSEInfo, { - cryptoScheme: 1, - algorithm: 'AES256', - mandatory: true, - masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, - isAccountEncryptionEnabled: true, - }); - done(); + return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log, err => { + assert.ifError(err); + return getSSEConfig(bucketName, log, (err, updatedSSEInfo) => { + assert.deepStrictEqual(updatedSSEInfo, { + cryptoScheme: 1, + algorithm: 'AES256', + mandatory: true, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, + isAccountEncryptionEnabled: true, }); - } - ); + done(); + }); + }); }); }); }); @@ -497,8 +503,9 @@ describe('bucketPutEncryption API with account level encryption', () => { describe('bucketPutEncryption API with failed vault service', () => { beforeEach(done => { sinon.stub(inMemory, 'supportsDefaultKeyPerAccount').value(true); - sinon.stub(vault, 
'getOrCreateEncryptionKeyId').callsFake((accountCanonicalId, log, cb) => - cb(errors.ServiceFailure)); + sinon + .stub(vault, 'getOrCreateEncryptionKeyId') + .callsFake((accountCanonicalId, log, cb) => cb(errors.ServiceFailure)); bucketPut(authInfo, bucketPutRequest, log, done); }); diff --git a/tests/unit/api/bucketPutLifecycle.js b/tests/unit/api/bucketPutLifecycle.js index b3cd0071ec..2468cfe38b 100644 --- a/tests/unit/api/bucketPutLifecycle.js +++ b/tests/unit/api/bucketPutLifecycle.js @@ -2,12 +2,8 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutLifecycle = require('../../../lib/api/bucketPutLifecycle'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } - = require('../helpers'); -const { getLifecycleRequest, getLifecycleXml } = - require('../utils/lifecycleHelpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); +const { getLifecycleRequest, getLifecycleXml } = require('../utils/lifecycleHelpers'); const metadata = require('../../../lib/metadata/wrapper'); const log = new DummyRequestLogger(); @@ -82,9 +78,8 @@ describe('putBucketLifecycle API', () => { beforeEach(done => bucketPut(authInfo, testBucketPutRequest, log, done)); afterEach(() => cleanup()); - it('should update a bucket\'s metadata with lifecycle config obj', done => { - const testPutLifecycleRequest = getLifecycleRequest(bucketName, - getLifecycleXml()); + it("should update a bucket's metadata with lifecycle config obj", done => { + const testPutLifecycleRequest = getLifecycleRequest(bucketName, getLifecycleXml()); bucketPutLifecycle(authInfo, testPutLifecycleRequest, log, err => { if (err) { process.stdout.write(`Err putting lifecycle config ${err}`); @@ -95,10 +90,8 @@ describe('putBucketLifecycle API', () => { process.stdout.write(`Err retrieving bucket MD ${err}`); return done(err); } - const bucketLifecycleConfig = - bucket.getLifecycleConfiguration(); - assert.deepStrictEqual( - bucketLifecycleConfig, expectedLifecycleConfig); + const bucketLifecycleConfig = bucket.getLifecycleConfiguration(); + assert.deepStrictEqual(bucketLifecycleConfig, expectedLifecycleConfig); return done(); }); }); diff --git a/tests/unit/api/bucketPutNotification.js b/tests/unit/api/bucketPutNotification.js index 42456fba5b..cca311b78f 100644 --- a/tests/unit/api/bucketPutNotification.js +++ b/tests/unit/api/bucketPutNotification.js @@ -2,10 +2,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutNotification = require('../../../lib/api/bucketPutNotification'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); const log = new DummyRequestLogger(); @@ -22,11 +19,7 @@ const expectedNotifConfig = { queueConfig: [ { id: 'notification-id', - events: [ - 's3:ObjectCreated:*', - 's3:ObjectTagging:*', - 's3:ObjectAcl:Put', - ], + events: ['s3:ObjectCreated:*', 's3:ObjectTagging:*', 's3:ObjectAcl:Put'], queueArn: 'arn:scality:bucketnotif:::target1', filterRules: undefined, }, @@ -34,16 +27,18 @@ const expectedNotifConfig = { }; function getNotifRequest(empty) { - const queueConfig = empty ? '' : - '' + - 'notification-id' + - 'arn:scality:bucketnotif:::target1' + - 's3:ObjectCreated:*' + - 's3:ObjectTagging:*' + - 's3:ObjectAcl:Put' + - ''; + const queueConfig = empty + ? 
'' + : '' + + 'notification-id' + + 'arn:scality:bucketnotif:::target1' + + 's3:ObjectCreated:*' + + 's3:ObjectTagging:*' + + 's3:ObjectAcl:Put' + + ''; - const notifXml = '' + + const notifXml = + '' + `${queueConfig}` + ''; diff --git a/tests/unit/api/bucketPutObjectLock.js b/tests/unit/api/bucketPutObjectLock.js index e048bb40f2..ca51f163fe 100644 --- a/tests/unit/api/bucketPutObjectLock.js +++ b/tests/unit/api/bucketPutObjectLock.js @@ -2,10 +2,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutObjectLock = require('../../../lib/api/bucketPutObjectLock'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, -} = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); const log = new DummyRequestLogger(); @@ -18,7 +15,8 @@ const bucketPutRequest = { actionImplicitDenies: false, }; -const objectLockXml = '' + 'Enabled' + '' + @@ -57,21 +55,20 @@ describe('putBucketObjectLock API', () => { }); describe('with Object Lock enabled on bucket', () => { - const bucketObjLockRequest = Object.assign({}, bucketPutRequest, - { headers: { 'x-amz-bucket-object-lock-enabled': 'true' } }); + const bucketObjLockRequest = Object.assign({}, bucketPutRequest, { + headers: { 'x-amz-bucket-object-lock-enabled': 'true' }, + }); beforeEach(done => bucketPut(authInfo, bucketObjLockRequest, log, done)); afterEach(() => cleanup()); - it('should update a bucket\'s metadata with object lock config', done => { + it("should update a bucket's metadata with object lock config", done => { bucketPutObjectLock(authInfo, putObjLockRequest, log, err => { assert.ifError(err); return metadata.getBucket(bucketName, log, (err, bucket) => { assert.ifError(err); - const bucketObjectLockConfig = bucket. 
- getObjectLockConfiguration(); - assert.deepStrictEqual( - bucketObjectLockConfig, expectedObjectLockConfig); + const bucketObjectLockConfig = bucket.getObjectLockConfiguration(); + assert.deepStrictEqual(bucketObjectLockConfig, expectedObjectLockConfig); return done(); }); }); diff --git a/tests/unit/api/bucketPutPolicy.js b/tests/unit/api/bucketPutPolicy.js index b135ef09f7..c3ef92e2a9 100644 --- a/tests/unit/api/bucketPutPolicy.js +++ b/tests/unit/api/bucketPutPolicy.js @@ -2,10 +2,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutPolicy = require('../../../lib/api/bucketPutPolicy'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); const log = new DummyRequestLogger(); @@ -48,9 +45,8 @@ describe('putBucketPolicy API', () => { }); afterEach(() => cleanup()); - it('should update a bucket\'s metadata with bucket policy obj', done => { - bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), - log, err => { + it("should update a bucket's metadata with bucket policy obj", done => { + bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log, err => { if (err) { process.stdout.write(`Err putting bucket policy ${err}`); return done(err); @@ -67,11 +63,9 @@ describe('putBucketPolicy API', () => { }); }); - it('should return error if policy resource does not include bucket name', - done => { + it('should return error if policy resource does not include bucket name', done => { expectedBucketPolicy.Statement[0].Resource = 'arn:aws::s3:::badname'; - bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), - log, err => { + bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log, err => { assert.strictEqual(err.is.MalformedPolicy, true); assert.strictEqual(err.description, 'Policy has invalid resource'); return done(); @@ -79,10 +73,8 @@ describe('putBucketPolicy API', () => { }); it('should not return error if policy contains conditions', done => { - expectedBucketPolicy.Statement[0].Condition = - { IpAddress: { 'aws:SourceIp': '123.123.123.123' } }; - bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log, - err => { + expectedBucketPolicy.Statement[0].Condition = { IpAddress: { 'aws:SourceIp': '123.123.123.123' } }; + bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log, err => { assert.ifError(err); done(); }); @@ -90,18 +82,15 @@ describe('putBucketPolicy API', () => { it('should return error if policy contains service principal', done => { expectedBucketPolicy.Statement[0].Principal = { Service: ['test.com'] }; - bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log, - err => { + bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log, err => { assert.strictEqual(err.is.NotImplemented, true); done(); }); }); it('should return error if policy contains federated principal', done => { - expectedBucketPolicy.Statement[0].Principal = - { Federated: 'www.test.com' }; - bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log, - err => { + expectedBucketPolicy.Statement[0].Principal = { Federated: 'www.test.com' }; + bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log, err => { assert.strictEqual(err.is.NotImplemented, true); done(); }); diff --git a/tests/unit/api/bucketPutReplication.js b/tests/unit/api/bucketPutReplication.js 
index 3c891bbe0e..f06fd38f41 100644 --- a/tests/unit/api/bucketPutReplication.js +++ b/tests/unit/api/bucketPutReplication.js @@ -1,12 +1,9 @@ const assert = require('assert'); const { DummyRequestLogger } = require('../helpers'); -const { getReplicationConfiguration } = - require('../../../lib/api/apiUtils/bucket/getReplicationConfiguration'); -const validateReplicationConfig = - require('../../../lib/api/apiUtils/bucket/validateReplicationConfig'); -const replicationUtils = - require('../../functional/aws-node-sdk/lib/utility/replication'); +const { getReplicationConfiguration } = require('../../../lib/api/apiUtils/bucket/getReplicationConfiguration'); +const validateReplicationConfig = require('../../../lib/api/apiUtils/bucket/validateReplicationConfig'); +const replicationUtils = require('../../functional/aws-node-sdk/lib/utility/replication'); const log = new DummyRequestLogger(); // Check for the expected error response code and status code. @@ -15,8 +12,10 @@ function checkError(xml, expectedErr, cb) { if (expectedErr === null) { assert.strictEqual(err, null, `expected no error but got '${err}'`); } else { - assert(err.is[expectedErr], 'incorrect error response: should be ' + - `'Error: ${expectedErr}' but got '${err}'`); + assert( + err.is[expectedErr], + 'incorrect error response: should be ' + `'Error: ${expectedErr}' but got '${err}'` + ); } return cb(); }); @@ -29,45 +28,42 @@ function checkGeneratedID(xml, cb) { return cb(err); } const id = res.rules[0].id; - assert.strictEqual(typeof id, 'string', 'expected rule ID to be ' + - `string but got ${typeof id}`); - assert.strictEqual(id.length, 48, 'expected rule ID to be a length ' + - `of 48 but got ${id.length}`); + assert.strictEqual(typeof id, 'string', 'expected rule ID to be ' + `string but got ${typeof id}`); + assert.strictEqual(id.length, 48, 'expected rule ID to be a length ' + `of 48 but got ${id.length}`); return cb(); }); } // Create replication configuration XML with an tag optionally omitted. function createReplicationXML(missingTag, tagValue) { - let Role = missingTag === 'Role' ? '' : - '' + - 'arn:aws:iam::account-id:role/src-resource,' + - 'arn:aws:iam::account-id:role/dest-resource' + - ''; + let Role = + missingTag === 'Role' + ? '' + : '' + + 'arn:aws:iam::account-id:role/src-resource,' + + 'arn:aws:iam::account-id:role/dest-resource' + + ''; Role = tagValue && tagValue.Role ? `${tagValue.Role}` : Role; let ID = missingTag === 'ID' ? '' : 'foo'; ID = tagValue && tagValue.ID === '' ? '' : ID; const Prefix = missingTag === 'Prefix' ? '' : 'foo'; const Status = missingTag === 'Status' ? '' : 'Enabled'; - const Bucket = missingTag === 'Bucket' ? '' : - 'arn:aws:s3:::destination-bucket'; - let StorageClass = missingTag === 'StorageClass' ? '' : - 'STANDARD'; - StorageClass = tagValue && tagValue.StorageClass ? - `${tagValue.StorageClass}` : StorageClass; - const Destination = missingTag === 'Destination' ? '' : - `${Bucket + StorageClass}`; - const Rule = missingTag === 'Rule' ? '' : - `${ID + Prefix + Status + Destination}`; + const Bucket = missingTag === 'Bucket' ? '' : 'arn:aws:s3:::destination-bucket'; + let StorageClass = missingTag === 'StorageClass' ? '' : 'STANDARD'; + StorageClass = + tagValue && tagValue.StorageClass ? `${tagValue.StorageClass}` : StorageClass; + const Destination = missingTag === 'Destination' ? '' : `${Bucket + StorageClass}`; + const Rule = missingTag === 'Rule' ? '' : `${ID + Prefix + Status + Destination}`; const content = missingTag === null ? 
'' : `${Role}${Rule}`; - return '${content}` + - ''; + return ( + '${content}` + + '' + ); } -describe('\'getReplicationConfiguration\' function', () => { - it('should not return error when putting valid XML', done => - checkError(createReplicationXML(), null, done)); +describe("'getReplicationConfiguration' function", () => { + it('should not return error when putting valid XML', done => checkError(createReplicationXML(), null, done)); it('should not accept empty replication configuration', done => checkError(createReplicationXML(null), 'MalformedXML', done)); @@ -77,13 +73,13 @@ describe('\'getReplicationConfiguration\' function', () => { const xmlTag = prop === 'Rules' ? 'Rule' : prop; const xml = createReplicationXML(xmlTag); - it(`should not accept replication configuration without '${prop}'`, - done => checkError(xml, 'MalformedXML', done)); + it(`should not accept replication configuration without '${prop}'`, done => + checkError(xml, 'MalformedXML', done)); }); replicationUtils.optionalConfigProperties.forEach(prop => { - it(`should accept replication configuration without '${prop}'`, - done => checkError(createReplicationXML(prop), null, done)); + it(`should accept replication configuration without '${prop}'`, done => + checkError(createReplicationXML(prop), null, done)); }); it(`should accept replication configuration without 'Bucket' when there @@ -95,19 +91,18 @@ describe('\'getReplicationConfiguration\' function', () => { checkError(xml, null, done); }); - it("should create a rule 'ID' if omitted from the replication " + - 'configuration', done => { + it("should create a rule 'ID' if omitted from the replication " + 'configuration', done => { const xml = createReplicationXML('ID'); return checkGeneratedID(xml, done); }); - it('should create an \'ID\' if rule ID is \'\'', done => { + it("should create an 'ID' if rule ID is ''", done => { const xml = createReplicationXML(undefined, { ID: '' }); return checkGeneratedID(xml, done); }); }); -describe('\'validateReplicationConfig\' function', () => { +describe("'validateReplicationConfig' function", () => { const nonTransientBucket = { getLocationConstraint: () => 'us-east-1', }; @@ -115,57 +110,65 @@ describe('\'validateReplicationConfig\' function', () => { getLocationConstraint: () => 'transientfile', }; - it('should validate configuration when bucket location is ' + - 'not transient and preferred read location is not specified', () => { - const withoutPreferredRead = { - role: 'arn:aws:iam::account-id:role/src-resource,' + - 'arn:aws:iam::account-id:role/dest-resource', - destination: 'arn:aws:s3:::destination-bucket', - rules: [{ - prefix: 'test-prefix', - enabled: true, - id: 'test-id', - storageClass: 'STANDARD,us-east-2', - }], - }; - const result = validateReplicationConfig(withoutPreferredRead, - nonTransientBucket); - assert.strictEqual(result, true); - }); - - it('should validate configuration when bucket location is transient ' + - 'and preferred read location is specified', () => { - const withPreferredRead = { - role: 'arn:aws:iam::account-id:role/src-resource,' + - 'arn:aws:iam::account-id:role/dest-resource', - destination: 'arn:aws:s3:::destination-bucket', - rules: [{ - prefix: 'test-prefix', - enabled: true, - id: 'test-id', - storageClass: 'STANDARD,us-east-2:preferred_read', - }], - }; - const result = validateReplicationConfig(withPreferredRead, - transientBucket); - assert.strictEqual(result, true); - }); - - it('should not validate configuration when bucket location is ' + - 'transient and preferred read 
location is not specified', () => { - const withoutPreferredRead = { - role: 'arn:aws:iam::account-id:role/src-resource,' + - 'arn:aws:iam::account-id:role/dest-resource', - destination: 'arn:aws:s3:::destination-bucket', - rules: [{ - prefix: 'test-prefix', - enabled: true, - id: 'test-id', - storageClass: 'STANDARD,us-east-2', - }], - }; - const result = validateReplicationConfig(withoutPreferredRead, - transientBucket); - assert.strictEqual(result, false); - }); + it( + 'should validate configuration when bucket location is ' + + 'not transient and preferred read location is not specified', + () => { + const withoutPreferredRead = { + role: 'arn:aws:iam::account-id:role/src-resource,' + 'arn:aws:iam::account-id:role/dest-resource', + destination: 'arn:aws:s3:::destination-bucket', + rules: [ + { + prefix: 'test-prefix', + enabled: true, + id: 'test-id', + storageClass: 'STANDARD,us-east-2', + }, + ], + }; + const result = validateReplicationConfig(withoutPreferredRead, nonTransientBucket); + assert.strictEqual(result, true); + } + ); + + it( + 'should validate configuration when bucket location is transient ' + 'and preferred read location is specified', + () => { + const withPreferredRead = { + role: 'arn:aws:iam::account-id:role/src-resource,' + 'arn:aws:iam::account-id:role/dest-resource', + destination: 'arn:aws:s3:::destination-bucket', + rules: [ + { + prefix: 'test-prefix', + enabled: true, + id: 'test-id', + storageClass: 'STANDARD,us-east-2:preferred_read', + }, + ], + }; + const result = validateReplicationConfig(withPreferredRead, transientBucket); + assert.strictEqual(result, true); + } + ); + + it( + 'should not validate configuration when bucket location is ' + + 'transient and preferred read location is not specified', + () => { + const withoutPreferredRead = { + role: 'arn:aws:iam::account-id:role/src-resource,' + 'arn:aws:iam::account-id:role/dest-resource', + destination: 'arn:aws:s3:::destination-bucket', + rules: [ + { + prefix: 'test-prefix', + enabled: true, + id: 'test-id', + storageClass: 'STANDARD,us-east-2', + }, + ], + }; + const result = validateReplicationConfig(withoutPreferredRead, transientBucket); + assert.strictEqual(result, false); + } + ); }); diff --git a/tests/unit/api/bucketPutTagging.js b/tests/unit/api/bucketPutTagging.js index 1d95729c46..4b6324b078 100644 --- a/tests/unit/api/bucketPutTagging.js +++ b/tests/unit/api/bucketPutTagging.js @@ -1,11 +1,7 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - TaggingConfigTester, -} = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, TaggingConfigTester } = require('../helpers'); const bucketPutTagging = require('../../../lib/api/bucketPutTagging'); const log = new DummyRequestLogger(); const authInfo = makeAuthInfo('accessKey1'); @@ -28,8 +24,7 @@ describe('putBucketTagging API', () => { it('should set tags resource', done => { const taggingUtil = new TaggingConfigTester(); - const testBucketPutTaggingRequest = taggingUtil - .createBucketTaggingRequest('PUT', bucketName); + const testBucketPutTaggingRequest = taggingUtil.createBucketTaggingRequest('PUT', bucketName); bucketPutTagging(authInfo, testBucketPutTaggingRequest, log, err => { if (err) { process.stdout.write(`Err putting object tagging ${err}`); @@ -42,8 +37,7 @@ describe('putBucketTagging API', () => { it('should return access denied if the authorization check fails', done => { const taggingUtil = new 
TaggingConfigTester(); - const testBucketPutTaggingRequest = taggingUtil - .createBucketTaggingRequest('PUT', bucketName); + const testBucketPutTaggingRequest = taggingUtil.createBucketTaggingRequest('PUT', bucketName); const authInfo = makeAuthInfo('accessKey2'); bucketPutTagging(authInfo, testBucketPutTaggingRequest, log, err => { assert(err.AccessDenied); diff --git a/tests/unit/api/bucketPutVersioning.js b/tests/unit/api/bucketPutVersioning.js index 462fadcf74..6756264028 100644 --- a/tests/unit/api/bucketPutVersioning.js +++ b/tests/unit/api/bucketPutVersioning.js @@ -6,46 +6,44 @@ const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutVersioning = require('../../../lib/api/bucketPutVersioning'); const bucketPutReplication = require('../../../lib/api/bucketPutReplication'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo } = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); const xmlEnableVersioning = -'' + -'Enabled' + -''; + '' + + 'Enabled' + + ''; const xmlSuspendVersioning = -'' + -'Suspended' + -''; + '' + + 'Suspended' + + ''; const locConstraintVersioned = -'' + -'withversioning' + -''; + '' + + 'withversioning' + + ''; const locConstraintNonVersioned = -'' + -'withoutversioning' + -''; + '' + + 'withoutversioning' + + ''; const xmlReplicationConfiguration = -'' + + '' + 'arn:aws:iam::account-id:role/src-resource' + '' + - '' + - 'Enabled' + - '' + - 'arn:aws:s3:::destination-bucket' + - 'us-east-2' + - '' + + '' + + 'Enabled' + + '' + + 'arn:aws:s3:::destination-bucket' + + 'us-east-2' + + '' + '' + -''; + ''; -const externalVersioningErrorMessage = 'We do not currently support putting ' + -'a versioned object to a location-constraint of type Azure or GCP.'; +const externalVersioningErrorMessage = + 'We do not currently support putting ' + 'a versioned object to a location-constraint of type Azure or GCP.'; const log = new DummyRequestLogger(); const bucketName = 'bucketname'; @@ -94,55 +92,58 @@ describe('bucketPutVersioning API', () => { const tests = [ { - msg: 'should successfully enable versioning on location ' + - 'constraint with supportsVersioning set to true', + msg: + 'should successfully enable versioning on location ' + + 'constraint with supportsVersioning set to true', input: xmlEnableVersioning, output: { Status: 'Enabled' }, }, { - msg: 'should successfully suspend versioning on location ' + - 'constraint with supportsVersioning set to true', + msg: + 'should successfully suspend versioning on location ' + + 'constraint with supportsVersioning set to true', input: xmlSuspendVersioning, output: { Status: 'Suspended' }, }, ]; - tests.forEach(test => it(test.msg, done => { - const request = _putVersioningRequest(test.input); - bucketPutVersioning(authInfo, request, log, err => { - assert.ifError(err, - `Expected success, but got err: ${err}`); - metadata.getBucket(bucketName, log, (err, bucket) => { - assert.ifError(err, - `Expected success, but got err: ${err}`); - assert.deepStrictEqual(bucket._versioningConfiguration, - test.output); - done(); + tests.forEach(test => + it(test.msg, done => { + const request = _putVersioningRequest(test.input); + bucketPutVersioning(authInfo, request, log, err => { + assert.ifError(err, `Expected success, but got err: ${err}`); + metadata.getBucket(bucketName, log, (err, bucket) => { + assert.ifError(err, `Expected success, but got err: ${err}`); + 
assert.deepStrictEqual(bucket._versioningConfiguration, test.output); + done(); + }); }); - }); - })); + }) + ); it('should not suspend versioning on bucket with replication', done => { - async.series([ - // Enable versioning to allow putting a replication config. - next => { - const request = _putVersioningRequest(xmlEnableVersioning); - bucketPutVersioning(authInfo, request, log, next); - }, - // Put the replication config on the bucket. - next => { - const request = - _putReplicationRequest(xmlReplicationConfiguration); - bucketPutReplication(authInfo, request, log, next); - }, - // Attempt to suspend versioning. - next => { - const request = _putVersioningRequest(xmlSuspendVersioning); - bucketPutVersioning(authInfo, request, log, err => { - assert(err.is.InvalidBucketState); - next(); - }); - }, - ], done); + async.series( + [ + // Enable versioning to allow putting a replication config. + next => { + const request = _putVersioningRequest(xmlEnableVersioning); + bucketPutVersioning(authInfo, request, log, next); + }, + // Put the replication config on the bucket. + next => { + const request = _putReplicationRequest(xmlReplicationConfiguration); + bucketPutReplication(authInfo, request, log, next); + }, + // Attempt to suspend versioning. + next => { + const request = _putVersioningRequest(xmlSuspendVersioning); + bucketPutVersioning(authInfo, request, log, err => { + assert(err.is.InvalidBucketState); + next(); + }); + }, + ], + done + ); }); }); @@ -154,28 +155,28 @@ describe('bucketPutVersioning API', () => { const tests = [ { - msg: 'should return error if enabling versioning on location ' + - 'constraint with supportsVersioning set to false', + msg: + 'should return error if enabling versioning on location ' + + 'constraint with supportsVersioning set to false', input: xmlEnableVersioning, - output: { error: errorInstances.NotImplemented.customizeDescription( - externalVersioningErrorMessage) }, + output: { error: errorInstances.NotImplemented.customizeDescription(externalVersioningErrorMessage) }, }, { - msg: 'should return error if suspending versioning on ' + - ' location constraint with supportsVersioning set to false', + msg: + 'should return error if suspending versioning on ' + + ' location constraint with supportsVersioning set to false', input: xmlSuspendVersioning, - output: { error: errorInstances.NotImplemented.customizeDescription( - externalVersioningErrorMessage) }, + output: { error: errorInstances.NotImplemented.customizeDescription(externalVersioningErrorMessage) }, }, ]; - tests.forEach(test => it(test.msg, done => { - const putBucketVersioningRequest = - _putVersioningRequest(test.input); - bucketPutVersioning(authInfo, putBucketVersioningRequest, log, - err => { - assert.deepStrictEqual(err, test.output.error); - done(); - }); - })); + tests.forEach(test => + it(test.msg, done => { + const putBucketVersioningRequest = _putVersioningRequest(test.input); + bucketPutVersioning(authInfo, putBucketVersioningRequest, log, err => { + assert.deepStrictEqual(err, test.output.error); + done(); + }); + }) + ); }); }); diff --git a/tests/unit/api/bucketPutWebsite.js b/tests/unit/api/bucketPutWebsite.js index 45944910af..5bb42f4460 100644 --- a/tests/unit/api/bucketPutWebsite.js +++ b/tests/unit/api/bucketPutWebsite.js @@ -3,13 +3,8 @@ const { parseString } = require('xml2js'); const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutWebsite = require('../../../lib/api/bucketPutWebsite'); -const { xmlContainsElem } - = 
require('../../../lib/api/apiUtils/bucket/bucketWebsite'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - WebsiteConfig } - = require('../helpers'); +const { xmlContainsElem } = require('../../../lib/api/apiUtils/bucket/bucketWebsite'); +const { cleanup, DummyRequestLogger, makeAuthInfo, WebsiteConfig } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); const log = new DummyRequestLogger(); @@ -41,12 +36,10 @@ describe('putBucketWebsite API', () => { beforeEach(done => bucketPut(authInfo, testBucketPutRequest, log, done)); afterEach(() => cleanup()); - it('should update a bucket\'s metadata with website config obj', done => { + it("should update a bucket's metadata with website config obj", done => { const config = new WebsiteConfig('index.html', 'error.html'); - config.addRoutingRule({ ReplaceKeyPrefixWith: 'documents/' }, - { KeyPrefixEquals: 'docs/' }); - const testBucketPutWebsiteRequest = - _getPutWebsiteRequest(config.getXml()); + config.addRoutingRule({ ReplaceKeyPrefixWith: 'documents/' }, { KeyPrefixEquals: 'docs/' }); + const testBucketPutWebsiteRequest = _getPutWebsiteRequest(config.getXml()); bucketPutWebsite(authInfo, testBucketPutWebsiteRequest, log, err => { if (err) { process.stdout.write(`Err putting website config ${err}`); @@ -58,122 +51,122 @@ describe('putBucketWebsite API', () => { return done(err); } const bucketWebsiteConfig = bucket.getWebsiteConfiguration(); - assert.strictEqual(bucketWebsiteConfig._indexDocument, - config.IndexDocument.Suffix); - assert.strictEqual(bucketWebsiteConfig._errorDocument, - config.ErrorDocument.Key); - assert.strictEqual(bucketWebsiteConfig._routingRules[0] - ._condition.keyPrefixEquals, - config.RoutingRules[0].Condition.KeyPrefixEquals); - assert.strictEqual(bucketWebsiteConfig._routingRules[0] - ._redirect.replaceKeyPrefixWith, - config.RoutingRules[0].Redirect.ReplaceKeyPrefixWith); + assert.strictEqual(bucketWebsiteConfig._indexDocument, config.IndexDocument.Suffix); + assert.strictEqual(bucketWebsiteConfig._errorDocument, config.ErrorDocument.Key); + assert.strictEqual( + bucketWebsiteConfig._routingRules[0]._condition.keyPrefixEquals, + config.RoutingRules[0].Condition.KeyPrefixEquals + ); + assert.strictEqual( + bucketWebsiteConfig._routingRules[0]._redirect.replaceKeyPrefixWith, + config.RoutingRules[0].Redirect.ReplaceKeyPrefixWith + ); return done(); }); }); }); describe('helper functions', () => { - it('xmlContainsElem should return true if xml contains ' + - 'specified element', done => { - const xml = '' + - 'value' + - ''; + it('xmlContainsElem should return true if xml contains ' + 'specified element', done => { + const xml = '' + 'value' + ''; parseString(xml, (err, result) => { if (err) { process.stdout.write(`Unexpected err ${err} parsing xml`); return done(err); } - const containsRes = xmlContainsElem(result.Toplevel.Parent, - 'Element'); + const containsRes = xmlContainsElem(result.Toplevel.Parent, 'Element'); assert.strictEqual(containsRes, true); return done(); }); }); - it('xmlContainsElem should return false if xml does not contain ' + - 'specified element', done => { - const xml = '' + - 'value' + - ''; + it('xmlContainsElem should return false if xml does not contain ' + 'specified element', done => { + const xml = '' + 'value' + ''; parseString(xml, (err, result) => { if (err) { process.stdout.write(`Unexpected err ${err} parsing xml`); return done(err); } - const containsRes = xmlContainsElem(result.Toplevel.Parent, - 'Element'); + const containsRes = 
xmlContainsElem(result.Toplevel.Parent, 'Element'); assert.strictEqual(containsRes, false); return done(); }); }); - it('xmlContainsElem should return true if parent contains list of ' + - 'elements and isList is specified in options', done => { - const xml = '' + - 'value' + - 'value' + - 'value' + - ''; - parseString(xml, (err, result) => { - if (err) { - process.stdout.write(`Unexpected err ${err} parsing xml`); - return done(err); - } - const containsRes = xmlContainsElem(result.Toplevel.Parent, - 'Element', { isList: true }); - assert.strictEqual(containsRes, true); - return done(); - }); - }); - it('xmlContainsElem should return true if parent contains at least ' + - 'one of the elements specified, if multiple', done => { - const xml = '' + - 'value' + - ''; - parseString(xml, (err, result) => { - if (err) { - process.stdout.write(`Unexpected err ${err} parsing xml`); - return done(err); - } - const containsRes = xmlContainsElem(result.Toplevel.Parent, - ['ElementA', 'ElementB']); - assert.strictEqual(containsRes, true); - return done(); - }); - }); - it('xmlContainsElem should return false if parent contains only one ' + - 'of multiple elements specified and checkForAll specified in options', - done => { - const xml = '' + - 'value' + - ''; - parseString(xml, (err, result) => { - if (err) { - process.stdout.write(`Unexpected err ${err} parsing xml`); - return done(err); - } - const containsRes = xmlContainsElem(result.Toplevel.Parent, - ['ElementA', 'ElementB'], { checkForAll: true }); - assert.strictEqual(containsRes, false); - return done(); - }); - }); - it('xmlContainsElem should return true if parent contains all ' + - 'of multiple elements specified and checkForAll specified in options', - done => { - const xml = '' + - 'value' + - 'value' + - ''; - parseString(xml, (err, result) => { - if (err) { - process.stdout.write(`Unexpected err ${err} parsing xml`); - return done(err); - } - const containsRes = xmlContainsElem(result.Toplevel.Parent, - ['ElementA', 'ElementB'], { checkForAll: true }); - assert.strictEqual(containsRes, true); - return done(); - }); - }); + it( + 'xmlContainsElem should return true if parent contains list of ' + + 'elements and isList is specified in options', + done => { + const xml = + '' + + 'value' + + 'value' + + 'value' + + ''; + parseString(xml, (err, result) => { + if (err) { + process.stdout.write(`Unexpected err ${err} parsing xml`); + return done(err); + } + const containsRes = xmlContainsElem(result.Toplevel.Parent, 'Element', { isList: true }); + assert.strictEqual(containsRes, true); + return done(); + }); + } + ); + it( + 'xmlContainsElem should return true if parent contains at least ' + + 'one of the elements specified, if multiple', + done => { + const xml = '' + 'value' + ''; + parseString(xml, (err, result) => { + if (err) { + process.stdout.write(`Unexpected err ${err} parsing xml`); + return done(err); + } + const containsRes = xmlContainsElem(result.Toplevel.Parent, ['ElementA', 'ElementB']); + assert.strictEqual(containsRes, true); + return done(); + }); + } + ); + it( + 'xmlContainsElem should return false if parent contains only one ' + + 'of multiple elements specified and checkForAll specified in options', + done => { + const xml = '' + 'value' + ''; + parseString(xml, (err, result) => { + if (err) { + process.stdout.write(`Unexpected err ${err} parsing xml`); + return done(err); + } + const containsRes = xmlContainsElem(result.Toplevel.Parent, ['ElementA', 'ElementB'], { + checkForAll: true, + }); + 
assert.strictEqual(containsRes, false); + return done(); + }); + } + ); + it( + 'xmlContainsElem should return true if parent contains all ' + + 'of multiple elements specified and checkForAll specified in options', + done => { + const xml = + '' + + 'value' + + 'value' + + ''; + parseString(xml, (err, result) => { + if (err) { + process.stdout.write(`Unexpected err ${err} parsing xml`); + return done(err); + } + const containsRes = xmlContainsElem(result.Toplevel.Parent, ['ElementA', 'ElementB'], { + checkForAll: true, + }); + assert.strictEqual(containsRes, true); + return done(); + }); + } + ); }); }); diff --git a/tests/unit/api/deleteMarker.js b/tests/unit/api/deleteMarker.js index a5610c057e..4ebaa59b80 100644 --- a/tests/unit/api/deleteMarker.js +++ b/tests/unit/api/deleteMarker.js @@ -48,10 +48,11 @@ function _createBucketPutVersioningReq(status) { query: { versioning: '' }, actionImplicitDenies: false, }; - const xml = '' + - `${status}` + - ''; + const xml = + '' + + `${status}` + + ''; request.post = xml; return request; } @@ -79,8 +80,7 @@ function _createMultiObjectDeleteRequest(numObjects) { } xml.push(''); request.post = xml.join(''); - request.headers['content-md5'] = crypto.createHash('md5') - .update(request.post, 'utf8').digest('base64'); + request.headers['content-md5'] = crypto.createHash('md5').update(request.post, 'utf8').digest('base64'); return request; } @@ -94,12 +94,7 @@ const expectedAcl = { READ_ACP: [], }; -const undefHeadersExpected = [ - 'cache-control', - 'content-disposition', - 'content-encoding', - 'expires', -]; +const undefHeadersExpected = ['cache-control', 'content-disposition', 'content-encoding', 'expires']; describe('delete marker creation', () => { beforeEach(done => { @@ -108,8 +103,7 @@ describe('delete marker creation', () => { if (err) { return done(err); } - return bucketPutVersioning(authInfo, enableVersioningRequest, - log, done); + return bucketPutVersioning(authInfo, enableVersioningRequest, log, done); }); }); @@ -119,46 +113,43 @@ describe('delete marker creation', () => { function _assertDeleteMarkerMd(deleteResultVersionId, isLatest, callback) { const options = { - versionId: isLatest ? undefined : - versionIdUtils.decode(deleteResultVersionId), + versionId: isLatest ? 
undefined : versionIdUtils.decode(deleteResultVersionId), }; - return metadata.getObjectMD(bucketName, objectName, options, log, - (err, deleteMarkerMD) => { - assert.strictEqual(err, null); - const mdVersionId = deleteMarkerMD.versionId; - assert.strictEqual(deleteMarkerMD.isDeleteMarker, true); - assert.strictEqual( - versionIdUtils.encode(mdVersionId), - deleteResultVersionId); - assert.strictEqual(deleteMarkerMD['content-length'], 0); - assert.strictEqual(deleteMarkerMD.location, null); - assert.deepStrictEqual(deleteMarkerMD.acl, expectedAcl); - undefHeadersExpected.forEach(header => { - assert.strictEqual(deleteMarkerMD[header], undefined); - }); - return callback(); + return metadata.getObjectMD(bucketName, objectName, options, log, (err, deleteMarkerMD) => { + assert.strictEqual(err, null); + const mdVersionId = deleteMarkerMD.versionId; + assert.strictEqual(deleteMarkerMD.isDeleteMarker, true); + assert.strictEqual(versionIdUtils.encode(mdVersionId), deleteResultVersionId); + assert.strictEqual(deleteMarkerMD['content-length'], 0); + assert.strictEqual(deleteMarkerMD.location, null); + assert.deepStrictEqual(deleteMarkerMD.acl, expectedAcl); + undefHeadersExpected.forEach(header => { + assert.strictEqual(deleteMarkerMD[header], undefined); }); + return callback(); + }); } - it('should create a delete marker if versioning enabled and deleting ' + - 'object without specifying version id', done => { - objectDelete(authInfo, testDeleteRequest, log, (err, delResHeaders) => { - if (err) { - return done(err); - } - assert.strictEqual(delResHeaders['x-amz-delete-marker'], true); - assert(delResHeaders['x-amz-version-id']); - return _assertDeleteMarkerMd(delResHeaders['x-amz-version-id'], - true, done); - }); - }); + it( + 'should create a delete marker if versioning enabled and deleting ' + 'object without specifying version id', + done => { + objectDelete(authInfo, testDeleteRequest, log, (err, delResHeaders) => { + if (err) { + return done(err); + } + assert.strictEqual(delResHeaders['x-amz-delete-marker'], true); + assert(delResHeaders['x-amz-version-id']); + return _assertDeleteMarkerMd(delResHeaders['x-amz-version-id'], true, done); + }); + } + ); - it('multi-object delete should create delete markers if versioning ' + - 'enabled and items do not have version id specified', done => { - const testMultiObjectDeleteRequest = - _createMultiObjectDeleteRequest(3); - return multiObjectDelete(authInfo, testMultiObjectDeleteRequest, log, - (err, xml) => { + it( + 'multi-object delete should create delete markers if versioning ' + + 'enabled and items do not have version id specified', + done => { + const testMultiObjectDeleteRequest = _createMultiObjectDeleteRequest(3); + return multiObjectDelete(authInfo, testMultiObjectDeleteRequest, log, (err, xml) => { if (err) { return done(err); } @@ -167,14 +158,18 @@ describe('delete marker creation', () => { return done(err); } const results = parsedResult.DeleteResult.Deleted; - return async.forEach(results, (result, cb) => { - assert.strictEqual(result.Key[0], objectName); - assert.strictEqual(result.DeleteMarker[0], 'true'); - assert(result.DeleteMarkerVersionId[0]); - _assertDeleteMarkerMd(result.DeleteMarkerVersionId[0], - false, cb); - }, err => done(err)); + return async.forEach( + results, + (result, cb) => { + assert.strictEqual(result.Key[0], objectName); + assert.strictEqual(result.DeleteMarker[0], 'true'); + assert(result.DeleteMarkerVersionId[0]); + _assertDeleteMarkerMd(result.DeleteMarkerVersionId[0], false, cb); + }, + err => 
done(err) + ); }); }); - }); + } + ); }); diff --git a/tests/unit/api/deletedFlagBucket.js b/tests/unit/api/deletedFlagBucket.js index 83d699f4e2..0218a3a97f 100644 --- a/tests/unit/api/deletedFlagBucket.js +++ b/tests/unit/api/deletedFlagBucket.js @@ -14,18 +14,12 @@ const bucketPutWebsite = require('../../../lib/api/bucketPutWebsite'); const bucketDelete = require('../../../lib/api/bucketDelete'); const bucketDeleteCors = require('../../../lib/api/bucketDeleteCors'); const bucketDeleteWebsite = require('../../../lib/api/bucketDeleteWebsite'); -const completeMultipartUpload - = require('../../../lib/api/completeMultipartUpload'); +const completeMultipartUpload = require('../../../lib/api/completeMultipartUpload'); const { config } = require('../../../lib/Config'); const constants = require('../../../constants'); const DummyRequest = require('../DummyRequest'); -const initiateMultipartUpload - = require('../../../lib/api/initiateMultipartUpload'); -const { cleanup, - createAlteredRequest, - DummyRequestLogger, - makeAuthInfo } - = require('../helpers'); +const initiateMultipartUpload = require('../../../lib/api/initiateMultipartUpload'); +const { cleanup, createAlteredRequest, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const listMultipartUploads = require('../../../lib/api/listMultipartUploads'); const listParts = require('../../../lib/api/listParts'); const metadata = require('../metadataswitch'); @@ -69,21 +63,16 @@ const serviceGetRequest = { const userBucketOwner = 'admin'; const creationDate = new Date().toJSON(); -const usersBucket = new BucketInfo(usersBucketName, - userBucketOwner, userBucketOwner, creationDate); - +const usersBucket = new BucketInfo(usersBucketName, userBucketOwner, userBucketOwner, creationDate); function checkBucketListing(authInfo, bucketName, expectedListingLength, done) { return serviceGet(authInfo, serviceGetRequest, log, (err, data) => { parseString(data, (err, result) => { if (expectedListingLength > 0) { - assert.strictEqual(result.ListAllMyBucketsResult - .Buckets[0].Bucket.length, expectedListingLength); - assert.strictEqual(result.ListAllMyBucketsResult - .Buckets[0].Bucket[0].Name[0], bucketName); + assert.strictEqual(result.ListAllMyBucketsResult.Buckets[0].Bucket.length, expectedListingLength); + assert.strictEqual(result.ListAllMyBucketsResult.Buckets[0].Bucket[0].Name[0], bucketName); } else { - assert.strictEqual(result.ListAllMyBucketsResult - .Buckets[0].length, 0); + assert.strictEqual(result.ListAllMyBucketsResult.Buckets[0].length, 0); } done(); }); @@ -105,15 +94,12 @@ function confirmDeleted(done) { }); } - describe('deleted flag bucket handling', () => { beforeEach(done => { cleanup(); - const bucketMD = new BucketInfo(bucketName, canonicalID, - authInfo.getAccountDisplayName(), creationDate); + const bucketMD = new BucketInfo(bucketName, canonicalID, authInfo.getAccountDisplayName(), creationDate); bucketMD.addDeletedFlag(); - bucketMD.setSpecificAcl(otherAccountAuthInfo.getCanonicalID(), - 'FULL_CONTROL'); + bucketMD.setSpecificAcl(otherAccountAuthInfo.getCanonicalID(), 'FULL_CONTROL'); bucketMD.setLocationConstraint(locationConstraint); metadata.createBucket(bucketName, bucketMD, log, () => { metadata.createBucket(usersBucketName, usersBucket, log, () => { @@ -122,77 +108,103 @@ describe('deleted flag bucket handling', () => { }); }); - it('putBucket request should recreate bucket with deleted flag if ' + - 'request is from same account that originally put', done => { - bucketPut(authInfo, baseTestRequest, log, err => 
{ - assert.ifError(err); - metadata.getBucket(bucketName, log, (err, data) => { - assert.strictEqual(data._transient, false); - assert.strictEqual(data._deleted, false); - assert.strictEqual(data._owner, authInfo.getCanonicalID()); - return checkBucketListing(authInfo, bucketName, 1, done); + it( + 'putBucket request should recreate bucket with deleted flag if ' + + 'request is from same account that originally put', + done => { + bucketPut(authInfo, baseTestRequest, log, err => { + assert.ifError(err); + metadata.getBucket(bucketName, log, (err, data) => { + assert.strictEqual(data._transient, false); + assert.strictEqual(data._deleted, false); + assert.strictEqual(data._owner, authInfo.getCanonicalID()); + return checkBucketListing(authInfo, bucketName, 1, done); + }); }); - }); - }); + } + ); - it('putBucket request should return error if ' + - 'different account sends put bucket request for bucket with ' + - 'deleted flag', done => { - bucketPut(otherAccountAuthInfo, baseTestRequest, log, err => { - assert.strictEqual(err.is.BucketAlreadyExists, true); - metadata.getBucket(bucketName, log, (err, data) => { - assert.strictEqual(data._transient, false); - assert.strictEqual(data._deleted, true); - assert.strictEqual(data._owner, authInfo.getCanonicalID()); - return checkBucketListing(otherAccountAuthInfo, - bucketName, 0, done); + it( + 'putBucket request should return error if ' + + 'different account sends put bucket request for bucket with ' + + 'deleted flag', + done => { + bucketPut(otherAccountAuthInfo, baseTestRequest, log, err => { + assert.strictEqual(err.is.BucketAlreadyExists, true); + metadata.getBucket(bucketName, log, (err, data) => { + assert.strictEqual(data._transient, false); + assert.strictEqual(data._deleted, true); + assert.strictEqual(data._owner, authInfo.getCanonicalID()); + return checkBucketListing(otherAccountAuthInfo, bucketName, 0, done); + }); }); - }); - }); + } + ); - it('ACLs from new putBucket request should overwrite ACLs saved ' + - 'in metadata of bucket with deleted flag', done => { - const alteredRequest = createAlteredRequest({ - 'x-amz-acl': 'public-read' }, 'headers', - baseTestRequest, baseTestRequest.headers); - bucketPut(authInfo, alteredRequest, log, err => { - assert.ifError(err); - metadata.getBucket(bucketName, log, (err, data) => { - assert.strictEqual(data._transient, false); - assert.strictEqual(data._deleted, false); - assert.strictEqual(data._acl.Canned, 'public-read'); - assert.strictEqual(data._owner, authInfo.getCanonicalID()); - return checkBucketListing(authInfo, bucketName, 1, done); + it( + 'ACLs from new putBucket request should overwrite ACLs saved ' + 'in metadata of bucket with deleted flag', + done => { + const alteredRequest = createAlteredRequest( + { + 'x-amz-acl': 'public-read', + }, + 'headers', + baseTestRequest, + baseTestRequest.headers + ); + bucketPut(authInfo, alteredRequest, log, err => { + assert.ifError(err); + metadata.getBucket(bucketName, log, (err, data) => { + assert.strictEqual(data._transient, false); + assert.strictEqual(data._deleted, false); + assert.strictEqual(data._acl.Canned, 'public-read'); + assert.strictEqual(data._owner, authInfo.getCanonicalID()); + return checkBucketListing(authInfo, bucketName, 1, done); + }); }); - }); - }); + } + ); - it('putBucketACL request should recreate bucket with deleted flag if ' + - 'request is from same account that originally put', done => { - const putACLRequest = createAlteredRequest({ - 'x-amz-acl': 'public-read' }, 'headers', - baseTestRequest, 
baseTestRequest.headers); - putACLRequest.query = { acl: '' }; - bucketPutACL(authInfo, putACLRequest, log, err => { - assert.ifError(err); - metadata.getBucket(bucketName, log, (err, data) => { - assert.strictEqual(data._transient, false); - assert.strictEqual(data._acl.Canned, 'public-read'); - assert.strictEqual(data._owner, authInfo.getCanonicalID()); - return checkBucketListing(authInfo, bucketName, 1, done); + it( + 'putBucketACL request should recreate bucket with deleted flag if ' + + 'request is from same account that originally put', + done => { + const putACLRequest = createAlteredRequest( + { + 'x-amz-acl': 'public-read', + }, + 'headers', + baseTestRequest, + baseTestRequest.headers + ); + putACLRequest.query = { acl: '' }; + bucketPutACL(authInfo, putACLRequest, log, err => { + assert.ifError(err); + metadata.getBucket(bucketName, log, (err, data) => { + assert.strictEqual(data._transient, false); + assert.strictEqual(data._acl.Canned, 'public-read'); + assert.strictEqual(data._owner, authInfo.getCanonicalID()); + return checkBucketListing(authInfo, bucketName, 1, done); + }); }); - }); - }); + } + ); - it('putBucketACL request on bucket with deleted flag should return ' + - 'NoSuchBucket error if request is from another authorized account', + it( + 'putBucketACL request on bucket with deleted flag should return ' + + 'NoSuchBucket error if request is from another authorized account', // Do not want different account recreating a bucket that the bucket // owner wanted deleted even if the other account is authorized to // change the ACLs done => { - const putACLRequest = createAlteredRequest({ - 'x-amz-acl': 'public-read' }, 'headers', - baseTestRequest, baseTestRequest.headers); + const putACLRequest = createAlteredRequest( + { + 'x-amz-acl': 'public-read', + }, + 'headers', + baseTestRequest, + baseTestRequest.headers + ); bucketPutACL(otherAccountAuthInfo, putACLRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); metadata.getBucket(bucketName, log, (err, data) => { @@ -203,14 +215,21 @@ describe('deleted flag bucket handling', () => { done(); }); }); - }); + } + ); - it('putBucketACL request on bucket with deleted flag should return ' + - 'AccessDenied error if request is from unauthorized account', + it( + 'putBucketACL request on bucket with deleted flag should return ' + + 'AccessDenied error if request is from unauthorized account', done => { - const putACLRequest = createAlteredRequest({ - 'x-amz-acl': 'public-read' }, 'headers', - baseTestRequest, baseTestRequest.headers); + const putACLRequest = createAlteredRequest( + { + 'x-amz-acl': 'public-read', + }, + 'headers', + baseTestRequest, + baseTestRequest.headers + ); const unauthorizedAccount = makeAuthInfo('keepMeOut'); bucketPutACL(unauthorizedAccount, putACLRequest, log, err => { assert.strictEqual(err.is.AccessDenied, true); @@ -222,7 +241,8 @@ describe('deleted flag bucket handling', () => { done(); }); }); - }); + } + ); describe('objectPut on a bucket with deleted flag', () => { const objName = 'objectName'; @@ -232,10 +252,8 @@ describe('deleted flag bucket handling', () => { }); }); - it('objectPut request from account that originally created ' + - 'should recreate bucket', done => { - const setUpRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('objectPut request from account that originally created ' + 'should recreate bucket', done => { + const setUpRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); 
setUpRequest.objectKey = objName; const postBody = Buffer.from('I am a body', 'utf8'); const md5Hash = crypto.createHash('md5'); @@ -247,46 +265,43 @@ describe('deleted flag bucket handling', () => { assert.strictEqual(data._transient, false); assert.strictEqual(data._deleted, false); assert.strictEqual(data._owner, authInfo.getCanonicalID()); - metadata.getObjectMD(bucketName, objName, {}, log, - (err, obj) => { - assert.ifError(err); - assert.strictEqual(obj['content-md5'], etag); - return checkBucketListing(authInfo, - bucketName, 1, done); - }); + metadata.getObjectMD(bucketName, objName, {}, log, (err, obj) => { + assert.ifError(err); + assert.strictEqual(obj['content-md5'], etag); + return checkBucketListing(authInfo, bucketName, 1, done); + }); }); }); }); }); - it('should return NoSuchBucket error on an objectPut request from ' + - 'different account when there is a deleted flag', done => { - const setUpRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); - setUpRequest.objectKey = 'objectName'; - const postBody = Buffer.from('I am a body', 'utf8'); - const putObjRequest = new DummyRequest(setUpRequest, postBody); - objectPut(otherAccountAuthInfo, putObjRequest, undefined, log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - done(); - }); - }); + it( + 'should return NoSuchBucket error on an objectPut request from ' + + 'different account when there is a deleted flag', + done => { + const setUpRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); + setUpRequest.objectKey = 'objectName'; + const postBody = Buffer.from('I am a body', 'utf8'); + const putObjRequest = new DummyRequest(setUpRequest, postBody); + objectPut(otherAccountAuthInfo, putObjRequest, undefined, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + done(); + }); + } + ); describe('initiateMultipartUpload on a bucket with deleted flag', () => { const objName = 'objectName'; after(done => { - metadata.deleteObjectMD(`${constants.mpuBucketPrefix}` + - `${bucketName}`, objName, {}, log, () => { - metadata.deleteBucket(`${constants.mpuBucketPrefix}` + - `${bucketName}`, log, () => { - done(); - }); + metadata.deleteObjectMD(`${constants.mpuBucketPrefix}` + `${bucketName}`, objName, {}, log, () => { + metadata.deleteBucket(`${constants.mpuBucketPrefix}` + `${bucketName}`, log, () => { + done(); }); + }); }); it('should recreate bucket with deleted flag', done => { - const initiateRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + const initiateRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); initiateRequest.objectKey = objName; initiateRequest.url = `/${objName}?uploads`; initiateMultipartUpload(authInfo, initiateRequest, log, err => { @@ -295,185 +310,196 @@ describe('deleted flag bucket handling', () => { assert.strictEqual(data._transient, false); assert.strictEqual(data._deleted, false); assert.strictEqual(data._owner, authInfo.getCanonicalID()); - metadata.listObject(`${constants.mpuBucketPrefix}` + - `${bucketName}`, + metadata.listObject( + `${constants.mpuBucketPrefix}` + `${bucketName}`, { prefix: `overview${constants.splitter}${objName}` }, - log, (err, results) => { + log, + (err, results) => { assert.ifError(err); assert.strictEqual(results.Contents.length, 1); done(); - }); + } + ); }); }); }); }); - it('should return NoSuchBucket error on an initiateMultipartUpload ' + - 'request from different account when there is a deleted 
flag', done => { - const initiateRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); - initiateRequest.objectKey = 'objectName'; - initiateMultipartUpload(otherAccountAuthInfo, initiateRequest, log, - err => { + it( + 'should return NoSuchBucket error on an initiateMultipartUpload ' + + 'request from different account when there is a deleted flag', + done => { + const initiateRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); + initiateRequest.objectKey = 'objectName'; + initiateMultipartUpload(otherAccountAuthInfo, initiateRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); - }); + } + ); - it('deleteBucket request should complete deletion ' + - 'of bucket with deleted flag', done => { + it('deleteBucket request should complete deletion ' + 'of bucket with deleted flag', done => { bucketDelete(authInfo, baseTestRequest, log, err => { assert.ifError(err); confirmDeleted(done); }); }); - it('deleteBucket request should return error if account not ' + - 'authorized', done => { - bucketDelete(otherAccountAuthInfo, baseTestRequest, - log, err => { - assert.strictEqual(err.is.AccessDenied, true); - done(); - }); + it('deleteBucket request should return error if account not ' + 'authorized', done => { + bucketDelete(otherAccountAuthInfo, baseTestRequest, log, err => { + assert.strictEqual(err.is.AccessDenied, true); + done(); + }); }); - it('bucketDeleteWebsite request on bucket with delete flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - bucketDeleteWebsite(authInfo, baseTestRequest, - log, err => { + it( + 'bucketDeleteWebsite request on bucket with delete flag should return ' + + 'NoSuchBucket error and complete deletion', + done => { + bucketDeleteWebsite(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); confirmDeleted(done); }); - }); + } + ); - it('bucketGet request on bucket with delete flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - bucketGet(authInfo, baseTestRequest, - log, err => { + it( + 'bucketGet request on bucket with delete flag should return ' + 'NoSuchBucket error and complete deletion', + done => { + bucketGet(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); confirmDeleted(done); }); - }); + } + ); - it('bucketGetACL request on bucket with delete flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - bucketGetACL(authInfo, baseTestRequest, - log, err => { + it( + 'bucketGetACL request on bucket with delete flag should return ' + 'NoSuchBucket error and complete deletion', + done => { + bucketGetACL(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); confirmDeleted(done); }); - }); + } + ); - it('bucketGetCors request on bucket with delete flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - bucketGetCors(authInfo, baseTestRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - confirmDeleted(done); - }); - }); + it( + 'bucketGetCors request on bucket with delete flag should return ' + 'NoSuchBucket error and complete deletion', + done => { + bucketGetCors(authInfo, baseTestRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + confirmDeleted(done); + }); + } + ); - it('bucketPutCors request on bucket with delete flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - const 
bucketPutCorsRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); - bucketPutCorsRequest.post = '' + - 'PUT' + - 'http://www.example.com' + - ''; - bucketPutCorsRequest.headers['content-md5'] = crypto.createHash('md5') - .update(bucketPutCorsRequest.post, 'utf8').digest('base64'); - bucketPutCors(authInfo, bucketPutCorsRequest, log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - confirmDeleted(done); - }); - }); + it( + 'bucketPutCors request on bucket with delete flag should return ' + 'NoSuchBucket error and complete deletion', + done => { + const bucketPutCorsRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); + bucketPutCorsRequest.post = + '' + + 'PUT' + + 'http://www.example.com' + + ''; + bucketPutCorsRequest.headers['content-md5'] = crypto + .createHash('md5') + .update(bucketPutCorsRequest.post, 'utf8') + .digest('base64'); + bucketPutCors(authInfo, bucketPutCorsRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + confirmDeleted(done); + }); + } + ); - it('bucketDeleteCors request on bucket with delete flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - bucketDeleteCors(authInfo, baseTestRequest, log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - confirmDeleted(done); - }); - }); + it( + 'bucketDeleteCors request on bucket with delete flag should return ' + + 'NoSuchBucket error and complete deletion', + done => { + bucketDeleteCors(authInfo, baseTestRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + confirmDeleted(done); + }); + } + ); - it('bucketGetWebsite request on bucket with delete flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - bucketGetWebsite(authInfo, baseTestRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - confirmDeleted(done); - }); - }); + it( + 'bucketGetWebsite request on bucket with delete flag should return ' + + 'NoSuchBucket error and complete deletion', + done => { + bucketGetWebsite(authInfo, baseTestRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + confirmDeleted(done); + }); + } + ); - it('bucketPutWebsite request on bucket with delete flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - const bucketPutWebsiteRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); - bucketPutWebsiteRequest.post = '' + - 'index.html' + - ''; - bucketPutWebsite(authInfo, bucketPutWebsiteRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - confirmDeleted(done); - }); - }); + it( + 'bucketPutWebsite request on bucket with delete flag should return ' + + 'NoSuchBucket error and complete deletion', + done => { + const bucketPutWebsiteRequest = createAlteredRequest( + {}, + 'headers', + baseTestRequest, + baseTestRequest.headers + ); + bucketPutWebsiteRequest.post = + '' + + 'index.html' + + ''; + bucketPutWebsite(authInfo, bucketPutWebsiteRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + confirmDeleted(done); + }); + } + ); - it('bucketHead request on bucket with delete flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - bucketHead(authInfo, baseTestRequest, - log, err => { + it( + 'bucketHead request on bucket with delete flag should return ' + 'NoSuchBucket error and complete deletion', + done => { + bucketHead(authInfo, baseTestRequest, log, err => { 
assert.strictEqual(err.is.NoSuchBucket, true); confirmDeleted(done); }); - }); + } + ); - function checkForNoSuchUploadError(apiAction, partNumber, done, - extraArgNeeded) { - const mpuRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + function checkForNoSuchUploadError(apiAction, partNumber, done, extraArgNeeded) { + const mpuRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); const uploadId = '5555'; mpuRequest.objectKey = 'objectName'; mpuRequest.query = { uploadId, partNumber }; if (extraArgNeeded) { - return apiAction(authInfo, mpuRequest, undefined, - log, err => { - assert.strictEqual(err.is.NoSuchUpload, true); - return done(); - }); - } - return apiAction(authInfo, mpuRequest, - log, err => { + return apiAction(authInfo, mpuRequest, undefined, log, err => { assert.strictEqual(err.is.NoSuchUpload, true); return done(); }); + } + return apiAction(authInfo, mpuRequest, log, err => { + assert.strictEqual(err.is.NoSuchUpload, true); + return done(); + }); } - it('completeMultipartUpload request on bucket with deleted flag should ' + - 'return NoSuchUpload error', done => { + it('completeMultipartUpload request on bucket with deleted flag should ' + 'return NoSuchUpload error', done => { checkForNoSuchUploadError(completeMultipartUpload, null, done); }); - it('listParts request on bucket with deleted flag should ' + - 'return NoSuchUpload error', done => { + it('listParts request on bucket with deleted flag should ' + 'return NoSuchUpload error', done => { checkForNoSuchUploadError(listParts, null, done); }); describe('multipartDelete request on a bucket with deleted flag', () => { - it('should return NoSuchUpload error if legacyAWSBehavior is enabled', - done => { - config.locationConstraints[locationConstraint]. - legacyAwsBehavior = true; + it('should return NoSuchUpload error if legacyAWSBehavior is enabled', done => { + config.locationConstraints[locationConstraint].legacyAwsBehavior = true; checkForNoSuchUploadError(multipartDelete, null, done); }); - it('should return no error if legacyAWSBehavior is not enabled', - done => { - config.locationConstraints[locationConstraint]. 
- legacyAwsBehavior = false; - const mpuRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('should return no error if legacyAWSBehavior is not enabled', done => { + config.locationConstraints[locationConstraint].legacyAwsBehavior = false; + const mpuRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); const uploadId = '5555'; mpuRequest.objectKey = 'objectName'; mpuRequest.query = { uploadId }; @@ -484,64 +510,61 @@ describe('deleted flag bucket handling', () => { }); }); - it('objectPutPart request on bucket with deleted flag should ' + - 'return NoSuchUpload error', done => { + it('objectPutPart request on bucket with deleted flag should ' + 'return NoSuchUpload error', done => { checkForNoSuchUploadError(objectPutPart, '1', done, true); }); - it('list multipartUploads request on bucket with deleted flag should ' + - 'return NoSuchBucket error', done => { - const listRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('list multipartUploads request on bucket with deleted flag should ' + 'return NoSuchBucket error', done => { + const listRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); listRequest.query = {}; - listMultipartUploads(authInfo, listRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - done(); - }); + listMultipartUploads(authInfo, listRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + done(); + }); }); - it('objectGet request on bucket with deleted flag should' + - 'return NoSuchBucket error and finish deletion', + it( + 'objectGet request on bucket with deleted flag should' + 'return NoSuchBucket error and finish deletion', done => { - objectGet(authInfo, baseTestRequest, false, - log, err => { + objectGet(authInfo, baseTestRequest, false, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); confirmDeleted(done); }); - }); + } + ); - it('objectGetACL request on bucket with deleted flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - objectGetACL(authInfo, baseTestRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - confirmDeleted(done); - }); - }); + it( + 'objectGetACL request on bucket with deleted flag should return ' + 'NoSuchBucket error and complete deletion', + done => { + objectGetACL(authInfo, baseTestRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + confirmDeleted(done); + }); + } + ); - it('objectHead request on bucket with deleted flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - objectHead(authInfo, baseTestRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - confirmDeleted(done); - }); - }); + it( + 'objectHead request on bucket with deleted flag should return ' + 'NoSuchBucket error and complete deletion', + done => { + objectHead(authInfo, baseTestRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + confirmDeleted(done); + }); + } + ); - it('objectPutACL request on bucket with deleted flag should return ' + - 'NoSuchBucket error and complete deletion', done => { - objectPutACL(authInfo, baseTestRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - confirmDeleted(done); - }); - }); + it( + 'objectPutACL request on bucket with deleted flag should return ' + 'NoSuchBucket error and complete deletion', + done => { + objectPutACL(authInfo, baseTestRequest, log, err => { + 
assert.strictEqual(err.is.NoSuchBucket, true); + confirmDeleted(done); + }); + } + ); - it('objectDelete request on bucket with deleted flag should return ' + - 'NoSuchBucket error', done => { - objectDelete(authInfo, baseTestRequest, - log, err => { + it('objectDelete request on bucket with deleted flag should return ' + 'NoSuchBucket error', done => { + objectDelete(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); confirmDeleted(done); }); diff --git a/tests/unit/api/listMultipartUploads.js b/tests/unit/api/listMultipartUploads.js index e24fc13f40..7783bff2b5 100644 --- a/tests/unit/api/listMultipartUploads.js +++ b/tests/unit/api/listMultipartUploads.js @@ -4,8 +4,7 @@ const querystring = require('querystring'); const { parseString } = require('xml2js'); const { bucketPut } = require('../../../lib/api/bucketPut'); -const initiateMultipartUpload - = require('../../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../../lib/api/initiateMultipartUpload'); const listMultipartUploads = require('../../../lib/api/listMultipartUploads'); const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); @@ -59,40 +58,39 @@ describe('listMultipartUploads API', () => { actionImplicitDenies: false, }; - it('should return the name of the common prefix ' + - 'of common prefix object keys for multipart uploads if delimiter ' + - 'and prefix specified', done => { - const commonPrefix = `${prefix}${delimiter}`; - const testListRequest = { - bucketName, - namespace, - headers: { host: '/' }, - url: `/${bucketName}?uploads&delimiter=/&prefix=sub`, - query: { delimiter, prefix }, - actionImplicitDenies: false, - }; - - async.waterfall([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest1, log, next), - (result, corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest2, log, next), - (result, corsHeaders, next) => listMultipartUploads(authInfo, - testListRequest, log, next), - (result, corsHeaders, next) => - parseString(result, corsHeaders, next), - ], - (err, result) => { - assert.strictEqual(result.ListMultipartUploadsResult - .CommonPrefixes[0].Prefix[0], - commonPrefix); - done(); - }); - }); - - it('should return list of all multipart uploads if ' + - 'no delimiter specified', done => { + it( + 'should return the name of the common prefix ' + + 'of common prefix object keys for multipart uploads if delimiter ' + + 'and prefix specified', + done => { + const commonPrefix = `${prefix}${delimiter}`; + const testListRequest = { + bucketName, + namespace, + headers: { host: '/' }, + url: `/${bucketName}?uploads&delimiter=/&prefix=sub`, + query: { delimiter, prefix }, + actionImplicitDenies: false, + }; + + async.waterfall( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest1, log, next), + (result, corsHeaders, next) => + initiateMultipartUpload(authInfo, testInitiateMPURequest2, log, next), + (result, corsHeaders, next) => listMultipartUploads(authInfo, testListRequest, log, next), + (result, corsHeaders, next) => parseString(result, corsHeaders, next), + ], + (err, result) => { + assert.strictEqual(result.ListMultipartUploadsResult.CommonPrefixes[0].Prefix[0], commonPrefix); + done(); + } + ); + } + ); + + it('should return list of all multipart uploads if ' + 'no delimiter specified', done => { const 
testListRequest = { bucketName, namespace, @@ -102,31 +100,24 @@ describe('listMultipartUploads API', () => { actionImplicitDenies: false, }; - - async.waterfall([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest1, log, next), - (result, corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest2, log, next), - (result, corsHeaders, next) => - listMultipartUploads(authInfo, testListRequest, log, next), - (result, corsHeaders, next) => - parseString(result, corsHeaders, next), - ], - (err, result) => { - assert.strictEqual(result.ListMultipartUploadsResult - .Upload[0].Key[0], objectName1); - assert.strictEqual(result.ListMultipartUploadsResult - .Upload[1].Key[0], objectName2); - assert.strictEqual(result.ListMultipartUploadsResult - .IsTruncated[0], 'false'); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest1, log, next), + (result, corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest2, log, next), + (result, corsHeaders, next) => listMultipartUploads(authInfo, testListRequest, log, next), + (result, corsHeaders, next) => parseString(result, corsHeaders, next), + ], + (err, result) => { + assert.strictEqual(result.ListMultipartUploadsResult.Upload[0].Key[0], objectName1); + assert.strictEqual(result.ListMultipartUploadsResult.Upload[1].Key[0], objectName2); + assert.strictEqual(result.ListMultipartUploadsResult.IsTruncated[0], 'false'); + done(); + } + ); }); - it('should return no more keys than ' + - 'max-uploads specified', done => { + it('should return no more keys than ' + 'max-uploads specified', done => { const testListRequest = { bucketName, namespace, @@ -136,34 +127,26 @@ describe('listMultipartUploads API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest1, log, next), - (result, corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest2, log, next), - (result, corsHeaders, next) => listMultipartUploads(authInfo, - testListRequest, log, next), - (result, corsHeaders, next) => - parseString(result, corsHeaders, next), - ], - (err, result) => { - assert.strictEqual(result.ListMultipartUploadsResult - .Upload[0].Key[0], objectName1); - assert.strictEqual(result.ListMultipartUploadsResult - .Upload[1], undefined); - assert.strictEqual(result.ListMultipartUploadsResult - .IsTruncated[0], 'true'); - assert.strictEqual(result.ListMultipartUploadsResult - .NextKeyMarker[0], objectName1); - assert(result.ListMultipartUploadsResult - .NextUploadIdMarker[0].length > 5); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest1, log, next), + (result, corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest2, log, next), + (result, corsHeaders, next) => listMultipartUploads(authInfo, testListRequest, log, next), + (result, corsHeaders, next) => parseString(result, corsHeaders, next), + ], + (err, result) => { + assert.strictEqual(result.ListMultipartUploadsResult.Upload[0].Key[0], objectName1); + assert.strictEqual(result.ListMultipartUploadsResult.Upload[1], undefined); + 
assert.strictEqual(result.ListMultipartUploadsResult.IsTruncated[0], 'true'); + assert.strictEqual(result.ListMultipartUploadsResult.NextKeyMarker[0], objectName1); + assert(result.ListMultipartUploadsResult.NextUploadIdMarker[0].length > 5); + done(); + } + ); }); - it('should url encode object key name ' + - 'if requested', done => { + it('should url encode object key name ' + 'if requested', done => { const testListRequest = { bucketName, namespace, @@ -173,30 +156,24 @@ describe('listMultipartUploads API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest1, log, next), - (result, corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest2, log, next), - (result, corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest3, log, next), - (result, corsHeaders, next) => listMultipartUploads(authInfo, - testListRequest, log, next), - (result, corsHeaders, next) => - parseString(result, corsHeaders, next), - ], - (err, result) => { - assert.strictEqual(result.ListMultipartUploadsResult - .Upload[0].Key[0], querystring.escape(objectName3)); - assert.strictEqual(result.ListMultipartUploadsResult - .Upload[1].Key[0], querystring.escape(objectName1)); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest1, log, next), + (result, corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest2, log, next), + (result, corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest3, log, next), + (result, corsHeaders, next) => listMultipartUploads(authInfo, testListRequest, log, next), + (result, corsHeaders, next) => parseString(result, corsHeaders, next), + ], + (err, result) => { + assert.strictEqual(result.ListMultipartUploadsResult.Upload[0].Key[0], querystring.escape(objectName3)); + assert.strictEqual(result.ListMultipartUploadsResult.Upload[1].Key[0], querystring.escape(objectName1)); + done(); + } + ); }); - it('should return key following specified ' + - 'key-marker', done => { + it('should return key following specified ' + 'key-marker', done => { const testListRequest = { bucketName, namespace, @@ -206,25 +183,20 @@ describe('listMultipartUploads API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest1, log, next), - (result, corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest2, log, next), - (result, corsHeaders, next) => initiateMultipartUpload(authInfo, - testInitiateMPURequest3, log, next), - (result, corsHeaders, next) => listMultipartUploads(authInfo, - testListRequest, log, next), - (result, corsHeaders, next) => - parseString(result, corsHeaders, next), - ], - (err, result) => { - assert.strictEqual(result.ListMultipartUploadsResult - .Upload[0].Key[0], objectName2); - assert.strictEqual(result.ListMultipartUploadsResult - .Upload[1], undefined); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest1, log, next), + (result, corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest2, log, 
next), + (result, corsHeaders, next) => initiateMultipartUpload(authInfo, testInitiateMPURequest3, log, next), + (result, corsHeaders, next) => listMultipartUploads(authInfo, testListRequest, log, next), + (result, corsHeaders, next) => parseString(result, corsHeaders, next), + ], + (err, result) => { + assert.strictEqual(result.ListMultipartUploadsResult.Upload[0].Key[0], objectName2); + assert.strictEqual(result.ListMultipartUploadsResult.Upload[1], undefined); + done(); + } + ); }); }); diff --git a/tests/unit/api/listParts.js b/tests/unit/api/listParts.js index 48a9668e11..86c157ee03 100644 --- a/tests/unit/api/listParts.js +++ b/tests/unit/api/listParts.js @@ -23,11 +23,9 @@ const mpuBucket = `${constants.mpuBucketPrefix}${bucketName}`; const uploadKey = '$makememulti'; const sixMBObjectETag = '"f3a9fb2071d3503b703938a74eb99846"'; const lastPieceETag = '"555e4cd2f9eff38109d7a3ab13995a32"'; -const overviewKey = `overview${splitter}$makememulti${splitter}4db92ccc-` + - 'd89d-49d3-9fa6-e9c2c1eb31b0'; +const overviewKey = `overview${splitter}$makememulti${splitter}4db92ccc-` + 'd89d-49d3-9fa6-e9c2c1eb31b0'; const partOneKey = `4db92ccc-d89d-49d3-9fa6-e9c2c1eb31b0${splitter}00001`; -const partTwoKey = '4db92ccc-d89d-49d3-9fa6-e9c2c1eb31b0' + - `${splitter}00002`; +const partTwoKey = '4db92ccc-d89d-49d3-9fa6-e9c2c1eb31b0' + `${splitter}00002`; const partThreeKey = `4db92ccc-d89d-49d3-9fa6-e9c2c1eb31b0${splitter}00003`; const partFourKey = `4db92ccc-d89d-49d3-9fa6-e9c2c1eb31b0${splitter}00004`; const partFiveKey = `4db92ccc-d89d-49d3-9fa6-e9c2c1eb31b0${splitter}00005`; @@ -36,74 +34,82 @@ describe('List Parts API', () => { beforeEach(done => { cleanup(); const creationDate = new Date().toJSON(); - const sampleNormalBucketInstance = new BucketInfo(bucketName, - canonicalID, authInfo.getAccountDisplayName(), creationDate, - BucketInfo.currentModelVersion()); - const sampleMPUInstance = new BucketInfo(mpuBucket, - 'admin', 'admin', creationDate, BucketInfo.currentModelVersion()); - metadata.createBucket(bucketName, sampleNormalBucketInstance, log, - () => { - metadata.createBucket(mpuBucket, sampleMPUInstance, log, () => { - inMemMetadata.keyMaps.get(mpuBucket).set(overviewKey, { - 'id': '4db92ccc-d89d-49d3-9fa6-e9c2c1eb31b0', - 'owner-display-name': authInfo.getAccountDisplayName(), - 'owner-id': canonicalID, - 'initiator': { - DisplayName: authInfo.getAccountDisplayName(), - ID: canonicalID, - }, - 'key': '$makememulti', - 'initiated': '2015-11-30T22:40:07.858Z', - 'uploadId': '4db92ccc-d89d-49d3-9fa6-e9c2c1eb31b0', - 'acl': { - Canned: 'private', - FULL_CONTROL: [], - WRITE_ACP: [], - READ: [], - READ_ACP: [], - }, - 'eventualStorageBucket': 'freshestbucket', - 'mdBucketModelVersion': 2, - }); + const sampleNormalBucketInstance = new BucketInfo( + bucketName, + canonicalID, + authInfo.getAccountDisplayName(), + creationDate, + BucketInfo.currentModelVersion() + ); + const sampleMPUInstance = new BucketInfo( + mpuBucket, + 'admin', + 'admin', + creationDate, + BucketInfo.currentModelVersion() + ); + metadata.createBucket(bucketName, sampleNormalBucketInstance, log, () => { + metadata.createBucket(mpuBucket, sampleMPUInstance, log, () => { + inMemMetadata.keyMaps.get(mpuBucket).set(overviewKey, { + id: '4db92ccc-d89d-49d3-9fa6-e9c2c1eb31b0', + 'owner-display-name': authInfo.getAccountDisplayName(), + 'owner-id': canonicalID, + initiator: { + DisplayName: authInfo.getAccountDisplayName(), + ID: canonicalID, + }, + key: '$makememulti', + initiated: '2015-11-30T22:40:07.858Z', + uploadId: 
'4db92ccc-d89d-49d3-9fa6-e9c2c1eb31b0', + acl: { + Canned: 'private', + FULL_CONTROL: [], + WRITE_ACP: [], + READ: [], + READ_ACP: [], + }, + eventualStorageBucket: 'freshestbucket', + mdBucketModelVersion: 2, + }); - inMemMetadata.keyMaps.get(mpuBucket).set(partOneKey, { - 'key': partOneKey, - 'last-modified': '2015-11-30T22:41:18.658Z', - 'content-md5': 'f3a9fb2071d3503b703938a74eb99846', - 'content-length': '6000000', - 'partLocations': ['068db6a6745a79d54c1b29ff99f9f131'], - }); - inMemMetadata.keyMaps.get(mpuBucket).set(partTwoKey, { - 'key': partTwoKey, - 'last-modified': '2015-11-30T22:41:40.207Z', - 'content-md5': 'f3a9fb2071d3503b703938a74eb99846', - 'content-length': '6000000', - 'partLocations': ['ff22f316b16956ff5118c93abce7d62d'], - }); - inMemMetadata.keyMaps.get(mpuBucket).set(partThreeKey, { - 'key': partThreeKey, - 'last-modified': '2015-11-30T22:41:52.102Z', - 'content-md5': 'f3a9fb2071d3503b703938a74eb99846', - 'content-length': '6000000', - 'partLocations': ['dea282f70edb6fc5f9433cd6f525d4a6'], - }); - inMemMetadata.keyMaps.get(mpuBucket).set(partFourKey, { - 'key': partFourKey, - 'last-modified': '2015-11-30T22:42:03.493Z', - 'content-md5': 'f3a9fb2071d3503b703938a74eb99846', - 'content-length': '6000000', - 'partLocations': ['afe24bc40153982e1f7f28066f7af6a4'], - }); - inMemMetadata.keyMaps.get(mpuBucket).set(partFiveKey, { - 'key': partFiveKey, - 'last-modified': '2015-11-30T22:42:22.876Z', - 'content-md5': '555e4cd2f9eff38109d7a3ab13995a32', - 'content-length': '18', - 'partLocations': ['85bc16f5769687070fb13cfe66b5e41f'], - }); - done(); + inMemMetadata.keyMaps.get(mpuBucket).set(partOneKey, { + key: partOneKey, + 'last-modified': '2015-11-30T22:41:18.658Z', + 'content-md5': 'f3a9fb2071d3503b703938a74eb99846', + 'content-length': '6000000', + partLocations: ['068db6a6745a79d54c1b29ff99f9f131'], + }); + inMemMetadata.keyMaps.get(mpuBucket).set(partTwoKey, { + key: partTwoKey, + 'last-modified': '2015-11-30T22:41:40.207Z', + 'content-md5': 'f3a9fb2071d3503b703938a74eb99846', + 'content-length': '6000000', + partLocations: ['ff22f316b16956ff5118c93abce7d62d'], + }); + inMemMetadata.keyMaps.get(mpuBucket).set(partThreeKey, { + key: partThreeKey, + 'last-modified': '2015-11-30T22:41:52.102Z', + 'content-md5': 'f3a9fb2071d3503b703938a74eb99846', + 'content-length': '6000000', + partLocations: ['dea282f70edb6fc5f9433cd6f525d4a6'], }); + inMemMetadata.keyMaps.get(mpuBucket).set(partFourKey, { + key: partFourKey, + 'last-modified': '2015-11-30T22:42:03.493Z', + 'content-md5': 'f3a9fb2071d3503b703938a74eb99846', + 'content-length': '6000000', + partLocations: ['afe24bc40153982e1f7f28066f7af6a4'], + }); + inMemMetadata.keyMaps.get(mpuBucket).set(partFiveKey, { + key: partFiveKey, + 'last-modified': '2015-11-30T22:42:22.876Z', + 'content-md5': '555e4cd2f9eff38109d7a3ab13995a32', + 'content-length': '18', + partLocations: ['85bc16f5769687070fb13cfe66b5e41f'], + }); + done(); }); + }); }); it('should list all parts of a multipart upload', done => { @@ -125,24 +131,15 @@ describe('List Parts API', () => { assert.strictEqual(json.ListPartsResult.Key[0], uploadKey); assert.strictEqual(json.ListPartsResult.UploadId[0], uploadId); assert.strictEqual(json.ListPartsResult.MaxParts[0], '1000'); - assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], - authInfo.getCanonicalID()); - assert.strictEqual(json.ListPartsResult.IsTruncated[0], - 'false'); - assert.strictEqual(json.ListPartsResult.PartNumberMarker, - undefined); - 
assert.strictEqual(json.ListPartsResult.NextPartNumberMarker, - undefined); - assert.strictEqual(json.ListPartsResult.Part[0].PartNumber[0], - '1'); - assert.strictEqual(json.ListPartsResult.Part[0].ETag[0], - sixMBObjectETag); - assert.strictEqual(json.ListPartsResult.Part[0].Size[0], - '6000000'); - assert.strictEqual(json.ListPartsResult.Part[4].PartNumber[0], - '5'); - assert.strictEqual(json.ListPartsResult.Part[4].ETag[0], - lastPieceETag); + assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], authInfo.getCanonicalID()); + assert.strictEqual(json.ListPartsResult.IsTruncated[0], 'false'); + assert.strictEqual(json.ListPartsResult.PartNumberMarker, undefined); + assert.strictEqual(json.ListPartsResult.NextPartNumberMarker, undefined); + assert.strictEqual(json.ListPartsResult.Part[0].PartNumber[0], '1'); + assert.strictEqual(json.ListPartsResult.Part[0].ETag[0], sixMBObjectETag); + assert.strictEqual(json.ListPartsResult.Part[0].Size[0], '6000000'); + assert.strictEqual(json.ListPartsResult.Part[4].PartNumber[0], '5'); + assert.strictEqual(json.ListPartsResult.Part[4].ETag[0], lastPieceETag); assert.strictEqual(json.ListPartsResult.Part[4].Size[0], '18'); assert.strictEqual(json.ListPartsResult.Part.length, 5); done(); @@ -168,15 +165,13 @@ describe('List Parts API', () => { listParts(authInfo, listRequest, log, (err, xml) => { assert.strictEqual(err, null); parseString(xml, (err, json) => { - assert.strictEqual(json.ListPartsResult.Key[0], - urlEncodedObjectKey); + assert.strictEqual(json.ListPartsResult.Key[0], urlEncodedObjectKey); done(); }); }); }); - it('should list only up to requested number ' + - 'of max parts of a multipart upload', done => { + it('should list only up to requested number ' + 'of max parts of a multipart upload', done => { const listRequest = { bucketName, namespace, @@ -198,27 +193,20 @@ describe('List Parts API', () => { assert.strictEqual(json.ListPartsResult.Key[0], uploadKey); assert.strictEqual(json.ListPartsResult.UploadId[0], uploadId); assert.strictEqual(json.ListPartsResult.MaxParts[0], '4'); - assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], - authInfo.getCanonicalID()); + assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], authInfo.getCanonicalID()); assert.strictEqual(json.ListPartsResult.IsTruncated[0], 'true'); - assert.strictEqual(json.ListPartsResult.PartNumberMarker, - undefined); - assert.strictEqual(json.ListPartsResult.NextPartNumberMarker[0], - '4'); - assert.strictEqual(json.ListPartsResult.Part[2].PartNumber[0], - '3'); - assert.strictEqual(json.ListPartsResult.Part[2].ETag[0], - sixMBObjectETag); - assert.strictEqual(json.ListPartsResult.Part[2].Size[0], - '6000000'); + assert.strictEqual(json.ListPartsResult.PartNumberMarker, undefined); + assert.strictEqual(json.ListPartsResult.NextPartNumberMarker[0], '4'); + assert.strictEqual(json.ListPartsResult.Part[2].PartNumber[0], '3'); + assert.strictEqual(json.ListPartsResult.Part[2].ETag[0], sixMBObjectETag); + assert.strictEqual(json.ListPartsResult.Part[2].Size[0], '6000000'); assert.strictEqual(json.ListPartsResult.Part.length, 4); done(); }); }); }); - it('should list all parts if requested max-parts ' + - 'is greater than total number of parts', done => { + it('should list all parts if requested max-parts ' + 'is greater than total number of parts', done => { const listRequest = { bucketName, namespace, @@ -240,20 +228,13 @@ describe('List Parts API', () => { assert.strictEqual(json.ListPartsResult.Key[0], uploadKey); 
assert.strictEqual(json.ListPartsResult.UploadId[0], uploadId); assert.strictEqual(json.ListPartsResult.MaxParts[0], '6'); - assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], - authInfo.getCanonicalID()); - assert.strictEqual(json.ListPartsResult.IsTruncated[0], - 'false'); - assert.strictEqual(json.ListPartsResult.PartNumberMarker, - undefined); - assert.strictEqual(json.ListPartsResult.NextPartNumberMarker, - undefined); - assert.strictEqual(json.ListPartsResult.Part[2].PartNumber[0], - '3'); - assert.strictEqual(json.ListPartsResult.Part[2].ETag[0], - sixMBObjectETag); - assert.strictEqual(json.ListPartsResult.Part[2].Size[0], - '6000000'); + assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], authInfo.getCanonicalID()); + assert.strictEqual(json.ListPartsResult.IsTruncated[0], 'false'); + assert.strictEqual(json.ListPartsResult.PartNumberMarker, undefined); + assert.strictEqual(json.ListPartsResult.NextPartNumberMarker, undefined); + assert.strictEqual(json.ListPartsResult.Part[2].PartNumber[0], '3'); + assert.strictEqual(json.ListPartsResult.Part[2].ETag[0], sixMBObjectETag); + assert.strictEqual(json.ListPartsResult.Part[2].Size[0], '6000000'); assert.strictEqual(json.ListPartsResult.Part.length, 5); done(); }); @@ -282,30 +263,21 @@ describe('List Parts API', () => { assert.strictEqual(json.ListPartsResult.Key[0], uploadKey); assert.strictEqual(json.ListPartsResult.UploadId[0], uploadId); assert.strictEqual(json.ListPartsResult.MaxParts[0], '1000'); - assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], - authInfo.getCanonicalID()); - assert.strictEqual(json.ListPartsResult.IsTruncated[0], - 'false'); - assert.strictEqual(json.ListPartsResult.PartNumberMarker[0], - '2'); - assert.strictEqual(json.ListPartsResult.NextPartNumberMarker, - undefined); - assert.strictEqual(json.ListPartsResult.Part[0].PartNumber[0], - '3'); - assert.strictEqual(json.ListPartsResult.Part[0].ETag[0], - sixMBObjectETag); - assert.strictEqual(json.ListPartsResult.Part[0].Size[0], - '6000000'); - assert.strictEqual(json.ListPartsResult.Part[2].PartNumber[0], - '5'); + assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], authInfo.getCanonicalID()); + assert.strictEqual(json.ListPartsResult.IsTruncated[0], 'false'); + assert.strictEqual(json.ListPartsResult.PartNumberMarker[0], '2'); + assert.strictEqual(json.ListPartsResult.NextPartNumberMarker, undefined); + assert.strictEqual(json.ListPartsResult.Part[0].PartNumber[0], '3'); + assert.strictEqual(json.ListPartsResult.Part[0].ETag[0], sixMBObjectETag); + assert.strictEqual(json.ListPartsResult.Part[0].Size[0], '6000000'); + assert.strictEqual(json.ListPartsResult.Part[2].PartNumber[0], '5'); assert.strictEqual(json.ListPartsResult.Part.length, 3); done(); }); }); }); - it('should handle a part-number-marker specified ' + - 'and a max-parts specified', done => { + it('should handle a part-number-marker specified ' + 'and a max-parts specified', done => { const listRequest = { bucketName, namespace, @@ -328,21 +300,14 @@ describe('List Parts API', () => { assert.strictEqual(json.ListPartsResult.Key[0], uploadKey); assert.strictEqual(json.ListPartsResult.UploadId[0], uploadId); assert.strictEqual(json.ListPartsResult.MaxParts[0], '2'); - assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], - authInfo.getCanonicalID()); + assert.strictEqual(json.ListPartsResult.Initiator[0].ID[0], authInfo.getCanonicalID()); assert.strictEqual(json.ListPartsResult.IsTruncated[0], 'true'); - 
assert.strictEqual(json.ListPartsResult.PartNumberMarker[0], - '2'); - assert.strictEqual(json.ListPartsResult.NextPartNumberMarker[0], - '4'); - assert.strictEqual(json.ListPartsResult.Part[0].PartNumber[0], - '3'); - assert.strictEqual(json.ListPartsResult.Part[0].ETag[0], - sixMBObjectETag); - assert.strictEqual(json.ListPartsResult.Part[0].Size[0], - '6000000'); - assert.strictEqual(json.ListPartsResult.Part[1].PartNumber[0], - '4'); + assert.strictEqual(json.ListPartsResult.PartNumberMarker[0], '2'); + assert.strictEqual(json.ListPartsResult.NextPartNumberMarker[0], '4'); + assert.strictEqual(json.ListPartsResult.Part[0].PartNumber[0], '3'); + assert.strictEqual(json.ListPartsResult.Part[0].ETag[0], sixMBObjectETag); + assert.strictEqual(json.ListPartsResult.Part[0].Size[0], '6000000'); + assert.strictEqual(json.ListPartsResult.Part[1].PartNumber[0], '4'); assert.strictEqual(json.ListPartsResult.Part.length, 2); done(); }); diff --git a/tests/unit/api/multiObjectDelete.js b/tests/unit/api/multiObjectDelete.js index e1d5f3bf52..eab509802c 100644 --- a/tests/unit/api/multiObjectDelete.js +++ b/tests/unit/api/multiObjectDelete.js @@ -2,8 +2,11 @@ const crypto = require('crypto'); const assert = require('assert'); const { errors, storage } = require('arsenal'); -const { decodeObjectVersion, getObjMetadataAndDelete, initializeMultiObjectDeleteWithBatchingSupport } - = require('../../../lib/api/multiObjectDelete'); +const { + decodeObjectVersion, + getObjMetadataAndDelete, + initializeMultiObjectDeleteWithBatchingSupport, +} = require('../../../lib/api/multiObjectDelete'); const multiObjectDelete = require('../../../lib/api/multiObjectDelete'); const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const DummyRequest = require('../DummyRequest'); @@ -39,10 +42,13 @@ const testBucketPutRequest = new DummyRequest({ describe('getObjMetadataAndDelete function for multiObjectDelete', () => { let testPutObjectRequest1; let testPutObjectRequest2; - const request = new DummyRequest({ - headers: {}, - parsedContentLength: contentLength, - }, postBody); + const request = new DummyRequest( + { + headers: {}, + parsedContentLength: contentLength, + }, + postBody + ); const bucket = { isVersioningEnabled: () => false, getVersioningConfiguration: () => null, @@ -52,34 +58,34 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => { beforeEach(done => { cleanup(); sinon.spy(metadataswitch, 'deleteObjectMD'); - testPutObjectRequest1 = new DummyRequest({ - bucketName, - namespace, - objectKey: objectKey1, - headers: {}, - url: `/${bucketName}/${objectKey1}`, - }, postBody); - testPutObjectRequest2 = new DummyRequest({ - bucketName, - namespace, - objectKey: objectKey2, - headers: {}, - url: `/${bucketName}/${objectKey2}`, - }, postBody); + testPutObjectRequest1 = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectKey1, + headers: {}, + url: `/${bucketName}/${objectKey1}`, + }, + postBody + ); + testPutObjectRequest2 = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectKey2, + headers: {}, + url: `/${bucketName}/${objectKey2}`, + }, + postBody + ); bucketPut(authInfo, testBucketPutRequest, log, () => { - objectPut(authInfo, testPutObjectRequest1, - undefined, log, () => { - objectPut(authInfo, testPutObjectRequest2, - undefined, log, () => { - assert.strictEqual(metadata.keyMaps - .get(bucketName) - .has(objectKey1), true); - assert.strictEqual(metadata.keyMaps - .get(bucketName) - .has(objectKey2), true); - done(); - }); + 
objectPut(authInfo, testPutObjectRequest1, undefined, log, () => { + objectPut(authInfo, testPutObjectRequest2, undefined, log, () => { + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey1), true); + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey2), true); + done(); }); + }); }); }); @@ -87,60 +93,76 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => { sinon.restore(); }); - it('should successfully get object metadata and then ' + - 'delete metadata and data', done => { - getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, bucket, - true, [], [{ key: objectKey1 }, { key: objectKey2 }], log, - (err, quietSetting, errorResults, numOfObjects, - successfullyDeleted, totalContentLengthDeleted) => { + it('should successfully get object metadata and then ' + 'delete metadata and data', done => { + getObjMetadataAndDelete( + authInfo, + 'foo', + request, + bucketName, + bucket, + true, + [], + [{ key: objectKey1 }, { key: objectKey2 }], + log, + (err, quietSetting, errorResults, numOfObjects, successfullyDeleted, totalContentLengthDeleted) => { assert.ifError(err); assert.strictEqual(quietSetting, true); assert.deepStrictEqual(errorResults, []); assert.strictEqual(numOfObjects, 2); assert.strictEqual(totalContentLengthDeleted, contentLength); - assert.strictEqual(metadata.keyMaps.get(bucketName) - .has(objectKey1), false); - assert.strictEqual(metadata.keyMaps.get(bucketName) - .has(objectKey2), false); + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey1), false); + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey2), false); // call to delete data is async so wait 20 ms to check // that data deleted setTimeout(() => { // eslint-disable-next-line - assert.deepStrictEqual(ds, [ , , , ]); + assert.deepStrictEqual(ds, [, , ,]); done(); }, 20); - }); + } + ); }); it('should return success results if no such key', done => { - getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, bucket, - true, [], [{ key: 'madeup1' }, { key: 'madeup2' }], log, - (err, quietSetting, errorResults, numOfObjects, - successfullyDeleted, totalContentLengthDeleted) => { + getObjMetadataAndDelete( + authInfo, + 'foo', + request, + bucketName, + bucket, + true, + [], + [{ key: 'madeup1' }, { key: 'madeup2' }], + log, + (err, quietSetting, errorResults, numOfObjects, successfullyDeleted, totalContentLengthDeleted) => { assert.ifError(err); assert.strictEqual(quietSetting, true); assert.deepStrictEqual(errorResults, []); assert.strictEqual(numOfObjects, 0); - assert.strictEqual(totalContentLengthDeleted, - 0); - assert.strictEqual(metadata.keyMaps.get(bucketName) - .has(objectKey1), true); - assert.strictEqual(metadata.keyMaps.get(bucketName) - .has(objectKey2), true); + assert.strictEqual(totalContentLengthDeleted, 0); + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey1), true); + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey2), true); done(); - }); + } + ); }); - it('should return error results if err from metadata getting object' + - 'is error other than NoSuchKey', done => { + it('should return error results if err from metadata getting object' + 'is error other than NoSuchKey', done => { // we fake an error by calling on an imaginary bucket // even though the getObjMetadataAndDelete function would // never be called if there was no bucket (would error out earlier // in API) - getObjMetadataAndDelete(authInfo, 'foo', request, 'madeupbucket', - bucket, true, [], [{ key: objectKey1 
}, { key: objectKey2 }], log, - (err, quietSetting, errorResults, numOfObjects, - successfullyDeleted, totalContentLengthDeleted) => { + getObjMetadataAndDelete( + authInfo, + 'foo', + request, + 'madeupbucket', + bucket, + true, + [], + [{ key: objectKey1 }, { key: objectKey2 }], + log, + (err, quietSetting, errorResults, numOfObjects, successfullyDeleted, totalContentLengthDeleted) => { assert.ifError(err); assert.strictEqual(quietSetting, true); assert.deepStrictEqual(errorResults, [ @@ -153,31 +175,35 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => { error: errors.NoSuchBucket, }, ]); - assert.strictEqual(totalContentLengthDeleted, - 0); - assert.strictEqual(metadata.keyMaps.get(bucketName) - .has(objectKey1), true); - assert.strictEqual(metadata.keyMaps.get(bucketName) - .has(objectKey2), true); + assert.strictEqual(totalContentLengthDeleted, 0); + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey1), true); + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey2), true); done(); - }); + } + ); }); - it('should return no error or success results if no objects in play', - done => { - getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, - bucket, true, [], [], log, - (err, quietSetting, errorResults, numOfObjects, - successfullyDeleted, totalContentLengthDeleted) => { - assert.ifError(err); - assert.strictEqual(quietSetting, true); - assert.deepStrictEqual(errorResults, []); - assert.strictEqual(numOfObjects, 0); - assert.strictEqual(totalContentLengthDeleted, - 0); - done(); - }); - }); + it('should return no error or success results if no objects in play', done => { + getObjMetadataAndDelete( + authInfo, + 'foo', + request, + bucketName, + bucket, + true, + [], + [], + log, + (err, quietSetting, errorResults, numOfObjects, successfullyDeleted, totalContentLengthDeleted) => { + assert.ifError(err); + assert.strictEqual(quietSetting, true); + assert.deepStrictEqual(errorResults, []); + assert.strictEqual(numOfObjects, 0); + assert.strictEqual(totalContentLengthDeleted, 0); + done(); + } + ); + }); it('should pass along error results', done => { const errorResultsSample = [ @@ -190,18 +216,25 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => { error: errors.AccessDenied, }, ]; - getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, bucket, - true, errorResultsSample, - [{ key: objectKey1 }, { key: objectKey2 }], log, - (err, quietSetting, errorResults, numOfObjects, - successfullyDeleted, totalContentLengthDeleted) => { + getObjMetadataAndDelete( + authInfo, + 'foo', + request, + bucketName, + bucket, + true, + errorResultsSample, + [{ key: objectKey1 }, { key: objectKey2 }], + log, + (err, quietSetting, errorResults, numOfObjects, successfullyDeleted, totalContentLengthDeleted) => { assert.ifError(err); assert.strictEqual(quietSetting, true); assert.deepStrictEqual(errorResults, errorResultsSample); assert.strictEqual(numOfObjects, 2); assert.strictEqual(totalContentLengthDeleted, contentLength); done(); - }); + } + ); }); it('should properly batch delete data even if there are errors in other objects', done => { @@ -209,35 +242,51 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => { deleteObjectStub.onCall(0).callsArgWith(7, errors.InternalError); deleteObjectStub.onCall(1).callsArgWith(7, null); - getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, bucket, - true, [], [{ key: objectKey1 }, { key: objectKey2 }], log, - (err, quietSetting, errorResults, 
numOfObjects, - successfullyDeleted, totalContentLengthDeleted) => { - assert.ifError(err); - assert.strictEqual(quietSetting, true); - assert.deepStrictEqual(errorResults, [ - { - entry: { - key: objectKey1, + getObjMetadataAndDelete( + authInfo, + 'foo', + request, + bucketName, + bucket, + true, + [], + [{ key: objectKey1 }, { key: objectKey2 }], + log, + (err, quietSetting, errorResults, numOfObjects, successfullyDeleted, totalContentLengthDeleted) => { + assert.ifError(err); + assert.strictEqual(quietSetting, true); + assert.deepStrictEqual(errorResults, [ + { + entry: { + key: objectKey1, + }, + error: errors.InternalError, }, - error: errors.InternalError, - }, - ]); - assert.strictEqual(numOfObjects, 1); - assert.strictEqual(totalContentLengthDeleted, contentLength / 2); - // Expect still in memory as we stubbed the function - assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey1), true); - assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey2), true); - // ensure object 2 only is in the list of successful deletions - assert.strictEqual(successfullyDeleted.length, 1); - assert.deepStrictEqual(successfullyDeleted[0].entry.key, objectKey2); - return done(); - }); + ]); + assert.strictEqual(numOfObjects, 1); + assert.strictEqual(totalContentLengthDeleted, contentLength / 2); + // Expect still in memory as we stubbed the function + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey1), true); + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey2), true); + // ensure object 2 only is in the list of successful deletions + assert.strictEqual(successfullyDeleted.length, 1); + assert.deepStrictEqual(successfullyDeleted[0].entry.key, objectKey2); + return done(); + } + ); }); it('should pass overheadField to metadata', done => { - getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, bucket, - true, [], [{ key: objectKey1 }, { key: objectKey2 }], log, + getObjMetadataAndDelete( + authInfo, + 'foo', + request, + bucketName, + bucket, + true, + [], + [{ key: objectKey1 }, { key: objectKey2 }], + log, (err, quietSetting, errorResults, numOfObjects) => { assert.ifError(err); assert.strictEqual(numOfObjects, 2); @@ -258,7 +307,8 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => { sinon.match.any ); done(); - }); + } + ); }); }); @@ -311,8 +361,9 @@ describe('initializeMultiObjectDeleteWithBatchingSupport', () => { }); it('should not return an error if the metadataGetObjects function fails', done => { - const metadataGetObjectsStub = - sinon.stub(metadataUtils, 'metadataGetObjects').yields(new Error('metadata error'), null); + const metadataGetObjectsStub = sinon + .stub(metadataUtils, 'metadataGetObjects') + .yields(new Error('metadata error'), null); const objectVersion = 'someVersionId'; sinon.stub(multiObjectDelete, 'decodeObjectVersion').returns([null, objectVersion]); @@ -383,7 +434,7 @@ describe('multiObjectDelete function', () => { objectKey: 'objectname', parsedHost: 'localhost', headers: { - 'content-md5': crypto.createHash('md5').update(post, 'utf8').digest('base64') + 'content-md5': crypto.createHash('md5').update(post, 'utf8').digest('base64'), }, post, socket: { @@ -393,22 +444,16 @@ describe('multiObjectDelete function', () => { }); const authInfo = makeAuthInfo('123456'); - sinon.stub(metadataWrapper, 'getBucket').callsFake((bucketName, log, cb) => - cb(null, new BucketInfo( - 'bucketname', - '123456', - 'accountA', - new Date().toISOString(), - 15, - ))); + sinon + .stub(metadataWrapper, 
'getBucket') + .callsFake((bucketName, log, cb) => + cb(null, new BucketInfo('bucketname', '123456', 'accountA', new Date().toISOString(), 15)) + ); multiObjectDelete.multiObjectDelete(authInfo, request, log, (err, res) => { // Expected result is an access denied on the object, and no error, as the API was authorized assert.strictEqual(err, null); - assert.strictEqual( - res.includes('objectnameAccessDenied'), - true - ); + assert.strictEqual(res.includes('objectnameAccessDenied'), true); done(); }); }); diff --git a/tests/unit/api/multipartDelete.js b/tests/unit/api/multipartDelete.js index be9e9ec81e..d40ec902f7 100644 --- a/tests/unit/api/multipartDelete.js +++ b/tests/unit/api/multipartDelete.js @@ -6,8 +6,7 @@ const { cleanup, DummyRequestLogger } = require('../helpers'); const { config } = require('../../../lib/Config'); const DummyRequest = require('../DummyRequest'); const { bucketPut } = require('../../../lib/api/bucketPut'); -const initiateMultipartUpload - = require('../../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../../lib/api/initiateMultipartUpload'); const multipartDelete = require('../../../lib/api/multipartDelete'); const objectPutPart = require('../../../lib/api/objectPutPart'); const { makeAuthInfo } = require('../helpers'); @@ -36,62 +35,63 @@ const initiateRequest = { const eastLocation = 'us-east-1'; const westLocation = 'scality-internal-file'; -function _createAndAbortMpu(usEastSetting, fakeUploadID, locationConstraint, - callback) { - config.locationConstraints['us-east-1'].legacyAwsBehavior = - usEastSetting; - const post = '' + +function _createAndAbortMpu(usEastSetting, fakeUploadID, locationConstraint, callback) { + config.locationConstraints['us-east-1'].legacyAwsBehavior = usEastSetting; + const post = + '' + '' + `${locationConstraint}` + ''; const testBucketPutRequest = Object.assign({ post }, bucketPutRequest); - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => - initiateMultipartUpload(authInfo, initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - // use uploadId parsed from initiateMpu request to construct - // uploadPart and deleteMpu requests - const uploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const partBody = Buffer.from('I am a part\n', 'utf8'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, - query: { - partNumber: '1', - uploadId, - }, - actionImplicitDenies: false, - }, partBody); - const testUploadId = fakeUploadID ? 
'nonexistinguploadid' : - uploadId; - const deleteMpuRequest = { - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?uploadId=${testUploadId}`, - query: { uploadId: testUploadId }, - actionImplicitDenies: false, - }; - next(null, partRequest, deleteMpuRequest); - }, - (partRequest, deleteMpuRequest, next) => - objectPutPart(authInfo, partRequest, undefined, log, err => { - if (err) { - return next(err); - } - return next(null, deleteMpuRequest); - }), - (deleteMpuRequest, next) => - multipartDelete(authInfo, deleteMpuRequest, log, next), - ], callback); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + // use uploadId parsed from initiateMpu request to construct + // uploadPart and deleteMpu requests + const uploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partBody = Buffer.from('I am a part\n', 'utf8'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, + query: { + partNumber: '1', + uploadId, + }, + actionImplicitDenies: false, + }, + partBody + ); + const testUploadId = fakeUploadID ? 'nonexistinguploadid' : uploadId; + const deleteMpuRequest = { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?uploadId=${testUploadId}`, + query: { uploadId: testUploadId }, + actionImplicitDenies: false, + }; + next(null, partRequest, deleteMpuRequest); + }, + (partRequest, deleteMpuRequest, next) => + objectPutPart(authInfo, partRequest, undefined, log, err => { + if (err) { + return next(err); + } + return next(null, deleteMpuRequest); + }), + (deleteMpuRequest, next) => multipartDelete(authInfo, deleteMpuRequest, log, next), + ], + callback + ); } describe('Multipart Delete API', () => { @@ -100,41 +100,47 @@ describe('Multipart Delete API', () => { }); afterEach(() => { // set back to original - config.locationConstraints['us-east-1'].legacyAwsBehavior = - true; + config.locationConstraints['us-east-1'].legacyAwsBehavior = true; cleanup(); }); - it('should not return error if mpu exists with uploadId and at least ' + - 'one part', done => { + it('should not return error if mpu exists with uploadId and at least ' + 'one part', done => { _createAndAbortMpu(true, false, eastLocation, err => { assert.ifError(err); done(err); }); }); - it('should still not return error if uploadId does not exist on ' + - 'multipart abort call, in region other than us-east-1', done => { - _createAndAbortMpu(true, true, westLocation, err => { - assert.ifError(err); - done(err); - }); - }); + it( + 'should still not return error if uploadId does not exist on ' + + 'multipart abort call, in region other than us-east-1', + done => { + _createAndAbortMpu(true, true, westLocation, err => { + assert.ifError(err); + done(err); + }); + } + ); - it('bucket created in us-east-1: should return 404 if uploadId does not ' + - 'exist and legacyAwsBehavior set to true', - done => { - _createAndAbortMpu(true, true, eastLocation, err => { - assert.strictEqual(err.is.NoSuchUpload, true); - done(); - }); - }); + it( + 'bucket created in us-east-1: should return 404 if uploadId does not ' + + 'exist and legacyAwsBehavior set to true', + done => { 
+ _createAndAbortMpu(true, true, eastLocation, err => { + assert.strictEqual(err.is.NoSuchUpload, true); + done(); + }); + } + ); - it('bucket created in us-east-1: should return no error ' + - 'if uploadId does not exist and legacyAwsBehavior set to false', done => { - _createAndAbortMpu(false, true, eastLocation, err => { - assert.strictEqual(err, null, `Expected no error, got ${err}`); - done(); - }); - }); + it( + 'bucket created in us-east-1: should return no error ' + + 'if uploadId does not exist and legacyAwsBehavior set to false', + done => { + _createAndAbortMpu(false, true, eastLocation, err => { + assert.strictEqual(err, null, `Expected no error, got ${err}`); + done(); + }); + } + ); }); diff --git a/tests/unit/api/multipartUpload.js b/tests/unit/api/multipartUpload.js index 0aedf3e503..93536a7333 100644 --- a/tests/unit/api/multipartUpload.js +++ b/tests/unit/api/multipartUpload.js @@ -12,15 +12,12 @@ const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutPolicy = require('../../../lib/api/bucketPutPolicy'); const bucketPutVersioning = require('../../../lib/api/bucketPutVersioning'); const objectPut = require('../../../lib/api/objectPut'); -const completeMultipartUpload - = require('../../../lib/api/completeMultipartUpload'); +const completeMultipartUpload = require('../../../lib/api/completeMultipartUpload'); const constants = require('../../../constants'); -const { cleanup, DummyRequestLogger, makeAuthInfo, versioningTestUtils } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, versioningTestUtils } = require('../helpers'); const getObjectLegalHold = require('../../../lib/api/objectGetLegalHold'); const getObjectRetention = require('../../../lib/api/objectGetRetention'); -const initiateMultipartUpload - = require('../../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../../lib/api/initiateMultipartUpload'); const multipartDelete = require('../../../lib/api/multipartDelete'); const objectPutPart = require('../../../lib/api/objectPutPart'); const DummyRequest = require('../DummyRequest'); @@ -51,16 +48,17 @@ const bucketPutRequest = { namespace, headers: { host: `${bucketName}.s3.amazonaws.com` }, url: '/', - post: '' + - 'scality-internal-mem' + - '', + post: + '' + + 'scality-internal-mem' + + '', actionImplicitDenies: false, }; const lockEnabledBucketRequest = Object.assign({}, bucketPutRequest); lockEnabledBucketRequest.bucketName = lockedBucket; lockEnabledBucketRequest.headers = { - 'host': `${lockedBucket}.s3.amazonaws.com`, + host: `${lockedBucket}.s3.amazonaws.com`, 'x-amz-bucket-object-lock-enabled': 'true', }; const initiateRequest = { @@ -79,13 +77,13 @@ retentionInitiateRequest.bucketName = lockedBucket; retentionInitiateRequest.headers = { 'x-amz-object-lock-mode': 'GOVERNANCE', 'x-amz-object-lock-retain-until-date': futureDate, - 'host': `${lockedBucket}.s3.amazonaws.com`, + host: `${lockedBucket}.s3.amazonaws.com`, }; const legalHoldInitiateRequest = Object.assign({}, initiateRequest); legalHoldInitiateRequest.bucketName = lockedBucket; legalHoldInitiateRequest.headers = { 'x-amz-object-lock-legal-hold': 'ON', - 'host': `${lockedBucket}.s3.amazonaws.com`, + host: `${lockedBucket}.s3.amazonaws.com`, }; const getObjectLockInfoRequest = { @@ -107,29 +105,31 @@ const expectedLegalHold = { function _createPutPartRequest(uploadId, partNumber, partBody) { const md5Hash = crypto.createHash('md5').update(partBody); const calculatedHash = md5Hash.digest('hex'); - return new 
DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=${partNumber}&uploadId=${uploadId}`, - query: { - partNumber, - uploadId, + return new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=${partNumber}&uploadId=${uploadId}`, + query: { + partNumber, + uploadId, + }, + calculatedHash, + actionImplicitDenies: false, }, - calculatedHash, - actionImplicitDenies: false, - }, partBody); + partBody + ); } function _createCompleteMpuRequest(uploadId, parts) { const completeBody = []; completeBody.push(''); parts.forEach(part => { - completeBody.push('' + - `${part.partNumber}` + - `"${part.eTag}"` + - ''); + completeBody.push( + '' + `${part.partNumber}` + `"${part.eTag}"` + '' + ); }); completeBody.push(''); return { @@ -154,8 +154,8 @@ async function _uploadMpuObject(params = {}) { return json.InitiateMultipartUploadResult.UploadId[0]; }; const _objectPutPart = util.promisify(objectPutPart); - const _completeMultipartUpload = (...params) => util.promisify(cb => - completeMultipartUpload(...params, (err, xml, headers) => cb(err, { xml, headers })))(); + const _completeMultipartUpload = (...params) => + util.promisify(cb => completeMultipartUpload(...params, (err, xml, headers) => cb(err, { xml, headers })))(); const headers = { ...initiateRequest.headers }; if (params.location) { @@ -189,41 +189,33 @@ describe('Multipart Upload API', () => { }); it('mpuBucketPrefix should be a defined constant', () => { - assert(constants.mpuBucketPrefix, - 'Expected mpuBucketPrefix to be defined'); + assert(constants.mpuBucketPrefix, 'Expected mpuBucketPrefix to be defined'); }); it('should initiate a multipart upload', done => { bucketPut(authInfo, bucketPutRequest, log, err => { assert.ifError(err); - initiateMultipartUpload(authInfo, initiateRequest, - log, (err, result) => { - assert.ifError(err); - parseString(result, (err, json) => { - assert.strictEqual(json.InitiateMultipartUploadResult - .Bucket[0], bucketName); - assert.strictEqual(json.InitiateMultipartUploadResult - .Key[0], objectKey); - assert(json.InitiateMultipartUploadResult.UploadId[0]); - assert(metadata.buckets.get(mpuBucket)._name, - mpuBucket); - const mpuKeys = metadata.keyMaps.get(mpuBucket); - assert.strictEqual(mpuKeys.size, 1); - assert(mpuKeys.keys().next().value - .startsWith(`overview${splitter}${objectKey}`)); - done(); - }); + initiateMultipartUpload(authInfo, initiateRequest, log, (err, result) => { + assert.ifError(err); + parseString(result, (err, json) => { + assert.strictEqual(json.InitiateMultipartUploadResult.Bucket[0], bucketName); + assert.strictEqual(json.InitiateMultipartUploadResult.Key[0], objectKey); + assert(json.InitiateMultipartUploadResult.UploadId[0]); + assert(metadata.buckets.get(mpuBucket)._name, mpuBucket); + const mpuKeys = metadata.keyMaps.get(mpuBucket); + assert.strictEqual(mpuKeys.size, 1); + assert(mpuKeys.keys().next().value.startsWith(`overview${splitter}${objectKey}`)); + done(); }); + }); }); }); - it('should return an error on an initiate multipart upload call if ' + - 'no destination bucket', done => { - initiateMultipartUpload(authInfo, initiateRequest, - log, err => { - assert(err.is.NoSuchBucket); - done(); - }); + it('should return an error on an initiate multipart upload call if ' + 'no destination bucket', done => { + initiateMultipartUpload(authInfo, initiateRequest, log, err => { + 
assert(err.is.NoSuchBucket); + done(); + }); }); it('should not mpu with storage-class header not equal to STANDARD', done => { @@ -237,797 +229,544 @@ describe('Multipart Upload API', () => { }, url: `/${objectKey}?uploads`, }; - initiateMultipartUpload(authInfo, initiateRequestCold, - log, err => { - assert.strictEqual(err.is.InvalidStorageClass, true); - done(); - }); + initiateMultipartUpload(authInfo, initiateRequestCold, log, err => { + assert.strictEqual(err.is.InvalidStorageClass, true); + done(); + }); }); it('should upload a part', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => { - const mpuKeys = metadata.keyMaps.get(mpuBucket); - assert.strictEqual(mpuKeys.size, 1); - assert(mpuKeys.keys().next().value - .startsWith(`overview${splitter}${objectKey}`)); - parseString(result, next); - }, - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - objectKey, - namespace, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '1', - uploadId: testUploadId, + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => { + const mpuKeys = metadata.keyMaps.get(mpuBucket); + assert.strictEqual(mpuKeys.size, 1); + assert(mpuKeys.keys().next().value.startsWith(`overview${splitter}${objectKey}`)); + parseString(result, next); }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest, undefined, log, err => { + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here assert.ifError(err); - const keysInMPUkeyMap = []; - metadata.keyMaps.get(mpuBucket).forEach((val, key) => { - keysInMPUkeyMap.push(key); - }); - const sortedKeyMap = keysInMPUkeyMap.sort(a => { - if (a.slice(0, 8) === 'overview') { - return -1; - } - return 0; + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + objectKey, + namespace, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, err => { + assert.ifError(err); + const keysInMPUkeyMap = []; + metadata.keyMaps.get(mpuBucket).forEach((val, key) => { + keysInMPUkeyMap.push(key); + }); + const sortedKeyMap = keysInMPUkeyMap.sort(a => { + if (a.slice(0, 8) === 'overview') { + return -1; + } + return 0; + }); + const overviewEntry = sortedKeyMap[0]; + const partKey = sortedKeyMap[1]; + const partEntryArray = partKey.split(splitter); + const partUploadId = partEntryArray[0]; + const firstPartNumber = partEntryArray[1]; + 
const partETag = metadata.keyMaps.get(mpuBucket).get(partKey)['content-md5']; + assert.strictEqual(keysInMPUkeyMap.length, 2); + assert.strictEqual(metadata.keyMaps.get(mpuBucket).get(overviewEntry).key, objectKey); + assert.strictEqual(partUploadId, testUploadId); + assert.strictEqual(firstPartNumber, '00001'); + assert.strictEqual(partETag, calculatedHash); + done(); }); - const overviewEntry = sortedKeyMap[0]; - const partKey = sortedKeyMap[1]; - const partEntryArray = partKey.split(splitter); - const partUploadId = partEntryArray[0]; - const firstPartNumber = partEntryArray[1]; - const partETag = metadata.keyMaps.get(mpuBucket) - .get(partKey)['content-md5']; - assert.strictEqual(keysInMPUkeyMap.length, 2); - assert.strictEqual(metadata.keyMaps.get(mpuBucket) - .get(overviewEntry).key, - objectKey); - assert.strictEqual(partUploadId, testUploadId); - assert.strictEqual(firstPartNumber, '00001'); - assert.strictEqual(partETag, calculatedHash); - done(); - }); - }); + } + ); }); - it('should upload a part even if the client sent a base 64 ETag ' + - '(and the stored ETag in metadata should be hex)', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - const calculatedHash = md5Hash.update(bufferBody).digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest, undefined, log, err => { - assert.ifError(err); - const keysInMPUkeyMap = []; - metadata.keyMaps.get(mpuBucket).forEach((val, key) => { - keysInMPUkeyMap.push(key); - }); - const sortedKeyMap = keysInMPUkeyMap.sort(a => { - if (a.slice(0, 8) === 'overview') { - return -1; - } - return 0; - }); - const partKey = sortedKeyMap[1]; - const partETag = metadata.keyMaps.get(mpuBucket) - .get(partKey)['content-md5']; - assert.strictEqual(keysInMPUkeyMap.length, 2); - assert.strictEqual(partETag, calculatedHash); - done(); - }); - }); - }); + it( + 'should upload a part even if the client sent a base 64 ETag ' + + '(and the stored ETag in metadata should be hex)', + done => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + const calculatedHash = md5Hash.update(bufferBody).digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: 
testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, err => { + assert.ifError(err); + const keysInMPUkeyMap = []; + metadata.keyMaps.get(mpuBucket).forEach((val, key) => { + keysInMPUkeyMap.push(key); + }); + const sortedKeyMap = keysInMPUkeyMap.sort(a => { + if (a.slice(0, 8) === 'overview') { + return -1; + } + return 0; + }); + const partKey = sortedKeyMap[1]; + const partETag = metadata.keyMaps.get(mpuBucket).get(partKey)['content-md5']; + assert.strictEqual(keysInMPUkeyMap.length, 2); + assert.strictEqual(partETag, calculatedHash); + done(); + }); + } + ); + } + ); it('should return an error if too many parts', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '10001', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest, undefined, log, - (err, result) => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '10001', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, (err, result) => { assert(err.is.TooManyParts); assert.strictEqual(result, undefined); done(); }); - }); + } + ); }); it('should return an error if part number is not an integer', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - objectKey, - namespace, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { 
host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: 'I am not an integer', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest, undefined, log, - (err, result) => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + objectKey, + namespace, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: 'I am not an integer', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, (err, result) => { assert(err.is.InvalidArgument); assert.strictEqual(result, undefined); done(); }); - }); + } + ); }); it('should return an error if content-length is too large', done => { // Note this is only faking a large file // by setting a large content-length. It is not actually putting a // large file. Functional tests will test actual large data. - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'content-length': '5368709121', - }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - parsedContentLength: 5368709121, - }, postBody); - objectPutPart(authInfo, partRequest, undefined, - log, (err, result) => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'content-length': '5368709121', + }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + parsedContentLength: 5368709121, + }, + 
postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, (err, result) => { assert(err.is.EntityTooLarge); assert.strictEqual(result, undefined); done(); }); - }); + } + ); }); it('should upload two parts', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest1 = new DummyRequest({ - bucketName, - namespace, - objectKey, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest1, undefined, log, () => { - const postBody2 = Buffer.from('I am a second part', 'utf8'); - const md5Hash2 = crypto.createHash('md5'); - const bufferBody2 = Buffer.from(postBody2); - md5Hash2.update(bufferBody2); - const secondCalculatedMD5 = md5Hash2.digest('hex'); - const partRequest2 = new DummyRequest({ - bucketName, - namespace, - objectKey, - url: `/${objectKey}?partNumber=` + - `1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '2', - uploadId: testUploadId, + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest1 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, }, - calculatedHash: secondCalculatedMD5, - }, postBody2); - objectPutPart(authInfo, partRequest2, undefined, log, err => { - assert.ifError(err); + postBody + ); + objectPutPart(authInfo, partRequest1, undefined, log, () => { + const postBody2 = Buffer.from('I am a second part', 'utf8'); + const md5Hash2 = crypto.createHash('md5'); + const bufferBody2 = Buffer.from(postBody2); + md5Hash2.update(bufferBody2); + const secondCalculatedMD5 = md5Hash2.digest('hex'); + const partRequest2 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?partNumber=` + `1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: '2', + uploadId: testUploadId, + }, + calculatedHash: secondCalculatedMD5, + }, + postBody2 + ); + objectPutPart(authInfo, partRequest2, undefined, log, err => { + assert.ifError(err); - const keysInMPUkeyMap = []; - metadata.keyMaps.get(mpuBucket).forEach((val, key) => { - 
keysInMPUkeyMap.push(key); - }); - const sortedKeyMap = keysInMPUkeyMap.sort(a => { - if (a.slice(0, 8) === 'overview') { - return -1; - } - return 0; + const keysInMPUkeyMap = []; + metadata.keyMaps.get(mpuBucket).forEach((val, key) => { + keysInMPUkeyMap.push(key); + }); + const sortedKeyMap = keysInMPUkeyMap.sort(a => { + if (a.slice(0, 8) === 'overview') { + return -1; + } + return 0; + }); + const overviewEntry = sortedKeyMap[0]; + const partKey = sortedKeyMap[2]; + const secondPartEntryArray = partKey.split(splitter); + const partUploadId = secondPartEntryArray[0]; + const secondPartETag = metadata.keyMaps.get(mpuBucket).get(partKey)['content-md5']; + const secondPartNumber = secondPartEntryArray[1]; + assert.strictEqual(keysInMPUkeyMap.length, 3); + assert.strictEqual(metadata.keyMaps.get(mpuBucket).get(overviewEntry).key, objectKey); + assert.strictEqual(partUploadId, testUploadId); + assert.strictEqual(secondPartNumber, '00002'); + assert.strictEqual(secondPartETag, secondCalculatedMD5); + done(); }); - const overviewEntry = sortedKeyMap[0]; - const partKey = sortedKeyMap[2]; - const secondPartEntryArray = partKey.split(splitter); - const partUploadId = secondPartEntryArray[0]; - const secondPartETag = metadata.keyMaps.get(mpuBucket) - .get(partKey)['content-md5']; - const secondPartNumber = secondPartEntryArray[1]; - assert.strictEqual(keysInMPUkeyMap.length, 3); - assert.strictEqual(metadata - .keyMaps.get(mpuBucket).get(overviewEntry).key, - objectKey); - assert.strictEqual(partUploadId, testUploadId); - assert.strictEqual(secondPartNumber, '00002'); - assert.strictEqual(secondPartETag, secondCalculatedMD5); - done(); }); - }); - }); + } + ); }); it('should complete a multipart upload', done => { const partBody = Buffer.from('I am a part\n', 'utf8'); - initiateRequest.headers['x-amz-meta-stuff'] = - 'I am some user metadata'; - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5').update(partBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - // Note that the body of the post set in the request here does - // not really matter in this test. - // The put is not going through the route so the md5 is being - // calculated above and manually being set in the request below. - // What is being tested is that the calculatedHash being sent - // to the API for the part is stored and then used to - // calculate the final ETag upon completion - // of the multipart upload. 
- calculatedHash, - }, partBody); - objectPutPart(authInfo, partRequest, undefined, log, () => { - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - ''; - const completeRequest = { - bucketName, - namespace, - objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - actionImplicitDenies: false, - }; - const awsVerifiedETag = - '"953e9e776f285afc0bfcf1ab4668299d-1"'; - completeMultipartUpload(authInfo, - completeRequest, log, (err, result) => { + initiateRequest.headers['x-amz-meta-stuff'] = 'I am some user metadata'; + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5').update(partBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + // Note that the body of the post set in the request here does + // not really matter in this test. + // The put is not going through the route so the md5 is being + // calculated above and manually being set in the request below. + // What is being tested is that the calculatedHash being sent + // to the API for the part is stored and then used to + // calculate the final ETag upon completion + // of the multipart upload. 
+ calculatedHash, + }, + partBody + ); + objectPutPart(authInfo, partRequest, undefined, log, () => { + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + actionImplicitDenies: false, + }; + const awsVerifiedETag = '"953e9e776f285afc0bfcf1ab4668299d-1"'; + completeMultipartUpload(authInfo, completeRequest, log, (err, result) => { assert.ifError(err); parseString(result, (err, json) => { assert.ifError(err); assert.strictEqual( json.CompleteMultipartUploadResult.Location[0], - `http://${bucketName}.s3.amazonaws.com` - + `/${objectKey}`); - assert.strictEqual( - json.CompleteMultipartUploadResult.Bucket[0], - bucketName); - assert.strictEqual( - json.CompleteMultipartUploadResult.Key[0], - objectKey); - assert.strictEqual( - json.CompleteMultipartUploadResult.ETag[0], - awsVerifiedETag); - const MD = metadata.keyMaps.get(bucketName) - .get(objectKey); + `http://${bucketName}.s3.amazonaws.com` + `/${objectKey}` + ); + assert.strictEqual(json.CompleteMultipartUploadResult.Bucket[0], bucketName); + assert.strictEqual(json.CompleteMultipartUploadResult.Key[0], objectKey); + assert.strictEqual(json.CompleteMultipartUploadResult.ETag[0], awsVerifiedETag); + const MD = metadata.keyMaps.get(bucketName).get(objectKey); assert(MD); - assert.strictEqual(MD['x-amz-meta-stuff'], - 'I am some user metadata'); + assert.strictEqual(MD['x-amz-meta-stuff'], 'I am some user metadata'); assert.strictEqual(MD.uploadId, testUploadId); done(); }); }); - }); - }); - }); - - it('should complete a multipart upload even if etag is sent ' + - 'in post body without quotes (a la Cyberduck)', done => { - const partBody = Buffer.from('I am a part\n', 'utf8'); - initiateRequest.headers['x-amz-meta-stuff'] = - 'I am some user metadata'; - async.waterfall([ - function waterfall1(next) { - bucketPut(authInfo, bucketPutRequest, log, next); - }, - function waterfall2(corsHeaders, next) { - initiateMultipartUpload( - authInfo, initiateRequest, log, next); - }, - function waterfall3(result, corsHeaders, next) { - parseString(result, next); - }, - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5').update(partBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, partBody); - objectPutPart(authInfo, partRequest, undefined, log, () => { - const completeBody = '' + - '' + - '1' + - // ETag without quotes - `${calculatedHash}` + - '' + - ''; - const completeRequest = { - bucketName, - namespace, - objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - actionImplicitDenies: false, - }; - const awsVerifiedETag = - '"953e9e776f285afc0bfcf1ab4668299d-1"'; - completeMultipartUpload(authInfo, - completeRequest, log, (err, result) => { - 
assert.ifError(err); - parseString(result, (err, json) => { - assert.ifError(err); - assert.strictEqual( - json.CompleteMultipartUploadResult.Location[0], - `http://${bucketName}.s3.amazonaws.com` - + `/${objectKey}`); - assert.strictEqual( - json.CompleteMultipartUploadResult.Bucket[0], - bucketName); - assert.strictEqual( - json.CompleteMultipartUploadResult.Key[0], - objectKey); - assert.strictEqual( - json.CompleteMultipartUploadResult.ETag[0], - awsVerifiedETag); - const MD = metadata.keyMaps.get(bucketName) - .get(objectKey); - assert(MD); - assert.strictEqual(MD['x-amz-meta-stuff'], - 'I am some user metadata'); - done(); - }); - }); - }); - }); + }); + } + ); }); - it('should return an error if a complete multipart upload' + - ' request contains malformed xml', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest, undefined, log, () => { - const completeBody = 'Malformed xml'; - const completeRequest = { - bucketName, - objectKey, - namespace, - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - calculatedHash, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, - completeRequest, log, err => { - assert.strictEqual(err.is.MalformedXML, true); - assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, - 2); - done(); + it( + 'should complete a multipart upload even if etag is sent ' + 'in post body without quotes (a la Cyberduck)', + done => { + const partBody = Buffer.from('I am a part\n', 'utf8'); + initiateRequest.headers['x-amz-meta-stuff'] = 'I am some user metadata'; + async.waterfall( + [ + function waterfall1(next) { + bucketPut(authInfo, bucketPutRequest, log, next); + }, + function waterfall2(corsHeaders, next) { + initiateMultipartUpload(authInfo, initiateRequest, log, next); + }, + function waterfall3(result, corsHeaders, next) { + parseString(result, next); + }, + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5').update(partBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + partBody + ); + objectPutPart(authInfo, partRequest, undefined, log, () => { + const completeBody = + '' + + '' + + '1' + + // ETag without quotes + 
`${calculatedHash}` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + actionImplicitDenies: false, + }; + const awsVerifiedETag = '"953e9e776f285afc0bfcf1ab4668299d-1"'; + completeMultipartUpload(authInfo, completeRequest, log, (err, result) => { + assert.ifError(err); + parseString(result, (err, json) => { + assert.ifError(err); + assert.strictEqual( + json.CompleteMultipartUploadResult.Location[0], + `http://${bucketName}.s3.amazonaws.com` + `/${objectKey}` + ); + assert.strictEqual(json.CompleteMultipartUploadResult.Bucket[0], bucketName); + assert.strictEqual(json.CompleteMultipartUploadResult.Key[0], objectKey); + assert.strictEqual(json.CompleteMultipartUploadResult.ETag[0], awsVerifiedETag); + const MD = metadata.keyMaps.get(bucketName).get(objectKey); + assert(MD); + assert.strictEqual(MD['x-amz-meta-stuff'], 'I am some user metadata'); + done(); + }); + }); }); - }); - }); - }); - - it('should return an error if the complete ' + - 'multipart upload request contains xml that ' + - 'does not conform to the AWS spec', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest, undefined, log, () => { - // XML is missing any part listing so does - // not conform to the AWS spec - const completeBody = '' + - ''; - const completeRequest = { - bucketName, - namespace, - objectKey, - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - calculatedHash, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, completeRequest, log, err => { - assert(err.is.MalformedXML); - done(); - }); - }); - }); - }); - - it('should return an error if the complete ' + - 'multipart upload request contains xml with ' + - 'a part list that is not in numerical order', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); - const bufferBody = Buffer.from(fullSizedPart); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest1 = 
new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, fullSizedPart); - const partRequest2 = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '2', - uploadId: testUploadId, - }, - calculatedHash, - }, fullSizedPart); - objectPutPart(authInfo, partRequest1, undefined, log, () => { - objectPutPart(authInfo, partRequest2, undefined, log, () => { - const completeBody = '' + - '' + - '2' + - `"${calculatedHash}"` + - '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - ''; - const completeRequest = { + } + ); + } + ); + + it('should return an error if a complete multipart upload' + ' request contains malformed xml', done => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { bucketName, namespace, objectKey, - url: `/${objectKey}?uploadId=${testUploadId}`, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, + query: { + partNumber: '1', + uploadId: testUploadId, + }, calculatedHash, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, - completeRequest, log, err => { - assert(err.is.InvalidPartOrder); - assert.strictEqual(metadata.keyMaps - .get(mpuBucket).size, 3); - done(); - }); - }); - }); - }); - }); - - it('should return InvalidPart error if the complete ' + - 'multipart upload request contains xml with a missing part', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); - const bufferBody = Buffer.from(fullSizedPart); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, fullSizedPart); - objectPutPart(authInfo, partRequest, undefined, log, () => { - const completeBody = '' + - '' + - '99999' + - `"${calculatedHash}"` + - '' + - ''; - const completeRequest = { - bucketName, - namespace, - objectKey, - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: 
`${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - calculatedHash, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, completeRequest, log, err => { - assert(err.is.InvalidPart); - assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2); - done(); - }); - }); - }); - }); - - it('should return an error if the complete multipart upload request ' - + 'contains xml with a part ETag that does not match the md5 for ' - + 'the part that was actually sent', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const wrongMD5 = '3858f62230ac3c915f300c664312c11f-9'; - const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); - const partRequest1 = new DummyRequest({ - bucketName, - namespace, - objectKey, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - }, fullSizedPart); - const partRequest2 = new DummyRequest({ - bucketName, - namespace, - objectKey, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '2', - uploadId: testUploadId, - }, - }, postBody); - objectPutPart(authInfo, partRequest1, undefined, log, err => { - assert.deepStrictEqual(err, null); - const calculatedHash = partRequest1.calculatedHash; - objectPutPart(authInfo, partRequest2, undefined, log, err => { - assert.deepStrictEqual(err, null); - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - '' + - '2' + - `${wrongMD5}` + - '' + - ''; + }, + postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, () => { + const completeBody = 'Malformed xml'; const completeRequest = { bucketName, - namespace, objectKey, + namespace, url: `/${objectKey}?uploadId=${testUploadId}`, headers: { host: `${bucketName}.s3.amazonaws.com` }, query: { uploadId: testUploadId }, @@ -1035,189 +774,492 @@ describe('Multipart Upload API', () => { calculatedHash, actionImplicitDenies: false, }; - assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 3); - completeMultipartUpload(authInfo, - completeRequest, log, err => { + completeMultipartUpload(authInfo, completeRequest, log, err => { + assert.strictEqual(err.is.MalformedXML, true); + assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2); + done(); + }); + }); + } + ); + }); + + it( + 'should return an error if the complete ' + + 'multipart upload request contains xml that ' + + 'does not conform to the AWS spec', + done => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + 
{ + bucketName, + namespace, + objectKey, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, () => { + // XML is missing any part listing so does + // not conform to the AWS spec + const completeBody = '' + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + calculatedHash, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, err => { + assert(err.is.MalformedXML); + done(); + }); + }); + } + ); + } + ); + + it( + 'should return an error if the complete ' + + 'multipart upload request contains xml with ' + + 'a part list that is not in numerical order', + done => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); + const bufferBody = Buffer.from(fullSizedPart); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest1 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + fullSizedPart + ); + const partRequest2 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '2', + uploadId: testUploadId, + }, + calculatedHash, + }, + fullSizedPart + ); + objectPutPart(authInfo, partRequest1, undefined, log, () => { + objectPutPart(authInfo, partRequest2, undefined, log, () => { + const completeBody = + '' + + '' + + '2' + + `"${calculatedHash}"` + + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + calculatedHash, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, err => { + assert(err.is.InvalidPartOrder); + assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 3); + done(); + }); + }); + }); + } + ); + } + ); + + it( + 'should return InvalidPart error if the complete ' + + 'multipart upload request contains xml with a missing part', + done => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + const testUploadId = 
json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); + const bufferBody = Buffer.from(fullSizedPart); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + fullSizedPart + ); + objectPutPart(authInfo, partRequest, undefined, log, () => { + const completeBody = + '' + + '' + + '99999' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + calculatedHash, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, err => { assert(err.is.InvalidPart); + assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2); done(); }); - }); - }); - }); - }); + }); + } + ); + } + ); + + it( + 'should return an error if the complete multipart upload request ' + + 'contains xml with a part ETag that does not match the md5 for ' + + 'the part that was actually sent', + done => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const wrongMD5 = '3858f62230ac3c915f300c664312c11f-9'; + const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); + const partRequest1 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + }, + fullSizedPart + ); + const partRequest2 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: '2', + uploadId: testUploadId, + }, + }, + postBody + ); + objectPutPart(authInfo, partRequest1, undefined, log, err => { + assert.deepStrictEqual(err, null); + const calculatedHash = partRequest1.calculatedHash; + objectPutPart(authInfo, partRequest2, undefined, log, err => { + assert.deepStrictEqual(err, null); + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + '' + + '2' + + `${wrongMD5}` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + calculatedHash, + actionImplicitDenies: false, + }; + assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 3); + completeMultipartUpload(authInfo, completeRequest, log, err => { + assert(err.is.InvalidPart); + done(); + }); + }); + }); + } + ); + } + ); + + it( + 'should return an error if there is a part ' + 'other than the last part that is less than 5MB ' + 'in size', + done => { + 
async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest1 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'content-length': '100', + }, + parsedContentLength: 100, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + const partRequest2 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'content-length': '200', + }, + parsedContentLength: 200, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '2', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest1, undefined, log, () => { + objectPutPart(authInfo, partRequest2, undefined, log, () => { + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + '' + + '2' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?uploadId=${testUploadId}`, + query: { uploadId: testUploadId }, + post: completeBody, + calculatedHash, + actionImplicitDenies: false, + }; + assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 3); + completeMultipartUpload(authInfo, completeRequest, log, err => { + assert(err.is.EntityTooSmall); + done(); + }); + }); + }); + } + ); + } + ); - it('should return an error if there is a part ' + - 'other than the last part that is less than 5MB ' + - 'in size', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest1 = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'content-length': '100', - }, - parsedContentLength: 100, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - const partRequest2 = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'content-length': '200', - }, - parsedContentLength: 200, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '2', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest1, undefined, log, () => { - objectPutPart(authInfo, 
partRequest2, undefined, log, () => { - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - '' + - '2' + - `"${calculatedHash}"` + - '' + - ''; - const completeRequest = { + it('should aggregate the sizes of the parts', done => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until her + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest1 = new DummyRequest( + { bucketName, namespace, objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?uploadId=${testUploadId}`, - query: { uploadId: testUploadId }, - post: completeBody, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'content-length': '6000000', + }, + parsedContentLength: 6000000, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, calculatedHash, - actionImplicitDenies: false, - }; - assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 3); - completeMultipartUpload(authInfo, - completeRequest, log, err => { - assert(err.is.EntityTooSmall); - done(); - }); - }); - }); - }); - }); - - it('should aggregate the sizes of the parts', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until her - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest1 = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'content-length': '6000000', - }, - parsedContentLength: 6000000, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - const partRequest2 = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'content-length': '100', - }, - parsedContentLength: 100, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '2', - uploadId: testUploadId, - }, - post: postBody, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest1, undefined, log, () => { - objectPutPart(authInfo, partRequest2, undefined, log, () => { - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - '' + - '2' + - `"${calculatedHash}"` + - '' + - ''; - const completeRequest = { + }, + postBody + ); + const partRequest2 = new DummyRequest( + { bucketName, namespace, objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?uploadId=${testUploadId}`, - query: { uploadId: testUploadId }, - post: completeBody, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 
'content-length': '100', + }, + parsedContentLength: 100, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '2', + uploadId: testUploadId, + }, + post: postBody, calculatedHash, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, - completeRequest, log, (err, result) => { + }, + postBody + ); + objectPutPart(authInfo, partRequest1, undefined, log, () => { + objectPutPart(authInfo, partRequest2, undefined, log, () => { + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + '' + + '2' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?uploadId=${testUploadId}`, + query: { uploadId: testUploadId }, + post: completeBody, + calculatedHash, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, (err, result) => { assert.strictEqual(err, null); parseString(result, err => { assert.strictEqual(err, null); - const MD = metadata.keyMaps - .get(bucketName) - .get(objectKey); + const MD = metadata.keyMaps.get(bucketName).get(objectKey); assert(MD); - assert.strictEqual(MD['content-length'], - 6000100); + assert.strictEqual(MD['content-length'], 6000100); done(); }); }); + }); }); - }); - }); + } + ); }); it('should set a canned ACL for a multipart upload', done => { @@ -1226,7 +1268,7 @@ describe('Multipart Upload API', () => { namespace, objectKey, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-meta-stuff': 'I am some user metadata', 'x-amz-acl': 'authenticated-read', }, @@ -1234,103 +1276,105 @@ describe('Multipart Upload API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest1 = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'content-length': 6000000, - }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - const partRequest2 = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'content-length': 100, - }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '2', - uploadId: testUploadId, - }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest1, undefined, log, () => { - objectPutPart(authInfo, partRequest2, undefined, log, () => { - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - '' + - '2' + - `"${calculatedHash}"` + - '' + - ''; - const completeRequest = { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, 
json) => { + // Need to build request in here since do not have uploadId + // until here + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest1 = new DummyRequest( + { bucketName, namespace, objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?uploadId=${testUploadId}`, - query: { uploadId: testUploadId }, - post: completeBody, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'content-length': 6000000, + }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, calculatedHash, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, - completeRequest, log, (err, result) => { + }, + postBody + ); + const partRequest2 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'content-length': 100, + }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '2', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest1, undefined, log, () => { + objectPutPart(authInfo, partRequest2, undefined, log, () => { + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + '' + + '2' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?uploadId=${testUploadId}`, + query: { uploadId: testUploadId }, + post: completeBody, + calculatedHash, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, (err, result) => { assert.strictEqual(err, null); parseString(result, err => { assert.strictEqual(err, null); - const MD = metadata.keyMaps - .get(bucketName) - .get(objectKey); + const MD = metadata.keyMaps.get(bucketName).get(objectKey); assert(MD); - assert.strictEqual(MD.acl.Canned, - 'authenticated-read'); + assert.strictEqual(MD.acl.Canned, 'authenticated-read'); done(); }); }); + }); }); - }); - }); + } + ); }); it('should set specific ACL grants for a multipart upload', done => { - const granteeId = '79a59df900b949e55d96a1e698fbace' + - 'dfd6e09d98eacf8f8d5218e7cd47ef2be'; + const granteeId = '79a59df900b949e55d96a1e698fbace' + 'dfd6e09d98eacf8f8d5218e7cd47ef2be'; const granteeEmail = 'sampleAccount1@sampling.com'; const initiateRequest = { bucketName, namespace, objectKey, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-meta-stuff': 'I am some user metadata', 'x-amz-grant-read': `emailAddress="${granteeEmail}"`, }, @@ -1338,682 +1382,706 @@ describe('Multipart Upload API', () => { actionImplicitDenies: false, }; - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest1 = 
new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'content-length': 6000000, + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest1 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'content-length': 6000000, + }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + const partRequest2 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'content-length': 100, + }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '2', + uploadId: testUploadId, + }, + post: postBody, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest1, undefined, log, () => { + objectPutPart(authInfo, partRequest2, undefined, log, () => { + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + '' + + '2' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?uploadId=${testUploadId}`, + query: { uploadId: testUploadId }, + post: completeBody, + calculatedHash, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, (err, result) => { + assert.strictEqual(err, null); + parseString(result, err => { + assert.strictEqual(err, null); + const MD = metadata.keyMaps.get(bucketName).get(objectKey); + assert(MD); + assert.strictEqual(MD.acl.READ[0], granteeId); + done(); + }); + }); + }); + }); + } + ); + }); + + it('should abort/delete a multipart upload', done => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const bufferMD5 = Buffer.from(postBody, 'base64'); + const calculatedHash = bufferMD5.toString('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, () => { + const deleteRequest = { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + actionImplicitDenies: false, + }; + assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2); + 
multipartDelete(authInfo, deleteRequest, log, err => { + assert.strictEqual(err, null); + assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 0); + done(); + }); + }); + } + ); + }); + + it( + 'should return no error if attempt to abort/delete ' + + 'a multipart upload that does not exist and not using ' + + 'legacyAWSBehavior', + done => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => { + const mpuKeys = metadata.keyMaps.get(mpuBucket); + assert.strictEqual(mpuKeys.size, 1); + parseString(result, next); + }, + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const bufferMD5 = Buffer.from(postBody, 'base64'); + const calculatedHash = bufferMD5.toString('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, () => { + const deleteRequest = { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: 'non-existent-upload-id' }, + actionImplicitDenies: false, + }; + assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2); + multipartDelete(authInfo, deleteRequest, log, err => { + assert.strictEqual(err, null, `Expected no err but got ${err}`); + done(); + }); + }); + } + ); + } + ); + + it('should not leave orphans in data when overwriting an object with a MPU', done => { + const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); + const partBody = Buffer.from('I am a part\n', 'utf8'); + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + }, + fullSizedPart + ); + objectPutPart(authInfo, partRequest, undefined, log, (err, partCalculatedHash) => { + assert.deepStrictEqual(err, null); + next(null, testUploadId, partCalculatedHash); + }); }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, + (testUploadId, part1CalculatedHash, next) => { + const part2Request = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '2', + uploadId: testUploadId, + }, + }, + partBody + ); + objectPutPart(authInfo, part2Request, undefined, log, (err, part2CalculatedHash) => { + assert.deepStrictEqual(err, null); + next(null, testUploadId, part1CalculatedHash, part2CalculatedHash); + }); }, - calculatedHash, - }, postBody); - const partRequest2 = new DummyRequest({ - bucketName, - 
namespace, - objectKey, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'content-length': 100, + (testUploadId, part1CalculatedHash, part2CalculatedHash, next) => { + const completeBody = + '' + + '' + + '1' + + `"${part1CalculatedHash}"` + + '' + + '' + + '2' + + `"${part2CalculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, (err, result) => { + assert.deepStrictEqual(err, null); + next(null, result); + }); + }, + (result, next) => { + assert.strictEqual(ds[0], undefined); + assert.deepStrictEqual(ds[1].value, fullSizedPart); + assert.deepStrictEqual(ds[2].value, partBody); + initiateMultipartUpload(authInfo, initiateRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const overwritePartBody = Buffer.from('I am an overwrite part\n', 'utf8'); + const md5Hash = crypto.createHash('md5').update(overwritePartBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + overwritePartBody + ); + objectPutPart(authInfo, partRequest, undefined, log, () => + next(null, testUploadId, calculatedHash) + ); + }, + (testUploadId, calculatedHash, next) => { + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, next); + }, + ], + err => { + assert.deepStrictEqual(err, null); + assert.strictEqual(ds[0], undefined); + assert.strictEqual(ds[1], undefined); + assert.strictEqual(ds[2], undefined); + assert.deepStrictEqual(ds[3].value, Buffer.from('I am an overwrite part\n', 'utf8')); + done(); + } + ); + }); + + it('should not leave orphans in data when overwriting an object part', done => { + const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); + const overWritePart = Buffer.from('Overwrite content', 'utf8'); + let uploadId; + + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + uploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const requestObj = { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, + query: { + partNumber: '1', + uploadId, + }, + }; + const partRequest = new DummyRequest(requestObj, fullSizedPart); + objectPutPart(authInfo, partRequest, undefined, log, err => { + assert.deepStrictEqual(err, null); + next(null, requestObj); + }); }, - url: 
`/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '2', - uploadId: testUploadId, + (requestObj, next) => { + assert.deepStrictEqual(ds[1].value, fullSizedPart); + const partRequest = new DummyRequest(requestObj, overWritePart); + objectPutPart(authInfo, partRequest, undefined, log, (err, partCalculatedHash) => { + assert.deepStrictEqual(err, null); + next(null, partCalculatedHash); + }); }, - post: postBody, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest1, undefined, log, () => { - objectPutPart(authInfo, partRequest2, undefined, log, () => { - const completeBody = '' + + (partCalculatedHash, next) => { + const completeBody = + '' + '' + '1' + - `"${calculatedHash}"` + - '' + - '' + - '2' + - `"${calculatedHash}"` + + `"${partCalculatedHash}"` + '' + ''; + const completeRequest = { bucketName, namespace, objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${uploadId}`, headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?uploadId=${testUploadId}`, - query: { uploadId: testUploadId }, + query: { uploadId }, post: completeBody, - calculatedHash, actionImplicitDenies: false, }; - completeMultipartUpload(authInfo, - completeRequest, log, (err, result) => { - assert.strictEqual(err, null); - parseString(result, err => { - assert.strictEqual(err, null); - const MD = metadata.keyMaps - .get(bucketName) - .get(objectKey); - assert(MD); - assert.strictEqual(MD.acl.READ[0], granteeId); - done(); - }); - }); - }); - }); - }); + completeMultipartUpload(authInfo, completeRequest, log, next); + }, + ], + err => { + assert.deepStrictEqual(err, null); + assert.strictEqual(ds[0], undefined); + assert.deepStrictEqual(ds[1], undefined); + assert.deepStrictEqual(ds[2].value, overWritePart); + done(); + } + ); }); - it('should abort/delete a multipart upload', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const bufferMD5 = Buffer.from(postBody, 'base64'); - const calculatedHash = bufferMD5.toString('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '1', - uploadId: testUploadId, + it('should leave orphaned data when overwriting an object part during completeMPU', done => { + const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); + const overWritePart = Buffer.from('Overwrite content', 'utf8'); + let uploadId; + + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + uploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const requestObj = { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, + query: { + partNumber: '1', + uploadId, + }, + }; + const partRequest = new DummyRequest(requestObj, fullSizedPart); + objectPutPart(authInfo, partRequest, undefined, log, (err, 
partCalculatedHash) => { + assert.deepStrictEqual(err, null); + next(null, requestObj, partCalculatedHash); + }); }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest, undefined, log, () => { - const deleteRequest = { - bucketName, - namespace, - objectKey, - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - actionImplicitDenies: false, - }; - assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2); - multipartDelete(authInfo, deleteRequest, log, err => { - assert.strictEqual(err, null); - assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 0); - done(); - }); - }); - }); + (requestObj, partCalculatedHash, next) => { + assert.deepStrictEqual(ds[1].value, fullSizedPart); + async.parallel( + [ + done => { + const partRequest = new DummyRequest(requestObj, overWritePart); + objectPutPart(authInfo, partRequest, undefined, log, err => { + assert.deepStrictEqual(err, null); + done(); + }); + }, + done => { + const completeBody = + '' + + '' + + '1' + + `"${partCalculatedHash}"` + + '' + + ''; + + const completeRequest = { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${uploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId }, + post: completeBody, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, done); + }, + ], + err => next(err) + ); + }, + ], + err => { + assert.deepStrictEqual(err, null); + assert.strictEqual(ds[0], undefined); + assert.deepStrictEqual(ds[1].value, fullSizedPart); + assert.deepStrictEqual(ds[2].value, overWritePart); + done(); + } + ); }); - it('should return no error if attempt to abort/delete ' + - 'a multipart upload that does not exist and not using ' + - 'legacyAWSBehavior', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => { - const mpuKeys = metadata.keyMaps.get(mpuBucket); - assert.strictEqual(mpuKeys.size, 1); - parseString(result, next); - }, - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const bufferMD5 = Buffer.from(postBody, 'base64'); - const calculatedHash = bufferMD5.toString('hex'); - const partRequest = new DummyRequest({ + it('should throw an error on put of an object part with an invalid ' + 'uploadId', done => { + const testUploadId = 'invalidUploadID'; + const partRequest = new DummyRequest( + { bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, query: { partNumber: '1', uploadId: testUploadId, }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest, undefined, log, () => { - const deleteRequest = { - bucketName, - namespace, - objectKey, - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: 'non-existent-upload-id' }, - actionImplicitDenies: false, - }; - assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2); - multipartDelete(authInfo, deleteRequest, log, err => { - assert.strictEqual(err, null, - `Expected no err but got ${err}`); - done(); - }); - }); - }); - }); - - it('should not leave orphans in data when 
overwriting an object with a MPU', - done => { - const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); - const partBody = Buffer.from('I am a part\n', 'utf8'); - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - }, fullSizedPart); - objectPutPart(authInfo, partRequest, undefined, log, (err, - partCalculatedHash) => { - assert.deepStrictEqual(err, null); - next(null, testUploadId, partCalculatedHash); - }); - }, - (testUploadId, part1CalculatedHash, next) => { - const part2Request = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '2', - uploadId: testUploadId, - }, - }, partBody); - objectPutPart(authInfo, part2Request, undefined, log, (err, - part2CalculatedHash) => { - assert.deepStrictEqual(err, null); - next(null, testUploadId, part1CalculatedHash, - part2CalculatedHash); - }); - }, - (testUploadId, part1CalculatedHash, part2CalculatedHash, next) => { - const completeBody = '' + - '' + - '1' + - `"${part1CalculatedHash}"` + - '' + - '' + - '2' + - `"${part2CalculatedHash}"` + - '' + - ''; - const completeRequest = { - bucketName, - namespace, - objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, completeRequest, log, - (err, result) => { - assert.deepStrictEqual(err, null); - next(null, result); - }); - }, - (result, next) => { - assert.strictEqual(ds[0], undefined); - assert.deepStrictEqual(ds[1].value, fullSizedPart); - assert.deepStrictEqual(ds[2].value, partBody); - initiateMultipartUpload(authInfo, initiateRequest, log, next); - }, - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const overwritePartBody = - Buffer.from('I am an overwrite part\n', 'utf8'); - const md5Hash = crypto.createHash('md5') - .update(overwritePartBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, overwritePartBody); - objectPutPart(authInfo, partRequest, undefined, log, () => - next(null, testUploadId, calculatedHash)); }, - (testUploadId, calculatedHash, next) => { - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - ''; - const completeRequest = { - bucketName, - namespace, - objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - 
actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, completeRequest, log, next); - }, - ], - err => { - assert.deepStrictEqual(err, null); - assert.strictEqual(ds[0], undefined); - assert.strictEqual(ds[1], undefined); - assert.strictEqual(ds[2], undefined); - assert.deepStrictEqual(ds[3].value, - Buffer.from('I am an overwrite part\n', 'utf8')); - done(); - }); - }); - - it('should not leave orphans in data when overwriting an object part', - done => { - const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); - const overWritePart = Buffer.from('Overwrite content', 'utf8'); - let uploadId; + postBody + ); - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - uploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const requestObj = { - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, - query: { - partNumber: '1', - uploadId, - }, - }; - const partRequest = new DummyRequest(requestObj, fullSizedPart); - objectPutPart(authInfo, partRequest, undefined, log, err => { - assert.deepStrictEqual(err, null); - next(null, requestObj); - }); - }, - (requestObj, next) => { - assert.deepStrictEqual(ds[1].value, fullSizedPart); - const partRequest = new DummyRequest(requestObj, overWritePart); - objectPutPart(authInfo, partRequest, undefined, log, - (err, partCalculatedHash) => { - assert.deepStrictEqual(err, null); - next(null, partCalculatedHash); - }); - }, - (partCalculatedHash, next) => { - const completeBody = '' + - '' + - '1' + - `"${partCalculatedHash}"` + - '' + - ''; - - const completeRequest = { - bucketName, - namespace, - objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${uploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId }, - post: completeBody, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, completeRequest, log, next); - }, - ], - err => { - assert.deepStrictEqual(err, null); - assert.strictEqual(ds[0], undefined); - assert.deepStrictEqual(ds[1], undefined); - assert.deepStrictEqual(ds[2].value, overWritePart); - done(); - }); + bucketPut(authInfo, bucketPutRequest, log, () => + objectPutPart(authInfo, partRequest, undefined, log, err => { + assert(err.is.NoSuchUpload); + done(); + }) + ); }); - it('should leave orphaned data when overwriting an object part during completeMPU', - done => { - const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); - const overWritePart = Buffer.from('Overwrite content', 'utf8'); - let uploadId; - - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - uploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const requestObj = { - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`, - query: { - partNumber: '1', - uploadId, - }, - }; - const partRequest = new DummyRequest(requestObj, fullSizedPart); - objectPutPart(authInfo, partRequest, undefined, log, (err, partCalculatedHash) => { - assert.deepStrictEqual(err, null); - next(null, requestObj, 
partCalculatedHash); - }); - }, - (requestObj, partCalculatedHash, next) => { - assert.deepStrictEqual(ds[1].value, fullSizedPart); - async.parallel([ - done => { - const partRequest = new DummyRequest(requestObj, overWritePart); - objectPutPart(authInfo, partRequest, undefined, log, err => { - assert.deepStrictEqual(err, null); - done(); - }); - }, - done => { - const completeBody = '' + - '' + - '1' + - `"${partCalculatedHash}"` + - '' + - ''; - - const completeRequest = { + it( + 'should complete an MPU with fewer parts than were originally ' + 'put and delete data from left out parts', + done => { + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); + const partRequest1 = new DummyRequest( + { bucketName, namespace, objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${uploadId}`, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId }, - post: completeBody, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, completeRequest, log, done); - }, - ], err => next(err)); - }, - ], - err => { - assert.deepStrictEqual(err, null); - assert.strictEqual(ds[0], undefined); - assert.deepStrictEqual(ds[1].value, fullSizedPart); - assert.deepStrictEqual(ds[2].value, overWritePart); - done(); - }); - }); - - it('should throw an error on put of an object part with an invalid ' + - 'uploadId', done => { - const testUploadId = 'invalidUploadID'; - const partRequest = new DummyRequest({ - bucketName, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - }, postBody); + query: { + partNumber: '1', + uploadId: testUploadId, + }, + }, + fullSizedPart + ); + const partRequest2 = new DummyRequest( + { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: '2', + uploadId: testUploadId, + }, + }, + postBody + ); + objectPutPart(authInfo, partRequest1, undefined, log, err => { + assert.deepStrictEqual(err, null); + const md5Hash = crypto.createHash('md5').update(fullSizedPart); + const calculatedHash = md5Hash.digest('hex'); + objectPutPart(authInfo, partRequest2, undefined, log, err => { + assert.deepStrictEqual(err, null); + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + calculatedHash, + actionImplicitDenies: false, + }; + // show that second part data is there + assert(ds[2]); + completeMultipartUpload(authInfo, completeRequest, log, err => { + assert.strictEqual(err, null); + process.nextTick(() => { + // data has been deleted + assert.strictEqual(ds[2], undefined); + done(); + }); + }); + }); + }); + } + ); + } + ); - bucketPut(authInfo, bucketPutRequest, log, () => - objectPutPart(authInfo, 
partRequest, undefined, log, err => { - assert(err.is.NoSuchUpload); - done(); - }) + it('should not delete data locations on completeMultipartUpload retry', done => { + const partBody = Buffer.from('foo', 'utf8'); + let origDeleteObject; + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, err => next(err)), + next => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partRequest = _createPutPartRequest(testUploadId, 1, partBody); + objectPutPart(authInfo, partRequest, undefined, log, (err, eTag) => next(err, eTag, testUploadId)); + }, + (eTag, testUploadId, next) => { + origDeleteObject = metadataBackend.deleteObject; + metadataBackend.deleteObject = (bucketName, objName, params, log, cb) => { + // prevent deletions from MPU bucket only + if (bucketName === mpuBucket) { + return process.nextTick(() => cb(errors.InternalError)); + } + return origDeleteObject(bucketName, objName, params, log, cb); + }; + const parts = [{ partNumber: 1, eTag }]; + const completeRequest = _createCompleteMpuRequest(testUploadId, parts); + completeMultipartUpload(authInfo, completeRequest, log, err => { + // expect a failure here because we could not + // remove the overview key + assert(err.is.InternalError); + next(null, eTag, testUploadId); + }); + }, + (eTag, testUploadId, next) => { + // allow MPU bucket metadata deletions to happen again + metadataBackend.deleteObject = origDeleteObject; + // retry the completeMultipartUpload with the same + // metadata, as an application would normally do after + // a failure + const parts = [{ partNumber: 1, eTag }]; + const completeRequest = _createCompleteMpuRequest(testUploadId, parts); + completeMultipartUpload(authInfo, completeRequest, log, next); + }, + ], + err => { + assert.ifError(err); + // check that the original data has not been deleted + // during the replay + assert.strictEqual(ds[0], undefined); + assert.notStrictEqual(ds[1], undefined); + assert.deepStrictEqual(ds[1].value, partBody); + done(); + } ); }); - it('should complete an MPU with fewer parts than were originally ' + - 'put and delete data from left out parts', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024); - const partRequest1 = new DummyRequest({ - bucketName, - namespace, - objectKey, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - }, fullSizedPart); - const partRequest2 = new DummyRequest({ - bucketName, - namespace, - objectKey, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '2', - uploadId: testUploadId, - }, - }, postBody); - objectPutPart(authInfo, partRequest1, undefined, log, err => { - assert.deepStrictEqual(err, null); - const md5Hash = crypto.createHash('md5').update(fullSizedPart); + it('should abort an MPU and delete its MD 
if it has been created by a failed complete before', done => { + const delMeta = metadataBackend.deleteObject; + metadataBackend.deleteObject = (bucketName, objName, params, log, cb) => cb(errors.InternalError); + const partBody = Buffer.from('I am a part\n', 'utf8'); + initiateRequest.headers['x-amz-meta-stuff'] = 'I am some user metadata'; + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5').update(partBody); const calculatedHash = md5Hash.digest('hex'); - objectPutPart(authInfo, partRequest2, undefined, log, err => { - assert.deepStrictEqual(err, null); - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - ''; + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + partBody + ); + objectPutPart(authInfo, partRequest, undefined, log, () => { + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + ''; const completeRequest = { bucketName, namespace, objectKey, + parsedHost: 's3.amazonaws.com', url: `/${objectKey}?uploadId=${testUploadId}`, headers: { host: `${bucketName}.s3.amazonaws.com` }, query: { uploadId: testUploadId }, post: completeBody, - calculatedHash, actionImplicitDenies: false, }; - // show that second part data is there - assert(ds[2]); - completeMultipartUpload(authInfo, - completeRequest, log, err => { - assert.strictEqual(err, null); - process.nextTick(() => { - // data has been deleted - assert.strictEqual(ds[2], undefined); - done(); - }); - }); - }); - }); - }); - }); - - it('should not delete data locations on completeMultipartUpload retry', - done => { - const partBody = Buffer.from('foo', 'utf8'); - let origDeleteObject; - async.waterfall([ - next => - bucketPut(authInfo, bucketPutRequest, log, err => next(err)), - next => - initiateMultipartUpload(authInfo, initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const partRequest = _createPutPartRequest(testUploadId, 1, - partBody); - objectPutPart(authInfo, partRequest, undefined, log, - (err, eTag) => next(err, eTag, testUploadId)); - }, - (eTag, testUploadId, next) => { - origDeleteObject = metadataBackend.deleteObject; - metadataBackend.deleteObject = ( - bucketName, objName, params, log, cb) => { - // prevent deletions from MPU bucket only - if (bucketName === mpuBucket) { - return process.nextTick( - () => cb(errors.InternalError)); - } - return origDeleteObject( - bucketName, objName, params, log, cb); - }; - const parts = [{ partNumber: 1, eTag }]; - const completeRequest = _createCompleteMpuRequest( - testUploadId, parts); - completeMultipartUpload(authInfo, completeRequest, log, err => { - // expect a failure here because we could not - // remove the overview key - assert(err.is.InternalError); - next(null, eTag, testUploadId); - }); - }, - (eTag, testUploadId, next) => { - // allow MPU bucket metadata deletions to happen again - metadataBackend.deleteObject = 
origDeleteObject; - // retry the completeMultipartUpload with the same - // metadata, as an application would normally do after - // a failure - const parts = [{ partNumber: 1, eTag }]; - const completeRequest = _createCompleteMpuRequest( - testUploadId, parts); - completeMultipartUpload(authInfo, completeRequest, log, next); - }, - ], err => { - assert.ifError(err); - // check that the original data has not been deleted - // during the replay - assert.strictEqual(ds[0], undefined); - assert.notStrictEqual(ds[1], undefined); - assert.deepStrictEqual(ds[1].value, partBody); - done(); - }); - }); - - it('should abort an MPU and delete its MD if it has been created by a failed complete before', done => { - const delMeta = metadataBackend.deleteObject; - metadataBackend.deleteObject = (bucketName, objName, params, log, cb) => cb(errors.InternalError); - const partBody = Buffer.from('I am a part\n', 'utf8'); - initiateRequest.headers['x-amz-meta-stuff'] = - 'I am some user metadata'; - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - assert.ifError(err); - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5').update(partBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, partBody); - objectPutPart(authInfo, partRequest, undefined, log, () => { - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - ''; - const completeRequest = { - bucketName, - namespace, - objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, - completeRequest, log, err => { + completeMultipartUpload(authInfo, completeRequest, log, err => { assert(err.is.InternalError); - const MD = metadata.keyMaps.get(bucketName) - .get(objectKey); + const MD = metadata.keyMaps.get(bucketName).get(objectKey); assert(MD); - assert.strictEqual(MD['x-amz-meta-stuff'], - 'I am some user metadata'); + assert.strictEqual(MD['x-amz-meta-stuff'], 'I am some user metadata'); assert.strictEqual(MD.uploadId, testUploadId); metadataBackend.deleteObject = delMeta; @@ -2033,165 +2101,171 @@ describe('Multipart Upload API', () => { done(); }); }); - }); - }); + }); + } + ); }); it('should complete an MPU and promote its MD if it has been created by a failed complete before', done => { const delMeta = metadataBackend.deleteObject; metadataBackend.deleteObject = (bucketName, objName, params, log, cb) => cb(errors.InternalError); - const partBody = Buffer.from('I am a part\n', 'utf8'); - initiateRequest.headers['x-amz-meta-stuff'] = - 'I am some user metadata'; - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - assert.ifError(err); - const testUploadId = - 
json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5').update(partBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, partBody); - objectPutPart(authInfo, partRequest, undefined, log, () => { - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - ''; - const completeRequest = { - bucketName, - namespace, - objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, - completeRequest, log, err => { + const partBody = Buffer.from('I am a part\n', 'utf8'); + initiateRequest.headers['x-amz-meta-stuff'] = 'I am some user metadata'; + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5').update(partBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + partBody + ); + objectPutPart(authInfo, partRequest, undefined, log, () => { + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, err => { assert(err.is.InternalError); const MD = metadata.keyMaps.get(bucketName).get(objectKey); assert(MD); - assert.strictEqual(MD['x-amz-meta-stuff'], - 'I am some user metadata'); + assert.strictEqual(MD['x-amz-meta-stuff'], 'I am some user metadata'); assert.strictEqual(MD.uploadId, testUploadId); metadataBackend.deleteObject = delMeta; assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2); - completeMultipartUpload(authInfo, - completeRequest, log, err => { - assert.ifError(err); - const MD = metadata.keyMaps.get(bucketName) - .get(objectKey); - assert(MD); - assert.strictEqual(MD['x-amz-meta-stuff'], - 'I am some user metadata'); - assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 0); - done(); - }); + completeMultipartUpload(authInfo, completeRequest, log, err => { + assert.ifError(err); + const MD = metadata.keyMaps.get(bucketName).get(objectKey); + assert(MD); + assert.strictEqual(MD['x-amz-meta-stuff'], 'I am some user metadata'); + assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 0); + done(); + }); }); - }); - }); + }); + } + ); }); it('should not pass needOplogUpdate when writing new object', done => { - async.series([ - next => 
bucketPut(authInfo, bucketPutRequest, log, next), - async () => _uploadMpuObject(), - async () => { - const options = metadataswitch.putObjectMD.lastCall.args[3]; - assert.strictEqual(options.needOplogUpdate, undefined); - assert.strictEqual(options.originOp, undefined); - }, - ], done); + async.series( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + async () => _uploadMpuObject(), + async () => { + const options = metadataswitch.putObjectMD.lastCall.args[3]; + assert.strictEqual(options.needOplogUpdate, undefined); + assert.strictEqual(options.originOp, undefined); + }, + ], + done + ); }); it('should not pass needOplogUpdate when replacing object', done => { - async.series([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - async () => _uploadMpuObject(), - async () => _uploadMpuObject(), - async () => { - const options = metadataswitch.putObjectMD.lastCall.args[3]; - assert.strictEqual(options.needOplogUpdate, undefined); - assert.strictEqual(options.originOp, undefined); - }, - ], done); + async.series( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + async () => _uploadMpuObject(), + async () => _uploadMpuObject(), + async () => { + const options = metadataswitch.putObjectMD.lastCall.args[3]; + assert.strictEqual(options.needOplogUpdate, undefined); + assert.strictEqual(options.originOp, undefined); + }, + ], + done + ); }); it('should pass needOplogUpdate to metadata when replacing archived object', done => { const archived = { - archiveInfo: { foo: 0, bar: 'stuff' } + archiveInfo: { foo: 0, bar: 'stuff' }, }; - async.series([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - async () => _uploadMpuObject(), - next => fakeMetadataArchive(bucketName, objectKey, undefined, archived, next), - async () => _uploadMpuObject(), - async () => { - const options = metadataswitch.putObjectMD.lastCall.args[3]; - assert.strictEqual(options.needOplogUpdate, true); - assert.strictEqual(options.originOp, 's3:ReplaceArchivedObject'); - }, - ], done); + async.series( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + async () => _uploadMpuObject(), + next => fakeMetadataArchive(bucketName, objectKey, undefined, archived, next), + async () => _uploadMpuObject(), + async () => { + const options = metadataswitch.putObjectMD.lastCall.args[3]; + assert.strictEqual(options.needOplogUpdate, true); + assert.strictEqual(options.originOp, 's3:ReplaceArchivedObject'); + }, + ], + done + ); }); it('should pass needOplogUpdate to metadata when replacing archived object in version suspended bucket', done => { const archived = { - archiveInfo: { foo: 0, bar: 'stuff' } + archiveInfo: { foo: 0, bar: 'stuff' }, }; - const suspendVersioningRequest = versioningTestUtils - .createBucketPutVersioningReq(bucketName, 'Suspended'); - async.series([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - next => bucketPutVersioning(authInfo, suspendVersioningRequest, log, next), - async () => _uploadMpuObject(), - next => fakeMetadataArchive(bucketName, objectKey, undefined, archived, next), - async () => _uploadMpuObject(), - async () => { - const options = metadataswitch.putObjectMD.lastCall.args[3]; - assert.strictEqual(options.needOplogUpdate, true); - assert.strictEqual(options.originOp, 's3:ReplaceArchivedObject'); - }, - ], done); + const suspendVersioningRequest = versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Suspended'); + async.series( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + next => 
bucketPutVersioning(authInfo, suspendVersioningRequest, log, next), + async () => _uploadMpuObject(), + next => fakeMetadataArchive(bucketName, objectKey, undefined, archived, next), + async () => _uploadMpuObject(), + async () => { + const options = metadataswitch.putObjectMD.lastCall.args[3]; + assert.strictEqual(options.needOplogUpdate, true); + assert.strictEqual(options.originOp, 's3:ReplaceArchivedObject'); + }, + ], + done + ); }); }); describe('complete mpu with versioning', () => { - const objData = ['foo0', 'foo1', 'foo2'].map(str => - Buffer.from(str, 'utf8')); + const objData = ['foo0', 'foo1', 'foo2'].map(str => Buffer.from(str, 'utf8')); - const enableVersioningRequest = - versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Enabled'); - const suspendVersioningRequest = versioningTestUtils - .createBucketPutVersioningReq(bucketName, 'Suspended'); + const enableVersioningRequest = versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Enabled'); + const suspendVersioningRequest = versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Suspended'); let testPutObjectRequests; beforeEach(done => { cleanup(); testPutObjectRequests = objData - .slice(0, 2) - .map(data => versioningTestUtils.createPutObjectRequest( - bucketName, objectKey, data)); + .slice(0, 2) + .map(data => versioningTestUtils.createPutObjectRequest(bucketName, objectKey, data)); bucketPut(authInfo, bucketPutRequest, log, done); }); @@ -2200,290 +2274,270 @@ describe('complete mpu with versioning', () => { done(); }); - it('should delete null version when creating new null version, ' + - 'when null version is the latest version', done => { - async.waterfall([ - next => bucketPutVersioning(authInfo, - suspendVersioningRequest, log, err => next(err)), - next => initiateMultipartUpload( - authInfo, initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - const partBody = objData[2]; - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const partRequest = _createPutPartRequest(testUploadId, 1, - partBody); - objectPutPart(authInfo, partRequest, undefined, log, - (err, eTag) => next(err, eTag, testUploadId)); - }, - (eTag, testUploadId, next) => { - const origPutObject = metadataBackend.putObject; - let callCount = 0; - metadataBackend.putObject = - (putBucketName, objName, objVal, params, log, cb) => { - if (callCount === 0) { - // first putObject sets the completeInProgress flag in the overview key - assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`); - assert.strictEqual( - objName, `overview${splitter}${objectKey}${splitter}${testUploadId}`); - assert.strictEqual(objVal.completeInProgress, true); - } else { - assert.strictEqual(params.replayId, testUploadId); + it( + 'should delete null version when creating new null version, ' + 'when null version is the latest version', + done => { + async.waterfall( + [ + next => bucketPutVersioning(authInfo, suspendVersioningRequest, log, err => next(err)), + next => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + const partBody = objData[2]; + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partRequest = _createPutPartRequest(testUploadId, 1, partBody); + objectPutPart(authInfo, partRequest, undefined, log, (err, eTag) => + next(err, eTag, testUploadId) + ); + }, + (eTag, testUploadId, next) => { + const origPutObject = 
metadataBackend.putObject; + let callCount = 0; + metadataBackend.putObject = (putBucketName, objName, objVal, params, log, cb) => { + if (callCount === 0) { + // first putObject sets the completeInProgress flag in the overview key + assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`); + assert.strictEqual( + objName, + `overview${splitter}${objectKey}${splitter}${testUploadId}` + ); + assert.strictEqual(objVal.completeInProgress, true); + } else { + assert.strictEqual(params.replayId, testUploadId); + metadataBackend.putObject = origPutObject; + } + origPutObject(putBucketName, objName, objVal, params, log, cb); + callCount += 1; + }; + const parts = [{ partNumber: 1, eTag }]; + const completeRequest = _createCompleteMpuRequest(testUploadId, parts); + completeMultipartUpload(authInfo, completeRequest, log, err => next(err, testUploadId)); + }, + (testUploadId, next) => { + const origPutObject = metadataBackend.putObject; + metadataBackend.putObject = (putBucketName, objName, objVal, params, log, cb) => { + assert.strictEqual(params.oldReplayId, testUploadId); metadataBackend.putObject = origPutObject; - } - origPutObject( - putBucketName, objName, objVal, params, log, cb); - callCount += 1; - }; - const parts = [{ partNumber: 1, eTag }]; - const completeRequest = _createCompleteMpuRequest(testUploadId, - parts); - completeMultipartUpload(authInfo, completeRequest, log, - err => next(err, testUploadId)); - }, - (testUploadId, next) => { - const origPutObject = metadataBackend.putObject; - metadataBackend.putObject = - (putBucketName, objName, objVal, params, log, cb) => { - assert.strictEqual(params.oldReplayId, testUploadId); - metadataBackend.putObject = origPutObject; - origPutObject( - putBucketName, objName, objVal, params, log, cb); - }; - // overwrite null version with a non-MPU object - objectPut(authInfo, testPutObjectRequests[1], - undefined, log, err => next(err)); - }, - ], err => { - assert.ifError(err, `Unexpected err: ${err}`); - done(); - }); - }); + origPutObject(putBucketName, objName, objVal, params, log, cb); + }; + // overwrite null version with a non-MPU object + objectPut(authInfo, testPutObjectRequests[1], undefined, log, err => next(err)); + }, + ], + err => { + assert.ifError(err, `Unexpected err: ${err}`); + done(); + } + ); + } + ); + + it( + 'should delete null version when creating new null version, ' + 'when null version is not the latest version', + done => { + async.waterfall( + [ + // putting null version: put obj before versioning configured + next => objectPut(authInfo, testPutObjectRequests[0], undefined, log, err => next(err)), + next => bucketPutVersioning(authInfo, enableVersioningRequest, log, err => next(err)), + // put another version: + next => objectPut(authInfo, testPutObjectRequests[1], undefined, log, err => next(err)), + next => bucketPutVersioning(authInfo, suspendVersioningRequest, log, err => next(err)), + next => { + versioningTestUtils.assertDataStoreValues(ds, objData.slice(0, 2)); + initiateMultipartUpload(authInfo, initiateRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + const partBody = objData[2]; + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partRequest = _createPutPartRequest(testUploadId, 1, partBody); + objectPutPart(authInfo, partRequest, undefined, log, (err, eTag) => + next(err, eTag, testUploadId) + ); + }, + (eTag, testUploadId, next) => { + const origPutObject = metadataBackend.putObject; + let callCount 
= 0; + metadataBackend.putObject = (putBucketName, objName, objVal, params, log, cb) => { + if (callCount === 0) { + // first putObject sets the completeInProgress flag in the overview key + assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`); + assert.strictEqual( + objName, + `overview${splitter}${objectKey}${splitter}${testUploadId}` + ); + assert.strictEqual(objVal.completeInProgress, true); + } else { + assert.strictEqual(params.replayId, testUploadId); + metadataBackend.putObject = origPutObject; + } + origPutObject(putBucketName, objName, objVal, params, log, cb); + callCount += 1; + }; + const parts = [{ partNumber: 1, eTag }]; + const completeRequest = _createCompleteMpuRequest(testUploadId, parts); + completeMultipartUpload(authInfo, completeRequest, log, err => next(err, testUploadId)); + }, + (testUploadId, next) => { + versioningTestUtils.assertDataStoreValues(ds, [undefined, objData[1], objData[2]]); - it('should delete null version when creating new null version, ' + - 'when null version is not the latest version', done => { - async.waterfall([ - // putting null version: put obj before versioning configured - next => objectPut(authInfo, testPutObjectRequests[0], - undefined, log, err => next(err)), - next => bucketPutVersioning(authInfo, - enableVersioningRequest, log, err => next(err)), - // put another version: - next => objectPut(authInfo, testPutObjectRequests[1], - undefined, log, err => next(err)), - next => bucketPutVersioning(authInfo, - suspendVersioningRequest, log, err => next(err)), - next => { - versioningTestUtils.assertDataStoreValues( - ds, objData.slice(0, 2)); - initiateMultipartUpload(authInfo, initiateRequest, log, next); - }, - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - const partBody = objData[2]; - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const partRequest = _createPutPartRequest(testUploadId, 1, - partBody); - objectPutPart(authInfo, partRequest, undefined, log, - (err, eTag) => next(err, eTag, testUploadId)); - }, - (eTag, testUploadId, next) => { - const origPutObject = metadataBackend.putObject; - let callCount = 0; - metadataBackend.putObject = - (putBucketName, objName, objVal, params, log, cb) => { - if (callCount === 0) { - // first putObject sets the completeInProgress flag in the overview key - assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`); - assert.strictEqual( - objName, `overview${splitter}${objectKey}${splitter}${testUploadId}`); - assert.strictEqual(objVal.completeInProgress, true); - } else { - assert.strictEqual(params.replayId, testUploadId); + const origPutObject = metadataBackend.putObject; + metadataBackend.putObject = (putBucketName, objName, objVal, params, log, cb) => { + assert.strictEqual(params.oldReplayId, testUploadId); metadataBackend.putObject = origPutObject; - } - origPutObject( - putBucketName, objName, objVal, params, log, cb); - callCount += 1; - }; - const parts = [{ partNumber: 1, eTag }]; - const completeRequest = _createCompleteMpuRequest(testUploadId, - parts); - completeMultipartUpload(authInfo, completeRequest, log, - err => next(err, testUploadId)); - }, - (testUploadId, next) => { - versioningTestUtils.assertDataStoreValues( - ds, [undefined, objData[1], objData[2]]); - - const origPutObject = metadataBackend.putObject; - metadataBackend.putObject = - (putBucketName, objName, objVal, params, log, cb) => { - assert.strictEqual(params.oldReplayId, testUploadId); - 
metadataBackend.putObject = origPutObject; - origPutObject( - putBucketName, objName, objVal, params, log, cb); - }; - // overwrite null version with a non-MPU object - objectPut(authInfo, testPutObjectRequests[1], - undefined, log, err => next(err)); - }, - ], err => { - assert.ifError(err, `Unexpected err: ${err}`); - done(); - }); - }); + origPutObject(putBucketName, objName, objVal, params, log, cb); + }; + // overwrite null version with a non-MPU object + objectPut(authInfo, testPutObjectRequests[1], undefined, log, err => next(err)); + }, + ], + err => { + assert.ifError(err, `Unexpected err: ${err}`); + done(); + } + ); + } + ); - it('should finish deleting metadata on completeMultipartUpload retry', - done => { + it('should finish deleting metadata on completeMultipartUpload retry', done => { let origDeleteObject; - async.waterfall([ - next => bucketPutVersioning(authInfo, - enableVersioningRequest, log, err => next(err)), - next => - initiateMultipartUpload(authInfo, initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - const partBody = objData[2]; - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const partRequest = _createPutPartRequest(testUploadId, 1, - partBody); - objectPutPart(authInfo, partRequest, undefined, log, - (err, eTag) => next(err, eTag, testUploadId)); - }, - (eTag, testUploadId, next) => { - origDeleteObject = metadataBackend.deleteObject; - metadataBackend.deleteObject = ( - bucketName, objName, params, log, cb) => { + async.waterfall( + [ + next => bucketPutVersioning(authInfo, enableVersioningRequest, log, err => next(err)), + next => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + const partBody = objData[2]; + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partRequest = _createPutPartRequest(testUploadId, 1, partBody); + objectPutPart(authInfo, partRequest, undefined, log, (err, eTag) => next(err, eTag, testUploadId)); + }, + (eTag, testUploadId, next) => { + origDeleteObject = metadataBackend.deleteObject; + metadataBackend.deleteObject = (bucketName, objName, params, log, cb) => { // prevent deletions from MPU bucket only - if (bucketName === mpuBucket) { - return process.nextTick( - () => cb(errors.InternalError)); + if (bucketName === mpuBucket) { + return process.nextTick(() => cb(errors.InternalError)); + } + return origDeleteObject(bucketName, objName, params, log, cb); + }; + const parts = [{ partNumber: 1, eTag }]; + const completeRequest = _createCompleteMpuRequest(testUploadId, parts); + completeMultipartUpload(authInfo, completeRequest, log, err => { + // expect a failure here because we could not + // remove the overview key + assert.strictEqual(err.is.InternalError, true); + next(null, eTag, testUploadId); + }); + }, + (eTag, testUploadId, next) => { + // allow MPU bucket metadata deletions to happen again + metadataBackend.deleteObject = origDeleteObject; + // retry the completeMultipartUpload with the same + // metadata, as an application would normally do after + // a failure + const parts = [{ partNumber: 1, eTag }]; + const completeRequest = _createCompleteMpuRequest(testUploadId, parts); + completeMultipartUpload(authInfo, completeRequest, log, next); + }, + ], + err => { + assert.ifError(err); + let nbVersions = 0; + for (const key of metadata.keyMaps.get(bucketName).keys()) { + if (key !== objectKey && key.startsWith(objectKey)) { 
+ nbVersions += 1; } - return origDeleteObject( - bucketName, objName, params, log, cb); - }; - const parts = [{ partNumber: 1, eTag }]; - const completeRequest = _createCompleteMpuRequest( - testUploadId, parts); - completeMultipartUpload(authInfo, completeRequest, log, err => { - // expect a failure here because we could not - // remove the overview key - assert.strictEqual(err.is.InternalError, true); - next(null, eTag, testUploadId); - }); - }, - (eTag, testUploadId, next) => { - // allow MPU bucket metadata deletions to happen again - metadataBackend.deleteObject = origDeleteObject; - // retry the completeMultipartUpload with the same - // metadata, as an application would normally do after - // a failure - const parts = [{ partNumber: 1, eTag }]; - const completeRequest = _createCompleteMpuRequest( - testUploadId, parts); - completeMultipartUpload(authInfo, completeRequest, log, next); - }, - ], err => { - assert.ifError(err); - let nbVersions = 0; - for (const key of metadata.keyMaps.get(bucketName).keys()) { - if (key !== objectKey && key.startsWith(objectKey)) { - nbVersions += 1; } + // There should be only one version of the object, since + // the second call should not have created a new version + assert.strictEqual(nbVersions, 1); + for (const key of metadata.keyMaps.get(mpuBucket).keys()) { + assert.fail('There should be no more keys in MPU bucket, ' + `found "${key}"`); + } + done(); } - // There should be only one version of the object, since - // the second call should not have created a new version - assert.strictEqual(nbVersions, 1); - for (const key of metadata.keyMaps.get(mpuBucket).keys()) { - assert.fail('There should be no more keys in MPU bucket, ' + - `found "${key}"`); - } - done(); - }); + ); }); - it('should complete an MPU and promote its MD if it has been created by a failed complete before' + - 'without creating a new version', done => { - const delMeta = metadataBackend.deleteObject; - metadataBackend.deleteObject = (bucketName, objName, params, log, cb) => cb(errors.InternalError); - const partBody = Buffer.from('I am a part\n', 'utf8'); - initiateRequest.headers['x-amz-meta-stuff'] = - 'I am some user metadata'; - async.waterfall([ - next => bucketPutVersioning(authInfo, - enableVersioningRequest, log, err => next(err)), - next => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - assert.ifError(err); - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5').update(partBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, partBody); - objectPutPart(authInfo, partRequest, undefined, log, () => { - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - ''; - const completeRequest = { - bucketName, - namespace, - objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - actionImplicitDenies: false, - }; - completeMultipartUpload(authInfo, - completeRequest, log, err => { - assert(err.is.InternalError); - const MD = metadata.keyMaps.get(bucketName) - 
.get(objectKey); - assert(MD); - const firstVersionId = MD.versionId; - assert.strictEqual(MD['x-amz-meta-stuff'], - 'I am some user metadata'); - assert.strictEqual(MD.uploadId, testUploadId); - metadataBackend.deleteObject = delMeta; - assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2); - completeMultipartUpload(authInfo, - completeRequest, log, err => { + it( + 'should complete an MPU and promote its MD if it has been created by a failed complete before' + + 'without creating a new version', + done => { + const delMeta = metadataBackend.deleteObject; + metadataBackend.deleteObject = (bucketName, objName, params, log, cb) => cb(errors.InternalError); + const partBody = Buffer.from('I am a part\n', 'utf8'); + initiateRequest.headers['x-amz-meta-stuff'] = 'I am some user metadata'; + async.waterfall( + [ + next => bucketPutVersioning(authInfo, enableVersioningRequest, log, err => next(err)), + next => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + assert.ifError(err); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5').update(partBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + partBody + ); + objectPutPart(authInfo, partRequest, undefined, log, () => { + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + actionImplicitDenies: false, + }; + completeMultipartUpload(authInfo, completeRequest, log, err => { + assert(err.is.InternalError); + const MD = metadata.keyMaps.get(bucketName).get(objectKey); + assert(MD); + const firstVersionId = MD.versionId; + assert.strictEqual(MD['x-amz-meta-stuff'], 'I am some user metadata'); + assert.strictEqual(MD.uploadId, testUploadId); + metadataBackend.deleteObject = delMeta; + assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 2); + completeMultipartUpload(authInfo, completeRequest, log, err => { assert.ifError(err); - const MD = metadata.keyMaps.get(bucketName) - .get(objectKey); + const MD = metadata.keyMaps.get(bucketName).get(objectKey); assert(MD); assert.strictEqual(MD.versionId, firstVersionId); - assert.strictEqual(MD['x-amz-meta-stuff'], - 'I am some user metadata'); + assert.strictEqual(MD['x-amz-meta-stuff'], 'I am some user metadata'); assert.strictEqual(metadata.keyMaps.get(mpuBucket).size, 0); done(); }); + }); }); - }); - }); - }); + } + ); + } + ); }); describe('multipart upload with object lock', () => { @@ -2494,82 +2548,74 @@ describe('multipart upload with object lock', () => { after(cleanup); - it('mpu object should contain retention info when mpu initiated with ' + - 'object retention', done => { + it('mpu object should contain retention info when mpu initiated with ' + 'object retention', done => { let versionId; - async.waterfall([ - next => initiateMultipartUpload(authInfo, retentionInitiateRequest, - log, next), - (result, corsHeaders, next) => parseString(result, next), - 
(json, next) => { - const partBody = Buffer.from('foobar', 'utf8'); - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const partRequest = _createPutPartRequest(testUploadId, 1, - partBody); - partRequest.bucketName = lockedBucket; - partRequest.headers = { host: `${lockedBucket}.s3.amazonaws.com` }; - objectPutPart(authInfo, partRequest, undefined, log, - (err, eTag) => next(err, eTag, testUploadId)); - }, - (eTag, testUploadId, next) => { - const parts = [{ partNumber: 1, eTag }]; - const completeRequest = _createCompleteMpuRequest(testUploadId, - parts); - completeRequest.bucketName = lockedBucket; - completeRequest.headers = { host: `${lockedBucket}.s3.amazonaws.com` }; - completeMultipartUpload(authInfo, completeRequest, log, next); - }, - (xml, headers, next) => { - versionId = headers['x-amz-version-id']; - getObjectRetention(authInfo, getObjectLockInfoRequest, log, next); - }, - (result, corsHeaders, next) => parseString(result, next), - ], (err, json) => { - assert.ifError(err); - assert.deepStrictEqual(json.Retention, expectedRetentionConfig); - changeObjectLock( - [{ bucket: lockedBucket, key: objectKey, versionId }], '', done); - }); + async.waterfall( + [ + next => initiateMultipartUpload(authInfo, retentionInitiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + const partBody = Buffer.from('foobar', 'utf8'); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partRequest = _createPutPartRequest(testUploadId, 1, partBody); + partRequest.bucketName = lockedBucket; + partRequest.headers = { host: `${lockedBucket}.s3.amazonaws.com` }; + objectPutPart(authInfo, partRequest, undefined, log, (err, eTag) => next(err, eTag, testUploadId)); + }, + (eTag, testUploadId, next) => { + const parts = [{ partNumber: 1, eTag }]; + const completeRequest = _createCompleteMpuRequest(testUploadId, parts); + completeRequest.bucketName = lockedBucket; + completeRequest.headers = { host: `${lockedBucket}.s3.amazonaws.com` }; + completeMultipartUpload(authInfo, completeRequest, log, next); + }, + (xml, headers, next) => { + versionId = headers['x-amz-version-id']; + getObjectRetention(authInfo, getObjectLockInfoRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + assert.ifError(err); + assert.deepStrictEqual(json.Retention, expectedRetentionConfig); + changeObjectLock([{ bucket: lockedBucket, key: objectKey, versionId }], '', done); + } + ); }); - it('mpu object should contain legal hold info when mpu initiated with ' + - 'legal hold', done => { + it('mpu object should contain legal hold info when mpu initiated with ' + 'legal hold', done => { let versionId; - async.waterfall([ - next => initiateMultipartUpload(authInfo, legalHoldInitiateRequest, - log, next), - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - const partBody = Buffer.from('foobar', 'utf8'); - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const partRequest = _createPutPartRequest(testUploadId, 1, - partBody); - partRequest.bucketName = lockedBucket; - partRequest.headers = { host: `${lockedBucket}.s3.amazonaws.com` }; - objectPutPart(authInfo, partRequest, undefined, log, - (err, eTag) => next(err, eTag, testUploadId)); - }, - (eTag, testUploadId, next) => { - const parts = [{ partNumber: 1, eTag }]; - const completeRequest = _createCompleteMpuRequest(testUploadId, - parts); - completeRequest.bucketName = 
lockedBucket; - completeRequest.headers = { host: `${lockedBucket}.s3.amazonaws.com` }; - completeMultipartUpload(authInfo, completeRequest, log, next); - }, - (xml, headers, next) => { - versionId = headers['x-amz-version-id']; - getObjectLegalHold(authInfo, getObjectLockInfoRequest, log, next); - }, - (result, corsHeaders, next) => parseString(result, next), - ], (err, json) => { - assert.ifError(err); - assert.deepStrictEqual(json.LegalHold, expectedLegalHold); - changeObjectLock( - [{ bucket: lockedBucket, key: objectKey, versionId }], '', done); - }); + async.waterfall( + [ + next => initiateMultipartUpload(authInfo, legalHoldInitiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + const partBody = Buffer.from('foobar', 'utf8'); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partRequest = _createPutPartRequest(testUploadId, 1, partBody); + partRequest.bucketName = lockedBucket; + partRequest.headers = { host: `${lockedBucket}.s3.amazonaws.com` }; + objectPutPart(authInfo, partRequest, undefined, log, (err, eTag) => next(err, eTag, testUploadId)); + }, + (eTag, testUploadId, next) => { + const parts = [{ partNumber: 1, eTag }]; + const completeRequest = _createCompleteMpuRequest(testUploadId, parts); + completeRequest.bucketName = lockedBucket; + completeRequest.headers = { host: `${lockedBucket}.s3.amazonaws.com` }; + completeMultipartUpload(authInfo, completeRequest, log, next); + }, + (xml, headers, next) => { + versionId = headers['x-amz-version-id']; + getObjectLegalHold(authInfo, getObjectLockInfoRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + assert.ifError(err); + assert.deepStrictEqual(json.LegalHold, expectedLegalHold); + changeObjectLock([{ bucket: lockedBucket, key: objectKey, versionId }], '', done); + } + ); }); }); @@ -2587,46 +2633,56 @@ describe('multipart upload overheadField', () => { }); it('should pass overheadField', done => { - async.waterfall([ - next => bucketPut(authInfo, bucketPutRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), - (result, corsHeaders, next) => { - const mpuKeys = metadata.keyMaps.get(mpuBucket); - assert.strictEqual(mpuKeys.size, 1); - assert(mpuKeys.keys().next().value - .startsWith(`overview${splitter}${objectKey}`)); - parseString(result, next); - }, - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5'); - const bufferBody = Buffer.from(postBody); - md5Hash.update(bufferBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - objectKey, - namespace, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { - partNumber: '1', - uploadId: testUploadId, + async.waterfall( + [ + next => bucketPut(authInfo, bucketPutRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), + (result, corsHeaders, next) => { + const mpuKeys = metadata.keyMaps.get(mpuBucket); + assert.strictEqual(mpuKeys.size, 1); + assert(mpuKeys.keys().next().value.startsWith(`overview${splitter}${objectKey}`)); + parseString(result, next); }, - calculatedHash, - }, postBody); - objectPutPart(authInfo, partRequest, 
undefined, log, err => { + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here assert.ifError(err); - sinon.assert.calledWith(metadataswitch.putObjectMD.lastCall, - any, any, any, sinon.match({ overheadField: sinon.match.array }), any, any); - done(); - }); - }); + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5'); + const bufferBody = Buffer.from(postBody); + md5Hash.update(bufferBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + { + bucketName, + objectKey, + namespace, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + postBody + ); + objectPutPart(authInfo, partRequest, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadataswitch.putObjectMD.lastCall, + any, + any, + any, + sinon.match({ overheadField: sinon.match.array }), + any, + any + ); + done(); + }); + } + ); }); }); @@ -2653,12 +2709,13 @@ describe('complete mpu with bucket policy', () => { const partBody = Buffer.from('I am a part\n', 'utf8'); const md5Hash = crypto.createHash('md5').update(partBody); const calculatedHash = md5Hash.digest('hex'); - const completeBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + - ''; + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + ''; beforeEach(done => { cleanup(); @@ -2685,56 +2742,64 @@ describe('complete mpu with bucket policy', () => { /** root user doesn't check bucket policy */ const authNotRoot = makeAuthInfo(canonicalID, 'not-root'); - async.waterfall([ - next => bucketPutPolicy(authInfo, - bucketPutPolicyRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authNotRoot, - initiateReqFixed, log, next), - (result, corsHeaders, next) => parseString(result, next), - (json, next) => { - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const partRequest = new DummyRequest(Object.assign({ - socket: { - remoteAddress: '1.1.1.1', - }, - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - calculatedHash, - }, requestFix), partBody); - objectPutPart(authNotRoot, partRequest, - undefined, log, err => next(err, testUploadId)); - }, - (testUploadId, next) => { - const completeRequest = new DummyRequest(Object.assign({ - socket: { - remoteAddress: '1.1.1.1', - }, - bucketName, - namespace, - objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - actionImplicitDenies: false, - }, requestFix)); - completeMultipartUpload(authNotRoot, completeRequest, - log, next); - }, - ], - err => { - assert.ifError(err); - done(); - }); + async.waterfall( + [ + next => bucketPutPolicy(authInfo, bucketPutPolicyRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authNotRoot, initiateReqFixed, log, next), + (result, corsHeaders, next) => parseString(result, next), + (json, next) => { + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partRequest = new DummyRequest( + Object.assign( + { + socket: { + remoteAddress: '1.1.1.1', + }, + bucketName, + 
namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, + }, + requestFix + ), + partBody + ); + objectPutPart(authNotRoot, partRequest, undefined, log, err => next(err, testUploadId)); + }, + (testUploadId, next) => { + const completeRequest = new DummyRequest( + Object.assign( + { + socket: { + remoteAddress: '1.1.1.1', + }, + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + actionImplicitDenies: false, + }, + requestFix + ) + ); + completeMultipartUpload(authNotRoot, completeRequest, log, next); + }, + ], + err => { + assert.ifError(err); + done(); + } + ); }); it('should set bucketOwnerId if requester is not destination bucket owner', done => { @@ -2750,67 +2815,77 @@ describe('complete mpu with bucket policy', () => { }, ], }); - async.waterfall([ - next => bucketPutPolicy(authInfo, bucketPutPolicyRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfoOtherAcc, - initiateRequest, log, next), - (result, corsHeaders, next) => parseString(result, next), - ], - (err, json) => { - // Need to build request in here since do not have uploadId - // until here - assert.ifError(err); - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; - const md5Hash = crypto.createHash('md5').update(partBody); - const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest(Object.assign({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, - }, - // Note that the body of the post set in the request here does - // not really matter in this test. - // The put is not going through the route so the md5 is being - // calculated above and manually being set in the request below. - // What is being tested is that the calculatedHash being sent - // to the API for the part is stored and then used to - // calculate the final ETag upon completion - // of the multipart upload. 
- calculatedHash, - socket: { - remoteAddress: '1.1.1.1', - }, - }, requestFix), partBody); - objectPutPart(authInfoOtherAcc, partRequest, undefined, log, err => { + async.waterfall( + [ + next => bucketPutPolicy(authInfo, bucketPutPolicyRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfoOtherAcc, initiateRequest, log, next), + (result, corsHeaders, next) => parseString(result, next), + ], + (err, json) => { + // Need to build request in here since do not have uploadId + // until here assert.ifError(err); - const completeBody = '<CompleteMultipartUpload>' + - '<Part>' + - '<PartNumber>1</PartNumber>' + - `<ETag>"${calculatedHash}"</ETag>` + - '</Part>' + - '</CompleteMultipartUpload>'; - const completeRequest = new DummyRequest(Object.assign({ - bucketName, - namespace, - objectKey, - parsedHost: 's3.amazonaws.com', - url: `/${objectKey}?uploadId=${testUploadId}`, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - query: { uploadId: testUploadId }, - post: completeBody, - actionImplicitDenies: false, - socket: { - remoteAddress: '1.1.1.1', - }, - }, requestFix)); - completeMultipartUpload(authInfoOtherAcc, - completeRequest, log, err => { + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const md5Hash = crypto.createHash('md5').update(partBody); + const calculatedHash = md5Hash.digest('hex'); + const partRequest = new DummyRequest( + Object.assign( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=1&uploadId=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + // Note that the body of the post set in the request here does + // not really matter in this test. + // The put is not going through the route so the md5 is being + // calculated above and manually being set in the request below. + // What is being tested is that the calculatedHash being sent + // to the API for the part is stored and then used to + // calculate the final ETag upon completion + // of the multipart upload.
+ calculatedHash, + socket: { + remoteAddress: '1.1.1.1', + }, + }, + requestFix + ), + partBody + ); + objectPutPart(authInfoOtherAcc, partRequest, undefined, log, err => { + assert.ifError(err); + const completeBody = + '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + + ''; + const completeRequest = new DummyRequest( + Object.assign( + { + bucketName, + namespace, + objectKey, + parsedHost: 's3.amazonaws.com', + url: `/${objectKey}?uploadId=${testUploadId}`, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + query: { uploadId: testUploadId }, + post: completeBody, + actionImplicitDenies: false, + socket: { + remoteAddress: '1.1.1.1', + }, + }, + requestFix + ) + ); + completeMultipartUpload(authInfoOtherAcc, completeRequest, log, err => { assert.ifError(err); sinon.assert.calledWith( metadataswitch.putObjectMD.lastCall, @@ -2823,8 +2898,9 @@ describe('complete mpu with bucket policy', () => { ); done(); }); - }); - }); + }); + } + ); }); }); @@ -2839,10 +2915,16 @@ describe('multipart upload in ingestion bucket', () => { versionID = versioning.VersionID.encode(versioning.VersionID.generateVersionId('0', '')); // Setup multi-backend, this is required for ingestion - data.switch(new storage.data.MultipleBackendGateway({ - 'us-east-1': dataClient, - 'us-east-2': dataClient, - }, metadata, data.locStorageCheckFn)); + data.switch( + new storage.data.MultipleBackendGateway( + { + 'us-east-1': dataClient, + 'us-east-2': dataClient, + }, + metadata, + data.locStorageCheckFn + ) + ); data.implName = 'multipleBackends'; // "mock" the data location, simulating a backend supporting MPU @@ -2880,17 +2962,19 @@ describe('multipart upload in ingestion bucket', () => { sinon.restore(); }); - const newPutIngestBucketRequest = location => new DummyRequest({ - bucketName, - namespace, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: '/', - post: '' + - '' + - `${location}` + - '', - }); + const newPutIngestBucketRequest = location => + new DummyRequest({ + bucketName, + namespace, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: '/', + post: + '' + + '' + + `${location}` + + '', + }); const archiveRestoreRequested = { archiveInfo: { foo: 0, bar: 'stuff' }, // opaque, can be anything... 
restoreRequestedAt: new Date().toString(), diff --git a/tests/unit/api/objectACLauth.js b/tests/unit/api/objectACLauth.js index f4ef2de207..4b1c4cef23 100644 --- a/tests/unit/api/objectACLauth.js +++ b/tests/unit/api/objectACLauth.js @@ -2,8 +2,7 @@ const assert = require('assert'); const BucketInfo = require('arsenal').models.BucketInfo; const constants = require('../../../constants'); -const { isObjAuthorized } - = require('../../../lib/api/apiUtils/authorization/permissionChecks'); +const { isObjAuthorized } = require('../../../lib/api/apiUtils/authorization/permissionChecks'); const { DummyRequestLogger, makeAuthInfo } = require('../helpers'); const accessKey = 'accessKey1'; @@ -15,12 +14,11 @@ const userAuthInfo = makeAuthInfo(accessKey, 'user'); const altAcctAuthInfo = makeAuthInfo(altAccessKey); const accountToVet = altAcctAuthInfo.getCanonicalID(); -const bucket = new BucketInfo('niftyBucket', bucketOwnerCanonicalId, - 'iAmTheOwnerDisplayName', creationDate); +const bucket = new BucketInfo('niftyBucket', bucketOwnerCanonicalId, 'iAmTheOwnerDisplayName', creationDate); const objectOwnerCanonicalId = userAuthInfo.getCanonicalID(); const object = { 'owner-id': objectOwnerCanonicalId, - 'acl': { + acl: { Canned: 'private', FULL_CONTROL: [], WRITE_ACP: [], @@ -46,60 +44,61 @@ describe('object acl authorization for objectGet and objectHead', () => { it('should allow access to object owner', () => { const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, objectOwnerCanonicalId, - authInfo, log)); + isObjAuthorized(bucket, object, type, objectOwnerCanonicalId, authInfo, log) + ); assert.deepStrictEqual(results, [true, true]); }); it('should allow access to user in object owner account', () => { const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, objectOwnerCanonicalId, - userAuthInfo, log)); + isObjAuthorized(bucket, object, type, objectOwnerCanonicalId, userAuthInfo, log) + ); assert.deepStrictEqual(results, [true, true]); }); it('should allow access to bucket owner if same account as object owner', () => { const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, bucketOwnerCanonicalId, - authInfo, log)); + isObjAuthorized(bucket, object, type, bucketOwnerCanonicalId, authInfo, log) + ); assert.deepStrictEqual(results, [true, true]); }); it('should allow access to anyone if canned public-read ACL', () => { object.acl.Canned = 'public-read'; const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(results, [true, true]); }); it('should allow access to anyone if canned public-read-write ACL', () => { object.acl.Canned = 'public-read-write'; const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(results, [true, true]); }); - it('should not allow access to public user if ' + - 'authenticated-read ACL', () => { + it('should not allow access to public user if ' + 'authenticated-read ACL', () => { object.acl.Canned = 'authenticated-read'; const publicResults = requestTypes.map(type => - isObjAuthorized(bucket, object, type, constants.publicId, null, log)); + isObjAuthorized(bucket, object, type, constants.publicId, null, log) + ); assert.deepStrictEqual(publicResults, [false, false]); }); - 
it('should allow access to any authenticated user if ' + - 'authenticated-read ACL', () => { + it('should allow access to any authenticated user if ' + 'authenticated-read ACL', () => { object.acl.Canned = 'authenticated-read'; const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(results, [true, true]); }); - it('should allow access to bucker owner when object owner is alt account if ' + - 'bucket-owner-read ACL', () => { + it('should allow access to bucker owner when object owner is alt account if ' + 'bucket-owner-read ACL', () => { const altAcctObj = { 'owner-id': accountToVet, - 'acl': { + acl: { Canned: 'private', FULL_CONTROL: [], WRITE_ACP: [], @@ -108,70 +107,76 @@ describe('object acl authorization for objectGet and objectHead', () => { }, }; const noAuthResults = requestTypes.map(type => - isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, - log)); + isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, log) + ); assert.deepStrictEqual(noAuthResults, [false, false]); altAcctObj.acl.Canned = 'bucket-owner-read'; const authResults = requestTypes.map(type => - isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, - log)); + isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, log) + ); assert.deepStrictEqual(authResults, [true, true]); }); - it('should allow access to bucker owner when object owner is alt account if ' + - 'bucket-owner-full-control ACL', () => { - const altAcctObj = { - 'owner-id': accountToVet, - 'acl': { - Canned: 'private', - FULL_CONTROL: [], - WRITE_ACP: [], - READ: [], - READ_ACP: [], - }, - }; - const noAuthResults = requestTypes.map(type => - isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, - log)); - assert.deepStrictEqual(noAuthResults, [false, false]); - altAcctObj.acl.Canned = 'bucket-owner-full-control'; - const authResults = requestTypes.map(type => - isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, - log)); - assert.deepStrictEqual(authResults, [true, true]); - }); - - it('should allow access to account if ' + - 'account was granted FULL_CONTROL', () => { + it( + 'should allow access to bucker owner when object owner is alt account if ' + 'bucket-owner-full-control ACL', + () => { + const altAcctObj = { + 'owner-id': accountToVet, + acl: { + Canned: 'private', + FULL_CONTROL: [], + WRITE_ACP: [], + READ: [], + READ_ACP: [], + }, + }; + const noAuthResults = requestTypes.map(type => + isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, log) + ); + assert.deepStrictEqual(noAuthResults, [false, false]); + altAcctObj.acl.Canned = 'bucket-owner-full-control'; + const authResults = requestTypes.map(type => + isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, log) + ); + assert.deepStrictEqual(authResults, [true, true]); + } + ); + + it('should allow access to account if ' + 'account was granted FULL_CONTROL', () => { const noAuthResults = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(noAuthResults, [false, false]); object.acl.FULL_CONTROL = [accountToVet]; const authResults = requestTypes.map(type => - isObjAuthorized(bucket, object, type, 
accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(authResults, [true, true]); }); - it('should allow access to account if ' + - 'account was granted READ right', () => { + it('should allow access to account if ' + 'account was granted READ right', () => { const noAuthResults = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(noAuthResults, [false, false]); object.acl.READ = [accountToVet]; const authResults = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(authResults, [true, true]); }); it('should not allow access to public user if private canned ACL', () => { const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(results, [false, false]); }); it('should not allow access to just any user if private canned ACL', () => { const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(results, [false, false]); }); }); @@ -180,11 +185,12 @@ describe('object authorization for objectPut and objectDelete', () => { it('should allow access when no implicitDeny information is provided', () => { const requestTypes = ['objectPut', 'objectDelete']; const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(results, [true, true]); const publicUserResults = requestTypes.map(type => - isObjAuthorized(bucket, object, type, constants.publicId, null, - log)); + isObjAuthorized(bucket, object, type, constants.publicId, null, log) + ); assert.deepStrictEqual(publicUserResults, [true, true]); }); }); @@ -206,71 +212,69 @@ describe('object authorization for objectPutACL and objectGetACL', () => { it('should allow access to object owner', () => { const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, objectOwnerCanonicalId, - authInfo, log)); + isObjAuthorized(bucket, object, type, objectOwnerCanonicalId, authInfo, log) + ); assert.deepStrictEqual(results, [true, true]); }); it('should allow access to user in object owner account', () => { const results = requestTypes.map(type => - isObjAuthorized(bucket, object, type, objectOwnerCanonicalId, - userAuthInfo, log)); + isObjAuthorized(bucket, object, type, objectOwnerCanonicalId, userAuthInfo, log) + ); assert.deepStrictEqual(results, [true, true]); }); - it('should allow access to bucket owner when object owner is alt account if ' + - 'bucket-owner-full-control canned ACL set on object', () => { - const altAcctObj = { - 'owner-id': accountToVet, - 'acl': { - Canned: 'private', - FULL_CONTROL: [], - WRITE_ACP: [], - READ: [], - READ_ACP: [], - }, - }; - const noAuthResults = requestTypes.map(type => - isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, - log)); - assert.deepStrictEqual(noAuthResults, [false, false]); - 
altAcctObj.acl.Canned = 'bucket-owner-full-control'; - const authorizedResults = requestTypes.map(type => - isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, - null, log)); - assert.deepStrictEqual(authorizedResults, [true, true]); - }); - - it('should allow access to account if ' + - 'account was granted FULL_CONTROL right', () => { + it( + 'should allow access to bucket owner when object owner is alt account if ' + + 'bucket-owner-full-control canned ACL set on object', + () => { + const altAcctObj = { + 'owner-id': accountToVet, + acl: { + Canned: 'private', + FULL_CONTROL: [], + WRITE_ACP: [], + READ: [], + READ_ACP: [], + }, + }; + const noAuthResults = requestTypes.map(type => + isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, log) + ); + assert.deepStrictEqual(noAuthResults, [false, false]); + altAcctObj.acl.Canned = 'bucket-owner-full-control'; + const authorizedResults = requestTypes.map(type => + isObjAuthorized(bucket, altAcctObj, type, bucketOwnerCanonicalId, authInfo, null, log) + ); + assert.deepStrictEqual(authorizedResults, [true, true]); + } + ); + + it('should allow access to account if ' + 'account was granted FULL_CONTROL right', () => { const noAuthResults = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(noAuthResults, [false, false]); object.acl.FULL_CONTROL = [accountToVet]; const authorizedResults = requestTypes.map(type => - isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log)); + isObjAuthorized(bucket, object, type, accountToVet, altAcctAuthInfo, log) + ); assert.deepStrictEqual(authorizedResults, [true, true]); }); - it('should allow objectPutACL access to account if ' + - 'account was granted WRITE_ACP right', () => { - const noAuthResult = isObjAuthorized(bucket, object, 'objectPutACL', - accountToVet, altAcctAuthInfo, log); + it('should allow objectPutACL access to account if ' + 'account was granted WRITE_ACP right', () => { + const noAuthResult = isObjAuthorized(bucket, object, 'objectPutACL', accountToVet, altAcctAuthInfo, log); assert.strictEqual(noAuthResult, false); object.acl.WRITE_ACP = [accountToVet]; - const authorizedResult = isObjAuthorized(bucket, object, 'objectPutACL', - accountToVet, altAcctAuthInfo, log); + const authorizedResult = isObjAuthorized(bucket, object, 'objectPutACL', accountToVet, altAcctAuthInfo, log); assert.strictEqual(authorizedResult, true); }); - it('should allow objectGetACL access to account if ' + - 'account was granted READ_ACP right', () => { - const noAuthResult = isObjAuthorized(bucket, object, 'objectGetACL', - accountToVet, altAcctAuthInfo, log); + it('should allow objectGetACL access to account if ' + 'account was granted READ_ACP right', () => { + const noAuthResult = isObjAuthorized(bucket, object, 'objectGetACL', accountToVet, altAcctAuthInfo, log); assert.strictEqual(noAuthResult, false); object.acl.READ_ACP = [accountToVet]; - const authorizedResult = isObjAuthorized(bucket, object, 'objectGetACL', - accountToVet, altAcctAuthInfo, log); + const authorizedResult = isObjAuthorized(bucket, object, 'objectGetACL', accountToVet, altAcctAuthInfo, log); assert.strictEqual(authorizedResult, true); }); }); @@ -288,12 +292,7 @@ describe('without object metadata', () => { bucket.setBucketPolicy(null); }); - const requestTypes = [ - 'objectGet', - 'objectHead', - 'objectPutACL', - 
'objectGetACL', - ]; + const requestTypes = ['objectGet', 'objectHead', 'objectPutACL', 'objectGetACL']; const allowedAccess = [true, true, true, true]; const deniedAccess = [false, false, false, false]; @@ -301,77 +300,87 @@ describe('without object metadata', () => { const tests = [ { it: 'should allow user if part of the bucket owner account', - canned: 'private', id: objectOwnerCanonicalId, + canned: 'private', + id: objectOwnerCanonicalId, authInfo: userAuthInfo, aclParam: null, response: allowedAccess, }, { it: 'should not allow user if not part of the bucket owner account', - canned: 'private', id: accountToVet, + canned: 'private', + id: accountToVet, authInfo: altAcctAuthInfo, aclParam: null, response: deniedAccess, }, { it: 'should allow bucket owner', - canned: 'private', id: bucketOwnerCanonicalId, + canned: 'private', + id: bucketOwnerCanonicalId, aclParam: null, response: allowedAccess, }, { it: 'should not allow public if canned private', - canned: 'private', id: constants.publicId, + canned: 'private', + id: constants.publicId, aclParam: null, response: deniedAccess, }, { it: 'should not allow other accounts if canned private', - canned: 'private', id: accountToVet, + canned: 'private', + id: accountToVet, aclParam: null, response: deniedAccess, }, { it: 'should allow public if bucket is canned public-read', - canned: 'public-read', id: constants.publicId, + canned: 'public-read', + id: constants.publicId, aclParam: null, response: allowedAccess, }, { it: 'should allow public if bucket is canned public-read-write', - canned: 'public-read-write', id: constants.publicId, + canned: 'public-read-write', + id: constants.publicId, aclParam: null, response: allowedAccess, }, { - it: 'should not allow public if bucket is canned ' + - 'authenticated-read', - canned: 'authenticated-read', id: constants.publicId, + it: 'should not allow public if bucket is canned ' + 'authenticated-read', + canned: 'authenticated-read', + id: constants.publicId, aclParam: null, response: deniedAccess, }, { - it: 'should allow authenticated users if bucket is canned ' + - 'authenticated-read', - canned: 'authenticated-read', id: accountToVet, + it: 'should allow authenticated users if bucket is canned ' + 'authenticated-read', + canned: 'authenticated-read', + id: accountToVet, aclParam: null, response: allowedAccess, }, { it: 'should allow account if granted bucket READ', - canned: '', id: accountToVet, + canned: '', + id: accountToVet, aclParam: ['READ', accountToVet], response: allowedAccess, }, { it: 'should allow account if granted bucket FULL_CONTROL', - canned: '', id: accountToVet, + canned: '', + id: accountToVet, aclParam: ['FULL_CONTROL', accountToVet], response: allowedAccess, }, { it: 'should allow public if granted bucket read action in policy', - canned: 'private', id: constants.publicId, + canned: 'private', + id: constants.publicId, aclParam: null, policy: { Version: '2012-10-17', @@ -388,7 +397,8 @@ describe('without object metadata', () => { }, { it: 'should not allow public if denied bucket read action in policy', - canned: 'public-read', id: constants.publicId, + canned: 'public-read', + id: constants.publicId, aclParam: null, policy: { Version: '2012-10-17', @@ -405,7 +415,8 @@ describe('without object metadata', () => { }, { it: 'should allow account if granted bucket read action in policy', - canned: 'private', id: accountToVet, + canned: 'private', + id: accountToVet, aclParam: null, policy: { Version: '2012-10-17', @@ -423,7 +434,8 @@ describe('without object metadata', () => 
{ }, { it: 'should not allow account if denied bucket read action in policy', - canned: 'public-read', id: accountToVet, + canned: 'public-read', + id: accountToVet, aclParam: null, policy: { Version: '2012-10-17', @@ -454,8 +466,7 @@ describe('without object metadata', () => { } bucket.setCannedAcl(value.canned); - const results = requestTypes.map(type => - isObjAuthorized(bucket, null, type, value.id, authInfoUser, log)); + const results = requestTypes.map(type => isObjAuthorized(bucket, null, type, value.id, authInfoUser, log)); assert.deepStrictEqual(results, value.response); done(); }); @@ -473,8 +484,7 @@ describe('without object metadata', () => { }, ], }); - const results = isObjAuthorized(bucket, null, 'initiateMultipartUpload', - accountToVet, altAcctAuthInfo, log); + const results = isObjAuthorized(bucket, null, 'initiateMultipartUpload', accountToVet, altAcctAuthInfo, log); assert.strictEqual(results, true); }); @@ -490,8 +500,7 @@ describe('without object metadata', () => { }, ], }); - const results = isObjAuthorized(bucket, null, 'objectPutPart', - accountToVet, altAcctAuthInfo, log); + const results = isObjAuthorized(bucket, null, 'objectPutPart', accountToVet, altAcctAuthInfo, log); assert.strictEqual(results, true); }); @@ -507,8 +516,7 @@ describe('without object metadata', () => { }, ], }); - const results = isObjAuthorized(bucket, null, 'completeMultipartUpload', - accountToVet, altAcctAuthInfo, log); + const results = isObjAuthorized(bucket, null, 'completeMultipartUpload', accountToVet, altAcctAuthInfo, log); assert.strictEqual(results, true); }); }); diff --git a/tests/unit/api/objectCopy.js b/tests/unit/api/objectCopy.js index 19f01ae80b..9eba530b24 100644 --- a/tests/unit/api/objectCopy.js +++ b/tests/unit/api/objectCopy.js @@ -9,8 +9,7 @@ const bucketPutPolicy = require('../../../lib/api/bucketPutPolicy'); const objectPut = require('../../../lib/api/objectPut'); const objectCopy = require('../../../lib/api/objectCopy'); const DummyRequest = require('../DummyRequest'); -const { cleanup, DummyRequestLogger, makeAuthInfo, versioningTestUtils } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, versioningTestUtils } = require('../helpers'); const mpuUtils = require('../utils/mpuUtils'); const metadata = require('../metadataswitch'); const { data } = require('../../../lib/data/wrapper'); @@ -53,50 +52,41 @@ function _createObjectCopyRequest(destBucketName) { const putDestBucketRequest = _createBucketPutRequest(destBucketName); const putSourceBucketRequest = _createBucketPutRequest(sourceBucketName); -const enableVersioningRequest = versioningTestUtils - .createBucketPutVersioningReq(destBucketName, 'Enabled'); -const suspendVersioningRequest = versioningTestUtils - .createBucketPutVersioningReq(destBucketName, 'Suspended'); -const objData = ['foo0', 'foo1', 'foo2'].map(str => - Buffer.from(str, 'utf8')); - +const enableVersioningRequest = versioningTestUtils.createBucketPutVersioningReq(destBucketName, 'Enabled'); +const suspendVersioningRequest = versioningTestUtils.createBucketPutVersioningReq(destBucketName, 'Suspended'); +const objData = ['foo0', 'foo1', 'foo2'].map(str => Buffer.from(str, 'utf8')); describe('objectCopy with versioning', () => { - const testPutObjectRequests = objData.slice(0, 2).map(data => - versioningTestUtils.createPutObjectRequest(destBucketName, objectKey, - data)); - testPutObjectRequests.push(versioningTestUtils - .createPutObjectRequest(sourceBucketName, objectKey, objData[2])); + const testPutObjectRequests = 
objData + .slice(0, 2) + .map(data => versioningTestUtils.createPutObjectRequest(destBucketName, objectKey, data)); + testPutObjectRequests.push(versioningTestUtils.createPutObjectRequest(sourceBucketName, objectKey, objData[2])); before(done => { cleanup(); sinon.spy(metadata, 'putObjectMD'); - async.series([ - callback => bucketPut(authInfo, putDestBucketRequest, log, - callback), - callback => bucketPut(authInfo, putSourceBucketRequest, log, - callback), - // putting null version: put obj before versioning configured - // in dest bucket - callback => objectPut(authInfo, testPutObjectRequests[0], - undefined, log, callback), - callback => bucketPutVersioning(authInfo, - enableVersioningRequest, log, callback), - // put another version in dest bucket: - callback => objectPut(authInfo, testPutObjectRequests[1], - undefined, log, callback), - callback => bucketPutVersioning(authInfo, - suspendVersioningRequest, log, callback), - // put source object in source bucket - callback => objectPut(authInfo, testPutObjectRequests[2], - undefined, log, callback), - ], err => { - if (err) { - return done(err); + async.series( + [ + callback => bucketPut(authInfo, putDestBucketRequest, log, callback), + callback => bucketPut(authInfo, putSourceBucketRequest, log, callback), + // putting null version: put obj before versioning configured + // in dest bucket + callback => objectPut(authInfo, testPutObjectRequests[0], undefined, log, callback), + callback => bucketPutVersioning(authInfo, enableVersioningRequest, log, callback), + // put another version in dest bucket: + callback => objectPut(authInfo, testPutObjectRequests[1], undefined, log, callback), + callback => bucketPutVersioning(authInfo, suspendVersioningRequest, log, callback), + // put source object in source bucket + callback => objectPut(authInfo, testPutObjectRequests[2], undefined, log, callback), + ], + err => { + if (err) { + return done(err); + } + versioningTestUtils.assertDataStoreValues(ds, objData); + return done(); } - versioningTestUtils.assertDataStoreValues(ds, objData); - return done(); - }); + ); }); after(() => { @@ -104,50 +94,49 @@ describe('objectCopy with versioning', () => { cleanup(); }); - it('should delete null version when creating new null version, ' + - 'even when null version is not the latest version', done => { - // will have another copy of last object in datastore after objectCopy - const expectedValues = [undefined, objData[1], objData[2], objData[2]]; - const testObjectCopyRequest = _createObjectCopyRequest(destBucketName); - objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, - undefined, log, err => { + it( + 'should delete null version when creating new null version, ' + + 'even when null version is not the latest version', + done => { + // will have another copy of last object in datastore after objectCopy + const expectedValues = [undefined, objData[1], objData[2], objData[2]]; + const testObjectCopyRequest = _createObjectCopyRequest(destBucketName); + objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, err => { assert.ifError(err, `Unexpected err: ${err}`); setImmediate(() => { - versioningTestUtils - .assertDataStoreValues(ds, expectedValues); + versioningTestUtils.assertDataStoreValues(ds, expectedValues); done(); }); }); - }); + } + ); it('should not copy object with storage-class header not equal to STANDARD', done => { const testObjectCopyRequest = _createObjectCopyRequest(destBucketName); testObjectCopyRequest.headers['x-amz-storage-class'] 
= 'COLD'; - objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, - undefined, log, err => { - setImmediate(() => { - assert.strictEqual(err.is.InvalidStorageClass, true); - done(); - }); + objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, err => { + setImmediate(() => { + assert.strictEqual(err.is.InvalidStorageClass, true); + done(); }); + }); }); it('should not set bucketOwnerId if requesting account owns dest bucket', done => { const testObjectCopyRequest = _createObjectCopyRequest(destBucketName); - objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, - undefined, log, err => { - assert.ifError(err); - sinon.assert.calledWith( - metadata.putObjectMD.lastCall, - destBucketName, - objectKey, - sinon.match({ _data: { bucketOwnerId: sinon.match.typeOf('undefined') } }), - sinon.match.any, - sinon.match.any, - sinon.match.any - ); - done(); - }); + objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + sinon.match({ _data: { bucketOwnerId: sinon.match.typeOf('undefined') } }), + sinon.match.any, + sinon.match.any, + sinon.match.any + ); + done(); + }); }); // TODO: S3C-9965 @@ -170,9 +159,7 @@ describe('objectCopy with versioning', () => { Effect: 'Allow', Principal: { AWS: `arn:aws:iam::${authInfo2.shortid}:root` }, Action: ['s3:GetObject'], - Resource: [ - `arn:aws:s3:::${sourceBucketName}/*`, - ], + Resource: [`arn:aws:s3:::${sourceBucketName}/*`], }, ], }), @@ -191,9 +178,7 @@ describe('objectCopy with versioning', () => { Effect: 'Allow', Principal: { AWS: `arn:aws:iam::${authInfo2.shortid}:root` }, Action: ['s3:PutObject'], - Resource: [ - `arn:aws:s3:::${destBucketName}/*`, - ], + Resource: [`arn:aws:s3:::${destBucketName}/*`], }, ], }), @@ -202,50 +187,46 @@ describe('objectCopy with versioning', () => { assert.ifError(err); bucketPutPolicy(authInfo, testPutDestPolicyRequest, log, err => { assert.ifError(err); - objectCopy(authInfo2, testObjectCopyRequest, sourceBucketName, objectKey, - undefined, log, err => { - sinon.assert.calledWith( - metadata.putObjectMD.lastCall, - destBucketName, - objectKey, - sinon.match({ _data: { bucketOwnerId: authInfo.canonicalID } }), - sinon.match.any, - sinon.match.any, - sinon.match.any - ); - assert.ifError(err); - done(); - }); + objectCopy(authInfo2, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, err => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + sinon.match({ _data: { bucketOwnerId: authInfo.canonicalID } }), + sinon.match.any, + sinon.match.any, + sinon.match.any + ); + assert.ifError(err); + done(); + }); }); }); }); }); describe('non-versioned objectCopy', () => { - const testPutObjectRequest = versioningTestUtils - .createPutObjectRequest(sourceBucketName, objectKey, objData[0]); - const testPutDestObjectRequest = versioningTestUtils - .createPutObjectRequest(destBucketName, objectKey, objData[1]); + const testPutObjectRequest = versioningTestUtils.createPutObjectRequest(sourceBucketName, objectKey, objData[0]); + const testPutDestObjectRequest = versioningTestUtils.createPutObjectRequest(destBucketName, objectKey, objData[1]); before(done => { cleanup(); - sinon.stub(metadata, 'putObjectMD') - .callsFake(originalputObjectMD); - async.series([ - callback => bucketPut(authInfo, putDestBucketRequest, log, - callback), - callback => 
bucketPut(authInfo, putSourceBucketRequest, log, - callback), - // put source object in source bucket - callback => objectPut(authInfo, testPutObjectRequest, - undefined, log, callback), - ], err => { - if (err) { - return done(err); + sinon.stub(metadata, 'putObjectMD').callsFake(originalputObjectMD); + async.series( + [ + callback => bucketPut(authInfo, putDestBucketRequest, log, callback), + callback => bucketPut(authInfo, putSourceBucketRequest, log, callback), + // put source object in source bucket + callback => objectPut(authInfo, testPutObjectRequest, undefined, log, callback), + ], + err => { + if (err) { + return done(err); + } + versioningTestUtils.assertDataStoreValues(ds, objData.slice(0, 1)); + return done(); } - versioningTestUtils.assertDataStoreValues(ds, objData.slice(0, 1)); - return done(); - }); + ); }); after(() => { @@ -256,87 +237,128 @@ describe('non-versioned objectCopy', () => { const testObjectCopyRequest = _createObjectCopyRequest(destBucketName); it('should not leave orphans in data when overwriting a multipart upload', done => { - mpuUtils.createMPU(namespace, destBucketName, objectKey, log, - (err, testUploadId) => { + mpuUtils.createMPU(namespace, destBucketName, objectKey, log, (err, testUploadId) => { assert.ifError(err); - objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, - undefined, log, err => { - assert.ifError(err); - sinon.assert.calledWith(metadata.putObjectMD, - any, any, any, sinon.match({ oldReplayId: testUploadId }), any, any); - done(); - }); + objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD, + any, + any, + any, + sinon.match({ oldReplayId: testUploadId }), + any, + any + ); + done(); + }); }); }); it('should not pass needOplogUpdate when creating object', done => { - async.series([ - next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, - undefined, log, next), - async () => { - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - destBucketName, objectKey, any, sinon.match({ - needOplogUpdate: undefined, - originOp: undefined, - }), any, any); - }, - ], done); + async.series( + [ + next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + any, + sinon.match({ + needOplogUpdate: undefined, + originOp: undefined, + }), + any, + any + ); + }, + ], + done + ); }); it('should not pass needOplogUpdate when replacing object', done => { - async.series([ - next => objectPut(authInfo, testPutDestObjectRequest, undefined, log, next), - next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, - undefined, log, next), - async () => { - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - destBucketName, objectKey, any, sinon.match({ - needOplogUpdate: undefined, - originOp: undefined, - }), any, any); - }, - ], done); + async.series( + [ + next => objectPut(authInfo, testPutDestObjectRequest, undefined, log, next), + next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + any, + sinon.match({ + needOplogUpdate: undefined, + originOp: undefined, + }), + any, + any + ); + }, + ], + done + ); }); it('should pass needOplogUpdate to metadata when 
replacing archived object', done => { const archived = { - archiveInfo: { foo: 0, bar: 'stuff' } + archiveInfo: { foo: 0, bar: 'stuff' }, }; - async.series([ - next => objectPut(authInfo, testPutDestObjectRequest, undefined, log, next), - next => fakeMetadataArchive(destBucketName, objectKey, undefined, archived, next), - next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, - undefined, log, next), - async () => { - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - destBucketName, objectKey, any, sinon.match({ - needOplogUpdate: true, - originOp: 's3:ReplaceArchivedObject', - }), any, any); - }, - ], done); + async.series( + [ + next => objectPut(authInfo, testPutDestObjectRequest, undefined, log, next), + next => fakeMetadataArchive(destBucketName, objectKey, undefined, archived, next), + next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + any, + sinon.match({ + needOplogUpdate: true, + originOp: 's3:ReplaceArchivedObject', + }), + any, + any + ); + }, + ], + done + ); }); it('should pass needOplogUpdate to metadata when replacing archived object in version suspended bucket', done => { const archived = { - archiveInfo: { foo: 0, bar: 'stuff' } + archiveInfo: { foo: 0, bar: 'stuff' }, }; - async.series([ - next => bucketPutVersioning(authInfo, suspendVersioningRequest, log, next), - next => objectPut(authInfo, testPutDestObjectRequest, undefined, log, next), - next => fakeMetadataArchive(destBucketName, objectKey, undefined, archived, next), - next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, - undefined, log, next), - async () => { - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - destBucketName, objectKey, any, sinon.match({ - needOplogUpdate: true, - originOp: 's3:ReplaceArchivedObject', - }), any, any); - }, - ], done); + async.series( + [ + next => bucketPutVersioning(authInfo, suspendVersioningRequest, log, next), + next => objectPut(authInfo, testPutDestObjectRequest, undefined, log, next), + next => fakeMetadataArchive(destBucketName, objectKey, undefined, archived, next), + next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + any, + sinon.match({ + needOplogUpdate: true, + originOp: 's3:ReplaceArchivedObject', + }), + any, + any + ); + }, + ], + done + ); }); }); @@ -344,10 +366,13 @@ describe('objectCopy overheadField', () => { beforeEach(done => { cleanup(); sinon.stub(metadata, 'putObjectMD').callsFake(originalputObjectMD); - async.series([ - next => bucketPut(authInfo, putSourceBucketRequest, log, next), - next => bucketPut(authInfo, putDestBucketRequest, log, next), - ], done); + async.series( + [ + next => bucketPut(authInfo, putSourceBucketRequest, log, next), + next => bucketPut(authInfo, putDestBucketRequest, log, next), + ], + done + ); }); afterEach(() => { @@ -356,62 +381,82 @@ describe('objectCopy overheadField', () => { }); it('should pass overheadField to metadata.putObjectMD for a non-versioned request', done => { - const testPutObjectRequest = - versioningTestUtils.createPutObjectRequest(sourceBucketName, objectKey, objData[0]); + const testPutObjectRequest = versioningTestUtils.createPutObjectRequest( + sourceBucketName, + objectKey, + objData[0] + ); const 
testObjectCopyRequest = _createObjectCopyRequest(destBucketName); objectPut(authInfo, testPutObjectRequest, undefined, log, err => { assert.ifError(err); - objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, - err => { - assert.ifError(err); - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - destBucketName, objectKey, any, sinon.match({ overheadField: sinon.match.array }), any, any); - done(); - } - ); + objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + any, + sinon.match({ overheadField: sinon.match.array }), + any, + any + ); + done(); + }); }); }); it('should pass overheadField to metadata.putObjectMD for a versioned request', done => { - const testPutObjectRequest = - versioningTestUtils.createPutObjectRequest(sourceBucketName, objectKey, objData[0]); + const testPutObjectRequest = versioningTestUtils.createPutObjectRequest( + sourceBucketName, + objectKey, + objData[0] + ); const testObjectCopyRequest = _createObjectCopyRequest(destBucketName); objectPut(authInfo, testPutObjectRequest, undefined, log, err => { assert.ifError(err); bucketPutVersioning(authInfo, enableVersioningRequest, log, err => { assert.ifError(err); - objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, - err => { - assert.ifError(err); - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - destBucketName, objectKey, any, - sinon.match({ overheadField: sinon.match.array }), any, any - ); - done(); - } - ); + objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + any, + sinon.match({ overheadField: sinon.match.array }), + any, + any + ); + done(); + }); }); }); }); it('should pass overheadField to metadata.putObjectMD for a version-suspended request', done => { - const testPutObjectRequest = - versioningTestUtils.createPutObjectRequest(sourceBucketName, objectKey, objData[0]); + const testPutObjectRequest = versioningTestUtils.createPutObjectRequest( + sourceBucketName, + objectKey, + objData[0] + ); const testObjectCopyRequest = _createObjectCopyRequest(destBucketName); objectPut(authInfo, testPutObjectRequest, undefined, log, err => { assert.ifError(err); bucketPutVersioning(authInfo, suspendVersioningRequest, log, err => { assert.ifError(err); - objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, - err => { - assert.ifError(err); - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - destBucketName, objectKey, any, - sinon.match({ overheadField: sinon.match.array }), any, any - ); - done(); - } - ); + objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + destBucketName, + objectKey, + any, + sinon.match({ overheadField: sinon.match.array }), + any, + any + ); + done(); + }); }); }); }); @@ -426,10 +471,16 @@ describe('objectCopy in ingestion bucket', () => { before(() => { // Setup multi-backend, this is required for ingestion - data.switch(new storage.data.MultipleBackendGateway({ - 'us-east-1': dataClient, - 'us-east-2': dataClient, - }, metadata, data.locStorageCheckFn)); + data.switch( + new storage.data.MultipleBackendGateway( + { + 
'us-east-1': dataClient, + 'us-east-2': dataClient, + }, + metadata, + data.locStorageCheckFn + ) + ); data.implName = 'multipleBackends'; // "mock" the data location, simulating a backend supporting server-side copy @@ -462,19 +513,20 @@ describe('objectCopy in ingestion bucket', () => { sinon.restore(); }); - const newPutIngestBucketRequest = location => new DummyRequest({ - bucketName: destBucketName, - namespace, - headers: { host: `${destBucketName}.s3.amazonaws.com` }, - url: '/', - post: '<?xml version="1.0" encoding="UTF-8"?>' + - '<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' + - `<LocationConstraint>${location}</LocationConstraint>` + - '</CreateBucketConfiguration>', - }); - const putSourceObjectRequest = versioningTestUtils.createPutObjectRequest( - sourceBucketName, objectKey, objData[0]); + const newPutIngestBucketRequest = location => + new DummyRequest({ + bucketName: destBucketName, + namespace, + headers: { host: `${destBucketName}.s3.amazonaws.com` }, + url: '/', + post: + '<?xml version="1.0" encoding="UTF-8"?>' + + '<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' + + `<LocationConstraint>${location}</LocationConstraint>` + + '</CreateBucketConfiguration>', + }); + const putSourceObjectRequest = versioningTestUtils.createPutObjectRequest(sourceBucketName, objectKey, objData[0]); const newPutObjectRequest = params => { const { location } = params || {}; const r = _createObjectCopyRequest(destBucketName); @@ -491,17 +543,28 @@ describe('objectCopy in ingestion bucket', () => { const versionID = versioning.VersionID.encode(versioning.VersionID.generateVersionId('0', '')); dataClient.copyObject = sinon.stub().yields(null, objectKey, versionID); - async.series([ - next => bucketPut(authInfo, putSourceBucketRequest, log, next), - next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), - next => objectPut(authInfo, putSourceObjectRequest, undefined, log, next), - next => objectCopy(authInfo, newPutObjectRequest(), sourceBucketName, objectKey, undefined, log, - (err, xml, headers) => { - assert.ifError(err); - assert.strictEqual(headers['x-amz-version-id'], versionID); - next(); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, putSourceBucketRequest, log, next), + next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), + next => objectPut(authInfo, putSourceObjectRequest, undefined, log, next), + next => + objectCopy( + authInfo, + newPutObjectRequest(), + sourceBucketName, + objectKey, + undefined, + log, + (err, xml, headers) => { + assert.ifError(err); + assert.strictEqual(headers['x-amz-version-id'], versionID); + next(); + } + ), + ], + done + ); }); it('should not use the versionID from the backend when writing in another location', done => { @@ -509,33 +572,55 @@
(err, xml, headers) => { + assert.ifError(err); + assert.notEqual(headers['x-amz-version-id'], versionID); + next(); + } + ), + ], + done + ); }); it('should not use the versionID from the backend when it is not a valid versionID', done => { const versionID = undefined; dataClient.copyObject = sinon.stub().yields(null, objectKey, versionID); - async.series([ - next => bucketPut(authInfo, putSourceBucketRequest, log, next), - next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), - next => objectPut(authInfo, putSourceObjectRequest, undefined, log, next), - next => objectCopy(authInfo, newPutObjectRequest(), sourceBucketName, objectKey, undefined, log, - (err, xml, headers) => { - assert.ifError(err); - assert.notEqual(headers['x-amz-version-id'], versionID); - next(); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, putSourceBucketRequest, log, next), + next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), + next => objectPut(authInfo, putSourceObjectRequest, undefined, log, next), + next => + objectCopy( + authInfo, + newPutObjectRequest(), + sourceBucketName, + objectKey, + undefined, + log, + (err, xml, headers) => { + assert.ifError(err); + assert.notEqual(headers['x-amz-version-id'], versionID); + next(); + } + ), + ], + done + ); }); }); diff --git a/tests/unit/api/objectCopyPart.js b/tests/unit/api/objectCopyPart.js index 46c95a0452..0365305513 100644 --- a/tests/unit/api/objectCopyPart.js +++ b/tests/unit/api/objectCopyPart.js @@ -6,13 +6,11 @@ const { storage } = require('arsenal'); const { bucketPut } = require('../../../lib/api/bucketPut'); const objectPut = require('../../../lib/api/objectPut'); const objectPutCopyPart = require('../../../lib/api/objectPutCopyPart'); -const initiateMultipartUpload -= require('../../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../../lib/api/initiateMultipartUpload'); const { metadata } = storage.metadata.inMemory.metadata; const metadataswitch = require('../metadataswitch'); const DummyRequest = require('../DummyRequest'); -const { cleanup, DummyRequestLogger, makeAuthInfo, versioningTestUtils } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, versioningTestUtils } = require('../helpers'); const log = new DummyRequestLogger(); const canonicalID = 'accessKey1'; @@ -64,30 +62,27 @@ const initiateRequest = _createInitiateRequest(destBucketName); describe('objectCopyPart', () => { let uploadId; const objData = Buffer.from('foo', 'utf8'); - const testPutObjectRequest = - versioningTestUtils.createPutObjectRequest(sourceBucketName, objectKey, - objData); + const testPutObjectRequest = versioningTestUtils.createPutObjectRequest(sourceBucketName, objectKey, objData); before(done => { cleanup(); sinon.spy(metadataswitch, 'putObjectMD'); - async.waterfall([ - callback => bucketPut(authInfo, putDestBucketRequest, log, - err => callback(err)), - callback => bucketPut(authInfo, putSourceBucketRequest, log, - err => callback(err)), - callback => objectPut(authInfo, testPutObjectRequest, - undefined, log, err => callback(err)), - callback => initiateMultipartUpload(authInfo, initiateRequest, - log, (err, res) => callback(err, res)), - ], (err, res) => { - if (err) { - return done(err); + async.waterfall( + [ + callback => bucketPut(authInfo, putDestBucketRequest, log, err => callback(err)), + callback => bucketPut(authInfo, putSourceBucketRequest, log, err => callback(err)), + callback => objectPut(authInfo, 
testPutObjectRequest, undefined, log, err => callback(err)), + callback => initiateMultipartUpload(authInfo, initiateRequest, log, (err, res) => callback(err, res)), + ], + (err, res) => { + if (err) { + return done(err); + } + return parseString(res, (err, json) => { + uploadId = json.InitiateMultipartUploadResult.UploadId[0]; + return done(); + }); } - return parseString(res, (err, json) => { - uploadId = json.InitiateMultipartUploadResult.UploadId[0]; - return done(); - }); - }); + ); }); after(() => { @@ -95,8 +90,7 @@ describe('objectCopyPart', () => { cleanup(); }); - it('should copy part even if legacy metadata without dataStoreName', - done => { + it('should copy part even if legacy metadata without dataStoreName', done => { // force metadata for dataStoreName to be undefined metadata.keyMaps.get(sourceBucketName).get(objectKey).dataStoreName = undefined; const testObjectCopyRequest = _createObjectCopyPartRequest(destBucketName, uploadId); @@ -108,17 +102,17 @@ describe('objectCopyPart', () => { it('should return InvalidArgument error given invalid range', done => { const headers = { 'x-amz-copy-source-range': 'bad-range-parameter' }; - const req = - _createObjectCopyPartRequest(destBucketName, uploadId, headers); - objectPutCopyPart( - authInfo, req, sourceBucketName, objectKey, undefined, log, err => { - assert(err.is.InvalidArgument); - assert.strictEqual(err.description, - 'The x-amz-copy-source-range value must be of the form ' + + const req = _createObjectCopyPartRequest(destBucketName, uploadId, headers); + objectPutCopyPart(authInfo, req, sourceBucketName, objectKey, undefined, log, err => { + assert(err.is.InvalidArgument); + assert.strictEqual( + err.description, + 'The x-amz-copy-source-range value must be of the form ' + 'bytes=first-last where first and last are the ' + - 'zero-based offsets of the first and last bytes to copy'); - done(); - }); + 'zero-based offsets of the first and last bytes to copy' + ); + done(); + }); }); it('should pass overheadField', done => { diff --git a/tests/unit/api/objectDelete.js b/tests/unit/api/objectDelete.js index b46687ebaf..cece71b50a 100644 --- a/tests/unit/api/objectDelete.js +++ b/tests/unit/api/objectDelete.js @@ -32,8 +32,7 @@ const lateDate = new Date(); earlyDate.setMinutes(earlyDate.getMinutes() - 30); lateDate.setMinutes(lateDate.getMinutes() + 30); -function testAuth(bucketOwner, authUser, bucketPutReq, objPutReq, objDelReq, - log, cb) { +function testAuth(bucketOwner, authUser, bucketPutReq, objPutReq, objDelReq, log, cb) { bucketPut(bucketOwner, bucketPutReq, log, () => { bucketPutACL(bucketOwner, bucketPutReq, log, err => { assert.strictEqual(err, undefined); @@ -53,13 +52,16 @@ describe('objectDelete API', () => { beforeEach(() => { cleanup(); - testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: {}, - url: `/${bucketName}/${objectKey}`, - }, postBody); + testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: {}, + url: `/${bucketName}/${objectKey}`, + }, + postBody + ); sinon.stub(services, 'deleteObject').callsFake(originalDeleteObject); sinon.spy(metadataswitch, 'putObjectMD'); @@ -70,7 +72,6 @@ describe('objectDelete API', () => { sinon.restore(); }); - const testBucketPutRequest = new DummyRequest({ bucketName, namespace, @@ -93,43 +94,65 @@ describe('objectDelete API', () => { }); it('should delete an object', done => { - async.series([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - next => objectPut(authInfo, 
testPutObjectRequest, undefined, log, next), - next => objectDelete(authInfo, testDeleteRequest, log, next), - async () => sinon.assert.calledWith(services.deleteObject, - any, any, any, - sinon.match({ - deleteData: true, - doesNotNeedOpogUpdate: true, - }), - any, any, any), - next => objectGet(authInfo, testGetObjectRequest, false, log, err => { - assert.strictEqual(err.is.NoSuchKey, true); - next(); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + next => objectDelete(authInfo, testDeleteRequest, log, next), + async () => + sinon.assert.calledWith( + services.deleteObject, + any, + any, + any, + sinon.match({ + deleteData: true, + doesNotNeedOpogUpdate: true, + }), + any, + any, + any + ), + next => + objectGet(authInfo, testGetObjectRequest, false, log, err => { + assert.strictEqual(err.is.NoSuchKey, true); + next(); + }), + ], + done + ); }); it('should delete an object with oplog update when object is archived', done => { const archived = { archiveInfo: { foo: 0, bar: 'stuff' } }; - async.series([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - next => fakeMetadataArchive(bucketName, objectKey, undefined, archived, next), - next => objectDelete(authInfo, testDeleteRequest, log, next), - async () => sinon.assert.calledWith(services.deleteObject, - any, any, any, - sinon.match({ - deleteData: true, - doesNotNeedOpogUpdate: undefined, - }), - any, any, any), - next => objectGet(authInfo, testGetObjectRequest, false, log, err => { - assert.strictEqual(err.is.NoSuchKey, true); - next(); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + next => fakeMetadataArchive(bucketName, objectKey, undefined, archived, next), + next => objectDelete(authInfo, testDeleteRequest, log, next), + async () => + sinon.assert.calledWith( + services.deleteObject, + any, + any, + any, + sinon.match({ + deleteData: true, + doesNotNeedOpogUpdate: undefined, + }), + any, + any, + any + ), + next => + objectGet(authInfo, testGetObjectRequest, false, log, err => { + assert.strictEqual(err.is.NoSuchKey, true); + next(); + }), + ], + done + ); }); it('should delete an object with oplog update when bucket has bucket notification', done => { @@ -138,72 +161,90 @@ describe('objectDelete API', () => { headers: { host: `${bucketName}.s3.amazonaws.com`, }, - post: '' + + post: + '' + '', actionImplicitDenies: false, }; - async.series([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - next => bucketPutNotification(authInfo, putNotifConfigRequest, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - next => objectDelete(authInfo, testDeleteRequest, log, next), - async () => sinon.assert.calledWith(services.deleteObject, - any, any, any, - sinon.match({ - deleteData: true, - doesNotNeedOpogUpdate: undefined, - }), - any, any, any), - next => objectGet(authInfo, testGetObjectRequest, false, log, err => { - assert.strictEqual(err.is.NoSuchKey, true); - next(); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + next => bucketPutNotification(authInfo, putNotifConfigRequest, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + next => 
objectDelete(authInfo, testDeleteRequest, log, next), + async () => + sinon.assert.calledWith( + services.deleteObject, + any, + any, + any, + sinon.match({ + deleteData: true, + doesNotNeedOpogUpdate: undefined, + }), + any, + any, + any + ), + next => + objectGet(authInfo, testGetObjectRequest, false, log, err => { + assert.strictEqual(err.is.NoSuchKey, true); + next(); + }), + ], + done + ); }); it('should delete a 0 bytes object', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: {}, - url: `/${bucketName}/${objectKey}`, - }, ''); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: {}, + url: `/${bucketName}/${objectKey}`, + }, + '' + ); bucketPut(authInfo, testBucketPutRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, - undefined, log, () => { - objectDelete(authInfo, testDeleteRequest, log, err => { - assert.strictEqual(err, null); - objectGet(authInfo, testGetObjectRequest, false, - log, err => { - const expected = - Object.assign({}, errors.NoSuchKey); - const received = Object.assign({}, err); - assert.deepStrictEqual(received, expected); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, () => { + objectDelete(authInfo, testDeleteRequest, log, err => { + assert.strictEqual(err, null); + objectGet(authInfo, testGetObjectRequest, false, log, err => { + const expected = Object.assign({}, errors.NoSuchKey); + const received = Object.assign({}, err); + assert.deepStrictEqual(received, expected); + done(); }); }); + }); }); }); it('should delete a multipart upload and send `uploadId` as `replayId` to deleteObject', done => { bucketPut(authInfo, testBucketPutRequest, log, () => { - mpuUtils.createMPU(namespace, bucketName, objectKey, log, - (err, testUploadId) => { - assert.ifError(err); - objectDelete(authInfo, testDeleteRequest, log, err => { - assert.strictEqual(err, null); - sinon.assert.calledWith(services.deleteObject, - any, any, any, - sinon.match({ - deleteData: true, - replayId: testUploadId, - doesNotNeedOpogUpdate: true, - }), any, any, any); - done(); - }); + mpuUtils.createMPU(namespace, bucketName, objectKey, log, (err, testUploadId) => { + assert.ifError(err); + objectDelete(authInfo, testDeleteRequest, log, err => { + assert.strictEqual(err, null); + sinon.assert.calledWith( + services.deleteObject, + any, + any, + any, + sinon.match({ + deleteData: true, + replayId: testUploadId, + doesNotNeedOpogUpdate: true, + }), + any, + any, + any + ); + done(); }); + }); }); }); @@ -220,46 +261,40 @@ describe('objectDelete API', () => { it('should del object if user has FULL_CONTROL grant on bucket', done => { const bucketOwner = makeAuthInfo('accessKey2'); const authUser = makeAuthInfo('accessKey3'); - testBucketPutRequest.headers['x-amz-grant-full-control'] = - `id=${authUser.getCanonicalID()}`; - testAuth(bucketOwner, authUser, testBucketPutRequest, - testPutObjectRequest, testDeleteRequest, log, done); + testBucketPutRequest.headers['x-amz-grant-full-control'] = `id=${authUser.getCanonicalID()}`; + testAuth(bucketOwner, authUser, testBucketPutRequest, testPutObjectRequest, testDeleteRequest, log, done); }); it('should del object if user has WRITE grant on bucket', done => { const bucketOwner = makeAuthInfo('accessKey2'); const authUser = makeAuthInfo('accessKey3'); - testBucketPutRequest.headers['x-amz-grant-write'] = - `id=${authUser.getCanonicalID()}`; - testAuth(bucketOwner, authUser, testBucketPutRequest, - 
testPutObjectRequest, testDeleteRequest, log, done); + testBucketPutRequest.headers['x-amz-grant-write'] = `id=${authUser.getCanonicalID()}`; + testAuth(bucketOwner, authUser, testBucketPutRequest, testPutObjectRequest, testDeleteRequest, log, done); }); it('should del object in bucket with public-read-write acl', done => { const bucketOwner = makeAuthInfo('accessKey2'); const authUser = makeAuthInfo('accessKey3'); testBucketPutRequest.headers['x-amz-acl'] = 'public-read-write'; - testAuth(bucketOwner, authUser, testBucketPutRequest, - testPutObjectRequest, testDeleteRequest, log, done); + testAuth(bucketOwner, authUser, testBucketPutRequest, testPutObjectRequest, testDeleteRequest, log, done); }); it('should pass overheadField to metadata', done => { bucketPut(authInfo, testBucketPutRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, - undefined, log, () => { - objectDelete(authInfo, testDeleteRequest, log, err => { - assert.strictEqual(err, null); - sinon.assert.calledWith( - metadataswitch.deleteObjectMD, - bucketName, - objectKey, - sinon.match({ overheadField: sinon.match.array }), - sinon.match.any, - sinon.match.any - ); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, () => { + objectDelete(authInfo, testDeleteRequest, log, err => { + assert.strictEqual(err, null); + sinon.assert.calledWith( + metadataswitch.deleteObjectMD, + bucketName, + objectKey, + sinon.match({ overheadField: sinon.match.array }), + sinon.match.any, + sinon.match.any + ); + done(); }); + }); }); }); @@ -273,52 +308,54 @@ describe('objectDelete API', () => { }); bucketPut(authInfo, testBucketPutVersionRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, - undefined, log, (err, data) => { - const deleteObjectVersionRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: {}, - url: `/${bucketName}/${objectKey}?versionId=${data['x-amz-version-id']}`, - query: { - versionId: data['x-amz-version-id'], - }, - }); - objectDeleteInternal(authInfo, deleteObjectVersionRequest, log, true, err => { - assert.strictEqual(err, null); - sinon.assert.calledWith(warnStub, 'expiration is trying to delete a master version ' + - 'of an object with versioning enabled'); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, data) => { + const deleteObjectVersionRequest = new DummyRequest({ + bucketName, + namespace, + objectKey, + headers: {}, + url: `/${bucketName}/${objectKey}?versionId=${data['x-amz-version-id']}`, + query: { + versionId: data['x-amz-version-id'], + }, + }); + objectDeleteInternal(authInfo, deleteObjectVersionRequest, log, true, err => { + assert.strictEqual(err, null); + sinon.assert.calledWith( + warnStub, + 'expiration is trying to delete a master version ' + 'of an object with versioning enabled' + ); + done(); }); + }); }); }); - describe('with \'modified\' headers', () => { + describe("with 'modified' headers", () => { beforeEach(done => { bucketPut(authInfo, testBucketPutRequest, log, () => { objectPut(authInfo, testPutObjectRequest, undefined, log, done); }); }); - it('should return error if request includes \'if-unmodified-since\' ' + - 'header and object has been modified', done => { - const testDeleteRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { 'if-unmodified-since': earlyDate }, - url: `/${bucketName}/${objectKey}`, - }); - objectDelete(authInfo, testDeleteRequest, log, err => { - assert.strictEqual(err.is.PreconditionFailed, true); - done(); - }); - }); + it( + 
"should return error if request includes 'if-unmodified-since' " + 'header and object has been modified', + done => { + const testDeleteRequest = new DummyRequest({ + bucketName, + namespace, + objectKey, + headers: { 'if-unmodified-since': earlyDate }, + url: `/${bucketName}/${objectKey}`, + }); + objectDelete(authInfo, testDeleteRequest, log, err => { + assert.strictEqual(err.is.PreconditionFailed, true); + done(); + }); + } + ); - it('should delete an object with \'if-unmodified-since\' header', - done => { + it("should delete an object with 'if-unmodified-since' header", done => { const testDeleteRequest = new DummyRequest({ bucketName, namespace, @@ -328,31 +365,31 @@ describe('objectDelete API', () => { }); objectDelete(authInfo, testDeleteRequest, log, err => { assert.strictEqual(err, null); - objectGet(authInfo, testGetObjectRequest, false, log, - err => { + objectGet(authInfo, testGetObjectRequest, false, log, err => { assert.strictEqual(err.is.NoSuchKey, true); done(); }); }); }); - it('should return error if request includes \'if-modified-since\' ' + - 'header and object has not been modified', done => { - const testDeleteRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { 'if-modified-since': lateDate }, - url: `/${bucketName}/${objectKey}`, - }); - objectDelete(authInfo, testDeleteRequest, log, err => { - assert.strictEqual(err.is.NotModified, true); - done(); - }); - }); + it( + "should return error if request includes 'if-modified-since' " + 'header and object has not been modified', + done => { + const testDeleteRequest = new DummyRequest({ + bucketName, + namespace, + objectKey, + headers: { 'if-modified-since': lateDate }, + url: `/${bucketName}/${objectKey}`, + }); + objectDelete(authInfo, testDeleteRequest, log, err => { + assert.strictEqual(err.is.NotModified, true); + done(); + }); + } + ); - it('should delete an object with \'if-modified-since\' header', - done => { + it("should delete an object with 'if-modified-since' header", done => { const testDeleteRequest = new DummyRequest({ bucketName, namespace, @@ -362,8 +399,7 @@ describe('objectDelete API', () => { }); objectDelete(authInfo, testDeleteRequest, log, err => { assert.strictEqual(err, null); - objectGet(authInfo, testGetObjectRequest, false, log, - err => { + objectGet(authInfo, testGetObjectRequest, false, log, err => { assert.strictEqual(err.is.NoSuchKey, true); done(); }); diff --git a/tests/unit/api/objectDeleteTagging.js b/tests/unit/api/objectDeleteTagging.js index 66ed5a11a2..df5253cc24 100644 --- a/tests/unit/api/objectDeleteTagging.js +++ b/tests/unit/api/objectDeleteTagging.js @@ -6,10 +6,7 @@ const objectPut = require('../../../lib/api/objectPut'); const objectPutTagging = require('../../../lib/api/objectPutTagging'); const objectDeleteTagging = require('../../../lib/api/objectDeleteTagging'); const metadata = require('../../../lib/metadata/wrapper'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - TaggingConfigTester } = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, TaggingConfigTester } = require('../helpers'); const DummyRequest = require('../DummyRequest'); const log = new DummyRequestLogger(); @@ -25,13 +22,16 @@ const testBucketPutRequest = { actionImplicitDenies: false, }; -const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, -}, postBody); +const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + 
objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }, + postBody +); describe('deleteObjectTagging API', () => { beforeEach(done => { @@ -40,8 +40,7 @@ describe('deleteObjectTagging API', () => { if (err) { return done(err); } - return objectPut(authInfo, testPutObjectRequest, undefined, log, - done); + return objectPut(authInfo, testPutObjectRequest, undefined, log, done); }); }); @@ -49,22 +48,20 @@ describe('deleteObjectTagging API', () => { it('should delete tag set and update originOp', done => { const taggingUtil = new TaggingConfigTester(); - const testObjectPutTaggingRequest = taggingUtil - .createObjectTaggingRequest('PUT', bucketName, objectName); - const testObjectDeleteTaggingRequest = taggingUtil - .createObjectTaggingRequest('DELETE', bucketName, objectName); - async.waterfall([ - next => objectPutTagging(authInfo, testObjectPutTaggingRequest, log, - err => next(err)), - next => objectDeleteTagging(authInfo, - testObjectDeleteTaggingRequest, log, err => next(err)), - next => metadata.getObjectMD(bucketName, objectName, {}, log, - (err, objectMD) => next(err, objectMD)), - ], (err, objectMD) => { - const uploadedTags = objectMD.tags; - assert.deepStrictEqual(uploadedTags, {}); - assert.strictEqual(objectMD.originOp, 's3:ObjectTagging:Delete'); - return done(); - }); + const testObjectPutTaggingRequest = taggingUtil.createObjectTaggingRequest('PUT', bucketName, objectName); + const testObjectDeleteTaggingRequest = taggingUtil.createObjectTaggingRequest('DELETE', bucketName, objectName); + async.waterfall( + [ + next => objectPutTagging(authInfo, testObjectPutTaggingRequest, log, err => next(err)), + next => objectDeleteTagging(authInfo, testObjectDeleteTaggingRequest, log, err => next(err)), + next => metadata.getObjectMD(bucketName, objectName, {}, log, (err, objectMD) => next(err, objectMD)), + ], + (err, objectMD) => { + const uploadedTags = objectMD.tags; + assert.deepStrictEqual(uploadedTags, {}); + assert.strictEqual(objectMD.originOp, 's3:ObjectTagging:Delete'); + return done(); + } + ); }); }); diff --git a/tests/unit/api/objectGet.js b/tests/unit/api/objectGet.js index 8f8b1c89f9..1e990dc8a9 100644 --- a/tests/unit/api/objectGet.js +++ b/tests/unit/api/objectGet.js @@ -5,11 +5,9 @@ const { parseString } = require('xml2js'); const { bucketPut } = require('../../../lib/api/bucketPut'); const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); -const completeMultipartUpload - = require('../../../lib/api/completeMultipartUpload'); +const completeMultipartUpload = require('../../../lib/api/completeMultipartUpload'); const DummyRequest = require('../DummyRequest'); -const initiateMultipartUpload - = require('../../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../../lib/api/initiateMultipartUpload'); const objectPut = require('../../../lib/api/objectPut'); const objectGet = require('../../../lib/api/objectGet'); const objectPutPart = require('../../../lib/api/objectPutPart'); @@ -29,17 +27,20 @@ describe('objectGet API', () => { beforeEach(() => { cleanup(); - testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-meta-test': 'some metadata', - 'content-length': '12', + testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-meta-test': 'some metadata', + 'content-length': '12', + }, + parsedContentLength: 12, + url: `/${bucketName}/${objectName}`, }, - 
parsedContentLength: 12, - url: `/${bucketName}/${objectName}`, - }, postBody); + postBody + ); }); const correctMD5 = 'be747eb4b75517bf6b3cf7c5fbb62f3a'; @@ -63,19 +64,14 @@ describe('objectGet API', () => { it('should get the object metadata', done => { bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, - log, (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGet(authInfo, testGetRequest, false, - log, (err, result, responseMetaHeaders) => { - assert.strictEqual( - responseMetaHeaders[userMetadataKey], - userMetadataValue); - assert.strictEqual(responseMetaHeaders.ETag, - `"${correctMD5}"`); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGet(authInfo, testGetRequest, false, log, (err, result, responseMetaHeaders) => { + assert.strictEqual(responseMetaHeaders[userMetadataKey], userMetadataValue); + assert.strictEqual(responseMetaHeaders.ETag, `"${correctMD5}"`); + done(); }); + }); }); }); @@ -83,25 +79,29 @@ describe('objectGet API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-bucket-object-lock-enabled': 'true', }, url: `/${bucketName}`, actionImplicitDenies: false, }; - const createPutDummyRetention = (date, mode) => new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-object-lock-retain-until-date': date, - 'x-amz-object-lock-mode': mode, - 'content-length': '12', - }, - parsedContentLength: 12, - url: `/${bucketName}/${objectName}`, - }, postBody); + const createPutDummyRetention = (date, mode) => + new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-object-lock-retain-until-date': date, + 'x-amz-object-lock-mode': mode, + 'content-length': '12', + }, + parsedContentLength: 12, + url: `/${bucketName}/${objectName}`, + }, + postBody + ); const threeDaysMilliSecs = 3 * 24 * 60 * 60 * 1000; const testDate = new Date(Date.now() + threeDaysMilliSecs).toISOString(); @@ -109,205 +109,204 @@ describe('objectGet API', () => { it('should get the object metadata with valid retention info', done => { bucketPut(authInfo, testPutBucketRequestObjectLock, log, () => { const request = createPutDummyRetention(testDate, 'GOVERNANCE'); - objectPut(authInfo, request, undefined, - log, (err, headers) => { + objectPut(authInfo, request, undefined, log, (err, headers) => { + assert.ifError(err); + assert.strictEqual(headers.ETag, `"${correctMD5}"`); + const req = testGetRequest; + objectGet(authInfo, req, false, log, (err, r, headers) => { assert.ifError(err); + assert.strictEqual(headers['x-amz-object-lock-retain-until-date'], testDate); + assert.strictEqual(headers['x-amz-object-lock-mode'], 'GOVERNANCE'); assert.strictEqual(headers.ETag, `"${correctMD5}"`); - const req = testGetRequest; - objectGet(authInfo, req, false, log, (err, r, headers) => { - assert.ifError(err); - assert.strictEqual( - headers['x-amz-object-lock-retain-until-date'], - testDate); - assert.strictEqual( - headers['x-amz-object-lock-mode'], - 'GOVERNANCE'); - assert.strictEqual(headers.ETag, - `"${correctMD5}"`); - changeObjectLock([{ - bucket: bucketName, - key: objectName, - versionId: headers['x-amz-version-id'], - }], '', done); - }); + changeObjectLock( + [ + { + bucket: bucketName, + key: objectName, + versionId: headers['x-amz-version-id'], + 
}, + ], + '', + done + ); }); + }); }); }); - const createPutDummyLegalHold = legalHold => new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-object-lock-legal-hold': legalHold, - 'content-length': '12', - }, - parsedContentLength: 12, - url: `/${bucketName}/${objectName}`, - }, postBody); + const createPutDummyLegalHold = legalHold => + new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-object-lock-legal-hold': legalHold, + 'content-length': '12', + }, + parsedContentLength: 12, + url: `/${bucketName}/${objectName}`, + }, + postBody + ); const testStatuses = ['ON', 'OFF']; testStatuses.forEach(status => { it(`should get object metadata with legal hold ${status}`, done => { bucketPut(authInfo, testPutBucketRequestObjectLock, log, () => { const request = createPutDummyLegalHold(status); - objectPut(authInfo, request, undefined, log, - (err, resHeaders) => { + objectPut(authInfo, request, undefined, log, (err, resHeaders) => { + assert.ifError(err); + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGet(authInfo, testGetRequest, false, log, (err, res, headers) => { assert.ifError(err); - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGet(authInfo, testGetRequest, false, log, - (err, res, headers) => { - assert.ifError(err); - assert.strictEqual( - headers['x-amz-object-lock-legal-hold'], - status); - assert.strictEqual(headers.ETag, - `"${correctMD5}"`); - changeObjectLock([{ + assert.strictEqual(headers['x-amz-object-lock-legal-hold'], status); + assert.strictEqual(headers.ETag, `"${correctMD5}"`); + changeObjectLock( + [ + { bucket: bucketName, key: objectName, versionId: headers['x-amz-version-id'], - }], '', done); - }); + }, + ], + '', + done + ); }); + }); }); }); }); const createPutDummyRetentionAndLegalHold = (date, mode, status) => - new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-object-lock-retain-until-date': date, - 'x-amz-object-lock-mode': mode, - 'x-amz-object-lock-legal-hold': status, - 'content-length': '12', + new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-object-lock-retain-until-date': date, + 'x-amz-object-lock-mode': mode, + 'x-amz-object-lock-legal-hold': status, + 'content-length': '12', + }, + parsedContentLength: 12, + url: `/${bucketName}/${objectName}`, }, - parsedContentLength: 12, - url: `/${bucketName}/${objectName}`, - }, postBody); + postBody + ); - it('should get the object metadata with both retention and legal hold', - done => { - bucketPut(authInfo, testPutBucketRequestObjectLock, log, () => { - const request = createPutDummyRetentionAndLegalHold( - testDate, 'COMPLIANCE', 'ON'); - objectPut(authInfo, request, undefined, log, - (err, resHeaders) => { - assert.ifError(err); - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - const auth = authInfo; - const req = testGetRequest; - objectGet(auth, req, false, log, (err, r, headers) => { - assert.ifError(err); - assert.strictEqual( - headers['x-amz-object-lock-legal-hold'], - 'ON'); - assert.strictEqual( - headers['x-amz-object-lock-retain-until-date'], - testDate); - assert.strictEqual( - headers['x-amz-object-lock-mode'], - 'COMPLIANCE'); - assert.strictEqual(headers.ETag, - `"${correctMD5}"`); - done(); - }); - }); + it('should get the object metadata with both retention and legal hold', done => { + bucketPut(authInfo, testPutBucketRequestObjectLock, log, () => { + const request = 
createPutDummyRetentionAndLegalHold(testDate, 'COMPLIANCE', 'ON'); + objectPut(authInfo, request, undefined, log, (err, resHeaders) => { + assert.ifError(err); + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + const auth = authInfo; + const req = testGetRequest; + objectGet(auth, req, false, log, (err, r, headers) => { + assert.ifError(err); + assert.strictEqual(headers['x-amz-object-lock-legal-hold'], 'ON'); + assert.strictEqual(headers['x-amz-object-lock-retain-until-date'], testDate); + assert.strictEqual(headers['x-amz-object-lock-mode'], 'COMPLIANCE'); + assert.strictEqual(headers.ETag, `"${correctMD5}"`); + done(); + }); }); }); + }); it('should get the object data retrieval info', done => { bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGet(authInfo, testGetRequest, false, log, - (err, dataGetInfo) => { - assert.deepStrictEqual(dataGetInfo, - [{ - key: 1, - start: 0, - size: 12, - dataStoreName: 'mem', - dataStoreETag: `1:${correctMD5}`, - }]); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGet(authInfo, testGetRequest, false, log, (err, dataGetInfo) => { + assert.deepStrictEqual(dataGetInfo, [ + { + key: 1, + start: 0, + size: 12, + dataStoreName: 'mem', + dataStoreETag: `1:${correctMD5}`, + }, + ]); + done(); }); + }); }); }); - it('should get the object data retrieval info for an object put by MPU', - done => { - const partBody = Buffer.from('I am a part\n', 'utf8'); - const initiateRequest = { - bucketName, - namespace, - objectKey: objectName, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectName}?uploads`, - actionImplicitDenies: false, - }; - async.waterfall([ + it('should get the object data retrieval info for an object put by MPU', done => { + const partBody = Buffer.from('I am a part\n', 'utf8'); + const initiateRequest = { + bucketName, + namespace, + objectKey: objectName, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectName}?uploads`, + actionImplicitDenies: false, + }; + async.waterfall( + [ next => bucketPut(authInfo, testPutBucketRequest, log, next), - (corsHeaders, next) => initiateMultipartUpload(authInfo, - initiateRequest, log, next), + (corsHeaders, next) => initiateMultipartUpload(authInfo, initiateRequest, log, next), (result, corsHeaders, next) => parseString(result, next), (json, next) => { - const testUploadId = - json.InitiateMultipartUploadResult.UploadId[0]; + const testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; const md5Hash = crypto.createHash('md5').update(partBody); const calculatedHash = md5Hash.digest('hex'); - const partRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - // Part (other than last part) must be at least 5MB - 'content-length': '5242880', - }, - parsedContentLength: 5242880, - url: `/${objectName}?partNumber=1&uploadId` + - `=${testUploadId}`, - query: { - partNumber: '1', - uploadId: testUploadId, + const partRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + // Part (other than last part) must be at least 5MB + 'content-length': '5242880', + }, + parsedContentLength: 5242880, + url: 
`/${objectName}?partNumber=1&uploadId` + `=${testUploadId}`, + query: { + partNumber: '1', + uploadId: testUploadId, + }, + calculatedHash, }, - calculatedHash, - }, partBody); + partBody + ); objectPutPart(authInfo, partRequest, undefined, log, () => { next(null, testUploadId, calculatedHash); }); }, (testUploadId, calculatedHash, next) => { - const part2Request = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'host': `${bucketName}.s3.amazonaws.com`, - 'content-length': '12', - }, - parsedContentLength: 12, - url: `/${objectName}?partNumber=2&uploadId=` + - `${testUploadId}`, - query: { - partNumber: '2', - uploadId: testUploadId, + const part2Request = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + host: `${bucketName}.s3.amazonaws.com`, + 'content-length': '12', + }, + parsedContentLength: 12, + url: `/${objectName}?partNumber=2&uploadId=` + `${testUploadId}`, + query: { + partNumber: '2', + uploadId: testUploadId, + }, + calculatedHash, }, - calculatedHash, - }, partBody); - objectPutPart(authInfo, part2Request, undefined, - log, () => { - next(null, testUploadId, calculatedHash); - }); + partBody + ); + objectPutPart(authInfo, part2Request, undefined, log, () => { + next(null, testUploadId, calculatedHash); + }); }, (testUploadId, calculatedHash, next) => { - const completeBody = '' + + const completeBody = + '' + '' + '1' + `"${calculatedHash}"` + @@ -328,19 +327,17 @@ describe('objectGet API', () => { post: completeBody, actionImplicitDenies: false, }; - completeMultipartUpload(authInfo, completeRequest, - log, err => { - next(err, calculatedHash); - }); + completeMultipartUpload(authInfo, completeRequest, log, err => { + next(err, calculatedHash); + }); }, ], (err, calculatedHash) => { assert.ifError(err); - objectGet(authInfo, testGetRequest, false, log, - (err, dataGetInfo) => { + objectGet(authInfo, testGetRequest, false, log, (err, dataGetInfo) => { assert.ifError(err); - assert.deepStrictEqual(dataGetInfo, - [{ + assert.deepStrictEqual(dataGetInfo, [ + { key: 1, dataStoreName: 'mem', dataStoreETag: `1:${calculatedHash}`, @@ -353,42 +350,42 @@ describe('objectGet API', () => { dataStoreETag: `2:${calculatedHash}`, size: 12, start: 5242880, - }]); + }, + ]); done(); }); - }); - }); + } + ); + }); it('should get a 0 bytes object', done => { const postBody = ''; const correctMD5 = 'd41d8cd98f00b204e9800998ecf8427e'; - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'content-length': '0', - 'x-amz-meta-test': 'some metadata', + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'content-length': '0', + 'x-amz-meta-test': 'some metadata', + }, + parsedContentLength: 0, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'd41d8cd98f00b204e9800998ecf8427e', }, - parsedContentLength: 0, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'd41d8cd98f00b204e9800998ecf8427e', - }, postBody); + postBody + ); bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGet(authInfo, testGetRequest, false, - log, (err, result, responseMetaHeaders) => { - assert.strictEqual(result, null); - assert.strictEqual( - responseMetaHeaders[userMetadataKey], - userMetadataValue); - assert.strictEqual(responseMetaHeaders.ETag, - 
`"${correctMD5}"`); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGet(authInfo, testGetRequest, false, log, (err, result, responseMetaHeaders) => { + assert.strictEqual(result, null); + assert.strictEqual(responseMetaHeaders[userMetadataKey], userMetadataValue); + assert.strictEqual(responseMetaHeaders.ETag, `"${correctMD5}"`); + done(); }); + }); }); }); @@ -494,28 +491,34 @@ describe('objectGet API', () => { }); }); - it('should reflect the restore header with ongoing-request=false and expiry-date set ' + - 'if the object is restored and not yet expired', done => { - const testGetRequest = { - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - }; - mdColdHelper.putBucketMock(bucketName, null, () => { - const objectCustomMDFields = mdColdHelper.getRestoredObjectMD(); - const restoreInfo = objectCustomMDFields.getAmzRestore(); - mdColdHelper.putObjectMock(bucketName, objectName, objectCustomMDFields, () => { - objectGet(authInfo, testGetRequest, false, log, (err, res, headers) => { - assert.ifError(err); - assert.ok(res); - assert.strictEqual(headers['x-amz-storage-class'], mdColdHelper.defaultLocation); - const utcDate = new Date(restoreInfo.getExpiryDate()).toUTCString(); - assert.strictEqual(headers['x-amz-restore'], `ongoing-request="false", expiry-date="${utcDate}"`); - done(); + it( + 'should reflect the restore header with ongoing-request=false and expiry-date set ' + + 'if the object is restored and not yet expired', + done => { + const testGetRequest = { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }; + mdColdHelper.putBucketMock(bucketName, null, () => { + const objectCustomMDFields = mdColdHelper.getRestoredObjectMD(); + const restoreInfo = objectCustomMDFields.getAmzRestore(); + mdColdHelper.putObjectMock(bucketName, objectName, objectCustomMDFields, () => { + objectGet(authInfo, testGetRequest, false, log, (err, res, headers) => { + assert.ifError(err); + assert.ok(res); + assert.strictEqual(headers['x-amz-storage-class'], mdColdHelper.defaultLocation); + const utcDate = new Date(restoreInfo.getExpiryDate()).toUTCString(); + assert.strictEqual( + headers['x-amz-restore'], + `ongoing-request="false", expiry-date="${utcDate}"` + ); + done(); + }); }); }); - }); - }); + } + ); }); diff --git a/tests/unit/api/objectGetACL.js b/tests/unit/api/objectGetACL.js index 5785a79a6a..939c1507e6 100644 --- a/tests/unit/api/objectGetACL.js +++ b/tests/unit/api/objectGetACL.js @@ -31,7 +31,7 @@ describe('objectGetACL API', () => { bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-acl': 'public-read-write', }, url: '/', @@ -48,36 +48,42 @@ describe('objectGetACL API', () => { }; it('should get a canned private ACL', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { 'x-amz-acl': 'private' }, - url: `/${bucketName}/${objectName}`, - post: postBody, - }, postBody); - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, - undefined, log, next), - (resHeaders, next) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGetACL(authInfo, testGetACLRequest, log, next); + const testPutObjectRequest = new 
DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { 'x-amz-acl': 'private' }, + url: `/${bucketName}/${objectName}`, + post: postBody, }, - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - done(); - }); + postBody + ); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + (resHeaders, next) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGetACL(authInfo, testGetACLRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + done(); + } + ); }); - it('should return an error if try to get an ACL ' + - 'for a nonexistent object', done => { + it('should return an error if try to get an ACL ' + 'for a nonexistent object', done => { bucketPut(authInfo, testBucketPutRequest, log, () => { objectGetACL(authInfo, testGetACLRequest, log, err => { assert.strictEqual(err.is.NoSuchKey, true); @@ -87,323 +93,343 @@ describe('objectGetACL API', () => { }); it('should get a canned public-read ACL', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { 'x-amz-acl': 'public-read' }, - url: `/${bucketName}/${objectName}`, - }, postBody); - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, - undefined, log, next), - (resHeaders, next) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGetACL(authInfo, testGetACLRequest, log, next); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { 'x-amz-acl': 'public-read' }, + url: `/${bucketName}/${objectName}`, }, - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0] - .URI[0], constants.publicId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Permission[0], 'READ'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[2], undefined); - done(); - }); + postBody + ); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + (resHeaders, next) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGetACL(authInfo, testGetACLRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].URI[0], + constants.publicId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], 'READ'); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2], undefined); + done(); + } + ); }); it('should get a canned public-read-write ACL', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { 'x-amz-acl': 'public-read-write' }, - url: `/${bucketName}/${objectName}`, - }, postBody); - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, - undefined, log, next), - (resHeaders, next) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGetACL(authInfo, testGetACLRequest, log, next); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { 'x-amz-acl': 'public-read-write' }, + url: `/${bucketName}/${objectName}`, }, - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0] - .URI[0], constants.publicId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Permission[0], - 'READ'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2].Grantee[0] - .URI[0], constants.publicId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2].Permission[0], - 'WRITE'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[3], undefined); - done(); - }); + postBody + ); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + (resHeaders, next) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGetACL(authInfo, testGetACLRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].URI[0], + constants.publicId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], 'READ'); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[2].Grantee[0].URI[0], + constants.publicId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2].Permission[0], 'WRITE'); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[3], undefined); + done(); + } + ); }); it('should get a canned authenticated-read ACL', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { 'x-amz-acl': 'authenticated-read' }, - url: `/${bucketName}/${objectName}`, - }, postBody); - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, log, next), - (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, - undefined, log, next), - (resHeaders, next) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGetACL(authInfo, testGetACLRequest, log, next); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { 'x-amz-acl': 'authenticated-read' }, + url: `/${bucketName}/${objectName}`, }, - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0] - .URI[0], constants.allAuthedUsersId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Permission[0], - 'READ'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[2], - undefined); - done(); - }); + postBody + ); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + (resHeaders, next) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGetACL(authInfo, testGetACLRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].URI[0], + constants.allAuthedUsersId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], 'READ'); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2], undefined); + done(); + } + ); }); it('should get a canned bucket-owner-read ACL', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { 'x-amz-acl': 'bucket-owner-read' }, - url: `/${bucketName}/${objectName}`, - post: postBody, - }, postBody); - async.waterfall([ - next => - bucketPut(otherAccountAuthInfo, testBucketPutRequest, - log, next), - (corsHeaders, next) => objectPut( - authInfo, testPutObjectRequest, undefined, log, next), - (resHeaders, next) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGetACL(authInfo, testGetACLRequest, log, next); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { 'x-amz-acl': 'bucket-owner-read' }, + url: `/${bucketName}/${objectName}`, + post: postBody, }, - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0] - .ID[0], otherAccountCanonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Permission[0], - 'READ'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[2], undefined); - done(); - }); + postBody + ); + async.waterfall( + [ + next => bucketPut(otherAccountAuthInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + (resHeaders, next) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGetACL(authInfo, testGetACLRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].ID[0], + otherAccountCanonicalID + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], 'READ'); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2], undefined); + done(); + } + ); }); it('should get a canned bucket-owner-full-control ACL', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { 'x-amz-acl': 'bucket-owner-full-control' }, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); - async.waterfall([ - next => - bucketPut(otherAccountAuthInfo, testBucketPutRequest, - log, next), - (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, - undefined, log, next), - (resHeaders, next) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGetACL(authInfo, testGetACLRequest, log, next); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { 'x-amz-acl': 'bucket-owner-full-control' }, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', }, - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], canonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0] - .ID[0], otherAccountCanonicalID); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[2], undefined); - done(); - }); + postBody + ); + async.waterfall( + [ + next => bucketPut(otherAccountAuthInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + (resHeaders, next) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGetACL(authInfo, testGetACLRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + canonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].ID[0], + otherAccountCanonicalID + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2], undefined); + done(); + } + ); }); it('should get specifically set ACLs', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-grant-full-control': - 'emailaddress="sampleaccount1@sampling.com"' + - ',emailaddress="sampleaccount2@sampling.com"', - 'x-amz-grant-read': `uri=${constants.allAuthedUsersId}`, - 'x-amz-grant-write': `uri=${constants.publicId}`, - 'x-amz-grant-read-acp': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + - 'f8f8d5218e7cd47ef2be', - 'x-amz-grant-write-acp': - 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + - 'f8f8d5218e7cd47ef2bf', - }, - url: `/${bucketName}/${objectName}`, - }, postBody); - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, - log, next), - (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, - undefined, log, next), - (resHeaders, next) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectGetACL(authInfo, testGetACLRequest, log, next); - }, - (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .ID[0], '79a59df900b949e55d96a1e698fbacedfd6e09d98' + - 'eacf8f8d5218e7cd47ef2be'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Grantee[0] - .DisplayName[0], 'sampleaccount1@sampling.com'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[0].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0] - .ID[0], '79a59df900b949e55d96a1e698fbacedfd6e09d98' + - 'eacf8f8d5218e7cd47ef2bf'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Grantee[0] - .DisplayName[0], 'sampleaccount2@sampling.com'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[1].Permission[0], - 'FULL_CONTROL'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2].Grantee[0] - .ID[0], '79a59df900b949e55d96a1e698fbacedfd6e09d98' + - 'eacf8f8d5218e7cd47ef2bf'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2].Grantee[0] - .DisplayName[0], 'sampleaccount2@sampling.com'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[2].Permission[0], - 'WRITE_ACP'); - assert.strictEqual(result.AccessControlPolicy. 
- AccessControlList[0].Grant[3].Grantee[0] - .ID[0], '79a59df900b949e55d96a1e698fbacedfd6e09d98' + - 'eacf8f8d5218e7cd47ef2be'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[3].Grantee[0] - .DisplayName[0], 'sampleaccount1@sampling.com'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[3].Permission[0], - 'READ_ACP'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[4].Grantee[0] - .URI[0], constants.allAuthedUsersId); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[4].Permission[0], - 'READ'); - assert.strictEqual(result.AccessControlPolicy. - AccessControlList[0].Grant[5], - undefined); - done(); - }); - }); - - const grantsByURI = [ - constants.publicId, - constants.allAuthedUsersId, - ]; - - grantsByURI.forEach(uri => { - it('should get all ACLs when predefined group - ' + - `${uri} is used for multiple grants`, done => { - const testPutObjectRequest = new DummyRequest({ + const testPutObjectRequest = new DummyRequest( + { bucketName, namespace, objectKey: objectName, headers: { - 'x-amz-grant-full-control': `uri=${uri}`, - 'x-amz-grant-read': `uri=${uri}`, - 'x-amz-grant-read-acp': `uri=${uri}`, - 'x-amz-grant-write-acp': `uri=${uri}`, + 'x-amz-grant-full-control': + 'emailaddress="sampleaccount1@sampling.com"' + ',emailaddress="sampleaccount2@sampling.com"', + 'x-amz-grant-read': `uri=${constants.allAuthedUsersId}`, + 'x-amz-grant-write': `uri=${constants.publicId}`, + 'x-amz-grant-read-acp': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + 'f8f8d5218e7cd47ef2be', + 'x-amz-grant-write-acp': 'id=79a59df900b949e55d96a1e698fbacedfd6e09d98eac' + 'f8f8d5218e7cd47ef2bf', }, url: `/${bucketName}/${objectName}`, - }, postBody); - async.waterfall([ - next => bucketPut(authInfo, testBucketPutRequest, - log, next), - (corsHeaders, next) => objectPut(authInfo, - testPutObjectRequest, undefined, log, next), + }, + postBody + ); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, undefined, log, next), (resHeaders, next) => { assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); objectGetACL(authInfo, testGetACLRequest, log, next); }, (result, corsHeaders, next) => parseString(result, next), - ], (err, result) => { - assert.ifError(err); - const grants = - result.AccessControlPolicy.AccessControlList[0].Grant; - grants.forEach(grant => { - assert.strictEqual(grant.Permission.length, 1); - assert.strictEqual(grant.Grantee.length, 1); - assert.strictEqual(grant.Grantee[0].URI.length, 1); - assert.strictEqual(grant.Grantee[0].URI[0], `${uri}`); - }); + ], + (err, result) => { + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].ID[0], + '79a59df900b949e55d96a1e698fbacedfd6e09d98' + 'eacf8f8d5218e7cd47ef2be' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Grantee[0].DisplayName[0], + 'sampleaccount1@sampling.com' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[0].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].ID[0], + '79a59df900b949e55d96a1e698fbacedfd6e09d98' + 'eacf8f8d5218e7cd47ef2bf' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[1].Grantee[0].DisplayName[0], + 'sampleaccount2@sampling.com' + ); + assert.strictEqual( + 
result.AccessControlPolicy.AccessControlList[0].Grant[1].Permission[0], + 'FULL_CONTROL' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[2].Grantee[0].ID[0], + '79a59df900b949e55d96a1e698fbacedfd6e09d98' + 'eacf8f8d5218e7cd47ef2bf' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[2].Grantee[0].DisplayName[0], + 'sampleaccount2@sampling.com' + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[2].Permission[0], 'WRITE_ACP'); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[3].Grantee[0].ID[0], + '79a59df900b949e55d96a1e698fbacedfd6e09d98' + 'eacf8f8d5218e7cd47ef2be' + ); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[3].Grantee[0].DisplayName[0], + 'sampleaccount1@sampling.com' + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[3].Permission[0], 'READ_ACP'); + assert.strictEqual( + result.AccessControlPolicy.AccessControlList[0].Grant[4].Grantee[0].URI[0], + constants.allAuthedUsersId + ); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[4].Permission[0], 'READ'); + assert.strictEqual(result.AccessControlPolicy.AccessControlList[0].Grant[5], undefined); done(); - }); + } + ); + }); + + const grantsByURI = [constants.publicId, constants.allAuthedUsersId]; + + grantsByURI.forEach(uri => { + it('should get all ACLs when predefined group - ' + `${uri} is used for multiple grants`, done => { + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-grant-full-control': `uri=${uri}`, + 'x-amz-grant-read': `uri=${uri}`, + 'x-amz-grant-read-acp': `uri=${uri}`, + 'x-amz-grant-write-acp': `uri=${uri}`, + }, + url: `/${bucketName}/${objectName}`, + }, + postBody + ); + async.waterfall( + [ + next => bucketPut(authInfo, testBucketPutRequest, log, next), + (corsHeaders, next) => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + (resHeaders, next) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectGetACL(authInfo, testGetACLRequest, log, next); + }, + (result, corsHeaders, next) => parseString(result, next), + ], + (err, result) => { + assert.ifError(err); + const grants = result.AccessControlPolicy.AccessControlList[0].Grant; + grants.forEach(grant => { + assert.strictEqual(grant.Permission.length, 1); + assert.strictEqual(grant.Grantee.length, 1); + assert.strictEqual(grant.Grantee[0].URI.length, 1); + assert.strictEqual(grant.Grantee[0].URI[0], `${uri}`); + }); + done(); + } + ); }); }); }); diff --git a/tests/unit/api/objectGetLegalHold.js b/tests/unit/api/objectGetLegalHold.js index 910cf12c74..939c4bd3ba 100644 --- a/tests/unit/api/objectGetLegalHold.js +++ b/tests/unit/api/objectGetLegalHold.js @@ -21,17 +21,19 @@ const bucketPutRequest = { actionImplicitDenies: false, }; -const putObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, -}, postBody); +const putObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }, + postBody +); const objectLegalHoldXml = status => - '' + - `${status}`; + '' + `${status}`; const putObjectLegalHoldRequest = status => ({ bucketName, @@ -62,17 +64,17 @@ describe('getObjectLegalHold API', () => { afterEach(cleanup); it('should return InvalidRequest error', done => { - 
objectGetLegalHold(authInfo, getObjectLegalHoldRequest, log, - err => { - assert.strictEqual(err.is.InvalidRequest, true); - done(); - }); + objectGetLegalHold(authInfo, getObjectLegalHoldRequest, log, err => { + assert.strictEqual(err.is.InvalidRequest, true); + done(); + }); }); }); describe('with Object Lock enabled on bucket', () => { - const bucketObjectLockRequest = Object.assign({}, bucketPutRequest, - { headers: { 'x-amz-bucket-object-lock-enabled': 'true' } }); + const bucketObjectLockRequest = Object.assign({}, bucketPutRequest, { + headers: { 'x-amz-bucket-object-lock-enabled': 'true' }, + }); beforeEach(done => { bucketPut(authInfo, bucketObjectLockRequest, log, err => { @@ -83,42 +85,38 @@ describe('getObjectLegalHold API', () => { afterEach(cleanup); - it('should return NoSuchObjectLockConfiguration if no legal hold set', - done => { - objectGetLegalHold(authInfo, getObjectLegalHoldRequest, log, - err => { - assert.strictEqual(err.is.NoSuchObjectLockConfiguration, true); - done(); - }); + it('should return NoSuchObjectLockConfiguration if no legal hold set', done => { + objectGetLegalHold(authInfo, getObjectLegalHoldRequest, log, err => { + assert.strictEqual(err.is.NoSuchObjectLockConfiguration, true); + done(); }); + }); - it('should get an object\'s legal hold status when OFF', done => { + it("should get an object's legal hold status when OFF", done => { const status = 'OFF'; const request = putObjectLegalHoldRequest(status); objectPutLegalHold(authInfo, request, log, err => { assert.ifError(err); - objectGetLegalHold(authInfo, getObjectLegalHoldRequest, log, - (err, xml) => { - const expectedXml = objectLegalHoldXml(status); - assert.ifError(err); - assert.strictEqual(xml, expectedXml); - done(); - }); + objectGetLegalHold(authInfo, getObjectLegalHoldRequest, log, (err, xml) => { + const expectedXml = objectLegalHoldXml(status); + assert.ifError(err); + assert.strictEqual(xml, expectedXml); + done(); + }); }); }); - it('should get an object\'s legal hold status when ON', done => { + it("should get an object's legal hold status when ON", done => { const status = 'ON'; const request = putObjectLegalHoldRequest(status); objectPutLegalHold(authInfo, request, log, err => { assert.ifError(err); - objectGetLegalHold(authInfo, getObjectLegalHoldRequest, log, - (err, xml) => { - const expectedXml = objectLegalHoldXml(status); - assert.ifError(err); - assert.strictEqual(xml, expectedXml); - done(); - }); + objectGetLegalHold(authInfo, getObjectLegalHoldRequest, log, (err, xml) => { + const expectedXml = objectLegalHoldXml(status); + assert.ifError(err); + assert.strictEqual(xml, expectedXml); + done(); + }); }); }); }); diff --git a/tests/unit/api/objectGetRetention.js b/tests/unit/api/objectGetRetention.js index cd1481f98f..21e35a1f84 100644 --- a/tests/unit/api/objectGetRetention.js +++ b/tests/unit/api/objectGetRetention.js @@ -24,15 +24,19 @@ const bucketPutRequest = { actionImplicitDenies: false, }; -const putObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, -}, postBody); +const putObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }, + postBody +); -const objectRetentionXml = '' + 'GOVERNANCE' + `${date.toISOString()}` + @@ -74,8 +78,9 @@ describe('getObjectRetention API', () => { }); describe('with Object Lock enabled on bucket', () => { - const bucketObjLockRequest = Object.assign({}, 
bucketPutRequest, - { headers: { 'x-amz-bucket-object-lock-enabled': 'true' } }); + const bucketObjLockRequest = Object.assign({}, bucketPutRequest, { + headers: { 'x-amz-bucket-object-lock-enabled': 'true' }, + }); beforeEach(done => { bucketPut(authInfo, bucketObjLockRequest, log, err => { @@ -85,19 +90,17 @@ describe('getObjectRetention API', () => { }); afterEach(cleanup); - it('should return NoSuchObjectLockConfiguration if no retention set', - done => { + it('should return NoSuchObjectLockConfiguration if no retention set', done => { objectGetRetention(authInfo, getObjRetRequest, log, err => { assert.strictEqual(err.is.NoSuchObjectLockConfiguration, true); done(); }); }); - it('should get an object\'s retention info', done => { + it("should get an object's retention info", done => { objectPutRetention(authInfo, putObjRetRequest, log, err => { assert.ifError(err); - objectGetRetention(authInfo, getObjRetRequest, log, - (err, xml) => { + objectGetRetention(authInfo, getObjRetRequest, log, (err, xml) => { assert.ifError(err); assert.strictEqual(xml, objectRetentionXml); done(); diff --git a/tests/unit/api/objectGetTagging.js b/tests/unit/api/objectGetTagging.js index b099120fb2..a63e1b27d0 100644 --- a/tests/unit/api/objectGetTagging.js +++ b/tests/unit/api/objectGetTagging.js @@ -4,11 +4,7 @@ const { bucketPut } = require('../../../lib/api/bucketPut'); const objectPut = require('../../../lib/api/objectPut'); const objectPutTagging = require('../../../lib/api/objectPutTagging'); const objectGetTagging = require('../../../lib/api/objectGetTagging'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - TaggingConfigTester } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, TaggingConfigTester } = require('../helpers'); const DummyRequest = require('../DummyRequest'); const log = new DummyRequestLogger(); @@ -24,13 +20,16 @@ const testBucketPutRequest = { actionImplicitDenies: false, }; -const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, -}, postBody); +const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }, + postBody +); describe('getObjectTagging API', () => { beforeEach(done => { @@ -39,8 +38,7 @@ describe('getObjectTagging API', () => { if (err) { return done(err); } - return objectPut(authInfo, testPutObjectRequest, undefined, log, - done); + return objectPut(authInfo, testPutObjectRequest, undefined, log, done); }); }); @@ -48,17 +46,14 @@ describe('getObjectTagging API', () => { it('should return tags resource', done => { const taggingUtil = new TaggingConfigTester(); - const testObjectPutTaggingRequest = taggingUtil - .createObjectTaggingRequest('PUT', bucketName, objectName); + const testObjectPutTaggingRequest = taggingUtil.createObjectTaggingRequest('PUT', bucketName, objectName); objectPutTagging(authInfo, testObjectPutTaggingRequest, log, err => { if (err) { process.stdout.write(`Err putting object tagging ${err}`); return done(err); } - const testObjectGetTaggingRequest = taggingUtil - .createObjectTaggingRequest('GET', bucketName, objectName); - return objectGetTagging(authInfo, testObjectGetTaggingRequest, log, - (err, xml) => { + const testObjectGetTaggingRequest = taggingUtil.createObjectTaggingRequest('GET', bucketName, objectName); + return objectGetTagging(authInfo, testObjectGetTaggingRequest, log, (err, xml) => { if (err) { 
process.stdout.write(`Err getting object tagging ${err}`); return done(err); diff --git a/tests/unit/api/objectHead.js b/tests/unit/api/objectHead.js index 511a39037c..06759cbc99 100644 --- a/tests/unit/api/objectHead.js +++ b/tests/unit/api/objectHead.js @@ -36,55 +36,61 @@ let testPutObjectRequest; describe('objectHead API', () => { beforeEach(() => { cleanup(); - testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { 'x-amz-meta-test': userMetadataValue }, - url: `/${bucketName}/${objectName}`, - calculatedHash: correctMD5, - }, postBody); + testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { 'x-amz-meta-test': userMetadataValue }, + url: `/${bucketName}/${objectName}`, + calculatedHash: correctMD5, + }, + postBody + ); }); - it('should return NotModified if request header ' + - 'includes "if-modified-since" and object ' + - 'not modified since specified time', done => { - const testGetRequest = { - bucketName, - namespace, - objectKey: objectName, - headers: { 'if-modified-since': laterDate }, - url: `/${bucketName}/${objectName}`, - actionImplicitDenies: false, - }; + it( + 'should return NotModified if request header ' + + 'includes "if-modified-since" and object ' + + 'not modified since specified time', + done => { + const testGetRequest = { + bucketName, + namespace, + objectKey: objectName, + headers: { 'if-modified-since': laterDate }, + url: `/${bucketName}/${objectName}`, + actionImplicitDenies: false, + }; - bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { + bucketPut(authInfo, testPutBucketRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); objectHead(authInfo, testGetRequest, log, err => { assert.strictEqual(err.is.NotModified, true); done(); }); }); - }); - }); + }); + } + ); - it('should return PreconditionFailed if request header ' + - 'includes "if-unmodified-since" and object has ' + - 'been modified since specified time', done => { - const testGetRequest = { - bucketName, - namespace, - objectKey: objectName, - headers: { 'if-unmodified-since': earlierDate }, - url: `/${bucketName}/${objectName}`, - actionImplicitDenies: false, - }; + it( + 'should return PreconditionFailed if request header ' + + 'includes "if-unmodified-since" and object has ' + + 'been modified since specified time', + done => { + const testGetRequest = { + bucketName, + namespace, + objectKey: objectName, + headers: { 'if-unmodified-since': earlierDate }, + url: `/${bucketName}/${objectName}`, + actionImplicitDenies: false, + }; - bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { + bucketPut(authInfo, testPutBucketRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { assert.ifError(err); assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); objectHead(authInfo, testGetRequest, log, err => { @@ -92,106 +98,118 @@ describe('objectHead API', () => { done(); }); }); - }); - }); + }); + } + ); - it('should return PreconditionFailed if request header ' + - 'includes "if-match" and ETag of object ' + - 'does not match specified ETag', done => { - const testGetRequest = { - bucketName, - namespace, - objectKey: objectName, - headers: { 'if-match': incorrectMD5 
}, - url: `/${bucketName}/${objectName}`, - actionImplicitDenies: false, - }; + it( + 'should return PreconditionFailed if request header ' + + 'includes "if-match" and ETag of object ' + + 'does not match specified ETag', + done => { + const testGetRequest = { + bucketName, + namespace, + objectKey: objectName, + headers: { 'if-match': incorrectMD5 }, + url: `/${bucketName}/${objectName}`, + actionImplicitDenies: false, + }; - bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { + bucketPut(authInfo, testPutBucketRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); objectHead(authInfo, testGetRequest, log, err => { assert.strictEqual(err.is.PreconditionFailed, true); done(); }); }); - }); - }); + }); + } + ); - it('should return NotModified if request header ' + - 'includes "if-none-match" and ETag of object does ' + - 'match specified ETag', done => { - const testGetRequest = { - bucketName, - namespace, - objectKey: objectName, - headers: { 'if-none-match': correctMD5 }, - url: `/${bucketName}/${objectName}`, - actionImplicitDenies: false, - }; + it( + 'should return NotModified if request header ' + + 'includes "if-none-match" and ETag of object does ' + + 'match specified ETag', + done => { + const testGetRequest = { + bucketName, + namespace, + objectKey: objectName, + headers: { 'if-none-match': correctMD5 }, + url: `/${bucketName}/${objectName}`, + actionImplicitDenies: false, + }; - bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { + bucketPut(authInfo, testPutBucketRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); objectHead(authInfo, testGetRequest, log, err => { assert.strictEqual(err.is.NotModified, true); done(); }); }); - }); - }); + }); + } + ); - it('should return Accept-Ranges header if request includes "Range" ' + - 'header with specified range bytes of an object', done => { - const testGetRequest = { - bucketName, - namespace, - objectKey: objectName, - headers: { range: 'bytes=1-9' }, - url: `/${bucketName}/${objectName}`, - actionImplicitDenies: false, - }; + it( + 'should return Accept-Ranges header if request includes "Range" ' + + 'header with specified range bytes of an object', + done => { + const testGetRequest = { + bucketName, + namespace, + objectKey: objectName, + headers: { range: 'bytes=1-9' }, + url: `/${bucketName}/${objectName}`, + actionImplicitDenies: false, + }; - bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, err => { - assert.strictEqual(err, null, `Error copying: ${err}`); - objectHead(authInfo, testGetRequest, log, (err, res) => { - assert.strictEqual(res['accept-ranges'], 'bytes'); - done(); + bucketPut(authInfo, testPutBucketRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.strictEqual(err, null, `Error copying: ${err}`); + objectHead(authInfo, testGetRequest, log, (err, res) => { + assert.strictEqual(res['accept-ranges'], 'bytes'); + done(); + }); }); }); - }); - }); + } + ); - it('should return InvalidRequest error when both the Range header and ' + - 'the partNumber query parameter specified', done => { - const testGetRequest = { - 
bucketName, - namespace, - objectKey: objectName, - headers: { range: 'bytes=1-9' }, - url: `/${bucketName}/${objectName}`, - query: { - partNumber: '1', - }, - actionImplicitDenies: false, - }; + it( + 'should return InvalidRequest error when both the Range header and ' + + 'the partNumber query parameter specified', + done => { + const testGetRequest = { + bucketName, + namespace, + objectKey: objectName, + headers: { range: 'bytes=1-9' }, + url: `/${bucketName}/${objectName}`, + query: { + partNumber: '1', + }, + actionImplicitDenies: false, + }; - bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, err => { - assert.strictEqual(err, null, `Error objectPut: ${err}`); - objectHead(authInfo, testGetRequest, log, err => { - assert.strictEqual(err.is.InvalidRequest, true); - assert.strictEqual(err.description, - 'Cannot specify both Range header and ' + - 'partNumber query parameter.'); - done(); + bucketPut(authInfo, testPutBucketRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.strictEqual(err, null, `Error objectPut: ${err}`); + objectHead(authInfo, testGetRequest, log, err => { + assert.strictEqual(err.is.InvalidRequest, true); + assert.strictEqual( + err.description, + 'Cannot specify both Range header and ' + 'partNumber query parameter.' + ); + done(); + }); }); }); - }); - }); + } + ); it('should return InvalidArgument error if partNumber is nan', done => { const testGetRequest = { @@ -218,27 +236,30 @@ describe('objectHead API', () => { }); }); - it('should not return Accept-Ranges header if request does not include ' + - '"Range" header with specified range bytes of an object', done => { - const testGetRequest = { - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - actionImplicitDenies: false, - }; + it( + 'should not return Accept-Ranges header if request does not include ' + + '"Range" header with specified range bytes of an object', + done => { + const testGetRequest = { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + actionImplicitDenies: false, + }; - bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, err => { - assert.strictEqual(err, null, `Error objectPut: ${err}`); - objectHead(authInfo, testGetRequest, log, (err, res) => { - assert.strictEqual(res['accept-ranges'], undefined); - done(); + bucketPut(authInfo, testPutBucketRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.strictEqual(err, null, `Error objectPut: ${err}`); + objectHead(authInfo, testGetRequest, log, (err, res) => { + assert.strictEqual(res['accept-ranges'], undefined); + done(); + }); }); }); - }); - }); + } + ); it('should get the object metadata', done => { const testGetRequest = { @@ -251,17 +272,14 @@ describe('objectHead API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectHead(authInfo, testGetRequest, log, (err, res) => { - assert.strictEqual(res[userMetadataKey], - userMetadataValue); - assert - .strictEqual(res.ETag, `"${correctMD5}"`); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + 
objectHead(authInfo, testGetRequest, log, (err, res) => { + assert.strictEqual(res[userMetadataKey], userMetadataValue); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + done(); }); + }); }); }); @@ -273,18 +291,21 @@ describe('objectHead API', () => { url: `/${bucketName}`, actionImplicitDenies: false, }; - const testPutObjectRequestLock = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-object-lock-retain-until-date': '2050-10-10', - 'x-amz-object-lock-mode': 'GOVERNANCE', - 'x-amz-object-lock-legal-hold': 'ON', + const testPutObjectRequestLock = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-object-lock-retain-until-date': '2050-10-10', + 'x-amz-object-lock-mode': 'GOVERNANCE', + 'x-amz-object-lock-legal-hold': 'ON', + }, + url: `/${bucketName}/${objectName}`, + calculatedHash: correctMD5, }, - url: `/${bucketName}/${objectName}`, - calculatedHash: correctMD5, - }, postBody); + postBody + ); const testGetRequest = { bucketName, namespace, @@ -295,31 +316,30 @@ describe('objectHead API', () => { }; bucketPut(authInfo, testPutBucketRequestLock, log, () => { - objectPut(authInfo, testPutObjectRequestLock, undefined, log, - (err, resHeaders) => { + objectPut(authInfo, testPutObjectRequestLock, undefined, log, (err, resHeaders) => { + assert.ifError(err); + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectHead(authInfo, testGetRequest, log, (err, res) => { assert.ifError(err); - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectHead(authInfo, testGetRequest, log, (err, res) => { - assert.ifError(err); - const expectedDate = testPutObjectRequestLock - .headers['x-amz-object-lock-retain-until-date']; - const expectedMode = testPutObjectRequestLock - .headers['x-amz-object-lock-mode']; - assert.ifError(err); - assert.strictEqual( - res['x-amz-object-lock-retain-until-date'], - expectedDate); - assert.strictEqual(res['x-amz-object-lock-mode'], - expectedMode); - assert.strictEqual(res['x-amz-object-lock-legal-hold'], - 'ON'); - changeObjectLock([{ - bucket: bucketName, - key: objectName, - versionId: res['x-amz-version-id'], - }], '', done); - }); + const expectedDate = testPutObjectRequestLock.headers['x-amz-object-lock-retain-until-date']; + const expectedMode = testPutObjectRequestLock.headers['x-amz-object-lock-mode']; + assert.ifError(err); + assert.strictEqual(res['x-amz-object-lock-retain-until-date'], expectedDate); + assert.strictEqual(res['x-amz-object-lock-mode'], expectedMode); + assert.strictEqual(res['x-amz-object-lock-legal-hold'], 'ON'); + changeObjectLock( + [ + { + bucket: bucketName, + key: objectName, + versionId: res['x-amz-version-id'], + }, + ], + '', + done + ); }); + }); }); }); @@ -393,37 +413,40 @@ describe('objectHead API', () => { }); }); - it('should reflect the restore header with ongoing-request=false and expiry-date set ' + - 'if the object is restored and not yet expired', done => { - const testGetRequest = { - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - }; - mdColdHelper.putBucketMock(bucketName, null, () => { - const objectCustomMDFields = mdColdHelper.getRestoredObjectMD(); - mdColdHelper.putObjectMock(bucketName, objectName, objectCustomMDFields, () => { - objectHead(authInfo, testGetRequest, log, (err, res) => { - const restoreInfo = objectCustomMDFields.getAmzRestore(); - assert.strictEqual(res[userMetadataKey], userMetadataValue); - assert.strictEqual(res.ETag, 
`"${correctMD5}"`); - assert.strictEqual(res['x-amz-storage-class'], mdColdHelper.defaultLocation); - const utcDate = new Date(restoreInfo.getExpiryDate()).toUTCString(); - assert.strictEqual(res['x-amz-restore'], `ongoing-request="false", expiry-date="${utcDate}"`); - // Check we do not leak non-standard fields - assert.strictEqual(res['x-amz-scal-transition-in-progress'], undefined); - assert.strictEqual(res['x-amz-scal-archive-info'], undefined); - assert.strictEqual(res['x-amz-scal-restore-requested-at'], undefined); - assert.strictEqual(res['x-amz-scal-restore-completed-at'], undefined); - assert.strictEqual(res['x-amz-scal-restore-will-expire-at'], undefined); - assert.strictEqual(res['x-amz-scal-owner-id'], undefined); - done(); + it( + 'should reflect the restore header with ongoing-request=false and expiry-date set ' + + 'if the object is restored and not yet expired', + done => { + const testGetRequest = { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }; + mdColdHelper.putBucketMock(bucketName, null, () => { + const objectCustomMDFields = mdColdHelper.getRestoredObjectMD(); + mdColdHelper.putObjectMock(bucketName, objectName, objectCustomMDFields, () => { + objectHead(authInfo, testGetRequest, log, (err, res) => { + const restoreInfo = objectCustomMDFields.getAmzRestore(); + assert.strictEqual(res[userMetadataKey], userMetadataValue); + assert.strictEqual(res.ETag, `"${correctMD5}"`); + assert.strictEqual(res['x-amz-storage-class'], mdColdHelper.defaultLocation); + const utcDate = new Date(restoreInfo.getExpiryDate()).toUTCString(); + assert.strictEqual(res['x-amz-restore'], `ongoing-request="false", expiry-date="${utcDate}"`); + // Check we do not leak non-standard fields + assert.strictEqual(res['x-amz-scal-transition-in-progress'], undefined); + assert.strictEqual(res['x-amz-scal-archive-info'], undefined); + assert.strictEqual(res['x-amz-scal-restore-requested-at'], undefined); + assert.strictEqual(res['x-amz-scal-restore-completed-at'], undefined); + assert.strictEqual(res['x-amz-scal-restore-will-expire-at'], undefined); + assert.strictEqual(res['x-amz-scal-owner-id'], undefined); + done(); + }); }); }); - }); - }); + } + ); it('should report when transition in progress', done => { const testGetRequest = { @@ -465,8 +488,10 @@ describe('objectHead API', () => { objectHead(authInfo, testGetRequest, log, (err, res) => { assert.strictEqual(res['x-amz-meta-scal-s3-transition-in-progress'], true); assert.strictEqual(res['x-amz-scal-transition-in-progress'], true); - assert.strictEqual(res['x-amz-scal-transition-time'], - new Date(objectCustomMDFields.getTransitionTime()).toUTCString()); + assert.strictEqual( + res['x-amz-scal-transition-time'], + new Date(objectCustomMDFields.getTransitionTime()).toUTCString() + ); assert.strictEqual(res['x-amz-scal-archive-info'], undefined); assert.strictEqual(res['x-amz-scal-owner-id'], mdColdHelper.defaultOwnerId); done(err); @@ -518,10 +543,11 @@ describe('objectHead API', () => { assert.strictEqual(res['x-amz-meta-scal-s3-transition-in-progress'], undefined); assert.strictEqual(res['x-amz-scal-transition-in-progress'], undefined); assert.strictEqual(res['x-amz-scal-archive-info'], '{"foo":0,"bar":"stuff"}'); - assert.strictEqual(res['x-amz-scal-restore-requested-at'], - new Date(archive.restoreRequestedAt).toUTCString()); - assert.strictEqual(res['x-amz-scal-restore-requested-days'], - archive.restoreRequestedDays); + assert.strictEqual( + res['x-amz-scal-restore-requested-at'], 
+ new Date(archive.restoreRequestedAt).toUTCString() + ); + assert.strictEqual(res['x-amz-scal-restore-requested-days'], archive.restoreRequestedDays); assert.strictEqual(res['x-amz-storage-class'], mdColdHelper.defaultLocation); assert.strictEqual(res['x-amz-scal-owner-id'], mdColdHelper.defaultOwnerId); done(err); @@ -548,14 +574,19 @@ describe('objectHead API', () => { assert.strictEqual(res['x-amz-meta-scal-s3-transition-in-progress'], undefined); assert.strictEqual(res['x-amz-scal-transition-in-progress'], undefined); assert.strictEqual(res['x-amz-scal-archive-info'], '{"foo":0,"bar":"stuff"}'); - assert.strictEqual(res['x-amz-scal-restore-requested-at'], - new Date(archive.restoreRequestedAt).toUTCString()); - assert.strictEqual(res['x-amz-scal-restore-requested-days'], - archive.restoreRequestedDays); - assert.strictEqual(res['x-amz-scal-restore-completed-at'], - new Date(archive.restoreCompletedAt).toUTCString()); - assert.strictEqual(res['x-amz-scal-restore-will-expire-at'], - new Date(archive.restoreWillExpireAt).toUTCString()); + assert.strictEqual( + res['x-amz-scal-restore-requested-at'], + new Date(archive.restoreRequestedAt).toUTCString() + ); + assert.strictEqual(res['x-amz-scal-restore-requested-days'], archive.restoreRequestedDays); + assert.strictEqual( + res['x-amz-scal-restore-completed-at'], + new Date(archive.restoreCompletedAt).toUTCString() + ); + assert.strictEqual( + res['x-amz-scal-restore-will-expire-at'], + new Date(archive.restoreWillExpireAt).toUTCString() + ); assert.strictEqual(res['x-amz-scal-restore-etag'], mdColdHelper.restoredEtag); assert.strictEqual(res['x-amz-storage-class'], mdColdHelper.defaultLocation); assert.strictEqual(res['x-amz-scal-owner-id'], mdColdHelper.defaultOwnerId); @@ -570,30 +601,33 @@ describe('objectHead API', () => { name: 'should return content-length of 0 when requesting part 1 of empty object', partNumber: '1', expectedError: null, - expectedContentLength: 0 + expectedContentLength: 0, }, { name: 'should return InvalidRange error when requesting part > 1 of empty object', partNumber: '2', expectedError: 'InvalidRange', - expectedContentLength: undefined - } + expectedContentLength: undefined, + }, ].forEach(testCase => { it(testCase.name, done => { const emptyBody = ''; const emptyMD5 = 'd41d8cd98f00b204e9800998ecf8427e'; - const testPutEmptyObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'content-length': '0', - 'x-amz-meta-test': userMetadataValue, + const testPutEmptyObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'content-length': '0', + 'x-amz-meta-test': userMetadataValue, + }, + parsedContentLength: 0, + url: `/${bucketName}/${objectName}`, + calculatedHash: emptyMD5, }, - parsedContentLength: 0, - url: `/${bucketName}/${objectName}`, - calculatedHash: emptyMD5, - }, emptyBody); + emptyBody + ); const testGetRequest = { bucketName, diff --git a/tests/unit/api/objectPut.js b/tests/unit/api/objectPut.js index f4f1b21715..fdf0f942c2 100644 --- a/tests/unit/api/objectPut.js +++ b/tests/unit/api/objectPut.js @@ -10,18 +10,13 @@ const bucketPutACL = require('../../../lib/api/bucketPutACL'); const bucketPutVersioning = require('../../../lib/api/bucketPutVersioning'); const bucketPutPolicy = require('../../../lib/api/bucketPutPolicy'); const { parseTagFromQuery } = s3middleware.tagging; -const { cleanup, DummyRequestLogger, makeAuthInfo, versioningTestUtils } - = require('../helpers'); +const { cleanup, 
DummyRequestLogger, makeAuthInfo, versioningTestUtils } = require('../helpers'); const metadata = require('../metadataswitch'); const { data } = require('../../../lib/data/wrapper'); const objectPut = require('../../../lib/api/objectPut'); const { objectLockTestUtils } = require('../helpers'); const DummyRequest = require('../DummyRequest'); -const { - lastModifiedHeader, - maximumAllowedUploadSize, - objectLocationConstraintHeader, -} = require('../../../constants'); +const { lastModifiedHeader, maximumAllowedUploadSize, objectLocationConstraintHeader } = require('../../../constants'); const mpuUtils = require('../utils/mpuUtils'); const { fakeMetadataArchive } = require('../../functional/aws-node-sdk/test/utils/init'); @@ -47,7 +42,7 @@ const testPutBucketRequestLock = new DummyRequest({ bucketName, namespace, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-bucket-object-lock-enabled': 'true', }, url: '/', @@ -57,21 +52,18 @@ const originalputObjectMD = metadata.putObjectMD; const objectName = 'objectName'; let testPutObjectRequest; -const enableVersioningRequest = - versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Enabled'); -const suspendVersioningRequest = - versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Suspended'); +const enableVersioningRequest = versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Enabled'); +const suspendVersioningRequest = versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Suspended'); function testAuth(bucketOwner, authUser, bucketPutReq, log, cb) { bucketPut(bucketOwner, bucketPutReq, log, () => { bucketPutACL(bucketOwner, testPutBucketRequest, log, err => { assert.strictEqual(err, undefined); - objectPut(authUser, testPutObjectRequest, undefined, - log, (err, resHeaders) => { - assert.strictEqual(err, null); - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - cb(); - }); + objectPut(authUser, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(err, null); + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + cb(); + }); }); }); } @@ -82,8 +74,7 @@ describe('parseTagFromQuery', () => { const allowedChar = '+- =._:/'; const tests = [ { tagging: 'key1=value1', result: { key1: 'value1' } }, - { tagging: `key1=${encodeURIComponent(allowedChar)}`, - result: { key1: allowedChar } }, + { tagging: `key1=${encodeURIComponent(allowedChar)}`, result: { key1: allowedChar } }, { tagging: 'key1=value1=value2', error: invalidArgument }, { tagging: '=value1', error: invalidArgument }, { tagging: 'key1%=value1', error: invalidArgument }, @@ -114,13 +105,16 @@ describe('objectPut API', () => { beforeEach(() => { cleanup(); sinon.spy(metadata, 'putObjectMD'); - testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: '/', - }, postBody); + testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: '/', + }, + postBody + ); }); afterEach(() => { @@ -137,40 +131,38 @@ describe('objectPut API', () => { it('should return an error if user is not authorized', done => { const putAuthInfo = makeAuthInfo('accessKey2'); - bucketPut(putAuthInfo, testPutBucketRequest, - log, () => { - objectPut(authInfo, testPutObjectRequest, - undefined, log, err => { - assert.strictEqual(err.is.AccessDenied, true); - done(); - }); - }); - }); - - it('should return 
error if the upload size exceeds the ' + - 'maximum allowed upload size for a single PUT request', done => { - testPutObjectRequest.parsedContentLength = maximumAllowedUploadSize + 1; - bucketPut(authInfo, testPutBucketRequest, log, () => { + bucketPut(putAuthInfo, testPutBucketRequest, log, () => { objectPut(authInfo, testPutObjectRequest, undefined, log, err => { - assert.strictEqual(err.is.EntityTooLarge, true); + assert.strictEqual(err.is.AccessDenied, true); done(); }); }); }); + it( + 'should return error if the upload size exceeds the ' + 'maximum allowed upload size for a single PUT request', + done => { + testPutObjectRequest.parsedContentLength = maximumAllowedUploadSize + 1; + bucketPut(authInfo, testPutBucketRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.strictEqual(err.is.EntityTooLarge, true); + done(); + }); + }); + } + ); + it('should put object if user has FULL_CONTROL grant on bucket', done => { const bucketOwner = makeAuthInfo('accessKey2'); const authUser = makeAuthInfo('accessKey3'); - testPutBucketRequest.headers['x-amz-grant-full-control'] = - `id=${authUser.getCanonicalID()}`; + testPutBucketRequest.headers['x-amz-grant-full-control'] = `id=${authUser.getCanonicalID()}`; testAuth(bucketOwner, authUser, testPutBucketRequest, log, done); }); it('should put object if user has WRITE grant on bucket', done => { const bucketOwner = makeAuthInfo('accessKey2'); const authUser = makeAuthInfo('accessKey3'); - testPutBucketRequest.headers['x-amz-grant-write'] = - `id=${authUser.getCanonicalID()}`; + testPutBucketRequest.headers['x-amz-grant-write'] = `id=${authUser.getCanonicalID()}`; testAuth(bucketOwner, authUser, testPutBucketRequest, log, done); }); @@ -184,60 +176,61 @@ describe('objectPut API', () => { }); it('should successfully put an object', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', + }, + postBody + ); bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - metadata.getObjectMD(bucketName, objectName, - {}, log, (err, md) => { - assert(md); - assert - .strictEqual(md['content-md5'], correctMD5); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert(md); + assert.strictEqual(md['content-md5'], correctMD5); + done(); }); + }); }); }); const mockModes = ['GOVERNANCE', 'COMPLIANCE']; mockModes.forEach(mockMode => { it(`should put an object with valid date & ${mockMode} mode`, done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-object-lock-retain-until-date': mockDate, - 'x-amz-object-lock-mode': mockMode, + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-object-lock-retain-until-date': mockDate, + 'x-amz-object-lock-mode': mockMode, + }, + url: 
`/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', }, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + postBody + ); bucketPut(authInfo, testPutBucketRequestLock, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, headers) => { + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, headers) => { + assert.ifError(err); + assert.strictEqual(headers.ETag, `"${correctMD5}"`); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + const mode = md.retentionMode; + const retainUntilDate = md.retentionDate; assert.ifError(err); - assert.strictEqual(headers.ETag, `"${correctMD5}"`); - metadata.getObjectMD(bucketName, objectName, {}, log, - (err, md) => { - const mode = md.retentionMode; - const retainUntilDate = md.retentionDate; - assert.ifError(err); - assert(md); - assert.strictEqual(mode, mockMode); - assert.strictEqual(retainUntilDate, mockDate); - done(); - }); + assert(md); + assert.strictEqual(mode, mockMode); + assert.strictEqual(retainUntilDate, mockDate); + done(); }); + }); }); }); }); @@ -258,313 +251,321 @@ describe('objectPut API', () => { ]; testObjectLockConfigs.forEach(config => { const { testMode, type, val } = config; - it('should put an object with default retention if object does not ' + - 'have retention configuration but bucket has', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + it( + 'should put an object with default retention if object does not ' + + 'have retention configuration but bucket has', + done => { + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', + }, + postBody + ); - const testObjLockRequest = { - bucketName, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - post: objectLockTestUtils.generateXml(testMode, val, type), - }; + const testObjLockRequest = { + bucketName, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + post: objectLockTestUtils.generateXml(testMode, val, type), + }; - bucketPut(authInfo, testPutBucketRequestLock, log, () => { - bucketPutObjectLock(authInfo, testObjLockRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, headers) => { + bucketPut(authInfo, testPutBucketRequestLock, log, () => { + bucketPutObjectLock(authInfo, testObjLockRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, headers) => { assert.ifError(err); assert.strictEqual(headers.ETag, `"${correctMD5}"`); - metadata.getObjectMD(bucketName, objectName, {}, - log, (err, md) => { - const mode = md.retentionMode; - const retainDate = md.retentionDate; - const date = moment(); - const days - = type === 'Days' ? val : val * 365; - const expectedDate - = date.add(days, 'days'); - assert.ifError(err); - assert.strictEqual(mode, testMode); - assert.strictEqual(formatTime(retainDate), - formatTime(expectedDate.toISOString())); - done(); - }); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + const mode = md.retentionMode; + const retainDate = md.retentionDate; + const date = moment(); + const days = type === 'Days' ? 
val : val * 365; + const expectedDate = date.add(days, 'days'); + assert.ifError(err); + assert.strictEqual(mode, testMode); + assert.strictEqual(formatTime(retainDate), formatTime(expectedDate.toISOString())); + done(); + }); }); + }); }); - }); - }); + } + ); }); - it('should successfully put an object with legal hold ON', done => { - const request = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-object-lock-legal-hold': 'ON', + const request = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-object-lock-legal-hold': 'ON', + }, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', }, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + postBody + ); bucketPut(authInfo, testPutBucketRequestLock, log, () => { objectPut(authInfo, request, undefined, log, (err, headers) => { assert.ifError(err); assert.strictEqual(headers.ETag, `"${correctMD5}"`); - metadata.getObjectMD(bucketName, objectName, {}, log, - (err, md) => { - assert.ifError(err); - assert.strictEqual(md.legalHold, true); - done(); - }); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert.ifError(err); + assert.strictEqual(md.legalHold, true); + done(); + }); }); }); }); it('should successfully put an object with legal hold OFF', done => { - const request = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-object-lock-legal-hold': 'OFF', + const request = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-object-lock-legal-hold': 'OFF', + }, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', }, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + postBody + ); bucketPut(authInfo, testPutBucketRequestLock, log, () => { objectPut(authInfo, request, undefined, log, (err, headers) => { assert.ifError(err); assert.strictEqual(headers.ETag, `"${correctMD5}"`); - metadata.getObjectMD(bucketName, objectName, {}, log, - (err, md) => { - assert.ifError(err); - assert(md); - assert.strictEqual(md.legalHold, false); - done(); - }); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert.ifError(err); + assert(md); + assert.strictEqual(md.legalHold, false); + done(); + }); }); }); }); it('should successfully put an object with user metadata', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - // Note that Node will collapse common headers into one - // (e.g. "x-amz-meta-test: hi" and "x-amz-meta-test: - // there" becomes "x-amz-meta-test: hi, there") - // Here we are not going through an actual http - // request so will not collapse properly. - 'x-amz-meta-test': 'some metadata', - 'x-amz-meta-test2': 'some more metadata', - 'x-amz-meta-test3': 'even more metadata', + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + // Note that Node will collapse common headers into one + // (e.g. "x-amz-meta-test: hi" and "x-amz-meta-test: + // there" becomes "x-amz-meta-test: hi, there") + // Here we are not going through an actual http + // request so will not collapse properly. 
+ 'x-amz-meta-test': 'some metadata', + 'x-amz-meta-test2': 'some more metadata', + 'x-amz-meta-test3': 'even more metadata', + }, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', }, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + postBody + ); bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - metadata.getObjectMD(bucketName, objectName, {}, log, - (err, md) => { - assert(md); - assert.strictEqual(md['x-amz-meta-test'], - 'some metadata'); - assert.strictEqual(md['x-amz-meta-test2'], - 'some more metadata'); - assert.strictEqual(md['x-amz-meta-test3'], - 'even more metadata'); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert(md); + assert.strictEqual(md['x-amz-meta-test'], 'some metadata'); + assert.strictEqual(md['x-amz-meta-test2'], 'some more metadata'); + assert.strictEqual(md['x-amz-meta-test3'], 'even more metadata'); + done(); }); + }); }); }); it('If testingMode=true and the last-modified header is given, should set last-modified accordingly', done => { const imposedLastModified = '2024-07-19'; - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - [lastModifiedHeader]: imposedLastModified, + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + [lastModifiedHeader]: imposedLastModified, + }, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', }, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + postBody + ); bucketPut(authInfo, testPutBucketRequest, log, () => { const config = require('../../../lib/Config'); config.config.testingMode = true; - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - metadata.getObjectMD(bucketName, objectName, {}, log, - (err, md) => { - assert(md); - - const lastModified = md['last-modified']; - const lastModifiedDate = lastModified.split('T')[0]; - // last-modified date should be the one set by the last-modified header - assert.strictEqual(lastModifiedDate, imposedLastModified); - - // The header should be removed after being treated. - assert(md[lastModifiedHeader] === undefined); - - config.config.testingMode = false; - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert(md); + + const lastModified = md['last-modified']; + const lastModifiedDate = lastModified.split('T')[0]; + // last-modified date should be the one set by the last-modified header + assert.strictEqual(lastModifiedDate, imposedLastModified); + + // The header should be removed after being treated. 
+ assert(md[lastModifiedHeader] === undefined); + + config.config.testingMode = false; + done(); }); + }); }); }); it('should not take into acccount the last-modified header when testingMode=false', done => { const imposedLastModified = '2024-07-19'; - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-meta-x-scal-last-modified': imposedLastModified, + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-meta-x-scal-last-modified': imposedLastModified, + }, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', }, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + postBody + ); bucketPut(authInfo, testPutBucketRequest, log, () => { const config = require('../../../lib/Config'); config.config.testingMode = false; - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - metadata.getObjectMD(bucketName, objectName, {}, log, - (err, md) => { - assert(md); - assert.strictEqual(md['x-amz-meta-x-scal-last-modified'], - imposedLastModified); - const lastModified = md['last-modified']; - const lastModifiedDate = lastModified.split('T')[0]; - const currentTs = new Date().toJSON(); - const currentDate = currentTs.split('T')[0]; - assert.strictEqual(lastModifiedDate, currentDate); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert(md); + assert.strictEqual(md['x-amz-meta-x-scal-last-modified'], imposedLastModified); + const lastModified = md['last-modified']; + const lastModifiedDate = lastModified.split('T')[0]; + const currentTs = new Date().toJSON(); + const currentDate = currentTs.split('T')[0]; + assert.strictEqual(lastModifiedDate, currentDate); + done(); }); + }); }); }); it('should put an object with user metadata but no data', done => { const postBody = ''; const correctMD5 = 'd41d8cd98f00b204e9800998ecf8427e'; - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'content-length': '0', - 'x-amz-meta-test': 'some metadata', - 'x-amz-meta-test2': 'some more metadata', - 'x-amz-meta-test3': 'even more metadata', + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'content-length': '0', + 'x-amz-meta-test': 'some metadata', + 'x-amz-meta-test2': 'some more metadata', + 'x-amz-meta-test3': 'even more metadata', + }, + parsedContentLength: 0, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'd41d8cd98f00b204e9800998ecf8427e', }, - parsedContentLength: 0, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'd41d8cd98f00b204e9800998ecf8427e', - }, postBody); + postBody + ); bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - assert.deepStrictEqual(ds, []); - metadata.getObjectMD(bucketName, objectName, {}, log, - (err, md) => { - assert(md); - assert.strictEqual(md.location, null); - assert.strictEqual(md['x-amz-meta-test'], - 'some metadata'); - assert.strictEqual(md['x-amz-meta-test2'], - 'some more metadata'); - 
assert.strictEqual(md['x-amz-meta-test3'], - 'even more metadata'); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + assert.deepStrictEqual(ds, []); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert(md); + assert.strictEqual(md.location, null); + assert.strictEqual(md['x-amz-meta-test'], 'some metadata'); + assert.strictEqual(md['x-amz-meta-test2'], 'some more metadata'); + assert.strictEqual(md['x-amz-meta-test3'], 'even more metadata'); + done(); }); + }); }); }); it('should not leave orphans in data when overwriting an object', done => { - const testPutObjectRequest2 = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - }, Buffer.from('I am another body', 'utf8')); + const testPutObjectRequest2 = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }, + Buffer.from('I am another body', 'utf8') + ); bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, - undefined, log, () => { - objectPut(authInfo, testPutObjectRequest2, undefined, - log, - () => { - // orphan objects don't get deleted - // until the next tick - // in memory - setImmediate(() => { - // Data store starts at index 1 - assert.strictEqual(ds[0], undefined); - assert.strictEqual(ds[1], undefined); - assert.deepStrictEqual(ds[2].value, - Buffer.from('I am another body', 'utf8')); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, () => { + objectPut(authInfo, testPutObjectRequest2, undefined, log, () => { + // orphan objects don't get deleted + // until the next tick + // in memory + setImmediate(() => { + // Data store starts at index 1 + assert.strictEqual(ds[0], undefined); + assert.strictEqual(ds[1], undefined); + assert.deepStrictEqual(ds[2].value, Buffer.from('I am another body', 'utf8')); + done(); }); }); + }); }); }); it('should not leave orphans in data when overwriting an multipart upload object', done => { bucketPut(authInfo, testPutBucketRequest, log, () => { - mpuUtils.createMPU(namespace, bucketName, objectName, log, - (err, testUploadId) => { - objectPut(authInfo, testPutObjectRequest, undefined, log, err => { - assert.ifError(err); - sinon.assert.calledWith(metadata.putObjectMD, - any, any, any, sinon.match({ oldReplayId: testUploadId }), any, any); - done(); - }); + mpuUtils.createMPU(namespace, bucketName, objectName, log, (err, testUploadId) => { + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD, + any, + any, + any, + sinon.match({ oldReplayId: testUploadId }), + any, + any + ); + done(); }); + }); }); }); - it('should not put object with retention configuration if object lock ' + - 'is not enabled on the bucket', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-object-lock-retain-until-date': mockDate, - 'x-amz-object-lock-mode': 'GOVERNANCE', + it('should not put object with retention configuration if object lock ' + 'is not enabled on the bucket', done => { + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-object-lock-retain-until-date': mockDate, + 'x-amz-object-lock-mode': 'GOVERNANCE', + }, + 
url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', }, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + postBody + ); bucketPut(authInfo, testPutBucketRequest, log, () => { objectPut(authInfo, testPutObjectRequest, undefined, log, err => { @@ -576,233 +577,313 @@ describe('objectPut API', () => { }); it('should forward a 400 back to client on metadata 408 response', () => { - data.switch(new storage.data.MultipleBackendGateway({ - 'us-east-1': dataClient, - 'us-east-2': dataClient, - }, metadata, data.locStorageCheckFn)); + data.switch( + new storage.data.MultipleBackendGateway( + { + 'us-east-1': dataClient, + 'us-east-2': dataClient, + }, + metadata, + data.locStorageCheckFn + ) + ); data.implName = 'multipleBackends'; const originalPut = data.client.put; - data.client.put = (hashedStream, valueSize, keyContext, backendInfo, log, cb) => - cb({ httpCode: 408 }); + data.client.put = (hashedStream, valueSize, keyContext, backendInfo, log, cb) => cb({ httpCode: 408 }); bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - err => { - assert.strictEqual(err.code, 400); - data.client.put = originalPut; - data.switch(dataClient); - data.implName = prevDataImplName; - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.strictEqual(err.code, 400); + data.client.put = originalPut; + data.switch(dataClient); + data.implName = prevDataImplName; + }); }); }); it('should forward a 503 to the client for 4xx != 408', () => { - data.switch(new storage.data.MultipleBackendGateway({ - 'us-east-1': dataClient, - 'us-east-2': dataClient, - }, metadata, data.locStorageCheckFn)); + data.switch( + new storage.data.MultipleBackendGateway( + { + 'us-east-1': dataClient, + 'us-east-2': dataClient, + }, + metadata, + data.locStorageCheckFn + ) + ); data.implName = 'multipleBackends'; const originalPut = data.client.put; - data.client.put = (hashedStream, valueSize, keyContext, backendInfo, log, cb) => - cb({ httpCode: 412 }); + data.client.put = (hashedStream, valueSize, keyContext, backendInfo, log, cb) => cb({ httpCode: 412 }); bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - err => { - assert.strictEqual(err.code, 503); - data.client.put = originalPut; - data.switch(dataClient); - data.implName = prevDataImplName; - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.strictEqual(err.code, 503); + data.client.put = originalPut; + data.switch(dataClient); + data.implName = prevDataImplName; + }); }); }); it('should not put object with storage-class header not equal to STANDARD', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { - 'x-amz-storage-class': 'COLD', + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-storage-class': 'COLD', + }, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', }, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + postBody + ); bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - err => { - assert.strictEqual(err.is.InvalidStorageClass, true); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, err 
=> { + assert.strictEqual(err.is.InvalidStorageClass, true); + done(); + }); }); }); it('should pass overheadField to metadata.putObjectMD for a non-versioned request', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - contentMD5: correctMD5, - }, postBody); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + contentMD5: correctMD5, + }, + postBody + ); bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - err => { - assert.ifError(err); - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, objectName, any, sinon.match({ overheadField: sinon.match.array }), any, any); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + any, + sinon.match({ overheadField: sinon.match.array }), + any, + any + ); + done(); + }); }); }); it('should pass overheadField to metadata.putObjectMD for a versioned request', done => { - const testPutObjectRequest = versioningTestUtils - .createPutObjectRequest(bucketName, objectName, Buffer.from('I am another body', 'utf8')); + const testPutObjectRequest = versioningTestUtils.createPutObjectRequest( + bucketName, + objectName, + Buffer.from('I am another body', 'utf8') + ); bucketPut(authInfo, testPutBucketRequest, log, () => { bucketPutVersioning(authInfo, enableVersioningRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - err => { - assert.ifError(err); - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, objectName, any, sinon.match({ overheadField: sinon.match.array }), any, any); - done(); - } - ); + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + any, + sinon.match({ overheadField: sinon.match.array }), + any, + any + ); + done(); + }); }); }); }); it('should pass overheadField to metadata.putObjectMD for a version-suspended request', done => { - const testPutObjectRequest = versioningTestUtils - .createPutObjectRequest(bucketName, objectName, Buffer.from('I am another body', 'utf8')); + const testPutObjectRequest = versioningTestUtils.createPutObjectRequest( + bucketName, + objectName, + Buffer.from('I am another body', 'utf8') + ); bucketPut(authInfo, testPutBucketRequest, log, () => { bucketPutVersioning(authInfo, suspendVersioningRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - err => { - assert.ifError(err); - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, objectName, any, sinon.match({ overheadField: sinon.match.array }), any, any); - done(); - } - ); + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + any, + sinon.match({ overheadField: sinon.match.array }), + any, + any + ); + done(); + }); }); }); }); it('should not pass needOplogUpdate when writing new object', done => { - async.series([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - async () => { - 
sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, objectName, any, sinon.match({ - needOplogUpdate: undefined, - originOp: undefined, - }), any, any); - }, - ], done); + async.series( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + any, + sinon.match({ + needOplogUpdate: undefined, + originOp: undefined, + }), + any, + any + ); + }, + ], + done + ); }); it('should not pass needOplogUpdate when replacing object', done => { - async.series([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - async () => { - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, objectName, any, sinon.match({ - needOplogUpdate: undefined, - originOp: undefined, - }), any, any); - }, - ], done); + async.series( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + any, + sinon.match({ + needOplogUpdate: undefined, + originOp: undefined, + }), + any, + any + ); + }, + ], + done + ); }); it('should pass needOplogUpdate to metadata when replacing archived object', done => { const archived = { - archiveInfo: { foo: 0, bar: 'stuff' } + archiveInfo: { foo: 0, bar: 'stuff' }, }; - async.series([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archived, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - async () => { - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, objectName, any, sinon.match({ - needOplogUpdate: true, - originOp: 's3:ReplaceArchivedObject', - }), any, any); - }, - ], done); + async.series( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archived, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + any, + sinon.match({ + needOplogUpdate: true, + originOp: 's3:ReplaceArchivedObject', + }), + any, + any + ); + }, + ], + done + ); }); it('should pass needOplogUpdate to metadata when replacing archived object in version suspended bucket', done => { const archived = { - archiveInfo: { foo: 0, bar: 'stuff' } + archiveInfo: { foo: 0, bar: 'stuff' }, }; - async.series([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - next => bucketPutVersioning(authInfo, suspendVersioningRequest, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archived, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - async () => { - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, objectName, any, sinon.match({ - 
needOplogUpdate: true, - originOp: 's3:ReplaceArchivedObject', - }), any, any); - }, - ], done); + async.series( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + next => bucketPutVersioning(authInfo, suspendVersioningRequest, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archived, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + any, + sinon.match({ + needOplogUpdate: true, + originOp: 's3:ReplaceArchivedObject', + }), + any, + any + ); + }, + ], + done + ); }); it('should not set bucketOwnerId if requester owns the bucket', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', + }, + postBody + ); bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - err => { - assert.ifError(err); - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, - objectName, - sinon.match({ bucketOwnerId: sinon.match.typeOf('undefined') }), - any, - any, - any - ); - done(); - } + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + sinon.match({ bucketOwnerId: sinon.match.typeOf('undefined') }), + any, + any, + any ); + done(); + }); }); }); it('should set bucketOwnerId if requester does not own the bucket', done => { const authInfo2 = makeAuthInfo('accessKey2'); - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', - }, postBody); + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==', + }, + postBody + ); const testPutPolicyRequest = new DummyRequest({ bucketName, @@ -826,20 +907,19 @@ describe('objectPut API', () => { bucketPut(authInfo, testPutBucketRequest, log, () => { bucketPutPolicy(authInfo, testPutPolicyRequest, log, err => { assert.ifError(err); - objectPut(authInfo, testPutObjectRequest, undefined, log, - err => { - assert.ifError(err); - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, - objectName, - sinon.match({ bucketOwnerId: authInfo.canonicalId }), - any, - any, - any - ); - done(); - } - ); + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.ifError(err); + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + sinon.match({ bucketOwnerId: authInfo.canonicalId }), + any, + any, + any + ); + done(); + }); }); }); }); @@ -849,13 +929,16 @@ describe('objectPut API with versioning', () => { beforeEach(() => { cleanup(); sinon.spy(metadata, 'putObjectMD'); - testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: { host: 
`${bucketName}.s3.amazonaws.com` }, - url: '/', - }, postBody); + testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: '/', + }, + postBody + ); }); afterEach(() => { @@ -863,172 +946,199 @@ describe('objectPut API with versioning', () => { metadata.putObjectMD = originalputObjectMD; }); - const objData = ['foo0', 'foo1', 'foo2'].map(str => - Buffer.from(str, 'utf8')); - const testPutObjectRequests = objData.map(data => versioningTestUtils - .createPutObjectRequest(bucketName, objectName, data)); - - it('should delete latest version when creating new null version ' + - 'if latest version is null version', done => { - async.series([ - callback => bucketPut(authInfo, testPutBucketRequest, log, - callback), - // putting null version by putting obj before versioning configured - callback => objectPut(authInfo, testPutObjectRequests[0], undefined, - log, err => { - versioningTestUtils.assertDataStoreValues(ds, [objData[0]]); - callback(err); - }), - callback => bucketPutVersioning(authInfo, suspendVersioningRequest, - log, callback), - // creating new null version by putting obj after ver suspended - callback => objectPut(authInfo, testPutObjectRequests[1], - undefined, log, err => { - // wait until next tick since mem backend executes - // deletes in the next tick - setImmediate(() => { - // old null version should be deleted - versioningTestUtils.assertDataStoreValues(ds, - [undefined, objData[1]]); - callback(err); - }); - }), - // create another null version - callback => objectPut(authInfo, testPutObjectRequests[2], - undefined, log, err => { - setImmediate(() => { - // old null version should be deleted - versioningTestUtils.assertDataStoreValues(ds, - [undefined, undefined, objData[2]]); + const objData = ['foo0', 'foo1', 'foo2'].map(str => Buffer.from(str, 'utf8')); + const testPutObjectRequests = objData.map(data => + versioningTestUtils.createPutObjectRequest(bucketName, objectName, data) + ); + + it('should delete latest version when creating new null version ' + 'if latest version is null version', done => { + async.series( + [ + callback => bucketPut(authInfo, testPutBucketRequest, log, callback), + // putting null version by putting obj before versioning configured + callback => + objectPut(authInfo, testPutObjectRequests[0], undefined, log, err => { + versioningTestUtils.assertDataStoreValues(ds, [objData[0]]); callback(err); - }); - }), - ], done); + }), + callback => bucketPutVersioning(authInfo, suspendVersioningRequest, log, callback), + // creating new null version by putting obj after ver suspended + callback => + objectPut(authInfo, testPutObjectRequests[1], undefined, log, err => { + // wait until next tick since mem backend executes + // deletes in the next tick + setImmediate(() => { + // old null version should be deleted + versioningTestUtils.assertDataStoreValues(ds, [undefined, objData[1]]); + callback(err); + }); + }), + // create another null version + callback => + objectPut(authInfo, testPutObjectRequests[2], undefined, log, err => { + setImmediate(() => { + // old null version should be deleted + versioningTestUtils.assertDataStoreValues(ds, [undefined, undefined, objData[2]]); + callback(err); + }); + }), + ], + done + ); }); describe('when null version is not the latest version', () => { - const objData = ['foo0', 'foo1', 'foo2'].map(str => - Buffer.from(str, 'utf8')); - const testPutObjectRequests = objData.map(data => versioningTestUtils - 
.createPutObjectRequest(bucketName, objectName, data)); + const objData = ['foo0', 'foo1', 'foo2'].map(str => Buffer.from(str, 'utf8')); + const testPutObjectRequests = objData.map(data => + versioningTestUtils.createPutObjectRequest(bucketName, objectName, data) + ); beforeEach(done => { - async.series([ - callback => bucketPut(authInfo, testPutBucketRequest, log, - callback), - // putting null version: put obj before versioning configured - callback => objectPut(authInfo, testPutObjectRequests[0], - undefined, log, callback), - callback => bucketPutVersioning(authInfo, - enableVersioningRequest, log, callback), - // put another version: - callback => objectPut(authInfo, testPutObjectRequests[1], - undefined, log, callback), - callback => bucketPutVersioning(authInfo, - suspendVersioningRequest, log, callback), - ], err => { - if (err) { - return done(err); + async.series( + [ + callback => bucketPut(authInfo, testPutBucketRequest, log, callback), + // putting null version: put obj before versioning configured + callback => objectPut(authInfo, testPutObjectRequests[0], undefined, log, callback), + callback => bucketPutVersioning(authInfo, enableVersioningRequest, log, callback), + // put another version: + callback => objectPut(authInfo, testPutObjectRequests[1], undefined, log, callback), + callback => bucketPutVersioning(authInfo, suspendVersioningRequest, log, callback), + ], + err => { + if (err) { + return done(err); + } + versioningTestUtils.assertDataStoreValues(ds, objData.slice(0, 2)); + return done(); } - versioningTestUtils.assertDataStoreValues(ds, - objData.slice(0, 2)); - return done(); - }); + ); }); - it('should still delete null version when creating new null version', - done => { - objectPut(authInfo, testPutObjectRequests[2], undefined, - log, err => { - assert.ifError(err, `Unexpected err: ${err}`); - setImmediate(() => { - // old null version should be deleted after putting - // new null version - versioningTestUtils.assertDataStoreValues(ds, - [undefined, objData[1], objData[2]]); - done(err); - }); + it('should still delete null version when creating new null version', done => { + objectPut(authInfo, testPutObjectRequests[2], undefined, log, err => { + assert.ifError(err, `Unexpected err: ${err}`); + setImmediate(() => { + // old null version should be deleted after putting + // new null version + versioningTestUtils.assertDataStoreValues(ds, [undefined, objData[1], objData[2]]); + done(err); }); + }); }); }); - it('should return BadDigest error and not leave orphans in data when ' + - 'contentMD5 and completedHash do not match', done => { - const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - contentMD5: 'vnR+tLdVF79rPPfF+7YvOg==', - }, Buffer.from('I am another body', 'utf8')); - - bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - err => { - assert.strictEqual(err.is.BadDigest, true); - // orphan objects don't get deleted - // until the next tick - // in memory - setImmediate(() => { - // Data store starts at index 1 - assert.strictEqual(ds[0], undefined); - assert.strictEqual(ds[1], undefined); - done(); + it( + 'should return BadDigest error and not leave orphans in data when ' + + 'contentMD5 and completedHash do not match', + done => { + const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + 
contentMD5: 'vnR+tLdVF79rPPfF+7YvOg==', + }, + Buffer.from('I am another body', 'utf8') + ); + + bucketPut(authInfo, testPutBucketRequest, log, () => { + objectPut(authInfo, testPutObjectRequest, undefined, log, err => { + assert.strictEqual(err.is.BadDigest, true); + // orphan objects don't get deleted + // until the next tick + // in memory + setImmediate(() => { + // Data store starts at index 1 + assert.strictEqual(ds[0], undefined); + assert.strictEqual(ds[1], undefined); + done(); + }); }); }); - }); - }); + } + ); it('should not pass needOplogUpdate when writing new object', done => { - async.series([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - next => bucketPutVersioning(authInfo, enableVersioningRequest, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - async () => { - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, objectName, any, sinon.match({ - needOplogUpdate: undefined, - originOp: undefined, - }), any, any); - }, - ], done); + async.series( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + next => bucketPutVersioning(authInfo, enableVersioningRequest, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + any, + sinon.match({ + needOplogUpdate: undefined, + originOp: undefined, + }), + any, + any + ); + }, + ], + done + ); }); it('should not pass needOplogUpdate when replacing object', done => { - async.series([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - next => bucketPutVersioning(authInfo, enableVersioningRequest, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - async () => { - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, objectName, any, sinon.match({ - needOplogUpdate: undefined, - originOp: undefined, - }), any, any); - }, - ], done); + async.series( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + next => bucketPutVersioning(authInfo, enableVersioningRequest, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + any, + sinon.match({ + needOplogUpdate: undefined, + originOp: undefined, + }), + any, + any + ); + }, + ], + done + ); }); it('should not pass needOplogUpdate when replacing archived object', done => { const archived = { - archiveInfo: { foo: 0, bar: 'stuff' } + archiveInfo: { foo: 0, bar: 'stuff' }, }; - async.series([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - next => bucketPutVersioning(authInfo, enableVersioningRequest, log, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archived, next), - next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), - async () => { - sinon.assert.calledWith(metadata.putObjectMD.lastCall, - bucketName, objectName, any, sinon.match({ - needOplogUpdate: undefined, - originOp: undefined, - }), any, any); - }, - ], done); + async.series( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + next => bucketPutVersioning(authInfo, enableVersioningRequest, 
log, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + next => fakeMetadataArchive(bucketName, objectName, undefined, archived, next), + next => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + async () => { + sinon.assert.calledWith( + metadata.putObjectMD.lastCall, + bucketName, + objectName, + any, + sinon.match({ + needOplogUpdate: undefined, + originOp: undefined, + }), + any, + any + ); + }, + ], + done + ); }); }); @@ -1038,10 +1148,16 @@ describe('objectPut API in ingestion bucket', () => { before(() => { // Setup multi-backend, this is required for ingestion - data.switch(new storage.data.MultipleBackendGateway({ - 'us-east-1': dataClient, - 'us-east-2': dataClient, - }, metadata, data.locStorageCheckFn)); + data.switch( + new storage.data.MultipleBackendGateway( + { + 'us-east-1': dataClient, + 'us-east-2': dataClient, + }, + metadata, + data.locStorageCheckFn + ) + ); data.implName = 'multipleBackends'; }); @@ -1060,8 +1176,11 @@ describe('objectPut API in ingestion bucket', () => { const newPutObjectRequest = params => { const { location, versionID } = params || {}; - const r = versioningTestUtils - .createPutObjectRequest(bucketName, objectName, Buffer.from('I am another body', 'utf8')); + const r = versioningTestUtils.createPutObjectRequest( + bucketName, + objectName, + Buffer.from('I am another body', 'utf8') + ); if (location) { r.headers[objectLocationConstraintHeader] = location; } @@ -1070,17 +1189,19 @@ describe('objectPut API in ingestion bucket', () => { } return r; }; - const newPutIngestBucketRequest = location => new DummyRequest({ - bucketName, - namespace, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: '/', - post: '' + - '' + - `${location}` + - '', - }); + const newPutIngestBucketRequest = location => + new DummyRequest({ + bucketName, + namespace, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: '/', + post: + '' + + '' + + `${location}` + + '', + }); const archiveRestoreRequested = { archiveInfo: { foo: 0, bar: 'stuff' }, // opaque, can be anything... 
restoreRequestedAt: new Date().toString(), @@ -1095,13 +1216,17 @@ describe('objectPut API in ingestion bucket', () => { cb(null, `${keyContext.bucketName}/${keyContext.objectKey}`, versionID, size, 'md5'); }); - async.series([ - next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), - next => objectPut(authInfo, newPutObjectRequest(), undefined, log, (err, headers) => { - assert.strictEqual(headers['x-amz-version-id'], versionID); - next(err); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), + next => + objectPut(authInfo, newPutObjectRequest(), undefined, log, (err, headers) => { + assert.strictEqual(headers['x-amz-version-id'], versionID); + next(err); + }), + ], + done + ); }); it('should not use the versionID from the backend when writing in another location', done => { @@ -1112,16 +1237,26 @@ describe('objectPut API in ingestion bucket', () => { cb(null, `${keyContext.bucketName}/${keyContext.objectKey}`, versionID, size, 'md5'); }); - async.series([ - next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), - next => objectPut(authInfo, newPutObjectRequest({ - location: 'us-east-2', - }), undefined, log, (err, headers) => { - assert.ok(headers['x-amz-version-id']); - assert.notEqual(headers['x-amz-version-id'], versionID); - next(err); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), + next => + objectPut( + authInfo, + newPutObjectRequest({ + location: 'us-east-2', + }), + undefined, + log, + (err, headers) => { + assert.ok(headers['x-amz-version-id']); + assert.notEqual(headers['x-amz-version-id'], versionID); + next(err); + } + ), + ], + done + ); }); it('should not use the versionID from the backend when it is not a valid versionID', done => { @@ -1132,24 +1267,32 @@ describe('objectPut API in ingestion bucket', () => { cb(null, `${keyContext.bucketName}/${keyContext.objectKey}`, versionID, size, 'md5'); }); - async.series([ - next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), - next => objectPut(authInfo, newPutObjectRequest(), undefined, log, (err, headers) => { - assert.ok(headers['x-amz-version-id']); - assert.notEqual(headers['x-amz-version-id'], versionID); - next(err); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), + next => + objectPut(authInfo, newPutObjectRequest(), undefined, log, (err, headers) => { + assert.ok(headers['x-amz-version-id']); + assert.notEqual(headers['x-amz-version-id'], versionID); + next(err); + }), + ], + done + ); }); it('should not use the versionID from the backend when it is not provided', done => { - async.series([ - next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), - next => objectPut(authInfo, newPutObjectRequest(), undefined, log, (err, headers) => { - assert.ok(headers['x-amz-version-id']); - next(err); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), + next => + objectPut(authInfo, newPutObjectRequest(), undefined, log, (err, headers) => { + assert.ok(headers['x-amz-version-id']); + next(err); + }), + ], + done + ); }); it('should add versionID to backend putObject when restoring object', done => { @@ -1157,31 +1300,39 @@ describe('objectPut API in ingestion bucket', () => { const restoredVersionID 
= versioning.VersionID.encode(versioning.VersionID.generateVersionId('0', '')); // Use a "mock" data location, simulating a write to an ingest location - sinon.stub(dataClient, 'put') - .onCall(0).callsFake((writeStream, size, keyContext, reqUids, cb) => { + sinon + .stub(dataClient, 'put') + .onCall(0) + .callsFake((writeStream, size, keyContext, reqUids, cb) => { // First call: regular object creation, should not pass extra metadata header assert.strictEqual(keyContext.metaHeaders['x-amz-meta-scal-version-id'], undefined); cb(null, `${keyContext.bucketName}/${keyContext.objectKey}`, versionID, size, 'md5'); }) - .onCall(1).callsFake((writeStream, size, keyContext, reqUids, cb) => { + .onCall(1) + .callsFake((writeStream, size, keyContext, reqUids, cb) => { // Second call: "restored" data, should pass extra metadata header assert.strictEqual(keyContext.metaHeaders['x-amz-meta-scal-version-id'], versionID); cb(null, `${keyContext.bucketName}/${keyContext.objectKey}`, restoredVersionID, size, 'md5'); }); - async.series([ - next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), - next => objectPut(authInfo, newPutObjectRequest(), undefined, log, (err, headers) => { - assert.strictEqual(headers['x-amz-version-id'], versionID); - next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, versionID, archiveRestoreRequested, next), - next => objectPut(authInfo, newPutObjectRequest({ versionID }), undefined, log, (err, headers) => { - assert.ok(headers['x-amz-version-id']); - assert.strictEqual(headers['x-amz-version-id'], versionID); // keep the same versionID - next(err); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), + next => + objectPut(authInfo, newPutObjectRequest(), undefined, log, (err, headers) => { + assert.strictEqual(headers['x-amz-version-id'], versionID); + next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, versionID, archiveRestoreRequested, next), + next => + objectPut(authInfo, newPutObjectRequest({ versionID }), undefined, log, (err, headers) => { + assert.ok(headers['x-amz-version-id']); + assert.strictEqual(headers['x-amz-version-id'], versionID); // keep the same versionID + next(err); + }), + ], + done + ); }); it('should not add versionID to backend putObject when restoring object to another location', done => { @@ -1189,33 +1340,47 @@ describe('objectPut API in ingestion bucket', () => { const restoredVersionID = versioning.VersionID.encode(versioning.VersionID.generateVersionId('0', '')); // Use a "mock" data location, simulating a write to an ingest location - sinon.stub(dataClient, 'put') - .onCall(0).callsFake((writeStream, size, keyContext, reqUids, cb) => { + sinon + .stub(dataClient, 'put') + .onCall(0) + .callsFake((writeStream, size, keyContext, reqUids, cb) => { // First call: regular object creation, should not pass extra metadata header assert.strictEqual(keyContext.metaHeaders['x-amz-meta-scal-version-id'], undefined); cb(null, `${keyContext.bucketName}/${keyContext.objectKey}`, versionID, size, 'md5'); }) - .onCall(1).callsFake((writeStream, size, keyContext, reqUids, cb) => { + .onCall(1) + .callsFake((writeStream, size, keyContext, reqUids, cb) => { // Second call: "restored" data, should not pass extra metadata header (different location) assert.strictEqual(keyContext.metaHeaders['x-amz-meta-scal-version-id'], undefined); cb(null, `${keyContext.bucketName}/${keyContext.objectKey}`, restoredVersionID, size, 'md5'); 
}); - async.series([ - next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), - next => objectPut(authInfo, newPutObjectRequest(), undefined, log, (err, headers) => { - assert.strictEqual(headers['x-amz-version-id'], versionID); - next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, versionID, archiveRestoreRequested, next), - next => objectPut(authInfo, newPutObjectRequest({ - versionID, - location: 'us-east-2', - }), undefined, log, (err, headers) => { - assert.ok(headers['x-amz-version-id']); - assert.strictEqual(headers['x-amz-version-id'], versionID); // keep the same versionID - next(err); - }), - ], done); + async.series( + [ + next => bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log, next), + next => + objectPut(authInfo, newPutObjectRequest(), undefined, log, (err, headers) => { + assert.strictEqual(headers['x-amz-version-id'], versionID); + next(err); + }), + next => fakeMetadataArchive(bucketName, objectName, versionID, archiveRestoreRequested, next), + next => + objectPut( + authInfo, + newPutObjectRequest({ + versionID, + location: 'us-east-2', + }), + undefined, + log, + (err, headers) => { + assert.ok(headers['x-amz-version-id']); + assert.strictEqual(headers['x-amz-version-id'], versionID); // keep the same versionID + next(err); + } + ), + ], + done + ); }); }); diff --git a/tests/unit/api/objectPutACL.js b/tests/unit/api/objectPutACL.js index a88cc93bc2..1e7317a0c7 100644 --- a/tests/unit/api/objectPutACL.js +++ b/tests/unit/api/objectPutACL.js @@ -7,11 +7,7 @@ const AuthInfo = require('arsenal').auth.AuthInfo; const { bucketPut } = require('../../../lib/api/bucketPut'); const bucketPutPolicy = require('../../../lib/api/bucketPutPolicy'); const constants = require('../../../constants'); -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - AccessControlPolicy } - = require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, AccessControlPolicy } = require('../helpers'); const metadata = require('../metadataswitch'); const objectPut = require('../../../lib/api/objectPut'); const objectPutACL = require('../../../lib/api/objectPutACL'); @@ -21,8 +17,7 @@ const log = new DummyRequestLogger(); const canonicalID = 'accessKey1'; const authInfo = makeAuthInfo(canonicalID); const ownerID = authInfo.getCanonicalID(); -const anotherID = '79a59df900b949e55d96a1e698fba' + - 'cedfd6e09d98eacf8f8d5218e7cd47ef2bf'; +const anotherID = '79a59df900b949e55d96a1e698fba' + 'cedfd6e09d98eacf8f8d5218e7cd47ef2bf'; const defaultAcpParams = { ownerID, ownerDisplayName: 'OwnerDisplayName', @@ -43,13 +38,16 @@ let testPutObjectRequest; describe('putObjectACL API', () => { beforeEach(() => { cleanup(); - testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, - }, postBody); + testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }, + postBody + ); }); it('should return an error if invalid canned ACL provided', done => { @@ -64,14 +62,13 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err.is.InvalidArgument, true); - done(); - }); + objectPut(authInfo, 
testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err.is.InvalidArgument, true); + done(); }); + }); }); }); @@ -87,25 +84,21 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err, null); - metadata.getObjectMD(bucketName, objectName, {}, - log, (err, md) => { - assert.strictEqual(md.acl.Canned, - 'public-read-write'); - assert.strictEqual(md.originOp, 's3:ObjectAcl:Put'); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err, null); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert.strictEqual(md.acl.Canned, 'public-read-write'); + assert.strictEqual(md.originOp, 's3:ObjectAcl:Put'); + done(); }); }); + }); }); }); - it('should set a canned public-read ACL followed by' - + ' a canned authenticated-read ACL', done => { + it('should set a canned public-read ACL followed by' + ' a canned authenticated-read ACL', done => { const testObjACLRequest1 = { bucketName, namespace, @@ -127,30 +120,23 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest1, log, err => { - assert.strictEqual(err, null); - metadata.getObjectMD(bucketName, objectName, {}, - log, (err, md) => { - assert.strictEqual(md.acl.Canned, - 'public-read'); - objectPutACL(authInfo, testObjACLRequest2, log, - err => { - assert.strictEqual(err, null); - metadata.getObjectMD(bucketName, - objectName, {}, log, (err, md) => { - assert.strictEqual(md - .acl.Canned, - 'authenticated-read'); - assert.strictEqual(md.originOp, 's3:ObjectAcl:Put'); - done(); - }); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest1, log, err => { + assert.strictEqual(err, null); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert.strictEqual(md.acl.Canned, 'public-read'); + objectPutACL(authInfo, testObjACLRequest2, log, err => { + assert.strictEqual(err, null); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert.strictEqual(md.acl.Canned, 'authenticated-read'); + assert.strictEqual(md.originOp, 's3:ObjectAcl:Put'); + done(); + }); }); }); }); + }); }); }); @@ -161,8 +147,7 @@ describe('putObjectACL API', () => { objectKey: objectName, headers: { 'x-amz-grant-full-control': - 'emailaddress="sampleaccount1@sampling.com"' + - ',emailaddress="sampleaccount2@sampling.com"', + 'emailaddress="sampleaccount1@sampling.com"' + ',emailaddress="sampleaccount2@sampling.com"', 'x-amz-grant-read': `uri=${constants.logId}`, 'x-amz-grant-read-acp': `id=${ownerID}`, 'x-amz-grant-write-acp': `id=${anotherID}`, @@ -172,43 +157,34 @@ describe('putObjectACL API', () => { actionImplicitDenies: false, }; bucketPut(authInfo, 
testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err, null); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { assert.strictEqual(err, null); - metadata.getObjectMD(bucketName, objectName, {}, - log, (err, md) => { - assert.strictEqual(err, null); - const acls = md.acl; - assert.strictEqual(acls.READ[0], - constants.logId); - assert(acls.FULL_CONTROL[0] - .indexOf(ownerID) > -1); - assert(acls.FULL_CONTROL[1] - .indexOf(anotherID) > -1); - assert(acls.READ_ACP[0] - .indexOf(ownerID) > -1); - assert(acls.WRITE_ACP[0] - .indexOf(anotherID) > -1); - assert.strictEqual(md.originOp, 's3:ObjectAcl:Put'); - done(); - }); + const acls = md.acl; + assert.strictEqual(acls.READ[0], constants.logId); + assert(acls.FULL_CONTROL[0].indexOf(ownerID) > -1); + assert(acls.FULL_CONTROL[1].indexOf(anotherID) > -1); + assert(acls.READ_ACP[0].indexOf(ownerID) > -1); + assert(acls.WRITE_ACP[0].indexOf(anotherID) > -1); + assert.strictEqual(md.originOp, 's3:ObjectAcl:Put'); + done(); }); }); + }); }); }); - it('should return an error if invalid email ' + - 'provided in ACL header request', done => { + it('should return an error if invalid email ' + 'provided in ACL header request', done => { const testObjACLRequest = { bucketName, namespace, objectKey: objectName, headers: { 'x-amz-grant-full-control': - 'emailaddress="sampleaccount1@sampling.com"' + - ',emailaddress="nonexistentemail@sampling.com"', + 'emailaddress="sampleaccount1@sampling.com"' + ',emailaddress="nonexistentemail@sampling.com"', }, url: `/${bucketName}/${objectName}?acl`, query: { acl: '' }, @@ -216,24 +192,21 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err.is.UnresolvableGrantByEmailAddress, true); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err.is.UnresolvableGrantByEmailAddress, true); + done(); }); + }); }); }); it('should set ACLs provided in request body', done => { const acp = new AccessControlPolicy(defaultAcpParams); - acp.addGrantee('CanonicalUser', ownerID, 'FULL_CONTROL', - 'OwnerDisplayName'); + acp.addGrantee('CanonicalUser', ownerID, 'FULL_CONTROL', 'OwnerDisplayName'); acp.addGrantee('Group', constants.publicId, 'READ'); - acp.addGrantee('AmazonCustomerByEmail', 'sampleaccount1@sampling.com', - 'WRITE_ACP'); + acp.addGrantee('AmazonCustomerByEmail', 'sampleaccount1@sampling.com', 'WRITE_ACP'); acp.addGrantee('CanonicalUser', anotherID, 'READ_ACP'); const testObjACLRequest = { bucketName, @@ -247,31 +220,24 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, - log, (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - 
objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err, null); - metadata.getObjectMD(bucketName, objectName, {}, - log, (err, md) => { - assert.strictEqual(md - .acl.FULL_CONTROL[0], ownerID); - assert.strictEqual(md - .acl.READ[0], constants.publicId); - assert.strictEqual(md - .acl.WRITE_ACP[0], ownerID); - assert.strictEqual(md - .acl.READ_ACP[0], anotherID); - assert.strictEqual(md.originOp, 's3:ObjectAcl:Put'); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err, null); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert.strictEqual(md.acl.FULL_CONTROL[0], ownerID); + assert.strictEqual(md.acl.READ[0], constants.publicId); + assert.strictEqual(md.acl.WRITE_ACP[0], ownerID); + assert.strictEqual(md.acl.READ_ACP[0], anotherID); + assert.strictEqual(md.originOp, 's3:ObjectAcl:Put'); + done(); }); }); + }); }); }); - it('should return an error if wrong owner ID ' + - 'provided in ACLs set out in request body', done => { + it('should return an error if wrong owner ID ' + 'provided in ACLs set out in request body', done => { const acp = new AccessControlPolicy({ ownerID: anotherID }); const testObjACLRequest = { bucketName, @@ -285,21 +251,18 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - () => { - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err.is.AccessDenied, true); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, () => { + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err.is.AccessDenied, true); + done(); }); + }); }); }); - it('should ignore if WRITE ACL permission is ' + - 'provided in request body', done => { + it('should ignore if WRITE ACL permission is ' + 'provided in request body', done => { const acp = new AccessControlPolicy(defaultAcpParams); - acp.addGrantee('CanonicalUser', ownerID, 'FULL_CONTROL', - 'OwnerDisplayName'); + acp.addGrantee('CanonicalUser', ownerID, 'FULL_CONTROL', 'OwnerDisplayName'); acp.addGrantee('Group', constants.publicId, 'WRITE'); const testObjACLRequest = { bucketName, @@ -313,31 +276,25 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err, null); - metadata.getObjectMD(bucketName, objectName, {}, - log, (err, md) => { - assert.strictEqual(md.acl.Canned, ''); - assert.strictEqual(md.acl.FULL_CONTROL[0], - ownerID); - assert.strictEqual(md.acl.WRITE, undefined); - assert.strictEqual(md.acl.READ[0], undefined); - assert.strictEqual(md.acl.WRITE_ACP[0], - undefined); - assert.strictEqual(md.acl.READ_ACP[0], - undefined); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err, null); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert.strictEqual(md.acl.Canned, ''); + assert.strictEqual(md.acl.FULL_CONTROL[0], ownerID); + 
assert.strictEqual(md.acl.WRITE, undefined); + assert.strictEqual(md.acl.READ[0], undefined); + assert.strictEqual(md.acl.WRITE_ACP[0], undefined); + assert.strictEqual(md.acl.READ_ACP[0], undefined); + done(); }); }); + }); }); }); - it('should return an error if invalid email ' + - 'address provided in ACLs set out in request body', done => { + it('should return an error if invalid email ' + 'address provided in ACLs set out in request body', done => { const acp = new AccessControlPolicy(defaultAcpParams); acp.addGrantee('AmazonCustomerByEmail', 'xyz@amazon.com', 'WRITE_ACP'); const testObjACLRequest = { @@ -351,21 +308,18 @@ describe('putObjectACL API', () => { actionImplicitDenies: false, }; - bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err.is.UnresolvableGrantByEmailAddress, true); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err.is.UnresolvableGrantByEmailAddress, true); + done(); }); + }); }); }); - it('should return an error if xml provided does not match s3 ' + - 'scheme for setting ACLs', done => { + it('should return an error if xml provided does not match s3 ' + 'scheme for setting ACLs', done => { const acp = new AccessControlPolicy(defaultAcpParams); acp.addGrantee('AmazonCustomerByEmail', 'xyz@amazon.com', 'WRITE_ACP'); const originalXml = acp.getXml(); @@ -382,14 +336,13 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err.is.MalformedACLError, true); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err.is.MalformedACLError, true); + done(); }); + }); }); }); @@ -409,24 +362,20 @@ describe('putObjectACL API', () => { actionImplicitDenies: false, }; - bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err.is.MalformedXML, true); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err.is.MalformedXML, true); + done(); }); + }); }); }); - it('should return an error if invalid group ' + - 'uri provided in ACLs set out in request body', done => { + it('should return an error if invalid group ' + 'uri provided in ACLs set out in request body', done => { const acp = new AccessControlPolicy(defaultAcpParams); - acp.addGrantee('Group', 'http://acs.amazonaws.com/groups/' + - 'global/NOTAVALIDGROUP', 'WRITE_ACP'); + acp.addGrantee('Group', 'http://acs.amazonaws.com/groups/' + 'global/NOTAVALIDGROUP', 'WRITE_ACP'); 
const testObjACLRequest = { bucketName, namespace, @@ -439,28 +388,24 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err.is.InvalidArgument, true); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err.is.InvalidArgument, true); + done(); }); + }); }); }); - it('should return an error if invalid group uri ' + - 'provided in ACL header request', done => { + it('should return an error if invalid group uri ' + 'provided in ACL header request', done => { const testObjACLRequest = { bucketName, namespace, objectKey: objectName, headers: { - 'host': 's3.amazonaws.com', - 'x-amz-grant-full-control': - 'uri="http://acs.amazonaws.com/groups/' + - 'global/NOTAVALIDGROUP"', + host: 's3.amazonaws.com', + 'x-amz-grant-full-control': 'uri="http://acs.amazonaws.com/groups/' + 'global/NOTAVALIDGROUP"', }, url: `/${bucketName}/${objectName}?acl`, query: { acl: '' }, @@ -468,14 +413,13 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err.is.InvalidArgument, true); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err.is.InvalidArgument, true); + done(); }); + }); }); }); @@ -485,56 +429,89 @@ describe('putObjectACL API', () => { { headers: { 'x-amz-grant-read': `uri=${constants.logId}` }, type: 'READ' }, { headers: { 'x-amz-grant-read-acp': `id=${ownerID}` }, type: 'READ_ACP' }, { headers: { 'x-amz-grant-write-acp': `id=${anotherID}` }, type: 'WRITE_ACP' }, - { headers: { - 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', - 'x-amz-grant-read': `uri=${constants.logId}`, - 'x-amz-grant-read-acp': `id=${ownerID}`, - 'x-amz-grant-write-acp': `id=${anotherID}`, - }, type: 'ALL' }, - { headers: { - 'x-amz-grant-read': `uri=${constants.logId}`, - 'x-amz-grant-read-acp': `id=${ownerID}`, - 'x-amz-grant-write-acp': `id=${anotherID}`, - }, type: 'READ/READ_ACP/WRITE_ACP' }, - { headers: { - 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', - 'x-amz-grant-read-acp': `id=${ownerID}`, - 'x-amz-grant-write-acp': `id=${anotherID}`, - }, type: 'FULL/READ_ACP/WRITE_ACP' }, - { headers: { - 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', - 'x-amz-grant-read': `uri=${constants.logId}`, - 'x-amz-grant-write-acp': `id=${anotherID}`, - }, type: 'FULL/READ/WRITE_ACP' }, - { headers: { - 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', - 'x-amz-grant-read': `uri=${constants.logId}`, - 'x-amz-grant-read-acp': `id=${ownerID}`, - }, type: 'FULL/READ/READ_ACP' }, - { headers: { - 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', - 'x-amz-grant-read': `uri=${constants.logId}`, - }, type: 'FULL/READ' }, - { headers: 
{ - 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', - 'x-amz-grant-read-acp': `id=${ownerID}`, - }, type: 'FULL/READ_ACP' }, - { headers: { - 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', - 'x-amz-grant-write-acp': `id=${anotherID}`, - }, type: 'FULL/WRITE_ACP' }, - { headers: { - 'x-amz-grant-read': `uri=${constants.logId}`, - 'x-amz-grant-read-acp': `id=${ownerID}`, - }, type: 'READ/READ_ACP' }, - { headers: { - 'x-amz-grant-read': `uri=${constants.logId}`, - 'x-amz-grant-write-acp': `id=${anotherID}`, - }, type: 'READ/WRITE_ACP' }, - { headers: { - 'x-amz-grant-read-acp': `id=${ownerID}`, - 'x-amz-grant-write-acp': `id=${anotherID}`, - }, type: 'READ_ACP/WRITE_ACP' }, + { + headers: { + 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', + 'x-amz-grant-read': `uri=${constants.logId}`, + 'x-amz-grant-read-acp': `id=${ownerID}`, + 'x-amz-grant-write-acp': `id=${anotherID}`, + }, + type: 'ALL', + }, + { + headers: { + 'x-amz-grant-read': `uri=${constants.logId}`, + 'x-amz-grant-read-acp': `id=${ownerID}`, + 'x-amz-grant-write-acp': `id=${anotherID}`, + }, + type: 'READ/READ_ACP/WRITE_ACP', + }, + { + headers: { + 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', + 'x-amz-grant-read-acp': `id=${ownerID}`, + 'x-amz-grant-write-acp': `id=${anotherID}`, + }, + type: 'FULL/READ_ACP/WRITE_ACP', + }, + { + headers: { + 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', + 'x-amz-grant-read': `uri=${constants.logId}`, + 'x-amz-grant-write-acp': `id=${anotherID}`, + }, + type: 'FULL/READ/WRITE_ACP', + }, + { + headers: { + 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', + 'x-amz-grant-read': `uri=${constants.logId}`, + 'x-amz-grant-read-acp': `id=${ownerID}`, + }, + type: 'FULL/READ/READ_ACP', + }, + { + headers: { + 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', + 'x-amz-grant-read': `uri=${constants.logId}`, + }, + type: 'FULL/READ', + }, + { + headers: { + 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', + 'x-amz-grant-read-acp': `id=${ownerID}`, + }, + type: 'FULL/READ_ACP', + }, + { + headers: { + 'x-amz-grant-full-control': 'emailaddress="sampleaccount1@sampling.com"', + 'x-amz-grant-write-acp': `id=${anotherID}`, + }, + type: 'FULL/WRITE_ACP', + }, + { + headers: { + 'x-amz-grant-read': `uri=${constants.logId}`, + 'x-amz-grant-read-acp': `id=${ownerID}`, + }, + type: 'READ/READ_ACP', + }, + { + headers: { + 'x-amz-grant-read': `uri=${constants.logId}`, + 'x-amz-grant-write-acp': `id=${anotherID}`, + }, + type: 'READ/WRITE_ACP', + }, + { + headers: { + 'x-amz-grant-read-acp': `id=${ownerID}`, + 'x-amz-grant-write-acp': `id=${anotherID}`, + }, + type: 'READ_ACP/WRITE_ACP', + }, ].forEach(params => { const { headers, type } = params; it(`should set originOp to s3:ObjectAcl:Put when ACL is changed (${type})`, done => { @@ -547,19 +524,16 @@ describe('putObjectACL API', () => { query: { acl: '' }, }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err, null); - metadata.getObjectMD(bucketName, objectName, {}, - log, (err, md) => { - assert.strictEqual(md.originOp, - 's3:ObjectAcl:Put'); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, 
resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err, null); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert.strictEqual(md.originOp, 's3:ObjectAcl:Put'); + done(); }); }); + }); }); }); }); @@ -575,19 +549,16 @@ describe('putObjectACL API', () => { }; bucketPut(authInfo, testPutBucketRequest, log, () => { - objectPut(authInfo, testPutObjectRequest, undefined, log, - (err, resHeaders) => { - assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); - objectPutACL(authInfo, testObjACLRequest, log, err => { - assert.strictEqual(err, null); - metadata.getObjectMD(bucketName, objectName, {}, - log, (err, md) => { - assert.strictEqual(md.originOp, - 's3:ObjectCreated:Put'); - done(); - }); + objectPut(authInfo, testPutObjectRequest, undefined, log, (err, resHeaders) => { + assert.strictEqual(resHeaders.ETag, `"${correctMD5}"`); + objectPutACL(authInfo, testObjACLRequest, log, err => { + assert.strictEqual(err, null); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + assert.strictEqual(md.originOp, 's3:ObjectCreated:Put'); + done(); }); }); + }); }); }); @@ -609,18 +580,19 @@ describe('putObjectACL API', () => { }; beforeEach(done => { - async.waterfall([ - next => bucketPut(authInfo, testPutBucketRequest, log, next), - (cors, next) => objectPut(authInfo, - testPutObjectRequest, undefined, log, next), - ], err => { - assert.ifError(err); - done(); - }); + async.waterfall( + [ + next => bucketPut(authInfo, testPutBucketRequest, log, next), + (cors, next) => objectPut(authInfo, testPutObjectRequest, undefined, log, next), + ], + err => { + assert.ifError(err); + done(); + } + ); }); - it('should succeed with a deny on unrelated object as non root', - done => { + it('should succeed with a deny on unrelated object as non root', done => { const bucketPutPolicyRequest = getPolicyRequest({ Version: '2012-10-17', Statement: [ @@ -632,36 +604,38 @@ describe('putObjectACL API', () => { }, ], }); - const testObjACLRequest = Object.assign({ - socket: { - remoteAddress: '1.1.1.1', + const testObjACLRequest = Object.assign( + { + socket: { + remoteAddress: '1.1.1.1', + }, + bucketName, + namespace, + objectKey: objectName, + headers: { 'x-amz-acl': 'public-read-write' }, + url: `/${bucketName}/${objectName}?acl`, + query: { acl: '' }, + actionImplicitDenies: false, }, - bucketName, - namespace, - objectKey: objectName, - headers: { 'x-amz-acl': 'public-read-write' }, - url: `/${bucketName}/${objectName}?acl`, - query: { acl: '' }, - actionImplicitDenies: false, - }, requestFix); + requestFix + ); /** root user doesn't check bucket policy */ const authNotRoot = makeAuthInfo(canonicalID, 'not-root'); - async.waterfall([ - next => bucketPutPolicy(authInfo, - bucketPutPolicyRequest, log, next), - (cors, next) => objectPutACL(authNotRoot, - testObjACLRequest, log, next), - (headers, next) => metadata.getObjectMD(bucketName, - objectName, {}, log, next), - ], (err, md) => { - assert.ifError(err); - assert.strictEqual(md.acl.Canned, 'public-read-write'); - done(); - }); + async.waterfall( + [ + next => bucketPutPolicy(authInfo, bucketPutPolicyRequest, log, next), + (cors, next) => objectPutACL(authNotRoot, testObjACLRequest, log, next), + (headers, next) => metadata.getObjectMD(bucketName, objectName, {}, log, next), + ], + (err, md) => { + assert.ifError(err); + assert.strictEqual(md.acl.Canned, 'public-read-write'); + done(); + } + ); }); - it('should fail 
with an allow on unrelated object as public', - done => { + it('should fail with an allow on unrelated object as public', done => { const bucketPutPolicyRequest = getPolicyRequest({ Version: '2012-10-17', Statement: [ @@ -673,31 +647,35 @@ describe('putObjectACL API', () => { }, ], }); - const testObjACLRequest = Object.assign({ - socket: { - remoteAddress: '1.1.1.1', + const testObjACLRequest = Object.assign( + { + socket: { + remoteAddress: '1.1.1.1', + }, + bucketName, + namespace, + objectKey: objectName, + headers: { 'x-amz-acl': 'public-read-write' }, + url: `/${bucketName}/${objectName}?acl`, + query: { acl: '' }, + actionImplicitDenies: false, }, - bucketName, - namespace, - objectKey: objectName, - headers: { 'x-amz-acl': 'public-read-write' }, - url: `/${bucketName}/${objectName}?acl`, - query: { acl: '' }, - actionImplicitDenies: false, - }, requestFix); + requestFix + ); const publicAuth = new AuthInfo({ canonicalID: constants.publicId, }); - async.waterfall([ - next => bucketPutPolicy(authInfo, - bucketPutPolicyRequest, log, next), - (cors, next) => objectPutACL(publicAuth, - testObjACLRequest, log, next), - ], err => { - assert(err instanceof Error); - assert.strictEqual(err.code, errorInstances.AccessDenied.code); - done(); - }); + async.waterfall( + [ + next => bucketPutPolicy(authInfo, bucketPutPolicyRequest, log, next), + (cors, next) => objectPutACL(publicAuth, testObjACLRequest, log, next), + ], + err => { + assert(err instanceof Error); + assert.strictEqual(err.code, errorInstances.AccessDenied.code); + done(); + } + ); }); }); }); diff --git a/tests/unit/api/objectPutLegalHold.js b/tests/unit/api/objectPutLegalHold.js index 86c41efe89..b4af2d9077 100644 --- a/tests/unit/api/objectPutLegalHold.js +++ b/tests/unit/api/objectPutLegalHold.js @@ -22,18 +22,19 @@ const putBucketRequest = { actionImplicitDenies: false, }; -const putObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, -}, postBody); +const putObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }, + postBody +); -const objectLegalHoldXml = status => '' + - `${status}` + - ''; +const objectLegalHoldXml = status => + '' + `${status}` + ''; const putLegalHoldReq = status => ({ bucketName, @@ -64,8 +65,9 @@ describe('putObjectLegalHold API', () => { }); describe('with Object Lock enabled on bucket', () => { - const bucketObjLockRequest = Object.assign({}, putBucketRequest, - { headers: { 'x-amz-bucket-object-lock-enabled': 'true' } }); + const bucketObjLockRequest = Object.assign({}, putBucketRequest, { + headers: { 'x-amz-bucket-object-lock-enabled': 'true' }, + }); beforeEach(done => { bucketPut(authInfo, bucketObjLockRequest, log, err => { @@ -75,11 +77,10 @@ describe('putObjectLegalHold API', () => { }); afterEach(cleanup); - it('should update object\'s metadata with legal hold status', done => { + it("should update object's metadata with legal hold status", done => { objectPutLegalHold(authInfo, putLegalHoldReq('ON'), log, err => { assert.ifError(err); - return metadata.getObjectMD(bucketName, objectName, {}, log, - (err, objMD) => { + return metadata.getObjectMD(bucketName, objectName, {}, log, (err, objMD) => { assert.ifError(err); assert.strictEqual(objMD.legalHold, true); return done(); @@ -87,11 +88,10 @@ describe('putObjectLegalHold API', () => { }); }); - it('should update object\'s metadata with legal hold status', done => 
{ + it("should update object's metadata with legal hold status", done => { objectPutLegalHold(authInfo, putLegalHoldReq('OFF'), log, err => { assert.ifError(err); - return metadata.getObjectMD(bucketName, objectName, {}, log, - (err, objMD) => { + return metadata.getObjectMD(bucketName, objectName, {}, log, (err, objMD) => { assert.ifError(err); assert.strictEqual(objMD.legalHold, false); return done(); @@ -99,11 +99,10 @@ describe('putObjectLegalHold API', () => { }); }); - it('should set originOp in object\'s metadata to s3:ObjectLegalHold:Put', done => { + it("should set originOp in object's metadata to s3:ObjectLegalHold:Put", done => { objectPutLegalHold(authInfo, putLegalHoldReq('ON'), log, err => { assert.ifError(err); - return metadata.getObjectMD(bucketName, objectName, {}, log, - (err, objMD) => { + return metadata.getObjectMD(bucketName, objectName, {}, log, (err, objMD) => { assert.ifError(err); assert.strictEqual(objMD.originOp, 's3:ObjectLegalHold:Put'); return done(); diff --git a/tests/unit/api/objectPutRetention.js b/tests/unit/api/objectPutRetention.js index 4bf24cbcea..d3ab098bf2 100644 --- a/tests/unit/api/objectPutRetention.js +++ b/tests/unit/api/objectPutRetention.js @@ -25,39 +25,47 @@ const bucketPutRequest = { actionImplicitDenies: false, }; -const putObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, -}, postBody); +const putObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }, + postBody +); -const objectRetentionXmlGovernance = '' + 'GOVERNANCE' + `${expectedDate}` + ''; -const objectRetentionXmlCompliance = '' + 'COMPLIANCE' + `${expectedDate}` + ''; -const objectRetentionXmlGovernanceLonger = '' + 'GOVERNANCE' + `${moment().add(5, 'days').toISOString()}` + ''; -const objectRetentionXmlGovernanceShorter = '' + 'GOVERNANCE' + `${moment().add(1, 'days').toISOString()}` + ''; -const objectRetentionXmlComplianceShorter = '' + 'COMPLIANCE' + `${moment().add(1, 'days').toISOString()}` + @@ -75,7 +83,7 @@ const putObjRetRequestGovernanceWithHeader = { bucketName, objectKey: objectName, headers: { - 'host': `${bucketName}.s3.amazonaws.com`, + host: `${bucketName}.s3.amazonaws.com`, 'x-amz-bypass-governance-retention': 'true', }, post: objectRetentionXmlGovernance, @@ -135,8 +143,9 @@ describe('putObjectRetention API', () => { }); describe('with Object Lock enabled on bucket', () => { - const bucketObjLockRequest = Object.assign({}, bucketPutRequest, - { headers: { 'x-amz-bucket-object-lock-enabled': 'true' } }); + const bucketObjLockRequest = Object.assign({}, bucketPutRequest, { + headers: { 'x-amz-bucket-object-lock-enabled': 'true' }, + }); beforeEach(done => { bucketPut(authInfo, bucketObjLockRequest, log, err => { @@ -146,11 +155,10 @@ describe('putObjectRetention API', () => { }); afterEach(() => cleanup()); - it('should update an object\'s metadata with retention info', done => { + it("should update an object's metadata with retention info", done => { objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { assert.ifError(err); - return metadata.getObjectMD(bucketName, objectName, {}, log, - (err, objMD) => { + return metadata.getObjectMD(bucketName, objectName, {}, log, (err, objMD) => { assert.ifError(err); assert.strictEqual(objMD.retentionMode, expectedMode); assert.strictEqual(objMD.retentionDate, expectedDate); @@ -159,11 +167,10 @@ 
describe('putObjectRetention API', () => { }); }); - it('should set originOp in object\'s metadata to s3:ObjectRetention:Put', done => { + it("should set originOp in object's metadata to s3:ObjectRetention:Put", done => { objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { assert.ifError(err); - return metadata.getObjectMD(bucketName, objectName, {}, log, - (err, objMD) => { + return metadata.getObjectMD(bucketName, objectName, {}, log, (err, objMD) => { assert.ifError(err); assert.strictEqual(objMD.originOp, 's3:ObjectRetention:Put'); return done(); @@ -191,48 +198,60 @@ describe('putObjectRetention API', () => { }); }); - it('should allow update if the x-amz-bypass-governance-retention header is missing and ' - + 'GOVERNANCE mode is enabled if time is being extended', done => { - objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { - assert.ifError(err); - return objectPutRetention(authInfo, putObjRetRequestGovernanceLonger, log, err => { + it( + 'should allow update if the x-amz-bypass-governance-retention header is missing and ' + + 'GOVERNANCE mode is enabled if time is being extended', + done => { + objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { assert.ifError(err); - done(); + return objectPutRetention(authInfo, putObjRetRequestGovernanceLonger, log, err => { + assert.ifError(err); + done(); + }); }); - }); - }); + } + ); - it('should disallow update if the x-amz-bypass-governance-retention header is missing and ' - + 'GOVERNANCE mode is enabled', done => { - objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { - assert.ifError(err); - return objectPutRetention(authInfo, putObjRetRequestGovernanceShorter, log, err => { - assert.strictEqual(err.is.AccessDenied, true); - done(); + it( + 'should disallow update if the x-amz-bypass-governance-retention header is missing and ' + + 'GOVERNANCE mode is enabled', + done => { + objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { + assert.ifError(err); + return objectPutRetention(authInfo, putObjRetRequestGovernanceShorter, log, err => { + assert.strictEqual(err.is.AccessDenied, true); + done(); + }); }); - }); - }); + } + ); - it('should allow update if the x-amz-bypass-governance-retention header is missing and ' - + 'GOVERNANCE mode is enabled and the same date is used', done => { - objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { - assert.ifError(err); - return objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { + it( + 'should allow update if the x-amz-bypass-governance-retention header is missing and ' + + 'GOVERNANCE mode is enabled and the same date is used', + done => { + objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { assert.ifError(err); - done(); + return objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { + assert.ifError(err); + done(); + }); }); - }); - }); + } + ); - it('should allow update if the x-amz-bypass-governance-retention header is present and ' - + 'GOVERNANCE mode is enabled', done => { - objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { - assert.ifError(err); - return objectPutRetention(authInfo, putObjRetRequestGovernanceWithHeader, log, err => { + it( + 'should allow update if the x-amz-bypass-governance-retention header is present and ' + + 'GOVERNANCE mode is enabled', + done => { + objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => { assert.ifError(err); - done(); + return 
objectPutRetention(authInfo, putObjRetRequestGovernanceWithHeader, log, err => { + assert.ifError(err); + done(); + }); }); - }); - }); + } + ); }); }); diff --git a/tests/unit/api/objectPutTagging.js b/tests/unit/api/objectPutTagging.js index faa6d9a188..8086a219fd 100644 --- a/tests/unit/api/objectPutTagging.js +++ b/tests/unit/api/objectPutTagging.js @@ -3,16 +3,10 @@ const assert = require('assert'); const { bucketPut } = require('../../../lib/api/bucketPut'); const objectPut = require('../../../lib/api/objectPut'); const objectPutTagging = require('../../../lib/api/objectPutTagging'); -const { _validator, parseTagXml } - = require('arsenal').s3middleware.tagging; -const { cleanup, - DummyRequestLogger, - makeAuthInfo, - TaggingConfigTester } - = require('../helpers'); +const { _validator, parseTagXml } = require('arsenal').s3middleware.tagging; +const { cleanup, DummyRequestLogger, makeAuthInfo, TaggingConfigTester } = require('../helpers'); const metadata = require('../../../lib/metadata/wrapper'); -const { taggingTests } - = require('../../functional/aws-node-sdk/lib/utility/tagging.js'); +const { taggingTests } = require('../../functional/aws-node-sdk/lib/utility/tagging.js'); const DummyRequest = require('../DummyRequest'); const log = new DummyRequestLogger(); @@ -28,13 +22,16 @@ const testBucketPutRequest = { actionImplicitDenies: false, }; -const testPutObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, -}, postBody); +const testPutObjectRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey: objectName, + headers: {}, + url: `/${bucketName}/${objectName}`, + }, + postBody +); function _checkError(err, code, errorName) { assert(err, 'Expected error but found none'); @@ -43,14 +40,15 @@ function _checkError(err, code, errorName) { } function _generateSampleXml(key, value) { - const xml = '' + - '' + - '' + - `${key}` + - `${value}` + - '' + - '' + - ''; + const xml = + '' + + '' + + '' + + `${key}` + + `${value}` + + '' + + '' + + ''; return xml; } @@ -62,24 +60,21 @@ describe('putObjectTagging API', () => { if (err) { return done(err); } - return objectPut(authInfo, testPutObjectRequest, undefined, log, - done); + return objectPut(authInfo, testPutObjectRequest, undefined, log, done); }); }); afterEach(cleanup); - it('should update an object\'s metadata with tags resource and update originOp', done => { + it("should update an object's metadata with tags resource and update originOp", done => { const taggingUtil = new TaggingConfigTester(); - const testObjectPutTaggingRequest = taggingUtil - .createObjectTaggingRequest('PUT', bucketName, objectName); + const testObjectPutTaggingRequest = taggingUtil.createObjectTaggingRequest('PUT', bucketName, objectName); objectPutTagging(authInfo, testObjectPutTaggingRequest, log, err => { if (err) { process.stdout.write(`Err putting object tagging ${err}`); return done(err); } - return metadata.getObjectMD(bucketName, objectName, {}, log, - (err, objectMD) => { + return metadata.getObjectMD(bucketName, objectName, {}, log, (err, objectMD) => { if (err) { process.stdout.write(`Err retrieving object MD ${err}`); return done(err); @@ -95,19 +90,15 @@ describe('putObjectTagging API', () => { describe('PUT object tagging :: helper validation functions ', () => { describe('validateTagStructure ', () => { - it('should return expected true if tag is valid false/undefined if not', - done => { + it('should return expected true if tag is valid 
false/undefined if not', done => { const tags = [ { tagTest: { Key: ['foo'], Value: ['bar'] }, isValid: true }, { tagTest: { Key: ['foo'] }, isValid: false }, { tagTest: { Value: ['bar'] }, isValid: false }, { tagTest: { Keys: ['foo'], Value: ['bar'] }, isValid: false }, - { tagTest: { Key: ['foo', 'boo'], Value: ['bar'] }, - isValid: false }, - { tagTest: { Key: ['foo'], Value: ['bar', 'boo'] }, - isValid: false }, - { tagTest: { Key: ['foo', 'boo'], Value: ['bar', 'boo'] }, - isValid: false }, + { tagTest: { Key: ['foo', 'boo'], Value: ['bar'] }, isValid: false }, + { tagTest: { Key: ['foo'], Value: ['bar', 'boo'] }, isValid: false }, + { tagTest: { Key: ['foo', 'boo'], Value: ['bar', 'boo'] }, isValid: false }, { tagTest: { Key: ['foo'], Values: ['bar'] }, isValid: false }, { tagTest: { Keys: ['foo'], Values: ['bar'] }, isValid: false }, ]; @@ -125,26 +116,18 @@ describe('PUT object tagging :: helper validation functions ', () => { }); describe('validateXMLStructure ', () => { - it('should return expected true if tag is valid false/undefined ' + - 'if not', done => { + it('should return expected true if tag is valid false/undefined ' + 'if not', done => { const tags = [ - { tagging: { Tagging: { TagSet: [{ Tag: [] }] } }, isValid: - true }, + { tagging: { Tagging: { TagSet: [{ Tag: [] }] } }, isValid: true }, { tagging: { Tagging: { TagSet: [''] } }, isValid: true }, { tagging: { Tagging: { TagSet: [] } }, isValid: false }, { tagging: { Tagging: { TagSet: [{}] } }, isValid: false }, - { tagging: { Tagging: { Tagset: [{ Tag: [] }] } }, isValid: - false }, - { tagging: { Tagging: { Tagset: [{ Tag: [] }] }, - ExtraTagging: 'extratagging' }, isValid: false }, - { tagging: { Tagging: { Tagset: [{ Tag: [] }], ExtraTagset: - 'extratagset' } }, isValid: false }, - { tagging: { Tagging: { Tagset: [{ Tag: [] }], ExtraTagset: - 'extratagset' } }, isValid: false }, - { tagging: { Tagging: { Tagset: [{ Tag: [], ExtraTag: - 'extratag' }] } }, isValid: false }, - { tagging: { Tagging: { Tagset: [{ Tag: {} }] } }, isValid: - false }, + { tagging: { Tagging: { Tagset: [{ Tag: [] }] } }, isValid: false }, + { tagging: { Tagging: { Tagset: [{ Tag: [] }] }, ExtraTagging: 'extratagging' }, isValid: false }, + { tagging: { Tagging: { Tagset: [{ Tag: [] }], ExtraTagset: 'extratagset' } }, isValid: false }, + { tagging: { Tagging: { Tagset: [{ Tag: [] }], ExtraTagset: 'extratagset' } }, isValid: false }, + { tagging: { Tagging: { Tagset: [{ Tag: [], ExtraTag: 'extratag' }] } }, isValid: false }, + { tagging: { Tagging: { Tagset: [{ Tag: {} }] } }, isValid: false }, ]; for (let i = 0; i < tags.length; i++) { @@ -173,7 +156,9 @@ describe('PUT object tagging :: helper validation functions ', () => { taggingTests.forEach(taggingTest => { it(taggingTest.it, done => { - const { tag: { key, value } } = taggingTest; + const { + tag: { key, value }, + } = taggingTest; const xml = _generateSampleXml(key, value); parseTagXml(xml, log, (err, result) => { if (taggingTest.error) { diff --git a/tests/unit/api/objectReplicationMD.js b/tests/unit/api/objectReplicationMD.js index 48451b43ce..f87a84eb6d 100644 --- a/tests/unit/api/objectReplicationMD.js +++ b/tests/unit/api/objectReplicationMD.js @@ -4,16 +4,14 @@ const crypto = require('crypto'); const BucketInfo = require('arsenal').models.BucketInfo; -const { cleanup, DummyRequestLogger, makeAuthInfo, TaggingConfigTester } = - require('../helpers'); +const { cleanup, DummyRequestLogger, makeAuthInfo, TaggingConfigTester } = require('../helpers'); const constants = 
require('../../../constants'); const { metadata } = require('arsenal').storage.metadata.inMemory.metadata; const DummyRequest = require('../DummyRequest'); const { objectDelete } = require('../../../lib/api/objectDelete'); const objectPut = require('../../../lib/api/objectPut'); const objectCopy = require('../../../lib/api/objectCopy'); -const completeMultipartUpload = - require('../../../lib/api/completeMultipartUpload'); +const completeMultipartUpload = require('../../../lib/api/completeMultipartUpload'); const objectPutACL = require('../../../lib/api/objectPutACL'); const objectPutTagging = require('../../../lib/api/objectPutTagging'); const objectDeleteTagging = require('../../../lib/api/objectDeleteTagging'); @@ -55,19 +53,20 @@ const objectACLReq = { // Get an object request with the given key. function getObjectPutReq(key, hasContent) { const bodyContent = hasContent ? 'body content' : ''; - return new DummyRequest({ - bucketName, - namespace, - objectKey: key, - headers: {}, - url: `/${bucketName}/${key}`, - }, Buffer.from(bodyContent, 'utf8')); + return new DummyRequest( + { + bucketName, + namespace, + objectKey: key, + headers: {}, + url: `/${bucketName}/${key}`, + }, + Buffer.from(bodyContent, 'utf8') + ); } -const taggingPutReq = new TaggingConfigTester() - .createObjectTaggingRequest('PUT', bucketName, keyA); -const taggingDeleteReq = new TaggingConfigTester() - .createObjectTaggingRequest('DELETE', bucketName, keyA); +const taggingPutReq = new TaggingConfigTester().createObjectTaggingRequest('PUT', bucketName, keyA); +const taggingDeleteReq = new TaggingConfigTester().createObjectTaggingRequest('DELETE', bucketName, keyA); const emptyReplicationMD = { status: '', @@ -99,35 +98,34 @@ function checkObjectReplicationInfo(key, expected) { // Put the object key and check the replication information. function putObjectAndCheckMD(key, expected, cb) { - return objectPut(authInfo, getObjectPutReq(key, true), undefined, log, - err => { - if (err) { - return cb(err); - } - checkObjectReplicationInfo(key, expected); - return cb(); - }); + return objectPut(authInfo, getObjectPutReq(key, true), undefined, log, err => { + if (err) { + return cb(err); + } + checkObjectReplicationInfo(key, expected); + return cb(); + }); } // Create the bucket in metadata. function createBucket() { - metadata - .buckets.set(bucketName, new BucketInfo(bucketName, ownerID, '', '')); - metadata.keyMaps.set(bucketName, new Map); + metadata.buckets.set(bucketName, new BucketInfo(bucketName, ownerID, '', '')); + metadata.keyMaps.set(bucketName, new Map()); } // Create the bucket in metadata with versioning and a replication config. function createBucketWithReplication(hasStorageClass) { createBucket(); const config = { - role: 'arn:aws:iam::account-id:role/src-resource,' + - 'arn:aws:iam::account-id:role/dest-resource', + role: 'arn:aws:iam::account-id:role/src-resource,' + 'arn:aws:iam::account-id:role/dest-resource', destination: 'arn:aws:s3:::source-bucket', - rules: [{ - prefix: keyA, - enabled: true, - id: 'test-id', - }], + rules: [ + { + prefix: keyA, + enabled: true, + id: 'test-id', + }, + ], }; if (hasStorageClass) { config.rules[0].storageClass = storageClassType; @@ -140,22 +138,21 @@ function createBucketWithReplication(hasStorageClass) { // Create the shadow bucket in metadata for MPUs with a recent model number. 
function createShadowBucket(key, uploadId) { - const overviewKey = `overview${constants.splitter}` + - `${key}${constants.splitter}${uploadId}`; - metadata.buckets - .set(mpuShadowBucket, new BucketInfo(mpuShadowBucket, ownerID, '', '')); - // Set modelVersion to use the most recent splitter. + const overviewKey = `overview${constants.splitter}` + `${key}${constants.splitter}${uploadId}`; + metadata.buckets.set(mpuShadowBucket, new BucketInfo(mpuShadowBucket, ownerID, '', '')); + // Set modelVersion to use the most recent splitter. Object.assign(metadata.buckets.get(mpuShadowBucket), { _mdBucketModelVersion: 5, }); - metadata.keyMaps.set(mpuShadowBucket, new Map); - metadata.keyMaps.get(mpuShadowBucket).set(overviewKey, new Map); + metadata.keyMaps.set(mpuShadowBucket, new Map()); + metadata.keyMaps.get(mpuShadowBucket).set(overviewKey, new Map()); Object.assign(metadata.keyMaps.get(mpuShadowBucket).get(overviewKey), { id: uploadId, eventualStorageBucket: bucketName, initiator: { DisplayName: 'accessKey1displayName', - ID: ownerID }, + ID: ownerID, + }, key, uploadId, }); @@ -170,24 +167,26 @@ function putMPU(key, body, cb) { const calculatedHash = md5Hash.digest('hex'); const partKey = `${uploadId}${constants.splitter}00001`; const obj = { - partLocations: [{ - key: 1, - dataStoreName: 'scality-internal-mem', - dataStoreETag: `1:${calculatedHash}`, - }], + partLocations: [ + { + key: 1, + dataStoreName: 'scality-internal-mem', + dataStoreETag: `1:${calculatedHash}`, + }, + ], key: partKey, }; obj['content-md5'] = calculatedHash; obj['content-length'] = body.length; - metadata.keyMaps.get(mpuShadowBucket).set(partKey, new Map); + metadata.keyMaps.get(mpuShadowBucket).set(partKey, new Map()); const partMap = metadata.keyMaps.get(mpuShadowBucket).get(partKey); Object.assign(partMap, obj); const postBody = '' + - '' + - '1' + - `"${calculatedHash}"` + - '' + + '' + + '1' + + `"${calculatedHash}"` + + '' + ''; const req = { bucketName, @@ -217,8 +216,7 @@ function copyObject(sourceObjectKey, copyObjectKey, hasContent, cb) { headers: {}, url: `/${bucketName}/${sourceObjectKey}`, }); - return objectCopy(authInfo, req, bucketName, sourceObjectKey, undefined, - log, cb); + return objectCopy(authInfo, req, bucketName, sourceObjectKey, undefined, log, cb); }); } @@ -230,26 +228,33 @@ describe('Replication object MD without bucket replication config', () => { afterEach(() => cleanup()); - it('should not update object metadata', done => - putObjectAndCheckMD(keyA, emptyReplicationMD, done)); + it('should not update object metadata', done => putObjectAndCheckMD(keyA, emptyReplicationMD, done)); it('should not update object metadata if putting object ACL', done => - async.series([ - next => putObjectAndCheckMD(keyA, emptyReplicationMD, next), - next => objectPutACL(authInfo, objectACLReq, log, next), - ], err => { - if (err) { - return done(err); + async.series( + [ + next => putObjectAndCheckMD(keyA, emptyReplicationMD, next), + next => objectPutACL(authInfo, objectACLReq, log, next), + ], + err => { + if (err) { + return done(err); + } + checkObjectReplicationInfo(keyA, expectedEmptyReplicationMD); + return done(); } - checkObjectReplicationInfo(keyA, expectedEmptyReplicationMD); - return done(); - })); + )); describe('Object tagging', () => { - beforeEach(done => async.series([ - next => putObjectAndCheckMD(keyA, emptyReplicationMD, next), - next => objectPutTagging(authInfo, taggingPutReq, log, next), - ], err => done(err))); + beforeEach(done => + async.series( + [ + next => 
putObjectAndCheckMD(keyA, emptyReplicationMD, next), + next => objectPutTagging(authInfo, taggingPutReq, log, next), + ], + err => done(err) + ) + ); it('should not update object metadata if putting tag', done => { checkObjectReplicationInfo(keyA, expectedEmptyReplicationMD); @@ -257,18 +262,20 @@ describe('Replication object MD without bucket replication config', () => { }); it('should not update object metadata if deleting tag', done => - async.series([ - // Put a new version to update replication MD content array. - next => putObjectAndCheckMD(keyA, emptyReplicationMD, next), - next => objectDeleteTagging(authInfo, taggingDeleteReq, log, - next), - ], err => { - if (err) { - return done(err); + async.series( + [ + // Put a new version to update replication MD content array. + next => putObjectAndCheckMD(keyA, emptyReplicationMD, next), + next => objectDeleteTagging(authInfo, taggingDeleteReq, log, next), + ], + err => { + if (err) { + return done(err); + } + checkObjectReplicationInfo(keyA, expectedEmptyReplicationMD); + return done(); } - checkObjectReplicationInfo(keyA, expectedEmptyReplicationMD); - return done(); - })); + )); it('should not update object metadata if completing MPU', done => putMPU(keyA, 'content', err => { @@ -291,430 +298,455 @@ describe('Replication object MD without bucket replication config', () => { }); [true, false].forEach(hasStorageClass => { - describe('Replication object MD with bucket replication config ' + - `${hasStorageClass ? 'with' : 'without'} storage class`, () => { - const replicationMD = { - status: 'PENDING', - backends: [{ - site: 'zenko', + describe( + 'Replication object MD with bucket replication config ' + + `${hasStorageClass ? 'with' : 'without'} storage class`, + () => { + const replicationMD = { status: 'PENDING', + backends: [ + { + site: 'zenko', + status: 'PENDING', + dataStoreVersionId: '', + }, + ], + content: ['DATA', 'METADATA'], + destination: bucketARN, + storageClass: 'zenko', + role: 'arn:aws:iam::account-id:role/src-resource,' + 'arn:aws:iam::account-id:role/dest-resource', + storageType: '', dataStoreVersionId: '', - }], - content: ['DATA', 'METADATA'], - destination: bucketARN, - storageClass: 'zenko', - role: 'arn:aws:iam::account-id:role/src-resource,' + - 'arn:aws:iam::account-id:role/dest-resource', - storageType: '', - dataStoreVersionId: '', - isNFS: undefined, - }; - const newReplicationMD = hasStorageClass ? 
Object.assign(replicationMD, - { storageClass: storageClassType }) : replicationMD; - const replicateMetadataOnly = Object.assign({}, newReplicationMD, - { content: ['METADATA'] }); - - beforeEach(() => { - cleanup(); - createBucketWithReplication(hasStorageClass); - }); - - afterEach(() => { - cleanup(); - delete config.locationConstraints['zenko']; - }); - - it('should update metadata when replication config prefix matches ' + - 'an object key', done => - putObjectAndCheckMD(keyA, newReplicationMD, done)); - - it('should update metadata when replication config prefix matches ' + - 'the start of an object key', done => - putObjectAndCheckMD(`${keyA}abc`, newReplicationMD, done)); - - it('should not update metadata when replication config prefix does ' + - 'not match the start of an object key', done => - putObjectAndCheckMD(`abc${keyA}`, emptyReplicationMD, done)); - - it('should not update metadata when replication config prefix does ' + - 'not apply', done => - putObjectAndCheckMD(keyB, emptyReplicationMD, done)); - - it("should update status to 'PENDING' if putting a new version", done => - putObjectAndCheckMD(keyA, newReplicationMD, err => { - if (err) { - return done(err); - } - const objectMD = metadata.keyMaps.get(bucketName).get(keyA); - // Update metadata to a status after replication has occurred. - objectMD.replicationInfo.status = 'COMPLETED'; - return putObjectAndCheckMD(keyA, newReplicationMD, done); - })); - - it("should update status to 'PENDING' and content to '['METADATA']' " + - 'if putting 0 byte object', done => - objectPut(authInfo, getObjectPutReq(keyA, false), undefined, log, - err => { - if (err) { - return done(err); - } - checkObjectReplicationInfo(keyA, replicateMetadataOnly); - return done(); - })); - - it('should update metadata if putting object ACL and CRR replication', done => { - // Set 'zenko' as a typical CRR location (i.e. no type) - config.locationConstraints['zenko'] = { - ...config.locationConstraints['zenko'], - type: '', + isNFS: undefined, }; - - async.series([ - next => putObjectAndCheckMD(keyA, newReplicationMD, next), - next => { - const objectMD = metadata.keyMaps.get(bucketName).get(keyA); - // Update metadata to a status after replication has occurred. - objectMD.replicationInfo.status = 'COMPLETED'; - objectPutACL(authInfo, objectACLReq, log, next); - }, - ], err => { - if (err) { - return done(err); - } - checkObjectReplicationInfo(keyA, replicateMetadataOnly); - return done(); - }); - }); - - it('should not update metadata if putting object ACL and cloud replication', done => { - // Set 'zenko' as a typical cloud location (i.e. type) - config.locationConstraints['zenko'] = { - ...config.locationConstraints['zenko'], - type: 'aws_s3', - }; - - const replicationMD = { ...newReplicationMD, storageType: 'aws_s3' }; - - let completedReplicationInfo; - async.series([ - next => putObjectAndCheckMD(keyA, replicationMD, next), - next => { - const objectMD = metadata.keyMaps.get(bucketName).get(keyA); - // Update metadata to a status after replication has occurred. - objectMD.replicationInfo.status = 'COMPLETED'; - completedReplicationInfo = JSON.parse( - JSON.stringify(objectMD.replicationInfo)); - objectPutACL(authInfo, objectACLReq, log, next); - }, - ], err => { - if (err) { - return done(err); - } - checkObjectReplicationInfo(keyA, completedReplicationInfo); - return done(); + const newReplicationMD = hasStorageClass + ? 
Object.assign(replicationMD, { storageClass: storageClassType }) + : replicationMD; + const replicateMetadataOnly = Object.assign({}, newReplicationMD, { content: ['METADATA'] }); + + beforeEach(() => { + cleanup(); + createBucketWithReplication(hasStorageClass); }); - }); - - it('should update metadata if putting a delete marker', done => - async.series([ - next => putObjectAndCheckMD(keyA, newReplicationMD, err => { - if (err) { - return next(err); - } - const objectMD = metadata.keyMaps.get(bucketName).get(keyA); - // Set metadata to a status after replication has occurred. - objectMD.replicationInfo.status = 'COMPLETED'; - return next(); - }), - next => objectDelete(authInfo, deleteReq, log, next), - ], err => { - if (err) { - return done(err); - } - const objectMD = metadata.keyMaps.get(bucketName).get(keyA); - assert.strictEqual(objectMD.isDeleteMarker, true); - checkObjectReplicationInfo(keyA, replicateMetadataOnly); - return done(); - })); - - it('should not update metadata if putting a delete marker owned by ' + - 'Lifecycle service account', done => - async.series([ - next => putObjectAndCheckMD(keyA, newReplicationMD, next), - next => objectDelete(authInfoLifecycleService, deleteReq, - log, next), - ], err => { - if (err) { - return done(err); - } - const objectMD = metadata.keyMaps.get(bucketName).get(keyA); - assert.strictEqual(objectMD.isDeleteMarker, true); - checkObjectReplicationInfo(keyA, emptyReplicationMD); - return done(); - })); - describe('Object tagging', () => { - beforeEach(done => async.series([ - next => putObjectAndCheckMD(keyA, newReplicationMD, next), - next => objectPutTagging(authInfo, taggingPutReq, log, next), - ], err => done(err))); - - it("should update status to 'PENDING' and content to " + - "'['METADATA']'if putting tag", done => { - checkObjectReplicationInfo(keyA, replicateMetadataOnly); - return done(); + afterEach(() => { + cleanup(); + delete config.locationConstraints['zenko']; }); - it("should update status to 'PENDING' and content to " + - "'['METADATA']' if deleting tag", done => - async.series([ - // Put a new version to update replication MD content array. 
- next => putObjectAndCheckMD(keyA, newReplicationMD, next), - next => objectDeleteTagging(authInfo, taggingDeleteReq, log, - next), - ], err => { - if (err) { - return done(err); - } - checkObjectReplicationInfo(keyA, replicateMetadataOnly); - return done(); - })); - }); + it('should update metadata when replication config prefix matches ' + 'an object key', done => + putObjectAndCheckMD(keyA, newReplicationMD, done) + ); - describe('Complete MPU', () => { - it("should update status to 'PENDING' and content to " + - "'['DATA, METADATA']' if completing MPU", done => - putMPU(keyA, 'content', err => { - if (err) { - return done(err); - } - checkObjectReplicationInfo(keyA, newReplicationMD); - return done(); - })); + it('should update metadata when replication config prefix matches ' + 'the start of an object key', done => + putObjectAndCheckMD(`${keyA}abc`, newReplicationMD, done) + ); - it("should update status to 'PENDING' and content to " + - "'['METADATA']' if completing MPU with 0 bytes", done => - putMPU(keyA, '', err => { - if (err) { - return done(err); - } - checkObjectReplicationInfo(keyA, replicateMetadataOnly); - return done(); - })); + it( + 'should not update metadata when replication config prefix does ' + + 'not match the start of an object key', + done => putObjectAndCheckMD(`abc${keyA}`, emptyReplicationMD, done) + ); - it('should not update replicationInfo if key does not apply', - done => putMPU(keyB, 'content', err => { - if (err) { - return done(err); - } - checkObjectReplicationInfo(keyB, emptyReplicationMD); - return done(); - })); - }); + it('should not update metadata when replication config prefix does ' + 'not apply', done => + putObjectAndCheckMD(keyB, emptyReplicationMD, done) + ); - describe('Object copy', () => { - it("should update status to 'PENDING' and content to " + - "'['DATA, METADATA']' if copying object", done => - copyObject(keyB, keyA, true, err => { + it("should update status to 'PENDING' if putting a new version", done => + putObjectAndCheckMD(keyA, newReplicationMD, err => { if (err) { return done(err); } - checkObjectReplicationInfo(keyA, newReplicationMD); - return done(); + const objectMD = metadata.keyMaps.get(bucketName).get(keyA); + // Update metadata to a status after replication has occurred. + objectMD.replicationInfo.status = 'COMPLETED'; + return putObjectAndCheckMD(keyA, newReplicationMD, done); })); - it("should update status to 'PENDING' and content to " + - "'['METADATA']' if copying object with 0 bytes", done => - copyObject(keyB, keyA, false, err => { + it("should update status to 'PENDING' and content to '['METADATA']' " + 'if putting 0 byte object', done => + objectPut(authInfo, getObjectPutReq(keyA, false), undefined, log, err => { if (err) { return done(err); } checkObjectReplicationInfo(keyA, replicateMetadataOnly); return done(); - })); + }) + ); + + it('should update metadata if putting object ACL and CRR replication', done => { + // Set 'zenko' as a typical CRR location (i.e. no type) + config.locationConstraints['zenko'] = { + ...config.locationConstraints['zenko'], + type: '', + }; - it('should not update replicationInfo if key does not apply', - done => { - const copyKey = `foo-${keyA}`; - return copyObject(keyB, copyKey, true, err => { + async.series( + [ + next => putObjectAndCheckMD(keyA, newReplicationMD, next), + next => { + const objectMD = metadata.keyMaps.get(bucketName).get(keyA); + // Update metadata to a status after replication has occurred. 
+ objectMD.replicationInfo.status = 'COMPLETED'; + objectPutACL(authInfo, objectACLReq, log, next); + }, + ], + err => { if (err) { return done(err); } - checkObjectReplicationInfo(copyKey, emptyReplicationMD); + checkObjectReplicationInfo(keyA, replicateMetadataOnly); return done(); - }); - }); - }); + } + ); + }); - ['awsbackend', - 'azurebackend', - 'gcpbackend', - 'awsbackend,azurebackend'].forEach(backend => { - const storageTypeMap = { - 'awsbackend': 'aws_s3', - 'azurebackend': 'azure', - 'gcpbackend': 'gcp', - 'awsbackend,azurebackend': 'aws_s3,azure', - }; - const storageType = storageTypeMap[backend]; - const backends = backend.split(',').map(site => ({ - site, - status: 'PENDING', - dataStoreVersionId: '', - })); - describe('Object metadata replicationInfo storageType value', - () => { - const expectedReplicationInfo = { - status: 'PENDING', - backends, - content: ['DATA', 'METADATA'], - destination: 'arn:aws:s3:::destination-bucket', - storageClass: backend, - role: 'arn:aws:iam::account-id:role/resource', - storageType, - dataStoreVersionId: '', - isNFS: undefined, + it('should not update metadata if putting object ACL and cloud replication', done => { + // Set 'zenko' as a typical cloud location (i.e. type) + config.locationConstraints['zenko'] = { + ...config.locationConstraints['zenko'], + type: 'aws_s3', }; - // Expected for a metadata-only replication operation (for - // example, putting object tags). - const expectedReplicationInfoMD = Object.assign({}, - expectedReplicationInfo, { content: ['METADATA'] }); - - beforeEach(() => - // We have already created the bucket, so update the - // replication configuration to include a location - // constraint for the `storageClass`. This results in a - // `storageType` of 'aws_s3', for example. - Object.assign(metadata.buckets.get(bucketName), { - _replicationConfiguration: { - role: 'arn:aws:iam::account-id:role/resource', - destination: 'arn:aws:s3:::destination-bucket', - rules: [{ - prefix: keyA, - enabled: true, - id: 'test-id', - storageClass: backend, - }], - }, - })); - - it('should update on a put object request', done => - putObjectAndCheckMD(keyA, expectedReplicationInfo, done)); + const replicationMD = { ...newReplicationMD, storageType: 'aws_s3' }; - it('should update on a complete MPU object request', done => - putMPU(keyA, 'content', err => { + let completedReplicationInfo; + async.series( + [ + next => putObjectAndCheckMD(keyA, replicationMD, next), + next => { + const objectMD = metadata.keyMaps.get(bucketName).get(keyA); + // Update metadata to a status after replication has occurred. + objectMD.replicationInfo.status = 'COMPLETED'; + completedReplicationInfo = JSON.parse(JSON.stringify(objectMD.replicationInfo)); + objectPutACL(authInfo, objectACLReq, log, next); + }, + ], + err => { if (err) { return done(err); } - const expected = - Object.assign({}, expectedReplicationInfo, - { content: ['DATA', 'METADATA', 'MPU'] }); - checkObjectReplicationInfo(keyA, expected); + checkObjectReplicationInfo(keyA, completedReplicationInfo); return done(); - })); + } + ); + }); - it('should update on a copy object request', done => - copyObject(keyB, keyA, true, err => { + it('should update metadata if putting a delete marker', done => + async.series( + [ + next => + putObjectAndCheckMD(keyA, newReplicationMD, err => { + if (err) { + return next(err); + } + const objectMD = metadata.keyMaps.get(bucketName).get(keyA); + // Set metadata to a status after replication has occurred. 
+ objectMD.replicationInfo.status = 'COMPLETED'; + return next(); + }), + next => objectDelete(authInfo, deleteReq, log, next), + ], + err => { if (err) { return done(err); } - checkObjectReplicationInfo(keyA, - expectedReplicationInfo); + const objectMD = metadata.keyMaps.get(bucketName).get(keyA); + assert.strictEqual(objectMD.isDeleteMarker, true); + checkObjectReplicationInfo(keyA, replicateMetadataOnly); return done(); - })); - - it('should update on a put object ACL request', done => { - let completedReplicationInfo; - async.series([ - next => putObjectAndCheckMD(keyA, - expectedReplicationInfo, next), - next => { - const objectMD = metadata.keyMaps - .get(bucketName).get(keyA); - // Update metadata to a status after replication - // has occurred. - objectMD.replicationInfo.status = 'COMPLETED'; - completedReplicationInfo = JSON.parse( - JSON.stringify(objectMD.replicationInfo)); - objectPutACL(authInfo, objectACLReq, log, next); - }, - ], err => { + } + )); + + it('should not update metadata if putting a delete marker owned by ' + 'Lifecycle service account', done => + async.series( + [ + next => putObjectAndCheckMD(keyA, newReplicationMD, next), + next => objectDelete(authInfoLifecycleService, deleteReq, log, next), + ], + err => { if (err) { return done(err); } - checkObjectReplicationInfo(keyA, completedReplicationInfo); + const objectMD = metadata.keyMaps.get(bucketName).get(keyA); + assert.strictEqual(objectMD.isDeleteMarker, true); + checkObjectReplicationInfo(keyA, emptyReplicationMD); return done(); - }); + } + ) + ); + + describe('Object tagging', () => { + beforeEach(done => + async.series( + [ + next => putObjectAndCheckMD(keyA, newReplicationMD, next), + next => objectPutTagging(authInfo, taggingPutReq, log, next), + ], + err => done(err) + ) + ); + + it("should update status to 'PENDING' and content to " + "'['METADATA']'if putting tag", done => { + checkObjectReplicationInfo(keyA, replicateMetadataOnly); + return done(); }); - it('should update on a put object tagging request', done => - async.series([ - next => putObjectAndCheckMD(keyA, - expectedReplicationInfo, next), - next => objectPutTagging(authInfo, taggingPutReq, log, - next), - ], err => { + it("should update status to 'PENDING' and content to " + "'['METADATA']' if deleting tag", done => + async.series( + [ + // Put a new version to update replication MD content array. 
+ next => putObjectAndCheckMD(keyA, newReplicationMD, next), + next => objectDeleteTagging(authInfo, taggingDeleteReq, log, next), + ], + err => { + if (err) { + return done(err); + } + checkObjectReplicationInfo(keyA, replicateMetadataOnly); + return done(); + } + ) + ); + }); + + describe('Complete MPU', () => { + it( + "should update status to 'PENDING' and content to " + "'['DATA, METADATA']' if completing MPU", + done => + putMPU(keyA, 'content', err => { + if (err) { + return done(err); + } + checkObjectReplicationInfo(keyA, newReplicationMD); + return done(); + }) + ); + + it( + "should update status to 'PENDING' and content to " + + "'['METADATA']' if completing MPU with 0 bytes", + done => + putMPU(keyA, '', err => { + if (err) { + return done(err); + } + checkObjectReplicationInfo(keyA, replicateMetadataOnly); + return done(); + }) + ); + + it('should not update replicationInfo if key does not apply', done => + putMPU(keyB, 'content', err => { if (err) { return done(err); } - const expected = Object.assign({}, - expectedReplicationInfo, - { content: ['METADATA', 'PUT_TAGGING'] }); - checkObjectReplicationInfo(keyA, expected); + checkObjectReplicationInfo(keyB, emptyReplicationMD); return done(); })); + }); - it('should update on a delete tagging request', done => - async.series([ - next => putObjectAndCheckMD(keyA, - expectedReplicationInfo, next), - next => objectDeleteTagging(authInfo, taggingDeleteReq, - log, next), - ], err => { + describe('Object copy', () => { + it( + "should update status to 'PENDING' and content to " + "'['DATA, METADATA']' if copying object", + done => + copyObject(keyB, keyA, true, err => { + if (err) { + return done(err); + } + checkObjectReplicationInfo(keyA, newReplicationMD); + return done(); + }) + ); + + it( + "should update status to 'PENDING' and content to " + + "'['METADATA']' if copying object with 0 bytes", + done => + copyObject(keyB, keyA, false, err => { + if (err) { + return done(err); + } + checkObjectReplicationInfo(keyA, replicateMetadataOnly); + return done(); + }) + ); + + it('should not update replicationInfo if key does not apply', done => { + const copyKey = `foo-${keyA}`; + return copyObject(keyB, copyKey, true, err => { if (err) { return done(err); } - const expected = Object.assign({}, - expectedReplicationInfo, - { content: ['METADATA', 'DELETE_TAGGING'] }); - checkObjectReplicationInfo(keyA, expected); + checkObjectReplicationInfo(copyKey, emptyReplicationMD); return done(); - })); + }); + }); + }); + + ['awsbackend', 'azurebackend', 'gcpbackend', 'awsbackend,azurebackend'].forEach(backend => { + const storageTypeMap = { + awsbackend: 'aws_s3', + azurebackend: 'azure', + gcpbackend: 'gcp', + 'awsbackend,azurebackend': 'aws_s3,azure', + }; + const storageType = storageTypeMap[backend]; + const backends = backend.split(',').map(site => ({ + site, + status: 'PENDING', + dataStoreVersionId: '', + })); + describe('Object metadata replicationInfo storageType value', () => { + const expectedReplicationInfo = { + status: 'PENDING', + backends, + content: ['DATA', 'METADATA'], + destination: 'arn:aws:s3:::destination-bucket', + storageClass: backend, + role: 'arn:aws:iam::account-id:role/resource', + storageType, + dataStoreVersionId: '', + isNFS: undefined, + }; + + // Expected for a metadata-only replication operation (for + // example, putting object tags). 
+ const expectedReplicationInfoMD = Object.assign({}, expectedReplicationInfo, { + content: ['METADATA'], + }); - it('should update when putting a delete marker', done => - async.series([ - next => putObjectAndCheckMD(keyA, - expectedReplicationInfo, err => { + beforeEach(() => + // We have already created the bucket, so update the + // replication configuration to include a location + // constraint for the `storageClass`. This results in a + // `storageType` of 'aws_s3', for example. + Object.assign(metadata.buckets.get(bucketName), { + _replicationConfiguration: { + role: 'arn:aws:iam::account-id:role/resource', + destination: 'arn:aws:s3:::destination-bucket', + rules: [ + { + prefix: keyA, + enabled: true, + id: 'test-id', + storageClass: backend, + }, + ], + }, + }) + ); + + it('should update on a put object request', done => + putObjectAndCheckMD(keyA, expectedReplicationInfo, done)); + + it('should update on a complete MPU object request', done => + putMPU(keyA, 'content', err => { + if (err) { + return done(err); + } + const expected = Object.assign({}, expectedReplicationInfo, { + content: ['DATA', 'METADATA', 'MPU'], + }); + checkObjectReplicationInfo(keyA, expected); + return done(); + })); + + it('should update on a copy object request', done => + copyObject(keyB, keyA, true, err => { + if (err) { + return done(err); + } + checkObjectReplicationInfo(keyA, expectedReplicationInfo); + return done(); + })); + + it('should update on a put object ACL request', done => { + let completedReplicationInfo; + async.series( + [ + next => putObjectAndCheckMD(keyA, expectedReplicationInfo, next), + next => { + const objectMD = metadata.keyMaps.get(bucketName).get(keyA); + // Update metadata to a status after replication + // has occurred. + objectMD.replicationInfo.status = 'COMPLETED'; + completedReplicationInfo = JSON.parse(JSON.stringify(objectMD.replicationInfo)); + objectPutACL(authInfo, objectACLReq, log, next); + }, + ], + err => { if (err) { - return next(err); + return done(err); } - // Update metadata to a status indicating that - // replication has occurred for the object. - metadata - .keyMaps - .get(bucketName) - .get(keyA) - .replicationInfo - .status = 'COMPLETED'; - return next(); - }), - next => objectDelete(authInfo, deleteReq, log, next), - ], err => { - if (err) { - return done(err); - } - // Is it, in fact, a delete marker? 
- assert(metadata - .keyMaps - .get(bucketName) - .get(keyA) - .isDeleteMarker); - checkObjectReplicationInfo(keyA, - expectedReplicationInfoMD); - return done(); - })); + checkObjectReplicationInfo(keyA, completedReplicationInfo); + return done(); + } + ); + }); + + it('should update on a put object tagging request', done => + async.series( + [ + next => putObjectAndCheckMD(keyA, expectedReplicationInfo, next), + next => objectPutTagging(authInfo, taggingPutReq, log, next), + ], + err => { + if (err) { + return done(err); + } + const expected = Object.assign({}, expectedReplicationInfo, { + content: ['METADATA', 'PUT_TAGGING'], + }); + checkObjectReplicationInfo(keyA, expected); + return done(); + } + )); + + it('should update on a delete tagging request', done => + async.series( + [ + next => putObjectAndCheckMD(keyA, expectedReplicationInfo, next), + next => objectDeleteTagging(authInfo, taggingDeleteReq, log, next), + ], + err => { + if (err) { + return done(err); + } + const expected = Object.assign({}, expectedReplicationInfo, { + content: ['METADATA', 'DELETE_TAGGING'], + }); + checkObjectReplicationInfo(keyA, expected); + return done(); + } + )); + + it('should update when putting a delete marker', done => + async.series( + [ + next => + putObjectAndCheckMD(keyA, expectedReplicationInfo, err => { + if (err) { + return next(err); + } + // Update metadata to a status indicating that + // replication has occurred for the object. + metadata.keyMaps.get(bucketName).get(keyA).replicationInfo.status = 'COMPLETED'; + return next(); + }), + next => objectDelete(authInfo, deleteReq, log, next), + ], + err => { + if (err) { + return done(err); + } + // Is it, in fact, a delete marker? + assert(metadata.keyMaps.get(bucketName).get(keyA).isDeleteMarker); + checkObjectReplicationInfo(keyA, expectedReplicationInfoMD); + return done(); + } + )); + }); }); - }); - }); + } + ); }); diff --git a/tests/unit/api/objectRestore.js b/tests/unit/api/objectRestore.js index 931f70fba5..d64bec887f 100644 --- a/tests/unit/api/objectRestore.js +++ b/tests/unit/api/objectRestore.js @@ -23,38 +23,44 @@ const bucketPutRequest = { actionImplicitDenies: false, }; -const putObjectRequest = new DummyRequest({ - bucketName, - namespace, - objectKey: objectName, - headers: {}, - url: `/${bucketName}/${objectName}`, -}, postBody); - -const objectRestoreXml = '' + `${restoreDays}` + 'Standard' + ''; -const objectRestoreXmlBulkTier = '' + `${restoreDays}` + 'Bulk' + ''; -const objectRestoreXmlExpeditedTier = '' + `${restoreDays}` + 'Expedited' + ''; const objectRestoreRequest = requestXml => ({ - bucketName, - objectKey: objectName, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - post: requestXml, - }); + bucketName, + objectKey: objectName, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + post: requestXml, +}); describe('restoreObject API', () => { before(cleanup); @@ -85,7 +91,7 @@ describe('restoreObject API', () => { }); }); - it('should return NotImplemented error when object restore Tier is \'Bulk\'', done => { + it("should return NotImplemented error when object restore Tier is 'Bulk'", done => { mdColdHelper.putBucketMock(bucketName, null, () => { mdColdHelper.putObjectMock(bucketName, objectName, mdColdHelper.getArchivedObjectMD(), () => { objectRestore(authInfo, objectRestoreRequest(objectRestoreXmlBulkTier), log, err => { @@ -96,7 +102,7 @@ describe('restoreObject API', () => { }); }); - it('should return NotImplemented error when object restore Tier is \'Expedited\'', done => { + it("should 
return NotImplemented error when object restore Tier is 'Expedited'", done => { mdColdHelper.putBucketMock(bucketName, null, () => { mdColdHelper.putObjectMock(bucketName, objectName, mdColdHelper.getArchivedObjectMD(), () => { objectRestore(authInfo, objectRestoreRequest(objectRestoreXmlExpeditedTier), log, err => { @@ -107,48 +113,53 @@ describe('restoreObject API', () => { }); }); - it('should return Accepted and update objectMD ' + - 'while restoring an object from cold storage ' + - 'and the object doesn\'t have a restored copy in bucket', done => { - const testStartTime = new Date(Date.now()); - mdColdHelper.putBucketMock(bucketName, null, () => { - mdColdHelper.putObjectMock(bucketName, objectName, mdColdHelper.getArchivedObjectMD(), () => { - objectRestore(authInfo, objectRestoreRequest(objectRestoreXml), log, (err, statusCode) => { - assert.ifError(err); - assert.strictEqual(statusCode, 202); - metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { - const testEndTime = new Date(Date.now()); - assert.strictEqual(md.archive.restoreRequestedDays, restoreDays); - assert.strictEqual(testStartTime < md.archive.restoreRequestedAt < testEndTime, true); - done(); + it( + 'should return Accepted and update objectMD ' + + 'while restoring an object from cold storage ' + + "and the object doesn't have a restored copy in bucket", + done => { + const testStartTime = new Date(Date.now()); + mdColdHelper.putBucketMock(bucketName, null, () => { + mdColdHelper.putObjectMock(bucketName, objectName, mdColdHelper.getArchivedObjectMD(), () => { + objectRestore(authInfo, objectRestoreRequest(objectRestoreXml), log, (err, statusCode) => { + assert.ifError(err); + assert.strictEqual(statusCode, 202); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + const testEndTime = new Date(Date.now()); + assert.strictEqual(md.archive.restoreRequestedDays, restoreDays); + assert.strictEqual(testStartTime < md.archive.restoreRequestedAt < testEndTime, true); + done(); }); + }); }); }); - }); - }); - - it('should update the expiry time and return OK ' + - 'while restoring an object from cold storage ' + - 'and the object have a restored copy in bucket', done => { - const testStartTime = new Date(Date.now()); - mdColdHelper.putBucketMock(bucketName, null, () => { - mdColdHelper.putObjectMock(bucketName, objectName, mdColdHelper.getRestoredObjectMD(), () => { - objectRestore(authInfo, objectRestoreRequest(objectRestoreXml), log, (err, statusCode) => { - assert.ifError(err); - assert.strictEqual(statusCode, 200); - metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { - const testEndTime = new Date(Date.now()); - assert.strictEqual(md.archive.restoreRequestedDays, restoreDays); - assert.strictEqual(testStartTime < md.archive.restoreRequestedAt < testEndTime, true); + } + ); + + it( + 'should update the expiry time and return OK ' + + 'while restoring an object from cold storage ' + + 'and the object have a restored copy in bucket', + done => { + const testStartTime = new Date(Date.now()); + mdColdHelper.putBucketMock(bucketName, null, () => { + mdColdHelper.putObjectMock(bucketName, objectName, mdColdHelper.getRestoredObjectMD(), () => { + objectRestore(authInfo, objectRestoreRequest(objectRestoreXml), log, (err, statusCode) => { + assert.ifError(err); + assert.strictEqual(statusCode, 200); + metadata.getObjectMD(bucketName, objectName, {}, log, (err, md) => { + const testEndTime = new Date(Date.now()); + assert.strictEqual(md.archive.restoreRequestedDays, restoreDays); 
+ assert.strictEqual(testStartTime < md.archive.restoreRequestedAt < testEndTime, true); done(); }); + }); }); }); - }); - }); + } + ); - it('should return InvalidObjectState ' + - 'while restoring an expired restored object', () => { + it('should return InvalidObjectState ' + 'while restoring an expired restored object', () => { mdColdHelper.putBucketMock(bucketName, null, () => { mdColdHelper.putObjectMock(bucketName, objectName, mdColdHelper.getExpiredObjectMD(), () => { objectRestore(authInfo, objectRestoreRequest(objectRestoreXml), log, err => { diff --git a/tests/unit/api/parseLikeExpression.js b/tests/unit/api/parseLikeExpression.js index 469b6a8df7..3e29326b85 100644 --- a/tests/unit/api/parseLikeExpression.js +++ b/tests/unit/api/parseLikeExpression.js @@ -1,6 +1,5 @@ const assert = require('assert'); -const parseLikeExpression = - require('../../../lib/api/apiUtils/bucket/parseLikeExpression'); +const parseLikeExpression = require('../../../lib/api/apiUtils/bucket/parseLikeExpression'); describe('parseLikeExpression', () => { const tests = [ @@ -29,11 +28,12 @@ describe('parseLikeExpression', () => { output: { $regex: /\//, $options: '' }, }, ]; - tests.forEach(test => it('should return correct MongoDB query object: ' + - `"${test.input}" => ${JSON.stringify(test.output)}`, () => { - const res = parseLikeExpression(test.input); - assert.deepStrictEqual(res, test.output); - })); + tests.forEach(test => + it('should return correct MongoDB query object: ' + `"${test.input}" => ${JSON.stringify(test.output)}`, () => { + const res = parseLikeExpression(test.input); + assert.deepStrictEqual(res, test.output); + }) + ); const badInputTests = [ { input: null, @@ -44,10 +44,10 @@ describe('parseLikeExpression', () => { output: null, }, ]; - badInputTests.forEach(test => it( - 'should return null if input is not a string ' + - `"${test.input}" => ${JSON.stringify(test.output)}`, () => { - const res = parseLikeExpression(test.input); - assert.deepStrictEqual(res, test.output); - })); + badInputTests.forEach(test => + it('should return null if input is not a string ' + `"${test.input}" => ${JSON.stringify(test.output)}`, () => { + const res = parseLikeExpression(test.input); + assert.deepStrictEqual(res, test.output); + }) + ); }); diff --git a/tests/unit/api/serviceGet.js b/tests/unit/api/serviceGet.js index 0804ac103d..ce16ef2180 100644 --- a/tests/unit/api/serviceGet.js +++ b/tests/unit/api/serviceGet.js @@ -45,35 +45,33 @@ describe('serviceGet API', () => { url: '/', headers: { host: `${bucketName3}.s3.amazonaws.com` }, }; - async.waterfall([ - function waterfall1(next) { - bucketPut(authInfo, testbucketPutRequest1, log, next); - }, - function waterfall2(corsHeaders, next) { - bucketPut(authInfo, testbucketPutRequest2, log, next); - }, - function waterfall3(corsHeaders, next) { - bucketPut(authInfo, testbucketPutRequest3, log, next); - }, - function waterfall4(corsHeaders, next) { - serviceGet(authInfo, serviceGetRequest, log, next); - }, - function waterfall4(result, next) { - parseString(result, next); - }, - ], (err, result) => { - assert.strictEqual(result.ListAllMyBucketsResult - .Buckets[0].Bucket.length, 3); - assert.strictEqual(result.ListAllMyBucketsResult - .Buckets[0].Bucket[0].Name[0], bucketName1); - assert.strictEqual(result.ListAllMyBucketsResult - .Buckets[0].Bucket[1].Name[0], bucketName2); - assert.strictEqual(result.ListAllMyBucketsResult - .Buckets[0].Bucket[2].Name[0], bucketName3); - assert.strictEqual(result.ListAllMyBucketsResult.$.xmlns, - 
'http://s3.amazonaws.com/doc/2006-03-01/'); - done(); - }); + async.waterfall( + [ + function waterfall1(next) { + bucketPut(authInfo, testbucketPutRequest1, log, next); + }, + function waterfall2(corsHeaders, next) { + bucketPut(authInfo, testbucketPutRequest2, log, next); + }, + function waterfall3(corsHeaders, next) { + bucketPut(authInfo, testbucketPutRequest3, log, next); + }, + function waterfall4(corsHeaders, next) { + serviceGet(authInfo, serviceGetRequest, log, next); + }, + function waterfall4(result, next) { + parseString(result, next); + }, + ], + (err, result) => { + assert.strictEqual(result.ListAllMyBucketsResult.Buckets[0].Bucket.length, 3); + assert.strictEqual(result.ListAllMyBucketsResult.Buckets[0].Bucket[0].Name[0], bucketName1); + assert.strictEqual(result.ListAllMyBucketsResult.Buckets[0].Bucket[1].Name[0], bucketName2); + assert.strictEqual(result.ListAllMyBucketsResult.Buckets[0].Bucket[2].Name[0], bucketName3); + assert.strictEqual(result.ListAllMyBucketsResult.$.xmlns, 'http://s3.amazonaws.com/doc/2006-03-01/'); + done(); + } + ); }); it('should prevent anonymous user from accessing getService API', done => { diff --git a/tests/unit/api/transientBucket.js b/tests/unit/api/transientBucket.js index 9ddf6952db..1ebcf7c7bd 100644 --- a/tests/unit/api/transientBucket.js +++ b/tests/unit/api/transientBucket.js @@ -14,15 +14,12 @@ const bucketPutWebsite = require('../../../lib/api/bucketPutWebsite'); const bucketDelete = require('../../../lib/api/bucketDelete'); const bucketDeleteCors = require('../../../lib/api/bucketDeleteCors'); const bucketDeleteWebsite = require('../../../lib/api/bucketDeleteWebsite'); -const completeMultipartUpload - = require('../../../lib/api/completeMultipartUpload'); +const completeMultipartUpload = require('../../../lib/api/completeMultipartUpload'); const { config } = require('../../../lib/Config'); const constants = require('../../../constants'); const DummyRequest = require('../DummyRequest'); -const initiateMultipartUpload - = require('../../../lib/api/initiateMultipartUpload'); -const { cleanup, createAlteredRequest, DummyRequestLogger, makeAuthInfo } - = require('../helpers'); +const initiateMultipartUpload = require('../../../lib/api/initiateMultipartUpload'); +const { cleanup, createAlteredRequest, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const listMultipartUploads = require('../../../lib/api/listMultipartUploads'); const listParts = require('../../../lib/api/listParts'); const metadata = require('../metadataswitch'); @@ -63,18 +60,15 @@ const serviceGetRequest = { const userBucketOwner = 'admin'; const creationDate = new Date().toJSON(); -const usersBucket = new BucketInfo(usersBucketName, - userBucketOwner, userBucketOwner, creationDate); +const usersBucket = new BucketInfo(usersBucketName, userBucketOwner, userBucketOwner, creationDate); const locationConstraint = 'us-east-1'; describe('transient bucket handling', () => { beforeEach(done => { cleanup(); - const bucketMD = new BucketInfo(bucketName, canonicalID, - authInfo.getAccountDisplayName(), creationDate); + const bucketMD = new BucketInfo(bucketName, canonicalID, authInfo.getAccountDisplayName(), creationDate); bucketMD.addTransientFlag(); - bucketMD.setSpecificAcl(otherAccountAuthInfo.getCanonicalID(), - 'WRITE_ACP'); + bucketMD.setSpecificAcl(otherAccountAuthInfo.getCanonicalID(), 'WRITE_ACP'); bucketMD.setLocationConstraint(locationConstraint); metadata.createBucket(bucketName, bucketMD, log, () => { metadata.createBucket(usersBucketName, usersBucket, 
log, () => { @@ -83,86 +77,108 @@ describe('transient bucket handling', () => { }); }); - it('putBucket request should complete creation of transient bucket if ' + - 'request is from same account that originally put', done => { - bucketPut(authInfo, baseTestRequest, log, err => { - assert.ifError(err); - serviceGet(authInfo, serviceGetRequest, log, (err, data) => { - parseString(data, (err, result) => { - assert.strictEqual(result.ListAllMyBucketsResult - .Buckets[0].Bucket.length, 1); - assert.strictEqual(result.ListAllMyBucketsResult - .Buckets[0].Bucket[0].Name[0], bucketName); - done(); + it( + 'putBucket request should complete creation of transient bucket if ' + + 'request is from same account that originally put', + done => { + bucketPut(authInfo, baseTestRequest, log, err => { + assert.ifError(err); + serviceGet(authInfo, serviceGetRequest, log, (err, data) => { + parseString(data, (err, result) => { + assert.strictEqual(result.ListAllMyBucketsResult.Buckets[0].Bucket.length, 1); + assert.strictEqual(result.ListAllMyBucketsResult.Buckets[0].Bucket[0].Name[0], bucketName); + done(); + }); }); }); - }); - }); + } + ); - it('putBucket request should return error if ' + - 'transient bucket created by different account', done => { + it('putBucket request should return error if ' + 'transient bucket created by different account', done => { bucketPut(otherAccountAuthInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.BucketAlreadyExists, true); - serviceGet(otherAccountAuthInfo, serviceGetRequest, - log, (err, data) => { - parseString(data, (err, result) => { - assert.strictEqual(result.ListAllMyBucketsResult - .Buckets[0], ''); - done(); - }); + serviceGet(otherAccountAuthInfo, serviceGetRequest, log, (err, data) => { + parseString(data, (err, result) => { + assert.strictEqual(result.ListAllMyBucketsResult.Buckets[0], ''); + done(); }); + }); }); }); - it('ACLs from clean up putBucket request should overwrite ACLs from ' + - 'original failed request that resulted in transient state', done => { - const alteredRequest = createAlteredRequest({ - 'x-amz-acl': 'public-read' }, 'headers', - baseTestRequest, baseTestRequest.headers); - bucketPut(authInfo, alteredRequest, log, err => { - assert.ifError(err); - metadata.getBucket(bucketName, log, (err, data) => { - assert.strictEqual(data._transient, false); - assert.strictEqual(data._acl.Canned, 'public-read'); - assert.strictEqual(data._owner, authInfo.getCanonicalID()); - done(); + it( + 'ACLs from clean up putBucket request should overwrite ACLs from ' + + 'original failed request that resulted in transient state', + done => { + const alteredRequest = createAlteredRequest( + { + 'x-amz-acl': 'public-read', + }, + 'headers', + baseTestRequest, + baseTestRequest.headers + ); + bucketPut(authInfo, alteredRequest, log, err => { + assert.ifError(err); + metadata.getBucket(bucketName, log, (err, data) => { + assert.strictEqual(data._transient, false); + assert.strictEqual(data._acl.Canned, 'public-read'); + assert.strictEqual(data._owner, authInfo.getCanonicalID()); + done(); + }); }); - }); - }); + } + ); - it('putBucketACL request should complete creation of transient bucket if ' + - 'request is from same account that originally put', done => { - const putACLRequest = createAlteredRequest({ - 'x-amz-acl': 'public-read' }, 'headers', - baseTestRequest, baseTestRequest.headers); - putACLRequest.url = '/?acl'; - putACLRequest.query = { acl: '' }; - bucketPutACL(authInfo, putACLRequest, log, err => { - assert.ifError(err); - 
metadata.getBucket(bucketName, log, (err, data) => { - assert.strictEqual(data._transient, false); - assert.strictEqual(data._acl.Canned, 'public-read'); - assert.strictEqual(data._owner, authInfo.getCanonicalID()); - done(); + it( + 'putBucketACL request should complete creation of transient bucket if ' + + 'request is from same account that originally put', + done => { + const putACLRequest = createAlteredRequest( + { + 'x-amz-acl': 'public-read', + }, + 'headers', + baseTestRequest, + baseTestRequest.headers + ); + putACLRequest.url = '/?acl'; + putACLRequest.query = { acl: '' }; + bucketPutACL(authInfo, putACLRequest, log, err => { + assert.ifError(err); + metadata.getBucket(bucketName, log, (err, data) => { + assert.strictEqual(data._transient, false); + assert.strictEqual(data._acl.Canned, 'public-read'); + assert.strictEqual(data._owner, authInfo.getCanonicalID()); + done(); + }); }); - }); - }); + } + ); - it('putBucketACL request should complete creation of transient bucket if ' + - 'request is from another authorized account', done => { - const putACLRequest = createAlteredRequest({ - 'x-amz-acl': 'public-read' }, 'headers', - baseTestRequest, baseTestRequest.headers); - bucketPutACL(otherAccountAuthInfo, putACLRequest, log, err => { - assert.ifError(err); - metadata.getBucket(bucketName, log, (err, data) => { - assert.strictEqual(data._transient, false); - assert.strictEqual(data._acl.Canned, 'public-read'); - assert.strictEqual(data._owner, authInfo.getCanonicalID()); - done(); + it( + 'putBucketACL request should complete creation of transient bucket if ' + + 'request is from another authorized account', + done => { + const putACLRequest = createAlteredRequest( + { + 'x-amz-acl': 'public-read', + }, + 'headers', + baseTestRequest, + baseTestRequest.headers + ); + bucketPutACL(otherAccountAuthInfo, putACLRequest, log, err => { + assert.ifError(err); + metadata.getBucket(bucketName, log, (err, data) => { + assert.strictEqual(data._transient, false); + assert.strictEqual(data._acl.Canned, 'public-read'); + assert.strictEqual(data._owner, authInfo.getCanonicalID()); + done(); + }); }); - }); - }); + } + ); describe('objectPut on a transient bucket', () => { const objName = 'objectName'; @@ -172,10 +188,8 @@ describe('transient bucket handling', () => { }); }); - it('objectPut request should complete creation of transient bucket', - done => { - const setUpRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('objectPut request should complete creation of transient bucket', done => { + const setUpRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); setUpRequest.objectKey = objName; const postBody = Buffer.from('I am a body', 'utf8'); const md5Hash = crypto.createHash('md5'); @@ -186,12 +200,11 @@ describe('transient bucket handling', () => { metadata.getBucket(bucketName, log, (err, data) => { assert.strictEqual(data._transient, false); assert.strictEqual(data._owner, authInfo.getCanonicalID()); - metadata.getObjectMD(bucketName, objName, {}, log, - (err, obj) => { - assert.ifError(err); - assert.strictEqual(obj['content-md5'], etag); - done(); - }); + metadata.getObjectMD(bucketName, objName, {}, log, (err, obj) => { + assert.ifError(err); + assert.strictEqual(obj['content-md5'], etag); + done(); + }); }); }); }); @@ -200,19 +213,15 @@ describe('transient bucket handling', () => { describe('initiateMultipartUpload on a transient bucket', () => { const objName = 'objectName'; after(done => { - 
metadata.deleteObjectMD(`${constants.mpuBucketPrefix}` + - `${bucketName}`, objName, {}, log, () => { - metadata.deleteBucket(`${constants.mpuBucketPrefix}` + - `${bucketName}`, log, () => { - done(); - }); + metadata.deleteObjectMD(`${constants.mpuBucketPrefix}` + `${bucketName}`, objName, {}, log, () => { + metadata.deleteBucket(`${constants.mpuBucketPrefix}` + `${bucketName}`, log, () => { + done(); }); + }); }); - it('initiateMultipartUpload request should complete ' + - 'creation of transient bucket', done => { - const initiateRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('initiateMultipartUpload request should complete ' + 'creation of transient bucket', done => { + const initiateRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); initiateRequest.objectKey = objName; initiateRequest.url = `/${objName}?uploads`; initiateMultipartUpload(authInfo, initiateRequest, log, err => { @@ -220,21 +229,22 @@ describe('transient bucket handling', () => { metadata.getBucket(bucketName, log, (err, data) => { assert.strictEqual(data._transient, false); assert.strictEqual(data._owner, authInfo.getCanonicalID()); - metadata.listObject(`${constants.mpuBucketPrefix}` + - `${bucketName}`, + metadata.listObject( + `${constants.mpuBucketPrefix}` + `${bucketName}`, { prefix: `overview${constants.splitter}${objName}` }, - log, (err, results) => { + log, + (err, results) => { assert.ifError(err); assert.strictEqual(results.Contents.length, 1); done(); - }); + } + ); }); }); }); }); - it('deleteBucket request should delete transient bucket if ' + - 'request is from owner', done => { + it('deleteBucket request should delete transient bucket if ' + 'request is from owner', done => { bucketDelete(authInfo, baseTestRequest, log, err => { assert.ifError(err); metadata.getBucket(bucketName, log, err => { @@ -244,166 +254,140 @@ describe('transient bucket handling', () => { }); }); - it('deleteBucket request should return error if ' + - 'request is not from owner', done => { - bucketDelete(otherAccountAuthInfo, baseTestRequest, - log, err => { - assert.strictEqual(err.is.AccessDenied, true); - done(); - }); + it('deleteBucket request should return error if ' + 'request is not from owner', done => { + bucketDelete(otherAccountAuthInfo, baseTestRequest, log, err => { + assert.strictEqual(err.is.AccessDenied, true); + done(); + }); }); - it('bucketGet request on transient bucket should return NoSuchBucket' + - 'error', done => { - const bucketGetRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('bucketGet request on transient bucket should return NoSuchBucket' + 'error', done => { + const bucketGetRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); bucketGetRequest.url = `/${bucketName}`; bucketGetRequest.query = {}; - bucketGet(authInfo, bucketGetRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - done(); - }); + bucketGet(authInfo, bucketGetRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + done(); + }); }); - it('bucketGetACL request on transient bucket should return NoSuchBucket' + - 'error', done => { - const bucketGetACLRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('bucketGetACL request on transient bucket should return NoSuchBucket' + 'error', done => { + const bucketGetACLRequest = createAlteredRequest({}, 'headers', baseTestRequest, 
baseTestRequest.headers); bucketGetACLRequest.url = '/?acl'; bucketGetACLRequest.query = { acl: '' }; - bucketGetACL(authInfo, bucketGetACLRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - done(); - }); + bucketGetACL(authInfo, bucketGetACLRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + done(); + }); }); - it('bucketGetCors request on transient bucket should return ' + - 'NoSuchBucket error', done => { + it('bucketGetCors request on transient bucket should return ' + 'NoSuchBucket error', done => { bucketGetCors(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); }); - it('bucketPutCors request on transient bucket should return ' + - 'NoSuchBucket error', done => { - const bucketPutCorsRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); - bucketPutCorsRequest.post = '<CORSConfiguration><CORSRule>' + - '<AllowedMethod>PUT</AllowedMethod>' + - '<AllowedOrigin>http://www.example.com</AllowedOrigin>' + - '</CORSRule></CORSConfiguration>'; - bucketPutCorsRequest.headers['content-md5'] = crypto.createHash('md5') - .update(bucketPutCorsRequest.post, 'utf8').digest('base64'); + it('bucketPutCors request on transient bucket should return ' + 'NoSuchBucket error', done => { + const bucketPutCorsRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); + bucketPutCorsRequest.post = + '<CORSConfiguration><CORSRule>' + + '<AllowedMethod>PUT</AllowedMethod>' + + '<AllowedOrigin>http://www.example.com</AllowedOrigin>' + + '</CORSRule></CORSConfiguration>'; + bucketPutCorsRequest.headers['content-md5'] = crypto + .createHash('md5') + .update(bucketPutCorsRequest.post, 'utf8') + .digest('base64'); bucketPutCors(authInfo, bucketPutCorsRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); }); - it('bucketDeleteCors request on transient bucket should return ' + - 'NoSuchBucket error', done => { + it('bucketDeleteCors request on transient bucket should return ' + 'NoSuchBucket error', done => { bucketDeleteCors(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); }); - it('bucketGetWebsite request on transient bucket should return ' + - 'NoSuchBucket error', done => { + it('bucketGetWebsite request on transient bucket should return ' + 'NoSuchBucket error', done => { bucketGetWebsite(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); }); - it('bucketPutWebsite request on transient bucket should return ' + - 'NoSuchBucket error', done => { - const bucketPutWebsiteRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); - bucketPutWebsiteRequest.post = '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' + - '<IndexDocument><Suffix>index.html</Suffix></IndexDocument>' + - '</WebsiteConfiguration>'; + it('bucketPutWebsite request on transient bucket should return ' + 'NoSuchBucket error', done => { + const bucketPutWebsiteRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); + bucketPutWebsiteRequest.post = + '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' + + '<IndexDocument><Suffix>index.html</Suffix></IndexDocument>' + + '</WebsiteConfiguration>'; bucketPutWebsite(authInfo, bucketPutWebsiteRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); }); - it('bucketDeleteWebsite request on transient bucket should return ' + - 'NoSuchBucket error', done => { + it('bucketDeleteWebsite request on transient bucket should return ' + 'NoSuchBucket error', done => { bucketDeleteWebsite(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); }); - it('bucketHead request on transient bucket should return NoSuchBucket' + - 'error', done => { - bucketHead(authInfo, baseTestRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - done(); - }); +
it('bucketHead request on transient bucket should return NoSuchBucket' + 'error', done => { + bucketHead(authInfo, baseTestRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + done(); + }); }); - it('completeMultipartUpload request on transient bucket should ' + - 'return NoSuchUpload error', done => { - const completeMpuRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('completeMultipartUpload request on transient bucket should ' + 'return NoSuchUpload error', done => { + const completeMpuRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); const uploadId = '5555'; completeMpuRequest.objectKey = 'objectName'; completeMpuRequest.query = { uploadId }; - completeMultipartUpload(authInfo, completeMpuRequest, - log, err => { - assert.strictEqual(err.is.NoSuchUpload, true); - done(); - }); + completeMultipartUpload(authInfo, completeMpuRequest, log, err => { + assert.strictEqual(err.is.NoSuchUpload, true); + done(); + }); }); - it('listParts request on transient bucket should ' + - 'return NoSuchUpload error', done => { - const listRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('listParts request on transient bucket should ' + 'return NoSuchUpload error', done => { + const listRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); const uploadId = '5555'; listRequest.objectKey = 'objectName'; listRequest.query = { uploadId }; - listParts(authInfo, listRequest, - log, err => { - assert.strictEqual(err.is.NoSuchUpload, true); - done(); - }); + listParts(authInfo, listRequest, log, err => { + assert.strictEqual(err.is.NoSuchUpload, true); + done(); + }); }); describe('multipartDelete request on a transient bucket', () => { - const deleteRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + const deleteRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); const uploadId = '5555'; deleteRequest.objectKey = 'objectName'; deleteRequest.query = { uploadId }; - const originalLegacyAWSBehavior = - config.locationConstraints[locationConstraint].legacyAwsBehavior; + const originalLegacyAWSBehavior = config.locationConstraints[locationConstraint].legacyAwsBehavior; after(done => { - config.locationConstraints[locationConstraint].legacyAwsBehavior = - originalLegacyAWSBehavior; + config.locationConstraints[locationConstraint].legacyAwsBehavior = originalLegacyAWSBehavior; done(); }); - it('should return NoSuchUpload error if legacyAwsBehavior is enabled', - done => { - config.locationConstraints[locationConstraint]. 
- legacyAwsBehavior = true; + it('should return NoSuchUpload error if legacyAwsBehavior is enabled', done => { + config.locationConstraints[locationConstraint].legacyAwsBehavior = true; multipartDelete(authInfo, deleteRequest, log, err => { assert.strictEqual(err.is.NoSuchUpload, true); done(); }); }); - it('should return no error if legacyAwsBehavior is not enabled', - done => { + it('should return no error if legacyAwsBehavior is not enabled', done => { config.locationConstraints[locationConstraint].legacyAwsBehavior = false; multipartDelete(authInfo, deleteRequest, log, err => { assert.ifError(err); @@ -412,75 +396,59 @@ describe('transient bucket handling', () => { }); }); - it('objectPutPart request on transient bucket should ' + - 'return NoSuchUpload error', done => { - const putPartRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('objectPutPart request on transient bucket should ' + 'return NoSuchUpload error', done => { + const putPartRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); const uploadId = '5555'; putPartRequest.objectKey = 'objectName'; putPartRequest.query = { uploadId, - partNumber: '1' }; - objectPutPart(authInfo, putPartRequest, undefined, - log, err => { - assert.strictEqual(err.is.NoSuchUpload, true); - done(); - }); + partNumber: '1', + }; + objectPutPart(authInfo, putPartRequest, undefined, log, err => { + assert.strictEqual(err.is.NoSuchUpload, true); + done(); + }); }); - it('list multipartUploads request on transient bucket should ' + - 'return NoSuchBucket error', done => { - const listRequest = createAlteredRequest({}, 'headers', - baseTestRequest, baseTestRequest.headers); + it('list multipartUploads request on transient bucket should ' + 'return NoSuchBucket error', done => { + const listRequest = createAlteredRequest({}, 'headers', baseTestRequest, baseTestRequest.headers); listRequest.query = {}; - listMultipartUploads(authInfo, listRequest, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - done(); - }); + listMultipartUploads(authInfo, listRequest, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + done(); + }); }); - it('objectGet request on transient bucket should' + - 'return NoSuchBucket error', - done => { - objectGet(authInfo, baseTestRequest, false, - log, err => { - assert.strictEqual(err.is.NoSuchBucket, true); - done(); - }); + it('objectGet request on transient bucket should' + 'return NoSuchBucket error', done => { + objectGet(authInfo, baseTestRequest, false, log, err => { + assert.strictEqual(err.is.NoSuchBucket, true); + done(); }); + }); - it('objectGetACL request on transient bucket should return ' + - 'NoSuchBucket error', done => { - objectGetACL(authInfo, baseTestRequest, - log, err => { + it('objectGetACL request on transient bucket should return ' + 'NoSuchBucket error', done => { + objectGetACL(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); }); - it('objectHead request on transient bucket should return ' + - 'NoSuchBucket error', done => { - objectHead(authInfo, baseTestRequest, - log, err => { + it('objectHead request on transient bucket should return ' + 'NoSuchBucket error', done => { + objectHead(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); }); - it('objectPutACL request on transient bucket should return ' + - 'NoSuchBucket error', done => { - objectPutACL(authInfo, baseTestRequest, - log, err => { + 
it('objectPutACL request on transient bucket should return ' + 'NoSuchBucket error', done => { + objectPutACL(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); }); - it('objectDelete request on transient bucket should return ' + - 'NoSuchBucket error', done => { - objectDelete(authInfo, baseTestRequest, - log, err => { + it('objectDelete request on transient bucket should return ' + 'NoSuchBucket error', done => { + objectDelete(authInfo, baseTestRequest, log, err => { assert.strictEqual(err.is.NoSuchBucket, true); done(); }); diff --git a/tests/unit/api/utils/metadataMockColdStorage.js b/tests/unit/api/utils/metadataMockColdStorage.js index 34cdf6bbd5..23cfd13da3 100644 --- a/tests/unit/api/utils/metadataMockColdStorage.js +++ b/tests/unit/api/utils/metadataMockColdStorage.js @@ -26,7 +26,7 @@ const baseMd = { FULL_CONTROL: [], WRITE_ACP: [], READ: [], - READ_ACP: [] + READ_ACP: [], }, key: 'objectName', location: [ @@ -35,8 +35,8 @@ const baseMd = { size: 11, start: 0, dataStoreName: 'mem', - dataStoreETag: '1:be747eb4b75517bf6b3cf7c5fbb62f3a' - } + dataStoreETag: '1:be747eb4b75517bf6b3cf7c5fbb62f3a', + }, ], isDeleteMarker: false, tags: {}, @@ -49,13 +49,13 @@ const baseMd = { role: '', storageType: '', dataStoreVersionId: '', - isNFS: null + isNFS: null, }, dataStoreName: 'mem', originOp: 's3:ObjectCreated:Put', 'last-modified': '2022-05-10T08:31:51.878Z', 'md-model-version': 5, - 'x-amz-meta-test': 'some metadata' + 'x-amz-meta-test': 'some metadata', }; /** @@ -77,7 +77,8 @@ function putBucketMock(bucketName, location, cb) { null, null, null, - location); + location + ); return metadata.createBucket(bucketName, bucket, log, cb); } @@ -113,9 +114,11 @@ function getTransitionInProgressObjectMD() { */ function getArchivedObjectMD() { return getTransitionInProgressObjectMD() - .setArchive(new ObjectMDArchive( - { foo: 0, bar: 'stuff' }, // opaque, can be anything... - )) + .setArchive( + new ObjectMDArchive( + { foo: 0, bar: 'stuff' } // opaque, can be anything... 
+ ) + ) .setDataStoreName(defaultLocation) .setAmzStorageClass(defaultLocation) .setTransitionInProgress(false) @@ -129,14 +132,8 @@ function getArchivedObjectMD() { function getRestoringObjectMD() { const archivedObjectMD = getArchivedObjectMD(); return archivedObjectMD - .setAmzRestore(new ObjectMDAmzRestore( - true, - )) - .setArchive(new ObjectMDArchive( - archivedObjectMD.getArchive().getArchiveInfo(), - new Date(Date.now() - 60), - 5, - )) + .setAmzRestore(new ObjectMDAmzRestore(true)) + .setArchive(new ObjectMDArchive(archivedObjectMD.getArchive().getArchiveInfo(), new Date(Date.now() - 60), 5)) .setOriginOp('s3:ObjectRestore:Post'); } @@ -151,21 +148,20 @@ function getRestoredObjectMD(date) { const expiryDate = date || new Date(restoreDate.getTime() + 1000 * 60 * 60 * 24 * restoreDays); const restoringObjectMD = getRestoringObjectMD(); - const restoreInfo = new ObjectMDAmzRestore( - false, - expiryDate, - ); + const restoreInfo = new ObjectMDAmzRestore(false, expiryDate); restoreInfo['content-md5'] = restoredEtag; return restoringObjectMD .setAmzRestore(restoreInfo) - .setArchive(new ObjectMDArchive( - restoringObjectMD.getArchive().getArchiveInfo(), - new Date(Date.now() - 60000), - restoreDays, - restoreDate, - expiryDate, - )) + .setArchive( + new ObjectMDArchive( + restoringObjectMD.getArchive().getArchiveInfo(), + new Date(Date.now() - 60000), + restoreDays, + restoreDate, + expiryDate + ) + ) .setDataStoreName('mem') .setOriginOp('s3:ObjectRestore:Completed'); } diff --git a/tests/unit/auth/TrailingChecksumTransform.js b/tests/unit/auth/TrailingChecksumTransform.js index 9cae54e659..b1ae3fbbd8 100644 --- a/tests/unit/auth/TrailingChecksumTransform.js +++ b/tests/unit/auth/TrailingChecksumTransform.js @@ -9,11 +9,12 @@ const { DummyRequestLogger } = require('../helpers'); const log = new DummyRequestLogger(); // note this is not the correct checksum in objDataWithTrailingChecksum -const objDataWithTrailingChecksum = '10\r\n01234\r6789abcd\r\n\r\n' + - '2\r\n01\r\n' + - '1\r\n2\r\n' + - 'd\r\n3456789abcdef\r\n' + - '0\r\nchecksum:xyz=\r\n'; +const objDataWithTrailingChecksum = + '10\r\n01234\r6789abcd\r\n\r\n' + + '2\r\n01\r\n' + + '1\r\n2\r\n' + + 'd\r\n3456789abcdef\r\n' + + '0\r\nchecksum:xyz=\r\n'; const objDataWithoutTrailingChecksum = '01234\r6789abcd\r\n0123456789abcdef'; class ChunkedReader extends Readable { @@ -43,9 +44,7 @@ describe('TrailingChecksumTransform class', () => { trailingChecksumTransform.on('error', err => { assert.strictEqual(err, null); }); - const chunks = [ - Buffer.from(objDataWithTrailingChecksum), - ]; + const chunks = [Buffer.from(objDataWithTrailingChecksum)]; const chunkedReader = new ChunkedReader(chunks); chunkedReader.pipe(trailingChecksumTransform); const outputChunks = []; diff --git a/tests/unit/auth/V4Transform.js b/tests/unit/auth/V4Transform.js index daad1138df..419ba1e10d 100644 --- a/tests/unit/auth/V4Transform.js +++ b/tests/unit/auth/V4Transform.js @@ -7,8 +7,7 @@ const { DummyRequestLogger } = require('../helpers'); const log = new DummyRequestLogger(); const streamingV4Params = { accessKey: 'accessKey1', - signatureFromRequest: '2b8637632a997e06ee7b6c85d7' + - '147d2025e8f04d4374f4d7d7320de1618c7509', + signatureFromRequest: '2b8637632a997e06ee7b6c85d7' + '147d2025e8f04d4374f4d7d7320de1618c7509', region: 'us-east-1', scopeDate: '20170516', timestamp: '20170516T204738Z', @@ -33,15 +32,10 @@ describe('V4Transform class', () => { const v4Transform = new V4Transform(streamingV4Params, log, err => { assert.strictEqual(err, null); }); 
- const filler1 = '8;chunk-signature=51d2511f7c6887907dff20474d8db6' + - '7d557e5f515a6fa6a8466bb12f8833bcca\r\ncontents\r\n'; - const filler2 = '0;chunk-signature=c0eac24b7ce72141ec077df9753db' + - '4cc8b7991491806689da0395c8bd0231e48\r\n'; - const chunks = [ - Buffer.from(filler1), - Buffer.from(filler2), - null, - ]; + const filler1 = + '8;chunk-signature=51d2511f7c6887907dff20474d8db6' + '7d557e5f515a6fa6a8466bb12f8833bcca\r\ncontents\r\n'; + const filler2 = '0;chunk-signature=c0eac24b7ce72141ec077df9753db' + '4cc8b7991491806689da0395c8bd0231e48\r\n'; + const chunks = [Buffer.from(filler1), Buffer.from(filler2), null]; const authMe = new AuthMe(chunks); authMe.pipe(v4Transform); v4Transform.on('finish', () => { @@ -54,15 +48,10 @@ describe('V4Transform class', () => { assert(err); done(); }); - const filler1 = '8;chunk-signature=51d2511f7c6887907dff20474d8db6' + - '7d557e5f515a6fa6a8466bb12f8833bcca\r\ncontents\r\n'; - const filler2 = '0;chunk-signature=baadc0debaadc0debaadc0debaadc0de' + - 'baadc0debaadc0debaadc0debaadc0de\r\n'; - const chunks = [ - Buffer.from(filler1), - Buffer.from(filler2), - null, - ]; + const filler1 = + '8;chunk-signature=51d2511f7c6887907dff20474d8db6' + '7d557e5f515a6fa6a8466bb12f8833bcca\r\ncontents\r\n'; + const filler2 = '0;chunk-signature=baadc0debaadc0debaadc0debaadc0de' + 'baadc0debaadc0debaadc0debaadc0de\r\n'; + const chunks = [Buffer.from(filler1), Buffer.from(filler2), null]; const authMe = new AuthMe(chunks); authMe.pipe(v4Transform); }); @@ -71,17 +60,11 @@ describe('V4Transform class', () => { const v4Transform = new V4Transform(streamingV4Params, log, () => { assert(false); }); - const filler1 = '8;chunk-signature=51d2511f7c6887907dff20474d8db6' + - '7d557e5f515a6fa6a8466bb12f8833bcca\r\ncontents\r\n'; - const filler2 = '0;chunk-signature=c0eac24b7ce72141ec077df9753db' + - '4cc8b7991491806689da0395c8bd0231e48\r\n'; + const filler1 = + '8;chunk-signature=51d2511f7c6887907dff20474d8db6' + '7d557e5f515a6fa6a8466bb12f8833bcca\r\ncontents\r\n'; + const filler2 = '0;chunk-signature=c0eac24b7ce72141ec077df9753db' + '4cc8b7991491806689da0395c8bd0231e48\r\n'; const filler3 = '\r\n'; - const chunks = [ - Buffer.from(filler1), - Buffer.from(filler2), - Buffer.from(filler3), - null, - ]; + const chunks = [Buffer.from(filler1), Buffer.from(filler2), Buffer.from(filler3), null]; const authMe = new AuthMe(chunks); authMe.pipe(v4Transform); v4Transform.on('finish', () => { diff --git a/tests/unit/auth/in_memory/backend.js b/tests/unit/auth/in_memory/backend.js index bc31066158..0b655227e4 100644 --- a/tests/unit/auth/in_memory/backend.js +++ b/tests/unit/auth/in_memory/backend.js @@ -1,7 +1,6 @@ const assert = require('assert'); -const { buildAuthDataAccount } = - require('../../../../lib/auth/in_memory/builder'); +const { buildAuthDataAccount } = require('../../../../lib/auth/in_memory/builder'); const fakeAccessKey = 'fakeaccesskey'; const fakeSecretKey = 'fakesecretkey'; @@ -16,18 +15,20 @@ function getFirstAndOnlyAccount(authdata) { } describe('buildAuthDataAccount function', () => { - it('should return authdata with the default user name if no user ' + - 'name provided', () => { - const authdata = buildAuthDataAccount(fakeAccessKey, fakeSecretKey, - fakeCanonicalId, fakeServiceName); + it('should return authdata with the default user name if no user ' + 'name provided', () => { + const authdata = buildAuthDataAccount(fakeAccessKey, fakeSecretKey, fakeCanonicalId, fakeServiceName); const firstAccount = getFirstAndOnlyAccount(authdata); 
assert.strictEqual(firstAccount.name, defaultUserName); }); - it('should return authdata with the user name that has been ' + - 'provided', () => { - const authdata = buildAuthDataAccount(fakeAccessKey, fakeSecretKey, - fakeCanonicalId, fakeServiceName, fakeUserName); + it('should return authdata with the user name that has been ' + 'provided', () => { + const authdata = buildAuthDataAccount( + fakeAccessKey, + fakeSecretKey, + fakeCanonicalId, + fakeServiceName, + fakeUserName + ); const firstAccount = getFirstAndOnlyAccount(authdata); assert.strictEqual(firstAccount.name, fakeUserName); }); diff --git a/tests/unit/auth/permissionChecks.js b/tests/unit/auth/permissionChecks.js index 69aaa5b9f9..266974b939 100644 --- a/tests/unit/auth/permissionChecks.js +++ b/tests/unit/auth/permissionChecks.js @@ -25,35 +25,50 @@ describe('checkBucketAcls', () => { { description: 'should return true if bucket owner matches canonicalID', input: { - bucketAcl: {}, requestType: 'anyType', canonicalID: 'ownerId', mainApiCall: 'anyApiCall', + bucketAcl: {}, + requestType: 'anyType', + canonicalID: 'ownerId', + mainApiCall: 'anyApiCall', }, expected: true, }, { description: 'should return true for objectGetTagging when mainApiCall is objectGet', input: { - bucketAcl: {}, requestType: 'objectGetTagging', canonicalID: 'anyId', mainApiCall: 'objectGet', + bucketAcl: {}, + requestType: 'objectGetTagging', + canonicalID: 'anyId', + mainApiCall: 'objectGet', }, expected: true, }, { description: 'should return true for objectPutTagging when mainApiCall is objectPut', input: { - bucketAcl: {}, requestType: 'objectPutTagging', canonicalID: 'anyId', mainApiCall: 'objectPut', + bucketAcl: {}, + requestType: 'objectPutTagging', + canonicalID: 'anyId', + mainApiCall: 'objectPut', }, expected: true, }, { description: 'should return true for objectPutLegalHold when mainApiCall is objectPut', input: { - bucketAcl: {}, requestType: 'objectPutLegalHold', canonicalID: 'anyId', mainApiCall: 'objectPut', + bucketAcl: {}, + requestType: 'objectPutLegalHold', + canonicalID: 'anyId', + mainApiCall: 'objectPut', }, expected: true, }, { description: 'should return true for objectPutRetention when mainApiCall is objectPut', input: { - bucketAcl: {}, requestType: 'objectPutRetention', canonicalID: 'anyId', mainApiCall: 'objectPut', + bucketAcl: {}, + requestType: 'objectPutRetention', + canonicalID: 'anyId', + mainApiCall: 'objectPut', }, expected: true, }, @@ -62,7 +77,10 @@ describe('checkBucketAcls', () => { input: { bucketAcl: { Canned: 'public-read-write', - }, requestType: 'initiateMultipartUpload', canonicalID: 'any', mainApiCall: 'initiateMultipartUpload', + }, + requestType: 'initiateMultipartUpload', + canonicalID: 'any', + mainApiCall: 'initiateMultipartUpload', }, expected: true, }, @@ -71,7 +89,10 @@ describe('checkBucketAcls', () => { input: { bucketAcl: { Canned: 'public-read-write', - }, requestType: 'objectPutPart', canonicalID: 'any', mainApiCall: 'objectPutPart', + }, + requestType: 'objectPutPart', + canonicalID: 'any', + mainApiCall: 'objectPutPart', }, expected: true, }, @@ -80,7 +101,10 @@ describe('checkBucketAcls', () => { input: { bucketAcl: { Canned: 'public-read-write', - }, requestType: 'completeMultipartUpload', canonicalID: 'any', mainApiCall: 'completeMultipartUpload', + }, + requestType: 'completeMultipartUpload', + canonicalID: 'any', + mainApiCall: 'completeMultipartUpload', }, expected: true, }, @@ -240,8 +264,12 @@ describe('checkBucketAcls', () => { // Mock the bucket based on the test scenario's 
input mockBucket.getAcl = () => scenario.input.bucketAcl; - const result = checkBucketAcls(mockBucket, - scenario.input.requestType, scenario.input.canonicalID, scenario.input.mainApiCall); + const result = checkBucketAcls( + mockBucket, + scenario.input.requestType, + scenario.input.canonicalID, + scenario.input.mainApiCall + ); assert.strictEqual(result, scenario.expected); }); }); @@ -255,7 +283,7 @@ describe('checkObjectAcls', () => { }; const mockObjectMD = { 'owner-id': 'objectOwnerId', - 'acl': { + acl: { Canned: '', FULL_CONTROL: [], READ: [], @@ -266,42 +294,73 @@ describe('checkObjectAcls', () => { }; it('should return true if request type is in bucketOwnerActions and bucket owner matches canonicalID', () => { - assert.strictEqual(checkObjectAcls(mockBucket, mockObjectMD, bucketOwnerActions[0], - 'bucketOwnerId', false, false, 'anyApiCall'), true); + assert.strictEqual( + checkObjectAcls( + mockBucket, + mockObjectMD, + bucketOwnerActions[0], + 'bucketOwnerId', + false, + false, + 'anyApiCall' + ), + true + ); }); it('should return true if objectMD owner matches canonicalID', () => { - assert.strictEqual(checkObjectAcls(mockBucket, mockObjectMD, 'anyType', - 'objectOwnerId', false, false, 'anyApiCall'), true); + assert.strictEqual( + checkObjectAcls(mockBucket, mockObjectMD, 'anyType', 'objectOwnerId', false, false, 'anyApiCall'), + true + ); }); it('should return true for objectGetTagging when mainApiCall is objectGet and conditions met', () => { - assert.strictEqual(checkObjectAcls(mockBucket, mockObjectMD, 'objectGetTagging', - 'anyIdNotPublic', true, true, 'objectGet'), true); + assert.strictEqual( + checkObjectAcls(mockBucket, mockObjectMD, 'objectGetTagging', 'anyIdNotPublic', true, true, 'objectGet'), + true + ); }); it('should return false if no acl provided in objectMD', () => { const objMDWithoutAcl = Object.assign({}, mockObjectMD); delete objMDWithoutAcl.acl; - assert.strictEqual(checkObjectAcls(mockBucket, objMDWithoutAcl, 'anyType', - 'anyId', false, false, 'anyApiCall'), false); + assert.strictEqual( + checkObjectAcls(mockBucket, objMDWithoutAcl, 'anyType', 'anyId', false, false, 'anyApiCall'), + false + ); }); const tests = [ { - acl: 'public-read', reqType: 'objectGet', id: 'anyIdNotPublic', expected: true, + acl: 'public-read', + reqType: 'objectGet', + id: 'anyIdNotPublic', + expected: true, }, { - acl: 'public-read-write', reqType: 'objectGet', id: 'anyIdNotPublic', expected: true, + acl: 'public-read-write', + reqType: 'objectGet', + id: 'anyIdNotPublic', + expected: true, }, { - acl: 'authenticated-read', reqType: 'objectGet', id: 'anyIdNotPublic', expected: true, + acl: 'authenticated-read', + reqType: 'objectGet', + id: 'anyIdNotPublic', + expected: true, }, { - acl: 'bucket-owner-read', reqType: 'objectGet', id: 'bucketOwnerId', expected: true, + acl: 'bucket-owner-read', + reqType: 'objectGet', + id: 'bucketOwnerId', + expected: true, }, { - acl: 'bucket-owner-full-control', reqType: 'objectGet', id: 'bucketOwnerId', expected: true, + acl: 'bucket-owner-full-control', + reqType: 'objectGet', + id: 'bucketOwnerId', + expected: true, }, { aclList: ['someId', 'anyIdNotPublic'], @@ -323,19 +382,31 @@ describe('checkObjectAcls', () => { { reqType: 'completeMultipartUpload', id: 'anyId', expected: true }, { reqType: 'objectDelete', id: 'anyId', expected: true }, { - aclList: ['anyId'], aclField: 'FULL_CONTROL', reqType: 'objectPutACL', id: 'anyId', expected: true, + aclList: ['anyId'], + aclField: 'FULL_CONTROL', + reqType: 'objectPutACL', + id: 'anyId', + 
expected: true, }, { - aclList: ['anyId'], aclField: 'FULL_CONTROL', reqType: 'objectGetACL', id: 'anyId', expected: true, + aclList: ['anyId'], + aclField: 'FULL_CONTROL', + reqType: 'objectGetACL', + id: 'anyId', + expected: true, }, { - acl: '', reqType: 'objectGet', id: 'randomId', expected: false, + acl: '', + reqType: 'objectGet', + id: 'randomId', + expected: false, }, ]; tests.forEach(test => { - it(`should return ${test.expected} for ${test.reqType} with ACL as ${test.acl - || (`${test.aclField}:${JSON.stringify(test.aclList)}`)}`, () => { + it(`should return ${test.expected} for ${test.reqType} with ACL as ${ + test.acl || `${test.aclField}:${JSON.stringify(test.aclList)}` + }`, () => { if (test.acl) { mockObjectMD.acl.Canned = test.acl; } else if (test.aclList && test.aclField) { @@ -344,7 +415,7 @@ describe('checkObjectAcls', () => { assert.strictEqual( checkObjectAcls(mockBucket, mockObjectMD, test.reqType, test.id, false, false, 'anyApiCall'), - test.expected, + test.expected ); }); }); @@ -360,112 +431,123 @@ describe('validatePolicyConditions', () => { { description: 'Should return null if conditions have a valid IP address', inputPolicy: { - Statement: [{ - Condition: { - IpAddress: { 'aws:SourceIp': '192.168.1.1/24' }, + Statement: [ + { + Condition: { + IpAddress: { 'aws:SourceIp': '192.168.1.1/24' }, + }, }, - }], + ], }, expected: null, }, { - description: 'Should return "Invalid IP address in Conditions" ' + - 'if conditions have an invalid IP address', + description: + 'Should return "Invalid IP address in Conditions" ' + 'if conditions have an invalid IP address', inputPolicy: { - Statement: [{ - Condition: { - IpAddress: { 'aws:SourceIp': '123' }, + Statement: [ + { + Condition: { + IpAddress: { 'aws:SourceIp': '123' }, + }, }, - }], + ], }, expected: 'Invalid IP address in Conditions', }, { - description: 'Should return "Policy has an invalid condition key" if a' + - ' condition key does not start with \'aws:\' and is not recognized', + description: + 'Should return "Policy has an invalid condition key" if a' + + " condition key does not start with 'aws:' and is not recognized", inputPolicy: { - Statement: [{ - Condition: { - NotARealCondition: { 's3:prefix': 'something' }, + Statement: [ + { + Condition: { + NotARealCondition: { 's3:prefix': 'something' }, + }, }, - }], + ], }, expected: 'Policy has an invalid condition key', }, { - description: 'Should return null if a statement in the policy does not contain a \'Condition\' block', + description: "Should return null if a statement in the policy does not contain a 'Condition' block", inputPolicy: { Statement: [{}], }, expected: null, }, { - description: 'Should return a relevant error message ' + - 'if the condition value is an empty string', + description: 'Should return a relevant error message ' + 'if the condition value is an empty string', inputPolicy: { - Statement: [{ - Condition: { - IpAddress: { 'aws:SourceIp': '' }, + Statement: [ + { + Condition: { + IpAddress: { 'aws:SourceIp': '' }, + }, }, - }], + ], }, expected: 'Invalid IP address in Conditions', }, { description: 'Should accept arrays of IPs', inputPolicy: { - Statement: [{ - Condition: { - IpAddress: { - 'aws:SourceIp': [ - '10.0.11.0/24', - '10.0.1.0/24', - ], + Statement: [ + { + Condition: { + IpAddress: { + 'aws:SourceIp': ['10.0.11.0/24', '10.0.1.0/24'], + }, }, }, - }], + ], }, expected: null, }, { description: 'Should return relevant error if one of the IPs in the array is invalid', inputPolicy: { - Statement: [{ - Condition: { - 
IpAddress: { - 'aws:SourceIp': [ - '10.0.11.0/24', - '123', - ], + Statement: [ + { + Condition: { + IpAddress: { + 'aws:SourceIp': ['10.0.11.0/24', '123'], + }, }, }, - }], + ], }, expected: 'Invalid IP address in Conditions', }, { description: 'Should not return error if array value in IP condition is empty', // this is AWS behavior inputPolicy: { - Statement: [{ - Condition: { - IpAddress: { - 'aws:SourceIp': [], + Statement: [ + { + Condition: { + IpAddress: { + 'aws:SourceIp': [], + }, }, }, - }], + ], }, expected: null, }, { - description: 'Should return null or a relevant error message ' + - 'if multiple conditions are provided in a single statement', + description: + 'Should return null or a relevant error message ' + + 'if multiple conditions are provided in a single statement', inputPolicy: { - Statement: [{ - Condition: { - IpAddress: { 'aws:SourceIp': '192.168.1.1' }, - NotARealCondition: { 's3:prefix': 'something' }, + Statement: [ + { + Condition: { + IpAddress: { 'aws:SourceIp': '192.168.1.1' }, + NotARealCondition: { 's3:prefix': 'something' }, + }, }, - }], + ], }, expected: 'Policy has an invalid condition key', }, @@ -490,34 +572,41 @@ describe('validatePolicyConditions', () => { { description: 'Should return null if conditions have a valid IPv6 address', inputPolicy: { - Statement: [{ - Condition: { - IpAddress: { 'aws:SourceIp': '2001:0db8:85a3:0000:0000:8a2e:0370:7334' }, + Statement: [ + { + Condition: { + IpAddress: { 'aws:SourceIp': '2001:0db8:85a3:0000:0000:8a2e:0370:7334' }, + }, }, - }], + ], }, expected: null, }, { description: 'Should return "Invalid IP address in Conditions" if conditions have an invalid IPv6 address', inputPolicy: { - Statement: [{ - Condition: { - IpAddress: { 'aws:SourceIp': '2001:0db8:85a3:0000:XYZZ:8a2e:0370:7334' }, + Statement: [ + { + Condition: { + IpAddress: { 'aws:SourceIp': '2001:0db8:85a3:0000:XYZZ:8a2e:0370:7334' }, + }, }, - }], + ], }, expected: 'Invalid IP address in Conditions', }, { - description: 'Should return "Invalid IP address in Conditions" if conditions' - + ' have an IPv6 address with unusual and invalid notation', + description: + 'Should return "Invalid IP address in Conditions" if conditions' + + ' have an IPv6 address with unusual and invalid notation', inputPolicy: { - Statement: [{ - Condition: { - IpAddress: { 'aws:SourceIp': '2001::85a3::8a2e' }, + Statement: [ + { + Condition: { + IpAddress: { 'aws:SourceIp': '2001::85a3::8a2e' }, + }, }, - }], + ], }, expected: 'Invalid IP address in Conditions', }, diff --git a/tests/unit/bucket/bucketCreation.js b/tests/unit/bucket/bucketCreation.js index 6ae385d17c..c89e76a443 100644 --- a/tests/unit/bucket/bucketCreation.js +++ b/tests/unit/bucket/bucketCreation.js @@ -1,8 +1,7 @@ const assert = require('assert'); const { cleanup, DummyRequestLogger } = require('../helpers'); -const { createBucket } = - require('../../../lib/api/apiUtils/bucket/bucketCreation'); +const { createBucket } = require('../../../lib/api/apiUtils/bucket/bucketCreation'); const { makeAuthInfo } = require('../helpers'); const bucketName = 'creationbucket'; @@ -15,34 +14,30 @@ const specialBehaviorLocationConstraint = 'us-east-1'; describe('bucket creation', () => { it('should create a bucket', done => { - createBucket(authInfo, bucketName, headers, - normalBehaviorLocationConstraint, log, err => { - assert.ifError(err); - done(); - }); + createBucket(authInfo, bucketName, headers, normalBehaviorLocationConstraint, log, err => { + assert.ifError(err); + done(); + }); }); describe('when you 
already created the bucket in us-east-1', () => { beforeEach(done => { cleanup(); - createBucket(authInfo, bucketName, headers, - specialBehaviorLocationConstraint, log, err => { - assert.ifError(err); - done(); - }); + createBucket(authInfo, bucketName, headers, specialBehaviorLocationConstraint, log, err => { + assert.ifError(err); + done(); + }); }); it('should return 200 if try to recreate in us-east-1', done => { - createBucket(authInfo, bucketName, headers, - specialBehaviorLocationConstraint, log, err => { + createBucket(authInfo, bucketName, headers, specialBehaviorLocationConstraint, log, err => { assert.ifError(err); done(); }); }); it('should return 409 if try to recreate in non-us-east-1', done => { - createBucket(authInfo, bucketName, headers, - normalBehaviorLocationConstraint, log, err => { + createBucket(authInfo, bucketName, headers, normalBehaviorLocationConstraint, log, err => { assert.strictEqual(err.is.BucketAlreadyOwnedByYou, true); done(); }); @@ -52,16 +47,14 @@ describe('bucket creation', () => { describe('when you already created the bucket in non-us-east-1', () => { beforeEach(done => { cleanup(); - createBucket(authInfo, bucketName, headers, - normalBehaviorLocationConstraint, log, err => { - assert.ifError(err); - done(); - }); + createBucket(authInfo, bucketName, headers, normalBehaviorLocationConstraint, log, err => { + assert.ifError(err); + done(); + }); }); it('should return 409 if try to recreate in us-east-1', done => { - createBucket(authInfo, bucketName, headers, - specialBehaviorLocationConstraint, log, err => { + createBucket(authInfo, bucketName, headers, specialBehaviorLocationConstraint, log, err => { assert.strictEqual(err.is.BucketAlreadyOwnedByYou, true); done(); }); @@ -74,10 +67,9 @@ describe('bucket creation with object lock', () => { const headers = { 'x-amz-bucket-object-lock-enabled': 'true', }; - createBucket(authInfo, bucketName, headers, - normalBehaviorLocationConstraint, log, err => { - assert.ifError(err); - done(); - }); + createBucket(authInfo, bucketName, headers, normalBehaviorLocationConstraint, log, err => { + assert.ifError(err); + done(); + }); }); }); diff --git a/tests/unit/bucket/bucket_mem_api.js b/tests/unit/bucket/bucket_mem_api.js index 980a5ba83f..030db44487 100644 --- a/tests/unit/bucket/bucket_mem_api.js +++ b/tests/unit/bucket/bucket_mem_api.js @@ -12,30 +12,25 @@ const bucketName = 'Zaphod'; const objMD = { test: '8' }; const log = new DummyRequestLogger(); -describe('bucket API for getting, putting and deleting ' + - 'objects in a bucket', () => { +describe('bucket API for getting, putting and deleting ' + 'objects in a bucket', () => { let bucket; before(done => { cleanup(); const creationDate = new Date().toJSON(); - bucket = new BucketInfo(bucketName, 'iAmTheOwnerId', - 'iAmTheOwnerDisplayName', creationDate); + bucket = new BucketInfo(bucketName, 'iAmTheOwnerId', 'iAmTheOwnerDisplayName', creationDate); metadata.createBucket(bucketName, bucket, log, done); }); - it('should be able to add an object to a bucket ' + - 'and get the object by key', done => { + it('should be able to add an object to a bucket ' + 'and get the object by key', done => { metadata.putObjectMD(bucketName, 'sampleKey', objMD, {}, log, () => { - metadata.getObjectMD(bucketName, 'sampleKey', {}, log, - (err, value) => { + metadata.getObjectMD(bucketName, 'sampleKey', {}, log, (err, value) => { assert.deepStrictEqual(value, objMD); done(); }); }); }); - it('should return an error in response ' + - 'to getObjectMD when no such key', done => 
{ + it('should return an error in response ' + 'to getObjectMD when no such key', done => { metadata.getObjectMD(bucketName, 'notThere', {}, log, (err, value) => { assert.strictEqual(err.is.NoSuchKey, true); assert.strictEqual(value, undefined); @@ -44,22 +39,18 @@ describe('bucket API for getting, putting and deleting ' + }); it('should be able to delete an object from a bucket', done => { - metadata.putObjectMD(bucketName, 'objectToDelete', '{}', {}, log, - () => { - metadata.deleteObjectMD(bucketName, 'objectToDelete', {}, log, - () => { - metadata.getObjectMD(bucketName, 'objectToDelete', {}, log, - (err, value) => { - assert.strictEqual(err.is.NoSuchKey, true); - assert.strictEqual(value, undefined); - done(); - }); + metadata.putObjectMD(bucketName, 'objectToDelete', '{}', {}, log, () => { + metadata.deleteObjectMD(bucketName, 'objectToDelete', {}, log, () => { + metadata.getObjectMD(bucketName, 'objectToDelete', {}, log, (err, value) => { + assert.strictEqual(err.is.NoSuchKey, true); + assert.strictEqual(value, undefined); + done(); + }); }); }); }); }); - describe('bucket API for getting a subset of objects from a bucket', () => { /* * Implementation of AWS GET Bucket (List Objects) functionality @@ -104,147 +95,127 @@ describe('bucket API for getting a subset of objects from a bucket', () => { before(done => { cleanup(); const creationDate = new Date().toJSON(); - bucket = new BucketInfo(bucketName, 'ownerid', - 'ownerdisplayname', creationDate); + bucket = new BucketInfo(bucketName, 'ownerid', 'ownerdisplayname', creationDate); metadata.createBucket(bucketName, bucket, log, done); }); - it('should return individual key if key does not contain ' + - 'the delimiter even if key contains prefix', done => { - async.waterfall([ - next => - metadata.putObjectMD(bucketName, 'key1', '{}', {}, log, next), - (data, next) => - metadata.putObjectMD(bucketName, 'noMatchKey', '{}', {}, log, - next), - (data, next) => - metadata.putObjectMD(bucketName, 'key1/', '{}', {}, log, next), - (data, next) => - metadata.listObject(bucketName, { prefix: 'key', delimiter, - maxKeys: defaultLimit }, log, next), - ], (err, response) => { - assert.strictEqual(isKeyInContents(response, 'key1'), true); - assert.strictEqual(response.CommonPrefixes.indexOf('key1'), -1); - assert.strictEqual(isKeyInContents(response, 'key1/'), false); - assert(response.CommonPrefixes.indexOf('key1/') > -1); - assert.strictEqual(isKeyInContents(response, 'noMatchKey'), false); - assert.strictEqual(response.CommonPrefixes.indexOf('noMatchKey'), - -1); - done(); - }); + it('should return individual key if key does not contain ' + 'the delimiter even if key contains prefix', done => { + async.waterfall( + [ + next => metadata.putObjectMD(bucketName, 'key1', '{}', {}, log, next), + (data, next) => metadata.putObjectMD(bucketName, 'noMatchKey', '{}', {}, log, next), + (data, next) => metadata.putObjectMD(bucketName, 'key1/', '{}', {}, log, next), + (data, next) => + metadata.listObject(bucketName, { prefix: 'key', delimiter, maxKeys: defaultLimit }, log, next), + ], + (err, response) => { + assert.strictEqual(isKeyInContents(response, 'key1'), true); + assert.strictEqual(response.CommonPrefixes.indexOf('key1'), -1); + assert.strictEqual(isKeyInContents(response, 'key1/'), false); + assert(response.CommonPrefixes.indexOf('key1/') > -1); + assert.strictEqual(isKeyInContents(response, 'noMatchKey'), false); + assert.strictEqual(response.CommonPrefixes.indexOf('noMatchKey'), -1); + done(); + } + ); }); - it('should return grouped keys under 
common prefix if keys start with ' + - 'given prefix and contain given delimiter', done => { - async.waterfall([ - next => - metadata.putObjectMD(bucketName, 'key/one', '{}', {}, log, - next), - (data, next) => - metadata.putObjectMD(bucketName, 'key/two', '{}', {}, log, - next), - (data, next) => - metadata.putObjectMD(bucketName, 'key/three', '{}', {}, log, - next), - (data, next) => - metadata.listObject(bucketName, { prefix: 'ke', delimiter, - maxKeys: defaultLimit }, log, next), - ], (err, response) => { - assert(response.CommonPrefixes.indexOf('key/') > -1); - assert.strictEqual(isKeyInContents(response, 'key/'), false); - done(); - }); - }); + it( + 'should return grouped keys under common prefix if keys start with ' + + 'given prefix and contain given delimiter', + done => { + async.waterfall( + [ + next => metadata.putObjectMD(bucketName, 'key/one', '{}', {}, log, next), + (data, next) => metadata.putObjectMD(bucketName, 'key/two', '{}', {}, log, next), + (data, next) => metadata.putObjectMD(bucketName, 'key/three', '{}', {}, log, next), + (data, next) => + metadata.listObject(bucketName, { prefix: 'ke', delimiter, maxKeys: defaultLimit }, log, next), + ], + (err, response) => { + assert(response.CommonPrefixes.indexOf('key/') > -1); + assert.strictEqual(isKeyInContents(response, 'key/'), false); + done(); + } + ); + } + ); - it('should return grouped keys if no prefix ' + - 'given and keys match before delimiter', done => { + it('should return grouped keys if no prefix ' + 'given and keys match before delimiter', done => { metadata.putObjectMD(bucketName, 'noPrefix/one', '{}', {}, log, () => { - metadata.putObjectMD(bucketName, 'noPrefix/two', '{}', {}, log, - () => { - metadata.listObject(bucketName, { delimiter, - maxKeys: defaultLimit }, log, (err, response) => { - assert(response.CommonPrefixes.indexOf('noPrefix/') - > -1); - assert.strictEqual(isKeyInContents(response, - 'noPrefix'), false); - done(); - }); + metadata.putObjectMD(bucketName, 'noPrefix/two', '{}', {}, log, () => { + metadata.listObject(bucketName, { delimiter, maxKeys: defaultLimit }, log, (err, response) => { + assert(response.CommonPrefixes.indexOf('noPrefix/') > -1); + assert.strictEqual(isKeyInContents(response, 'noPrefix'), false); + done(); + }); }); }); }); - it('should return no grouped keys if no ' + - 'delimiter specified in getBucketListObjects', done => { - metadata.listObject(bucketName, - { prefix: 'key', maxKeys: defaultLimit }, log, - (err, response) => { - assert.strictEqual(response.CommonPrefixes.length, 0); - done(); - }); + it('should return no grouped keys if no ' + 'delimiter specified in getBucketListObjects', done => { + metadata.listObject(bucketName, { prefix: 'key', maxKeys: defaultLimit }, log, (err, response) => { + assert.strictEqual(response.CommonPrefixes.length, 0); + done(); + }); }); - it('should only return keys occurring alphabetically ' + - 'AFTER marker when no delimiter specified', done => { + it('should only return keys occurring alphabetically ' + 'AFTER marker when no delimiter specified', done => { metadata.putObjectMD(bucketName, 'a', '{}', {}, log, () => { metadata.putObjectMD(bucketName, 'b', '{}', {}, log, () => { - metadata.listObject(bucketName, - { marker: 'a', maxKeys: defaultLimit }, - log, (err, response) => { - assert(isKeyInContents(response, 'b')); - assert.strictEqual(isKeyInContents(response, 'a'), - false); - done(); - }); + metadata.listObject(bucketName, { marker: 'a', maxKeys: defaultLimit }, log, (err, response) => { + 
assert(isKeyInContents(response, 'b')); + assert.strictEqual(isKeyInContents(response, 'a'), false); + done(); + }); }); }); }); - it('should only return keys occurring alphabetically AFTER ' + - 'marker when delimiter specified', done => { - metadata.listObject(bucketName, - { marker: 'a', delimiter, maxKeys: defaultLimit }, - log, (err, response) => { - assert(isKeyInContents(response, 'b')); - assert.strictEqual(isKeyInContents(response, 'a'), false); - done(); - }); + it('should only return keys occurring alphabetically AFTER ' + 'marker when delimiter specified', done => { + metadata.listObject(bucketName, { marker: 'a', delimiter, maxKeys: defaultLimit }, log, (err, response) => { + assert(isKeyInContents(response, 'b')); + assert.strictEqual(isKeyInContents(response, 'a'), false); + done(); + }); }); - it('should only return keys occurring alphabetically AFTER ' + - 'marker when delimiter and prefix specified', done => { - metadata.listObject(bucketName, - { prefix: 'b', marker: 'a', delimiter, maxKeys: defaultLimit }, - log, (err, response) => { - assert(isKeyInContents(response, 'b')); - assert.strictEqual(isKeyInContents(response, 'a'), false); - done(); - }); - }); + it( + 'should only return keys occurring alphabetically AFTER ' + 'marker when delimiter and prefix specified', + done => { + metadata.listObject( + bucketName, + { prefix: 'b', marker: 'a', delimiter, maxKeys: defaultLimit }, + log, + (err, response) => { + assert(isKeyInContents(response, 'b')); + assert.strictEqual(isKeyInContents(response, 'a'), false); + done(); + } + ); + } + ); // Next marker should be the last common prefix or contents key returned it('should return a NextMarker if maxKeys reached', done => { - async.waterfall([ - next => - metadata.putObjectMD(bucketName, 'next/', '{}', {}, log, next), - (data, next) => - metadata.putObjectMD(bucketName, 'next/rollUp', '{}', {}, log, - next), - (data, next) => - metadata.putObjectMD(bucketName, 'next1/', '{}', {}, log, next), - (data, next) => - metadata.listObject(bucketName, - { prefix: 'next', delimiter, maxKeys: smallLimit }, - log, next), - ], (err, response) => { - assert(response.CommonPrefixes.indexOf('next/') > -1); - assert.strictEqual(response.CommonPrefixes.indexOf('next1/'), -1); - assert.strictEqual(response.NextMarker, 'next/'); - assert(response.IsTruncated); - done(); - }); + async.waterfall( + [ + next => metadata.putObjectMD(bucketName, 'next/', '{}', {}, log, next), + (data, next) => metadata.putObjectMD(bucketName, 'next/rollUp', '{}', {}, log, next), + (data, next) => metadata.putObjectMD(bucketName, 'next1/', '{}', {}, log, next), + (data, next) => + metadata.listObject(bucketName, { prefix: 'next', delimiter, maxKeys: smallLimit }, log, next), + ], + (err, response) => { + assert(response.CommonPrefixes.indexOf('next/') > -1); + assert.strictEqual(response.CommonPrefixes.indexOf('next1/'), -1); + assert.strictEqual(response.NextMarker, 'next/'); + assert(response.IsTruncated); + done(); + } + ); }); }); - describe('stress test for bucket API', function describe() { this.timeout(200000); @@ -270,86 +241,83 @@ describe('stress test for bucket API', function describe() { before(done => { cleanup(); const creationDate = new Date().toJSON(); - bucket = new BucketInfo(bucketName, 'ownerid', - 'ownerdisplayname', creationDate); + bucket = new BucketInfo(bucketName, 'ownerid', 'ownerdisplayname', creationDate); metadata.createBucket(bucketName, bucket, log, done); }); - it(`should put ${numKeys} keys into bucket and retrieve bucket list ` + 
- `in under ${maxMilliseconds} milliseconds`, done => { - const data = {}; - const keys = []; + it( + `should put ${numKeys} keys into bucket and retrieve bucket list ` + `in under ${maxMilliseconds} milliseconds`, + done => { + const data = {}; + const keys = []; - // Create dictionary entries based on prefixes array - for (let i = 0; i < prefixes.length; i++) { - data[prefixes[i]] = []; - } - // Populate dictionary with random key extensions - let prefix; - for (let j = 0; j < numKeys; j++) { - prefix = prefixes[j % prefixes.length]; - data[prefix].push(makeid(10)); - } + // Create dictionary entries based on prefixes array + for (let i = 0; i < prefixes.length; i++) { + data[prefixes[i]] = []; + } + // Populate dictionary with random key extensions + let prefix; + for (let j = 0; j < numKeys; j++) { + prefix = prefixes[j % prefixes.length]; + data[prefix].push(makeid(10)); + } - // Populate keys array with all keys including prefixes - Object.keys(data).forEach(dkey => { - data[dkey].forEach(key => { - keys.push(dkey + delimiter + key); + // Populate keys array with all keys including prefixes + Object.keys(data).forEach(dkey => { + data[dkey].forEach(key => { + keys.push(dkey + delimiter + key); + }); }); - }); - // Shuffle the keys array so the keys appear in random order - shuffle(keys); + // Shuffle the keys array so the keys appear in random order + shuffle(keys); - // Start timing - const startTime = process.hrtime(); + // Start timing + const startTime = process.hrtime(); - async.each(keys, (item, next) => { - metadata.putObjectMD(bucketName, item, '{}', {}, log, next); - }, err => { - if (err) { - assert.strictEqual(err, undefined); - done(); - } else { - metadata.listObject(bucketName, { delimiter }, - log, (err, response) => { - // Stop timing and calculate millisecond time difference - const diff = timeDiff(startTime); - assert(diff < maxMilliseconds); - prefixes.forEach(prefix => { - assert(response.CommonPrefixes - .indexOf(prefix + delimiter) > -1); - }); + async.each( + keys, + (item, next) => { + metadata.putObjectMD(bucketName, item, '{}', {}, log, next); + }, + err => { + if (err) { + assert.strictEqual(err, undefined); done(); - }); - } + } else { + metadata.listObject(bucketName, { delimiter }, log, (err, response) => { + // Stop timing and calculate millisecond time difference + const diff = timeDiff(startTime); + assert(diff < maxMilliseconds); + prefixes.forEach(prefix => { + assert(response.CommonPrefixes.indexOf(prefix + delimiter) > -1); + }); + done(); + }); + } + } + ); + } + ); + + it('should return all keys as Contents if delimiter ' + 'does not match and specify NextMarker', done => { + metadata.listObject(bucketName, { delimiter: oddDelimiter, maxKeys: testLimit }, log, (err, response) => { + assert.strictEqual(response.CommonPrefixes.length, 0); + assert.strictEqual(response.Contents.length, testLimit); + assert.strictEqual(response.IsTruncated, true); + assert.strictEqual(typeof response.NextMarker, 'string'); + done(); }); }); - it('should return all keys as Contents if delimiter ' + - 'does not match and specify NextMarker', done => { - metadata.listObject(bucketName, - { delimiter: oddDelimiter, maxKeys: testLimit }, - log, (err, response) => { - assert.strictEqual(response.CommonPrefixes.length, 0); - assert.strictEqual(response.Contents.length, testLimit); - assert.strictEqual(response.IsTruncated, true); - assert.strictEqual(typeof response.NextMarker, 'string'); - done(); - }); - }); - - it('should return only keys occurring ' + - 'after 
specified marker', done => { - metadata.listObject(bucketName, { marker: testMarker, delimiter }, log, - (err, res) => { - assert.strictEqual(res.CommonPrefixes.length, - prefixes.length - 1); - assert.strictEqual(res.CommonPrefixes.indexOf(testPrefix), -1); - assert.strictEqual(res.Contents.length, 0); - assert.strictEqual(res.IsTruncated, false); - assert.strictEqual(res.NextMarker, undefined); - done(); - }); + it('should return only keys occurring ' + 'after specified marker', done => { + metadata.listObject(bucketName, { marker: testMarker, delimiter }, log, (err, res) => { + assert.strictEqual(res.CommonPrefixes.length, prefixes.length - 1); + assert.strictEqual(res.CommonPrefixes.indexOf(testPrefix), -1); + assert.strictEqual(res.Contents.length, 0); + assert.strictEqual(res.IsTruncated, false); + assert.strictEqual(res.NextMarker, undefined); + done(); + }); }); }); diff --git a/tests/unit/encryption/checkHealth.js b/tests/unit/encryption/checkHealth.js index e6b5d90c64..75d5f71c20 100644 --- a/tests/unit/encryption/checkHealth.js +++ b/tests/unit/encryption/checkHealth.js @@ -61,10 +61,12 @@ describe('KMS.checkHealth', () => { assert(shouldRefreshStub.calledOnce, 'shouldRefresh should be called once'); - assert(setResultSpy.calledOnceWithExactly({ - code: 200, - message: 'OK', - })); + assert( + setResultSpy.calledOnceWithExactly({ + code: 200, + message: 'OK', + }) + ); done(); }); @@ -89,11 +91,13 @@ describe('KMS.checkHealth', () => { assert(shouldRefreshStub.calledOnce, 'shouldRefresh should be called once'); - assert(setResultSpy.calledOnceWithExactly({ - code: 500, - message: 'KMS health check failed', - description: 'We encountered an internal error. Please try again.', - })); + assert( + setResultSpy.calledOnceWithExactly({ + code: 500, + message: 'KMS health check failed', + description: 'We encountered an internal error. 
Please try again.', + }) + ); done(); }); diff --git a/tests/unit/encryption/healthCheckCache.js b/tests/unit/encryption/healthCheckCache.js index 4d21232e8f..0d2e4285d0 100644 --- a/tests/unit/encryption/healthCheckCache.js +++ b/tests/unit/encryption/healthCheckCache.js @@ -68,7 +68,7 @@ describe('Cache Class', () => { it('should return false if elapsed time is less than duration minus maximum jitter', () => { const fakeNow = 1625077800000; - const fakeLastChecked = fakeNow - (45 * 60 * 1000); // 45 minutes ago + const fakeLastChecked = fakeNow - 45 * 60 * 1000; // 45 minutes ago sandbox.stub(Date, 'now').returns(fakeNow); sandbox.stub(Math, 'random').returns(0); cache.lastChecked = fakeLastChecked; @@ -80,7 +80,7 @@ describe('Cache Class', () => { it('should return true if elapsed time is greater than duration minus maximum jitter', () => { const fakeNow = 1625077800000; - const fakeLastChecked = fakeNow - (61 * 60 * 1000); // 61 minutes ago + const fakeLastChecked = fakeNow - 61 * 60 * 1000; // 61 minutes ago sandbox.stub(Date, 'now').returns(fakeNow); sandbox.stub(Math, 'random').returns(0); cache.lastChecked = fakeLastChecked; @@ -96,7 +96,7 @@ describe('Cache Class', () => { sandbox.stub(Date, 'now').returns(fakeNow); // Elapsed time = 5 hours - const fakeLastChecked1 = fakeNow - (5 * 60 * 60 * 1000); + const fakeLastChecked1 = fakeNow - 5 * 60 * 60 * 1000; cache.lastChecked = fakeLastChecked1; sandbox.stub(Math, 'random').returns(0); @@ -109,15 +109,11 @@ describe('Cache Class', () => { ); // Elapsed time = 7 hours - const fakeLastChecked2 = fakeNow - (7 * 60 * 60 * 1000); + const fakeLastChecked2 = fakeNow - 7 * 60 * 60 * 1000; cache.lastChecked = fakeLastChecked2; // 7 hours > 6 hours => shouldRefresh = true - assert.strictEqual( - cache.shouldRefresh(customDuration), - true, - 'Cache should refresh after custom duration' - ); + assert.strictEqual(cache.shouldRefresh(customDuration), true, 'Cache should refresh after custom duration'); }); }); diff --git a/tests/unit/encryption/kms.js b/tests/unit/encryption/kms.js index ff94a77d8a..1a0936edc1 100644 --- a/tests/unit/encryption/kms.js +++ b/tests/unit/encryption/kms.js @@ -6,8 +6,7 @@ const Common = require('../../../lib/kms/common'); const { cleanup, DummyRequestLogger } = require('../helpers'); const log = new DummyRequestLogger(); -const dummyBucket = new BucketInfo( - 'dummyBucket', 'dummyOwnerId', 'Joe, John', new Date().toJSON()); +const dummyBucket = new BucketInfo('dummyBucket', 'dummyOwnerId', 'Joe, John', new Date().toJSON()); describe('KMS unit tests', () => { beforeEach(() => { @@ -20,17 +19,15 @@ describe('KMS unit tests', () => { 'x-amz-scal-server-side-encryption': algorithm, }; const sseConfig = parseBucketEncryptionHeaders(headers); - KMS.bucketLevelEncryption( - dummyBucket, sseConfig, log, - (err, sseInfo) => { - assert.strictEqual(err, null); - assert.strictEqual(sseInfo.cryptoScheme, 1); - assert.strictEqual(sseInfo.mandatory, true); - assert.strictEqual(sseInfo.algorithm, algorithm); - assert.notEqual(sseInfo.masterKeyId, undefined); - assert.notEqual(sseInfo.masterKeyId, null); - done(); - }); + KMS.bucketLevelEncryption(dummyBucket, sseConfig, log, (err, sseInfo) => { + assert.strictEqual(err, null); + assert.strictEqual(sseInfo.cryptoScheme, 1); + assert.strictEqual(sseInfo.mandatory, true); + assert.strictEqual(sseInfo.algorithm, algorithm); + assert.notEqual(sseInfo.masterKeyId, undefined); + assert.notEqual(sseInfo.masterKeyId, null); + done(); + }); }); it('should construct a sse info object on 
aws:kms', done => { @@ -41,49 +38,48 @@ describe('KMS unit tests', () => { 'x-amz-scal-server-side-encryption-aws-kms-key-id': masterKeyId, }; const sseConfig = parseBucketEncryptionHeaders(headers); - KMS.bucketLevelEncryption( - dummyBucket, sseConfig, log, - (err, sseInfo) => { - assert.strictEqual(err, null); - assert.strictEqual(sseInfo.cryptoScheme, 1); - assert.strictEqual(sseInfo.mandatory, true); - assert.strictEqual(sseInfo.algorithm, 'aws:kms'); - assert.strictEqual(sseInfo.configuredMasterKeyId, `${KMS.arnPrefix}${masterKeyId}`); - done(); - }); + KMS.bucketLevelEncryption(dummyBucket, sseConfig, log, (err, sseInfo) => { + assert.strictEqual(err, null); + assert.strictEqual(sseInfo.cryptoScheme, 1); + assert.strictEqual(sseInfo.mandatory, true); + assert.strictEqual(sseInfo.algorithm, 'aws:kms'); + assert.strictEqual(sseInfo.configuredMasterKeyId, `${KMS.arnPrefix}${masterKeyId}`); + done(); + }); }); - it('should not construct a sse info object if ' + - 'x-amz-scal-server-side-encryption header contains invalid ' + - 'algorithm option', done => { - const algorithm = 'garbage'; - const masterKeyId = 'foobarbaz'; - const headers = { - 'x-amz-scal-server-side-encryption': algorithm, - 'x-amz-scal-server-side-encryption-aws-kms-key-id': masterKeyId, - }; - const sseConfig = parseBucketEncryptionHeaders(headers); - KMS.bucketLevelEncryption( - dummyBucket, sseConfig, log, - (err, sseInfo) => { + it( + 'should not construct a sse info object if ' + + 'x-amz-scal-server-side-encryption header contains invalid ' + + 'algorithm option', + done => { + const algorithm = 'garbage'; + const masterKeyId = 'foobarbaz'; + const headers = { + 'x-amz-scal-server-side-encryption': algorithm, + 'x-amz-scal-server-side-encryption-aws-kms-key-id': masterKeyId, + }; + const sseConfig = parseBucketEncryptionHeaders(headers); + KMS.bucketLevelEncryption(dummyBucket, sseConfig, log, (err, sseInfo) => { assert.strictEqual(err, null); assert.strictEqual(sseInfo, null); done(); }); - }); + } + ); - it('should not construct a sse info object if no ' + - 'x-amz-scal-server-side-encryption header included with request', + it( + 'should not construct a sse info object if no ' + + 'x-amz-scal-server-side-encryption header included with request', done => { const sseConfig = parseBucketEncryptionHeaders({}); - KMS.bucketLevelEncryption( - dummyBucket, sseConfig, log, - (err, sseInfo) => { - assert.strictEqual(err, null); - assert.strictEqual(sseInfo, null); - done(); - }); - }); + KMS.bucketLevelEncryption(dummyBucket, sseConfig, log, (err, sseInfo) => { + assert.strictEqual(err, null); + assert.strictEqual(sseInfo, null); + done(); + }); + } + ); it('should create a cipher bundle for AES256', done => { const algorithm = 'AES256'; @@ -91,22 +87,16 @@ describe('KMS unit tests', () => { 'x-amz-scal-server-side-encryption': algorithm, }; const sseConfig = parseBucketEncryptionHeaders(headers); - KMS.bucketLevelEncryption( - dummyBucket, sseConfig, log, - (err, sseInfo) => { - KMS.createCipherBundle( - sseInfo, log, (err, cipherBundle) => { - assert.strictEqual(cipherBundle.algorithm, - sseInfo.algorithm); - assert.strictEqual(cipherBundle.masterKeyId, - sseInfo.masterKeyId); - assert.strictEqual(cipherBundle.cryptoScheme, - sseInfo.cryptoScheme); - assert.notEqual(cipherBundle.cipheredDataKey, null); - assert.notEqual(cipherBundle.cipher, null); - done(); - }); + KMS.bucketLevelEncryption(dummyBucket, sseConfig, log, (err, sseInfo) => { + KMS.createCipherBundle(sseInfo, log, (err, cipherBundle) => { + 
assert.strictEqual(cipherBundle.algorithm, sseInfo.algorithm); + assert.strictEqual(cipherBundle.masterKeyId, sseInfo.masterKeyId); + assert.strictEqual(cipherBundle.cryptoScheme, sseInfo.cryptoScheme); + assert.notEqual(cipherBundle.cipheredDataKey, null); + assert.notEqual(cipherBundle.cipher, null); + done(); }); + }); }); it('should create a cipher bundle for aws:kms', done => { @@ -115,33 +105,24 @@ describe('KMS unit tests', () => { }; let masterKeyId; let sseConfig = parseBucketEncryptionHeaders(headers); - KMS.bucketLevelEncryption( - dummyBucket, sseConfig, log, - (err, sseInfo) => { - assert.strictEqual(err, null); - masterKeyId = sseInfo.bucketKeyId; - }); + KMS.bucketLevelEncryption(dummyBucket, sseConfig, log, (err, sseInfo) => { + assert.strictEqual(err, null); + masterKeyId = sseInfo.bucketKeyId; + }); headers['x-amz-scal-server-side-encryption'] = 'aws:kms'; - headers['x-amz-scal-server-side-encryption-aws-kms-key-id'] = - masterKeyId; + headers['x-amz-scal-server-side-encryption-aws-kms-key-id'] = masterKeyId; sseConfig = parseBucketEncryptionHeaders(headers); - KMS.bucketLevelEncryption( - dummyBucket, sseConfig, log, - (err, sseInfo) => { - KMS.createCipherBundle( - sseInfo, log, (err, cipherBundle) => { - assert.strictEqual(cipherBundle.algorithm, - sseInfo.algorithm); - assert.strictEqual(cipherBundle.masterKeyId, - sseInfo.masterKeyId); - assert.strictEqual(cipherBundle.cryptoScheme, - sseInfo.cryptoScheme); - assert.notEqual(cipherBundle.cipheredDataKey, null); - assert.notEqual(cipherBundle.cipher, null); - done(); - }); + KMS.bucketLevelEncryption(dummyBucket, sseConfig, log, (err, sseInfo) => { + KMS.createCipherBundle(sseInfo, log, (err, cipherBundle) => { + assert.strictEqual(cipherBundle.algorithm, sseInfo.algorithm); + assert.strictEqual(cipherBundle.masterKeyId, sseInfo.masterKeyId); + assert.strictEqual(cipherBundle.cryptoScheme, sseInfo.cryptoScheme); + assert.notEqual(cipherBundle.cipheredDataKey, null); + assert.notEqual(cipherBundle.cipher, null); + done(); }); + }); }); /* cb(err, cipherBundle, decipherBundle) */ @@ -151,37 +132,30 @@ describe('KMS unit tests', () => { 'x-amz-scal-server-side-encryption': algorithm, }; const sseConfig = parseBucketEncryptionHeaders(headers); - KMS.bucketLevelEncryption( - dummyBucket, sseConfig, log, - (err, sseInfo) => { + KMS.bucketLevelEncryption(dummyBucket, sseConfig, log, (err, sseInfo) => { + if (err) { + cb(err); + return; + } + KMS.createCipherBundle(sseInfo, log, (err, cipherBundle) => { if (err) { cb(err); return; } - KMS.createCipherBundle( - sseInfo, log, (err, cipherBundle) => { - if (err) { - cb(err); - return; - } - const creatingSseInfo = sseInfo; - creatingSseInfo.cipheredDataKey = - Buffer.from(cipherBundle.cipheredDataKey, 'base64'); - KMS.createDecipherBundle( - sseInfo, 0, log, (err, decipherBundle) => { - if (err) { - cb(err); - return; - } - assert.strictEqual(typeof decipherBundle, - 'object'); - assert.strictEqual(decipherBundle.cryptoScheme, - cipherBundle.cryptoScheme); - assert.notEqual(decipherBundle.decipher, null); - cb(null, cipherBundle, decipherBundle); - }); - }); + const creatingSseInfo = sseInfo; + creatingSseInfo.cipheredDataKey = Buffer.from(cipherBundle.cipheredDataKey, 'base64'); + KMS.createDecipherBundle(sseInfo, 0, log, (err, decipherBundle) => { + if (err) { + cb(err); + return; + } + assert.strictEqual(typeof decipherBundle, 'object'); + assert.strictEqual(decipherBundle.cryptoScheme, cipherBundle.cryptoScheme); + assert.notEqual(decipherBundle.decipher, null); + cb(null, 
cipherBundle, decipherBundle); + }); }); + }); } it('should cipher and decipher a datastream', done => { @@ -199,8 +173,7 @@ describe('KMS unit tests', () => { }); }); - it('should increment the IV by modifying the last two positions of ' + - 'the buffer', () => { + it('should increment the IV by modifying the last two positions of ' + 'the buffer', () => { const derivedIV = Buffer.from('aaaaaaff', 'hex'); const counter = 6; const incrementedIV = Common._incrementIV(derivedIV, counter); @@ -208,8 +181,7 @@ describe('KMS unit tests', () => { assert.deepStrictEqual(incrementedIV, expected); }); - it('should increment the IV by incrementing the last position of the ' + - 'buffer', () => { + it('should increment the IV by incrementing the last position of the ' + 'buffer', () => { const derivedIV = Buffer.from('aaaaaaf0', 'hex'); const counter = 6; const incrementedIV = Common._incrementIV(derivedIV, counter); @@ -217,8 +189,7 @@ describe('KMS unit tests', () => { assert.deepStrictEqual(incrementedIV, expected); }); - it('should increment the IV by shifting each position in the ' + - 'buffer', () => { + it('should increment the IV by shifting each position in the ' + 'buffer', () => { const derivedIV = Buffer.from('ffffffff', 'hex'); const counter = 1; const incrementedIV = Common._incrementIV(derivedIV, counter); diff --git a/tests/unit/helpers.js b/tests/unit/helpers.js index a8f1f594c4..4514ec9471 100644 --- a/tests/unit/helpers.js +++ b/tests/unit/helpers.js @@ -34,11 +34,9 @@ const testsRangeOnEmptyFile = [ function makeid(size) { let text = ''; - const possible = - 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; + const possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; for (let i = 0; i < size; i += 1) { - text += possible - .charAt(Math.floor(Math.random() * possible.length)); + text += possible.charAt(Math.floor(Math.random() * possible.length)); } return text; } @@ -62,16 +60,14 @@ function timeDiff(startTime) { const timeArray = process.hrtime(startTime); // timeArray[0] is whole seconds // timeArray[1] is remaining nanoseconds - const milliseconds = (timeArray[0] * 1000) + (timeArray[1] / 1e6); + const milliseconds = timeArray[0] * 1000 + timeArray[1] / 1e6; return milliseconds; } function makeAuthInfo(accessKey, userName) { const canIdMap = { - accessKey1: '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7' - + 'cd47ef2be', - accessKey2: '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7' - + 'cd47ef2bf', + accessKey1: '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7' + 'cd47ef2be', + accessKey2: '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7' + 'cd47ef2bf', lifecycleKey1: '0123456789abcdef/lifecycle', default: crypto.randomBytes(32).toString('hex'), }; @@ -139,32 +135,23 @@ class WebsiteConfig { }); } - xml.push(''); + xml.push(''); if (this.IndexDocument) { - xml.push('', - `${this.IndexDocument.Suffix}`, - ''); + xml.push('', `${this.IndexDocument.Suffix}`, ''); } if (this.ErrorDocument) { - xml.push('', - `${this.ErrorDocument.Key}`, - ''); + xml.push('', `${this.ErrorDocument.Key}`, ''); } if (this.RedirectAllRequestsTo) { xml.push(''); if (this.RedirectAllRequestsTo.HostName) { - xml.push('', - `${this.RedirectAllRequestsTo.HostName})`, - ''); + xml.push('', `${this.RedirectAllRequestsTo.HostName})`, ''); } if (this.RedirectAllRequestsTo.Protocol) { - xml.push('', - `${this.RedirectAllRequestsTo.Protocol})`, - ''); + xml.push('', `${this.RedirectAllRequestsTo.Protocol})`, ''); } xml.push(''); } @@ 
-193,8 +180,7 @@ class WebsiteConfig { } } -function createAlteredRequest(alteredItems, objToAlter, - baseOuterObj, baseInnerObj) { +function createAlteredRequest(alteredItems, objToAlter, baseOuterObj, baseInnerObj) { const alteredRequest = Object.assign({}, baseOuterObj); const alteredNestedObj = Object.assign({}, baseInnerObj); Object.keys(alteredItems).forEach(key => { @@ -205,8 +191,8 @@ function createAlteredRequest(alteredItems, objToAlter, } function cleanup() { - metadata.buckets = new Map; - metadata.keyMaps = new Map; + metadata.buckets = new Map(); + metadata.keyMaps = new Map(); // Set data store array back to empty array ds.length = 0; // Set data store key count back to 1 @@ -276,19 +262,22 @@ class DummyRequestLogger { class CorsConfigTester { constructor(params) { - this._cors = [{ - allowedMethods: ['PUT', 'POST', 'DELETE'], - allowedOrigins: ['http://www.example.com'], - allowedHeaders: ['*'], - maxAgeSeconds: 3000, - exposeHeaders: ['x-amz-server-side-encryption'], - }, { - id: 'testid', - allowedMethods: ['GET'], - allowedOrigins: ['*'], - allowedHeaders: ['*'], - maxAgeSeconds: 3000, - }]; + this._cors = [ + { + allowedMethods: ['PUT', 'POST', 'DELETE'], + allowedOrigins: ['http://www.example.com'], + allowedHeaders: ['*'], + maxAgeSeconds: 3000, + exposeHeaders: ['x-amz-server-side-encryption'], + }, + { + id: 'testid', + allowedMethods: ['GET'], + allowedOrigins: ['*'], + allowedHeaders: ['*'], + maxAgeSeconds: 3000, + }, + ]; if (params) { Object.keys(params).forEach(key => { @@ -306,14 +295,12 @@ class CorsConfigTester { xml.push(''); this._cors.forEach(rule => { xml.push(''); - ['allowedMethods', 'allowedOrigins', 'allowedHeaders', - 'exposeHeaders', 'maxAgeSeconds'] - .forEach(key => { + ['allowedMethods', 'allowedOrigins', 'allowedHeaders', 'exposeHeaders', 'maxAgeSeconds'].forEach(key => { if (rule[key] && Array.isArray(rule[key])) { - const element = key === 'maxAgeSeconds' ? - key.charAt(0).toUpperCase() + key.slice(1) : - key.charAt(0).toUpperCase() + - key.slice(1, -1); + const element = + key === 'maxAgeSeconds' + ? 
key.charAt(0).toUpperCase() + key.slice(1) + : key.charAt(0).toUpperCase() + key.slice(1, -1); rule[key].forEach(value => { xml.push(`<${element}>${value}`); }); @@ -323,8 +310,7 @@ class CorsConfigTester { xml.push(`${rule.id}`); } if (rule.maxAgeSeconds && !Array.isArray(rule.maxAgeSeconds)) { - xml.push(`${rule.maxAgeSeconds}` + - ''); + xml.push(`${rule.maxAgeSeconds}` + ''); } xml.push(''); }); @@ -344,8 +330,7 @@ class CorsConfigTester { }; if (method === 'PUT') { request.post = body || this.constructXml(); - request.headers['content-md5'] = crypto.createHash('md5') - .update(request.post, 'utf8').digest('base64'); + request.headers['content-md5'] = crypto.createHash('md5').update(request.post, 'utf8').digest('base64'); } return request; } @@ -384,10 +369,11 @@ const versioningTestUtils = { query: { versioning: '' }, actionImplicitDenies: false, }; - const xml = '' + - `${status}` + - ''; + const xml = + '' + + `${status}` + + ''; request.post = xml; return request; }, @@ -414,8 +400,7 @@ class TaggingConfigTester { constructXml() { const xml = []; - xml.push('' + - ' '); + xml.push('' + ' '); Object.keys(this._tags).forEach(key => { const value = this._tags[key]; xml.push(`${key}${value}`); @@ -437,8 +422,7 @@ class TaggingConfigTester { }; if (method === 'PUT') { request.post = body || this.constructXml(); - request.headers['content-md5'] = crypto.createHash('md5') - .update(request.post, 'utf8').digest('base64'); + request.headers['content-md5'] = crypto.createHash('md5').update(request.post, 'utf8').digest('base64'); } return request; } @@ -455,8 +439,7 @@ class TaggingConfigTester { }; if (method === 'PUT') { request.post = body || this.constructXml(); - request.headers['content-md5'] = crypto.createHash('md5') - .update(request.post, 'utf8').digest('base64'); + request.headers['content-md5'] = crypto.createHash('md5').update(request.post, 'utf8').digest('base64'); } return request; } @@ -499,16 +482,13 @@ class AccessControlPolicy { } }); } - xml.push('', ''); + xml.push('', ''); _pushChildren(this.Owner); xml.push('', ''); this.AccessControlList.forEach(grant => { xml.push('', ``); _pushChildren(grant.Grantee); - xml.push('', - `${grant.Permission}`, - ''); + xml.push('', `${grant.Permission}`, ''); }); xml.push('', ''); return xml.join(''); @@ -516,9 +496,16 @@ class AccessControlPolicy { } function createRequestContext(apiMethod, request) { - return new RequestContext(request.headers, - request.query, request.bucketName, request.objectKey, - '127.0.0.1', false, apiMethod, 's3'); + return new RequestContext( + request.headers, + request.query, + request.bucketName, + request.objectKey, + '127.0.0.1', + false, + apiMethod, + 's3' + ); } module.exports = { diff --git a/tests/unit/internal/routeVeeam.js b/tests/unit/internal/routeVeeam.js index 7b2b49b9c8..e64c787450 100644 --- a/tests/unit/internal/routeVeeam.js +++ b/tests/unit/internal/routeVeeam.js @@ -44,21 +44,19 @@ describe('RouteVeeam: checkBucketAndKey', () => { }); }); - [ - ['test', 'badObjectKey', null, 'GET', log], - ].forEach(test => { + [['test', 'badObjectKey', null, 'GET', log]].forEach(test => { it(`should return InvalidArgument for "${test[1]}" object name`, () => { assert.strictEqual(routeVeeam.checkBucketAndKey(...test).is.InvalidArgument, true); }); }); - [ - ['test', '.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml', { random: 'queryparam' }, 'GET', log], - ].forEach(test => { - it(`should return InvalidRequest for "${test[1]}" object name`, () => { - 
assert.strictEqual(routeVeeam.checkBucketAndKey(...test).is.InvalidRequest, true); - }); - }); + [['test', '.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml', { random: 'queryparam' }, 'GET', log]].forEach( + test => { + it(`should return InvalidRequest for "${test[1]}" object name`, () => { + assert.strictEqual(routeVeeam.checkBucketAndKey(...test).is.InvalidRequest, true); + }); + } + ); [ ['test', '.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml', null, 'GET', log], @@ -123,14 +121,19 @@ describe('RouteVeeam: routeVeeam', () => { url: '/bucket/veeam', }); req.method = 'PATCH'; - routeVeeam.routeVeeam('127.0.0.1', req, { - setHeader: () => {}, - writeHead: () => {}, - end: data => { - assert(data.includes('MethodNotAllowed')); - done(); + routeVeeam.routeVeeam( + '127.0.0.1', + req, + { + setHeader: () => {}, + writeHead: () => {}, + end: data => { + assert(data.includes('MethodNotAllowed')); + done(); + }, + headersSent: false, }, - headersSent: false, - }, log); + log + ); }); }); diff --git a/tests/unit/internal/veeam/schemas/system.js b/tests/unit/internal/veeam/schemas/system.js index ee92bfbf4c..06fa21ea9e 100644 --- a/tests/unit/internal/veeam/schemas/system.js +++ b/tests/unit/internal/veeam/schemas/system.js @@ -18,7 +18,7 @@ describe('RouteVeeam: validateSystemSchema 1.0', () => { CapacityInfo: true, UploadSessions: true, IAMSTS: true, - } + }, }, SystemRecommendations: { S3ConcurrentTaskLimit: 0, @@ -89,7 +89,6 @@ describe('RouteVeeam: validateSystemSchema 1.0', () => { }); }); - describe('RouteVeeam: validateSystemSchema unknown version', () => { const protocolVersion = '"1.1"'; [ @@ -101,7 +100,7 @@ describe('RouteVeeam: validateSystemSchema unknown version', () => { CapacityInfo: true, UploadSessions: true, IAMSTS: true, - } + }, }, SystemRecommendations: { S3ConcurrentTaskLimit: 0, diff --git a/tests/unit/management/agent.js b/tests/unit/management/agent.js index da806a7521..3092bd5c73 100644 --- a/tests/unit/management/agent.js +++ b/tests/unit/management/agent.js @@ -1,8 +1,6 @@ const assert = require('assert'); -const { - createWSAgent, -} = require('../../../lib/management/push'); +const { createWSAgent } = require('../../../lib/management/push'); const proxy = 'http://proxy:3128/'; const logger = { info: () => {} }; @@ -10,57 +8,93 @@ const logger = { info: () => {} }; function testVariableSet(httpProxy, httpsProxy, allProxy, noProxy) { return () => { it(`should use ${httpProxy} environment variable`, () => { - let agent = createWSAgent('https://pushserver', { - [httpProxy]: 'http://proxy:3128', - }, logger); + let agent = createWSAgent( + 'https://pushserver', + { + [httpProxy]: 'http://proxy:3128', + }, + logger + ); assert.equal(agent, null); - agent = createWSAgent('http://pushserver', { - [httpProxy]: proxy, - }, logger); + agent = createWSAgent( + 'http://pushserver', + { + [httpProxy]: proxy, + }, + logger + ); assert.equal(agent.proxy.href, proxy); }); it(`should use ${httpsProxy} environment variable`, () => { - let agent = createWSAgent('http://pushserver', { - [httpsProxy]: proxy, - }, logger); + let agent = createWSAgent( + 'http://pushserver', + { + [httpsProxy]: proxy, + }, + logger + ); assert.equal(agent, null); - agent = createWSAgent('https://pushserver', { - [httpsProxy]: proxy, - }, logger); + agent = createWSAgent( + 'https://pushserver', + { + [httpsProxy]: proxy, + }, + logger + ); assert.equal(agent.proxy.href, proxy); }); it(`should use ${allProxy} environment variable`, () => { - let agent = createWSAgent('http://pushserver', { 
- [allProxy]: proxy, - }, logger); + let agent = createWSAgent( + 'http://pushserver', + { + [allProxy]: proxy, + }, + logger + ); assert.equal(agent.proxy.href, proxy); - agent = createWSAgent('https://pushserver', { - [allProxy]: proxy, - }, logger); + agent = createWSAgent( + 'https://pushserver', + { + [allProxy]: proxy, + }, + logger + ); assert.equal(agent.proxy.href, proxy); }); it(`should use ${noProxy} environment variable`, () => { - let agent = createWSAgent('http://pushserver', { - [noProxy]: 'pushserver', - }, logger); + let agent = createWSAgent( + 'http://pushserver', + { + [noProxy]: 'pushserver', + }, + logger + ); assert.equal(agent, null); - agent = createWSAgent('http://pushserver', { - [noProxy]: 'pushserver', - [httpProxy]: proxy, - }, logger); + agent = createWSAgent( + 'http://pushserver', + { + [noProxy]: 'pushserver', + [httpProxy]: proxy, + }, + logger + ); assert.equal(agent, null); - agent = createWSAgent('http://pushserver', { - [noProxy]: 'pushserver2', - [httpProxy]: proxy, - }, logger); + agent = createWSAgent( + 'http://pushserver', + { + [noProxy]: 'pushserver2', + [httpProxy]: proxy, + }, + logger + ); assert.equal(agent.proxy.href, proxy); }); }; @@ -74,9 +108,7 @@ describe('Websocket connection agent', () => { }); }); - describe('with lowercase proxy env', - testVariableSet('http_proxy', 'https_proxy', 'all_proxy', 'no_proxy')); + describe('with lowercase proxy env', testVariableSet('http_proxy', 'https_proxy', 'all_proxy', 'no_proxy')); - describe('with uppercase proxy env', - testVariableSet('HTTP_PROXY', 'HTTPS_PROXY', 'ALL_PROXY', 'NO_PROXY')); + describe('with uppercase proxy env', testVariableSet('HTTP_PROXY', 'HTTPS_PROXY', 'ALL_PROXY', 'NO_PROXY')); }); diff --git a/tests/unit/management/configuration.js b/tests/unit/management/configuration.js index 71134c4f3c..267cde1740 100644 --- a/tests/unit/management/configuration.js +++ b/tests/unit/management/configuration.js @@ -8,26 +8,20 @@ const metadata = require('../../../lib/metadata/wrapper'); const managementDatabaseName = 'PENSIEVE'; const tokenConfigurationKey = 'auth/zenko/remote-management-token'; -const { privateKey, accessKey, decryptedSecretKey, secretKey, canonicalId, - userName } = require('./resources.json'); +const { privateKey, accessKey, decryptedSecretKey, secretKey, canonicalId, userName } = require('./resources.json'); const shortid = '123456789012'; const email = 'customaccount1@setbyenv.com'; const arn = 'arn:aws:iam::123456789012:root'; const { config } = require('../../../lib/Config'); -const { - remoteOverlayIsNewer, - patchConfiguration, -} = require('../../../lib/management/configuration'); +const { remoteOverlayIsNewer, patchConfiguration } = require('../../../lib/management/configuration'); -const { - initManagementDatabase, -} = require('../../../lib/management/index'); +const { initManagementDatabase } = require('../../../lib/management/index'); function initManagementCredentialsMock(cb) { - return metadata.putObjectMD(managementDatabaseName, - tokenConfigurationKey, { privateKey }, {}, - log, error => cb(error)); + return metadata.putObjectMD(managementDatabaseName, tokenConfigurationKey, { privateKey }, {}, log, error => + cb(error) + ); } function getConfig() { @@ -37,14 +31,11 @@ function getConfig() { // Original Config const overlayVersionOriginal = Object.assign({}, config.overlayVersion); const authDataOriginal = Object.assign({}, config.authData).accounts; -const locationConstraintsOriginal = Object.assign({}, - config.locationConstraints); +const 
locationConstraintsOriginal = Object.assign({}, config.locationConstraints); const restEndpointsOriginal = Object.assign({}, config.restEndpoints); const browserAccessEnabledOriginal = config.browserAccessEnabled; const instanceId = '19683e55-56f7-4a4c-98a7-706c07e4ec30'; -const publicInstanceId = crypto.createHash('sha256') - .update(instanceId) - .digest('hex'); +const publicInstanceId = crypto.createHash('sha256').update(instanceId).digest('hex'); function resetConfig() { config.overlayVersion = overlayVersionOriginal; @@ -63,12 +54,14 @@ function assertConfig(actualConf, expectedConf) { } describe('patchConfiguration', () => { - before(done => initManagementDatabase(log, err => { - if (err) { - return done(err); - } - return initManagementCredentialsMock(done); - })); + before(done => + initManagementDatabase(log, err => { + if (err) { + return done(err); + } + return initManagementCredentialsMock(done); + }) + ); beforeEach(() => { resetConfig(); }); @@ -115,17 +108,21 @@ describe('patchConfiguration', () => { publicInstanceId, browserAccessEnabled: true, authData: { - accounts: [{ - name: userName, - email, - arn, - canonicalID: canonicalId, - shortid, - keys: [{ - access: accessKey, - secret: decryptedSecretKey, - }], - }], + accounts: [ + { + name: userName, + email, + arn, + canonicalID: canonicalId, + shortid, + keys: [ + { + access: accessKey, + secret: decryptedSecretKey, + }, + ], + }, + ], }, locationConstraints: { 'us-east-1': { @@ -141,14 +138,12 @@ describe('patchConfiguration', () => { }, }; assertConfig(actualConf, expectedConf); - assert.deepStrictEqual(actualConf.restEndpoints['1.1.1.1'], - 'us-east-1'); + assert.deepStrictEqual(actualConf.restEndpoints['1.1.1.1'], 'us-east-1'); return done(); }); }); - it('should apply second configuration if version (2) is greater than ' + - 'overlayVersion (1)', done => { + it('should apply second configuration if version (2) is greater than ' + 'overlayVersion (1)', done => { const newConf1 = { version: 1, instanceId, @@ -175,8 +170,7 @@ describe('patchConfiguration', () => { }); }); - it('should not apply the second configuration if version equals ' + - 'overlayVersion', done => { + it('should not apply the second configuration if version equals ' + 'overlayVersion', done => { const newConf1 = { version: 1, instanceId, @@ -205,40 +199,37 @@ describe('patchConfiguration', () => { }); describe('remoteOverlayIsNewer', () => { - it('should return remoteOverlayIsNewer equals false if remote overlay ' + - 'is less than the cached', () => { + it('should return remoteOverlayIsNewer equals false if remote overlay ' + 'is less than the cached', () => { const cachedOverlay = { version: 2, }; const remoteOverlay = { version: 1, }; - const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay, - remoteOverlay); + const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay, remoteOverlay); assert.equal(isRemoteOverlayNewer, false); }); - it('should return remoteOverlayIsNewer equals false if remote overlay ' + - 'and the cached one are equal', () => { + it('should return remoteOverlayIsNewer equals false if remote overlay ' + 'and the cached one are equal', () => { const cachedOverlay = { version: 1, }; const remoteOverlay = { version: 1, }; - const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay, - remoteOverlay); + const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay, remoteOverlay); assert.equal(isRemoteOverlayNewer, false); }); - it('should return remoteOverlayIsNewer equals true if remote overlay ' + - 'version 
is greater than the cached one ', () => { - const cachedOverlay = { - version: 0, - }; - const remoteOverlay = { - version: 1, - }; - const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay, - remoteOverlay); - assert.equal(isRemoteOverlayNewer, true); - }); + it( + 'should return remoteOverlayIsNewer equals true if remote overlay ' + 'version is greater than the cached one ', + () => { + const cachedOverlay = { + version: 0, + }; + const remoteOverlay = { + version: 1, + }; + const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay, remoteOverlay); + assert.equal(isRemoteOverlayNewer, true); + } + ); }); diff --git a/tests/unit/management/secureChannel.js b/tests/unit/management/secureChannel.js index dc29bb1b8a..05d32247ab 100644 --- a/tests/unit/management/secureChannel.js +++ b/tests/unit/management/secureChannel.js @@ -19,16 +19,16 @@ describe('report handler', () => { }); [ - { value: 'true', result: true }, - { value: 'TRUE', result: true }, - { value: 'tRuE', result: true }, - { value: '1', result: true }, - { value: 'false', result: false }, - { value: 'FALSE', result: false }, - { value: 'FaLsE', result: false }, - { value: '0', result: false }, - { value: 'foo', result: false }, - { value: '', result: true }, + { value: 'true', result: true }, + { value: 'TRUE', result: true }, + { value: 'tRuE', result: true }, + { value: '1', result: true }, + { value: 'false', result: false }, + { value: 'FALSE', result: false }, + { value: 'FaLsE', result: false }, + { value: '0', result: false }, + { value: 'foo', result: false }, + { value: '', result: true }, { value: undefined, result: true }, ].forEach(param => it(`should allow set local file system capability ${param.value}`, () => { diff --git a/tests/unit/management/testChannelMessageV0.js b/tests/unit/management/testChannelMessageV0.js index 28421d46a1..9bc69e2553 100644 --- a/tests/unit/management/testChannelMessageV0.js +++ b/tests/unit/management/testChannelMessageV0.js @@ -1,10 +1,6 @@ const assert = require('assert'); -const { - ChannelMessageV0, - MessageType, - TargetType, -} = require('../../../lib/management/ChannelMessageV0'); +const { ChannelMessageV0, MessageType, TargetType } = require('../../../lib/management/ChannelMessageV0'); const { CONFIG_OVERLAY_MESSAGE, diff --git a/tests/unit/metadata/metadataUtils.spec.js b/tests/unit/metadata/metadataUtils.spec.js index 50aab8ddb1..807b3bd886 100644 --- a/tests/unit/metadata/metadataUtils.spec.js +++ b/tests/unit/metadata/metadataUtils.spec.js @@ -10,51 +10,66 @@ const authInfo = makeAuthInfo('accessKey'); const otherAuthInfo = makeAuthInfo('otherAccessKey'); const ownerCanonicalId = authInfo.getCanonicalID(); -const bucket = new BucketInfo('niftyBucket', ownerCanonicalId, - authInfo.getAccountDisplayName(), creationDate); +const bucket = new BucketInfo('niftyBucket', ownerCanonicalId, authInfo.getAccountDisplayName(), creationDate); const log = new DummyRequestLogger(); -const { - validateBucket, - metadataGetObjects, - metadataGetObject, -} = require('../../../lib/metadata/metadataUtils'); +const { validateBucket, metadataGetObjects, metadataGetObject } = require('../../../lib/metadata/metadataUtils'); const metadata = require('../../../lib/metadata/wrapper'); describe('validateBucket', () => { it('action bucketPutPolicy by bucket owner', () => { - const validationResult = validateBucket(bucket, { - authInfo, - requestType: 'bucketPutPolicy', - request: null, - }, log, false); + const validationResult = validateBucket( + bucket, + { + authInfo, + requestType: 
'bucketPutPolicy', + request: null, + }, + log, + false + ); assert.ifError(validationResult); }); it('action bucketPutPolicy by other than bucket owner', () => { - const validationResult = validateBucket(bucket, { - authInfo: otherAuthInfo, - requestType: 'bucketPutPolicy', - request: null, - }, log, false); + const validationResult = validateBucket( + bucket, + { + authInfo: otherAuthInfo, + requestType: 'bucketPutPolicy', + request: null, + }, + log, + false + ); assert(validationResult); assert(validationResult.is.MethodNotAllowed); }); it('action bucketGet by bucket owner', () => { - const validationResult = validateBucket(bucket, { - authInfo, - requestType: 'bucketGet', - request: null, - }, log, false); + const validationResult = validateBucket( + bucket, + { + authInfo, + requestType: 'bucketGet', + request: null, + }, + log, + false + ); assert.ifError(validationResult); }); it('action bucketGet by other than bucket owner', () => { - const validationResult = validateBucket(bucket, { - authInfo: otherAuthInfo, - requestType: 'bucketGet', - request: null, - }, log, false); + const validationResult = validateBucket( + bucket, + { + authInfo: otherAuthInfo, + requestType: 'bucketGet', + request: null, + }, + log, + false + ); assert(validationResult); assert(validationResult.is.AccessDenied); }); @@ -118,7 +133,8 @@ describe('metadataGetObject', () => { it('should return the cached document if provided', done => { const cachedDoc = { [objectKey.inPlay.key]: { - key: 'objectKey1', versionId: 'versionId1', + key: 'objectKey1', + versionId: 'versionId1', }, }; metadataGetObject('bucketName', objectKey.inPlay.key, objectKey.versionId, cachedDoc, log, (err, result) => { diff --git a/tests/unit/multipleBackend/VersioningBackendClient.js b/tests/unit/multipleBackend/VersioningBackendClient.js index 60d8527602..315cdb4b79 100644 --- a/tests/unit/multipleBackend/VersioningBackendClient.js +++ b/tests/unit/multipleBackend/VersioningBackendClient.js @@ -7,8 +7,7 @@ const DummyService = require('../DummyService'); const { DummyRequestLogger } = require('../helpers'); const missingVerIdInternalError = errorInstances.InternalError.customizeDescription( - 'Invalid state. Please ensure versioning is enabled ' + - 'in AWS for the location constraint and try again.' + 'Invalid state. Please ensure versioning is enabled ' + 'in AWS for the location constraint and try again.' 
); const log = new DummyRequestLogger(); @@ -43,8 +42,7 @@ const s3Config = { }; const assertSuccess = (err, cb) => { - assert.ifError(err, - `Expected success, but got error ${err}`); + assert.ifError(err, `Expected success, but got error ${err}`); cb(); }; @@ -54,26 +52,22 @@ const assertFailure = (err, cb) => { }; const genTests = [ { - msg: 'should return success if supportsVersioning === true ' + - 'and backend versioning is enabled', + msg: 'should return success if supportsVersioning === true ' + 'and backend versioning is enabled', input: { supportsVersioning: true, enableMockVersioning: true }, callback: assertSuccess, }, { - msg: 'should return success if supportsVersioning === false ' + - 'and backend versioning is enabled', + msg: 'should return success if supportsVersioning === false ' + 'and backend versioning is enabled', input: { supportsVersioning: false, enableMockVersioning: true }, callback: assertSuccess, }, { - msg: 'should return error if supportsVersioning === true ' + - 'and backend versioning is disabled', + msg: 'should return error if supportsVersioning === true ' + 'and backend versioning is disabled', input: { supportsVersioning: true, enableMockVersioning: false }, callback: assertFailure, }, { - msg: 'should return success if supportsVersioning === false ' + - 'and backend versioning is disabled', + msg: 'should return success if supportsVersioning === false ' + 'and backend versioning is disabled', input: { supportsVersioning: false, enableMockVersioning: false }, callback: assertSuccess, }, @@ -86,12 +80,13 @@ describe('AwsClient::putObject', () => { testClient = new AwsClient(s3Config); testClient._client = new DummyService({ versioning: true }); }); - genTests.forEach(test => it(test.msg, done => { - testClient._supportsVersioning = test.input.supportsVersioning; - testClient._client.versioning = test.input.enableMockVersioning; - testClient.put('', 0, { bucketName: bucket, objectKey: key }, - reqUID, err => test.callback(err, done)); - })); + genTests.forEach(test => + it(test.msg, done => { + testClient._supportsVersioning = test.input.supportsVersioning; + testClient._client.versioning = test.input.enableMockVersioning; + testClient.put('', 0, { bucketName: bucket, objectKey: key }, reqUID, err => test.callback(err, done)); + }) + ); }); describe('AwsClient::copyObject', () => { @@ -102,13 +97,15 @@ describe('AwsClient::copyObject', () => { testClient._client = new DummyService({ versioning: true }); }); - genTests.forEach(test => it(test.msg, done => { - testClient._supportsVersioning = test.input.supportsVersioning; - testClient._client.versioning = test.input.enableMockVersioning; - testClient.copyObject(copyObjectRequest, null, key, - sourceLocationConstraint, null, config, log, - err => test.callback(err, done)); - })); + genTests.forEach(test => + it(test.msg, done => { + testClient._supportsVersioning = test.input.supportsVersioning; + testClient._client.versioning = test.input.enableMockVersioning; + testClient.copyObject(copyObjectRequest, null, key, sourceLocationConstraint, null, config, log, err => + test.callback(err, done) + ); + }) + ); }); describe('AwsClient::completeMPU', () => { @@ -118,13 +115,14 @@ describe('AwsClient::completeMPU', () => { testClient = new AwsClient(s3Config); testClient._client = new DummyService({ versioning: true }); }); - genTests.forEach(test => it(test.msg, done => { - testClient._supportsVersioning = test.input.supportsVersioning; - testClient._client.versioning = test.input.enableMockVersioning; - const 
uploadId = 'externalBackendTestUploadId'; - testClient.completeMPU(jsonList, null, key, uploadId, - bucket, log, err => test.callback(err, done)); - })); + genTests.forEach(test => + it(test.msg, done => { + testClient._supportsVersioning = test.input.supportsVersioning; + testClient._client.versioning = test.input.enableMockVersioning; + const uploadId = 'externalBackendTestUploadId'; + testClient.completeMPU(jsonList, null, key, uploadId, bucket, log, err => test.callback(err, done)); + }) + ); }); describe('AwsClient::healthcheck', () => { @@ -159,34 +157,31 @@ describe('AwsClient::healthcheck', () => { const tests = [ { - msg: 'should return success if supportsVersioning === true ' + - 'and backend versioning is enabled', + msg: 'should return success if supportsVersioning === true ' + 'and backend versioning is enabled', input: { supportsVersioning: true, enableMockVersioning: true }, callback: assertSuccessVersioned, }, { - msg: 'should return success if supportsVersioning === false ' + - 'and backend versioning is enabled', + msg: 'should return success if supportsVersioning === false ' + 'and backend versioning is enabled', input: { supportsVersioning: false, enableMockVersioning: true }, callback: assertSuccessNonVersioned, }, { - msg: 'should return error if supportsVersioning === true ' + - ' and backend versioning is disabled', + msg: 'should return error if supportsVersioning === true ' + ' and backend versioning is disabled', input: { supportsVersioning: true, enableMockVersioning: false }, callback: assertFailure, }, { - msg: 'should return success if supportsVersioning === false ' + - 'and backend versioning is disabled', + msg: 'should return success if supportsVersioning === false ' + 'and backend versioning is disabled', input: { supportsVersioning: false, enableMockVersioning: false }, callback: assertSuccessNonVersioned, }, ]; - tests.forEach(test => it(test.msg, done => { - testClient._supportsVersioning = test.input.supportsVersioning; - testClient._client.versioning = test.input.enableMockVersioning; - testClient.healthcheck('backend', - (err, resp) => test.callback(resp.backend, done)); - })); + tests.forEach(test => + it(test.msg, done => { + testClient._supportsVersioning = test.input.supportsVersioning; + testClient._client.versioning = test.input.enableMockVersioning; + testClient.healthcheck('backend', (err, resp) => test.callback(resp.backend, done)); + }) + ); }); diff --git a/tests/unit/multipleBackend/getReplicationBackendDataLocator.js b/tests/unit/multipleBackend/getReplicationBackendDataLocator.js index b27f7595ff..048928725a 100644 --- a/tests/unit/multipleBackend/getReplicationBackendDataLocator.js +++ b/tests/unit/multipleBackend/getReplicationBackendDataLocator.js @@ -1,7 +1,6 @@ const assert = require('assert'); -const getReplicationBackendDataLocator = require( - '../../../lib/api/apiUtils/object/getReplicationBackendDataLocator'); +const getReplicationBackendDataLocator = require('../../../lib/api/apiUtils/object/getReplicationBackendDataLocator'); const locCheckResult = { location: 'spoofbackend', @@ -9,47 +8,45 @@ const locCheckResult = { locationType: 'spoof', }; const repNoMatch = { backends: [{ site: 'nomatch' }] }; -const repMatchPending = { backends: - [{ site: 'spoofbackend', status: 'PENDING', dataVersionId: '' }] }; -const repMatchFailed = { backends: - [{ site: 'spoofbackend', status: 'FAILED', dataVersionId: '' }] }; -const repMatch = { backends: [{ - site: 'spoofbackend', - status: 'COMPLETED', - dataStoreVersionId: 'spoofid' }], 
+const repMatchPending = { backends: [{ site: 'spoofbackend', status: 'PENDING', dataVersionId: '' }] }; +const repMatchFailed = { backends: [{ site: 'spoofbackend', status: 'FAILED', dataVersionId: '' }] }; +const repMatch = { + backends: [ + { + site: 'spoofbackend', + status: 'COMPLETED', + dataStoreVersionId: 'spoofid', + }, + ], }; -const expDataLocator = [{ - key: locCheckResult.key, - dataStoreName: locCheckResult.location, - dataStoreType: locCheckResult.locationType, - dataStoreVersionId: repMatch.backends[0].dataStoreVersionId, -}]; - +const expDataLocator = [ + { + key: locCheckResult.key, + dataStoreName: locCheckResult.location, + dataStoreType: locCheckResult.locationType, + dataStoreVersionId: repMatch.backends[0].dataStoreVersionId, + }, +]; describe('Replication Backend Compare', () => { it('should return error if no match in replication backends', () => { - const repBackendResult = - getReplicationBackendDataLocator(locCheckResult, repNoMatch); + const repBackendResult = getReplicationBackendDataLocator(locCheckResult, repNoMatch); assert.strictEqual(repBackendResult.error.is.InvalidLocationConstraint, true); }); it('should return a status and reason if backend status is PENDING', () => { - const repBackendResult = - getReplicationBackendDataLocator(locCheckResult, repMatchPending); + const repBackendResult = getReplicationBackendDataLocator(locCheckResult, repMatchPending); assert.strictEqual(repBackendResult.dataLocator, undefined); assert.strictEqual(repBackendResult.status, 'PENDING'); assert.notStrictEqual(repBackendResult.reason, undefined); }); it('should return a status and reason if backend status is FAILED', () => { - const repBackendResult = - getReplicationBackendDataLocator(locCheckResult, repMatchFailed); + const repBackendResult = getReplicationBackendDataLocator(locCheckResult, repMatchFailed); assert.strictEqual(repBackendResult.dataLocator, undefined); assert.strictEqual(repBackendResult.status, 'FAILED'); assert.notStrictEqual(repBackendResult.reason, undefined); }); - it('should return dataLocator obj if backend matches and rep is COMPLETED', - () => { - const repBackendResult = - getReplicationBackendDataLocator(locCheckResult, repMatch); + it('should return dataLocator obj if backend matches and rep is COMPLETED', () => { + const repBackendResult = getReplicationBackendDataLocator(locCheckResult, repMatch); assert.strictEqual(repBackendResult.status, 'COMPLETED'); assert.deepStrictEqual(repBackendResult.dataLocator, expDataLocator); }); diff --git a/tests/unit/multipleBackend/locationConstraintCheck.js b/tests/unit/multipleBackend/locationConstraintCheck.js index 1388e53bbe..2c92837e65 100644 --- a/tests/unit/multipleBackend/locationConstraintCheck.js +++ b/tests/unit/multipleBackend/locationConstraintCheck.js @@ -3,8 +3,7 @@ const assert = require('assert'); const { BucketInfo, BackendInfo } = require('arsenal').models; const DummyRequest = require('../DummyRequest'); const { DummyRequestLogger } = require('../helpers'); -const locationConstraintCheck - = require('../../../lib/api/apiUtils/object/locationConstraintCheck'); +const locationConstraintCheck = require('../../../lib/api/apiUtils/object/locationConstraintCheck'); const memLocation = 'scality-internal-mem'; const fileLocation = 'scality-internal-file'; @@ -18,48 +17,51 @@ const objectKey = 'someobject'; const postBody = Buffer.from('I am a body', 'utf8'); const log = new DummyRequestLogger(); -const testBucket = new BucketInfo(bucketName, owner, ownerDisplayName, - testDate, null, null, 
null, null, null, null, locationConstraint); +const testBucket = new BucketInfo( + bucketName, + owner, + ownerDisplayName, + testDate, + null, + null, + null, + null, + null, + null, + locationConstraint +); function createTestRequest(locationConstraint) { - const testRequest = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { 'x-amz-meta-scal-location-constraint': locationConstraint }, - url: `/${bucketName}/${objectKey}`, - parsedHost: 'localhost', - }, postBody); + const testRequest = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { 'x-amz-meta-scal-location-constraint': locationConstraint }, + url: `/${bucketName}/${objectKey}`, + parsedHost: 'localhost', + }, + postBody + ); return testRequest; } describe('Location Constraint Check', () => { - it('should return error if controlling location constraint is ' + - 'not valid', done => { - const backendInfoObj = locationConstraintCheck( - createTestRequest('fail-region'), null, testBucket, log); - assert.strictEqual(backendInfoObj.err.code, 400, - 'Expected "Invalid Argument" code error'); - assert(backendInfoObj.err.is.InvalidArgument, 'Expected "Invalid ' + - 'Argument" error'); + it('should return error if controlling location constraint is ' + 'not valid', done => { + const backendInfoObj = locationConstraintCheck(createTestRequest('fail-region'), null, testBucket, log); + assert.strictEqual(backendInfoObj.err.code, 400, 'Expected "Invalid Argument" code error'); + assert(backendInfoObj.err.is.InvalidArgument, 'Expected "Invalid ' + 'Argument" error'); done(); }); - it('should return instance of BackendInfo with correct ' + - 'locationConstraints', done => { - const backendInfoObj = locationConstraintCheck( - createTestRequest(memLocation), null, testBucket, log); - assert.strictEqual(backendInfoObj.err, null, 'Expected success ' + - `but got error ${backendInfoObj.err}`); + it('should return instance of BackendInfo with correct ' + 'locationConstraints', done => { + const backendInfoObj = locationConstraintCheck(createTestRequest(memLocation), null, testBucket, log); + assert.strictEqual(backendInfoObj.err, null, 'Expected success ' + `but got error ${backendInfoObj.err}`); assert.strictEqual(typeof backendInfoObj.controllingLC, 'string'); - assert.equal(backendInfoObj.backendInfo instanceof BackendInfo, - true); - assert.strictEqual(backendInfoObj. - backendInfo.getObjectLocationConstraint(), memLocation); - assert.strictEqual(backendInfoObj. 
- backendInfo.getBucketLocationConstraint(), fileLocation); - assert.strictEqual(backendInfoObj.backendInfo.getRequestEndpoint(), - 'localhost'); + assert.equal(backendInfoObj.backendInfo instanceof BackendInfo, true); + assert.strictEqual(backendInfoObj.backendInfo.getObjectLocationConstraint(), memLocation); + assert.strictEqual(backendInfoObj.backendInfo.getBucketLocationConstraint(), fileLocation); + assert.strictEqual(backendInfoObj.backendInfo.getRequestEndpoint(), 'localhost'); done(); }); }); diff --git a/tests/unit/multipleBackend/locationHeaderCheck.js b/tests/unit/multipleBackend/locationHeaderCheck.js index 34d58e0a9b..875a9b7fe0 100644 --- a/tests/unit/multipleBackend/locationHeaderCheck.js +++ b/tests/unit/multipleBackend/locationHeaderCheck.js @@ -1,8 +1,7 @@ const assert = require('assert'); const { errorInstances } = require('arsenal'); -const locationHeaderCheck = - require('../../../lib/api/apiUtils/object/locationHeaderCheck'); +const locationHeaderCheck = require('../../../lib/api/apiUtils/object/locationHeaderCheck'); const objectKey = 'locationHeaderCheckObject'; const bucketName = 'locationHeaderCheckBucket'; @@ -11,18 +10,22 @@ const testCases = [ { location: 'doesnotexist', expRes: errorInstances.InvalidLocationConstraint.customizeDescription( - 'Invalid location constraint specified in header'), - }, { + 'Invalid location constraint specified in header' + ), + }, + { location: '', expRes: undefined, - }, { + }, + { location: 'awsbackend', expRes: { location: 'awsbackend', key: objectKey, locationType: 'aws_s3', }, - }, { + }, + { location: 'awsbackendmismatch', expRes: { location: 'awsbackendmismatch', @@ -34,11 +37,9 @@ const testCases = [ describe('Location Header Check', () => { testCases.forEach(test => { - it('should return expected result with location constraint header ' + - `set to ${test.location}`, () => { + it('should return expected result with location constraint header ' + `set to ${test.location}`, () => { const headers = { 'x-amz-location-constraint': `${test.location}` }; - const checkRes = - locationHeaderCheck(headers, objectKey, bucketName); + const checkRes = locationHeaderCheck(headers, objectKey, bucketName); assert.deepStrictEqual(checkRes, test.expRes); }); }); diff --git a/tests/unit/policies.js b/tests/unit/policies.js index b1dfdc596e..fd19996d4c 100644 --- a/tests/unit/policies.js +++ b/tests/unit/policies.js @@ -240,10 +240,7 @@ const apiMatrix = [ headers: { 'x-amz-version-id': '1', }, - expectedPermissions: [ - 's3:PutObject', - 's3:PutObjectVersionTagging', - ], + expectedPermissions: ['s3:PutObject', 's3:PutObjectVersionTagging'], }, { name: 'objectPutACL', @@ -282,7 +279,6 @@ const apiMatrix = [ }, ]; - function prepareDummyRequest(headers = {}) { const request = new DummyRequest({ hostname: 'localhost', @@ -300,13 +296,16 @@ describe('Policies: permission checks for S3 APIs', () => { if (api.name.length === 0) { return; } - const message = `should return ${api.expectedPermissions.join(', ')} in requestContextParams for ${api.name}` + - `${(api.headers && api.headers.length) > 0 ? - ` with headers ${api.headers.map(el => el[0]).join(', ')}` : ''}`; + const message = + `should return ${api.expectedPermissions.join(', ')} in requestContextParams for ${api.name}` + + `${ + (api.headers && api.headers.length) > 0 + ? 
` with headers ${api.headers.map(el => el[0]).join(', ')}` + : '' + }`; it(message, () => { const request = prepareDummyRequest(api.headers); - const requestContexts = prepareRequestContexts(api.name, request, - sourceBucket, sourceObject); + const requestContexts = prepareRequestContexts(api.name, request, sourceBucket, sourceObject); const requestedActions = requestContexts.map(rq => rq.getAction()); assert.deepStrictEqual(requestedActions, api.expectedPermissions); }); @@ -320,26 +319,26 @@ describe('Policies: permission checks for S3 APIs', () => { } it('should return s3:PutBucket without any provided header', () => { - assert.deepStrictEqual( - putBucketApiMethods(), - ['bucketPut'], - ); + assert.deepStrictEqual(putBucketApiMethods(), ['bucketPut']); }); - it('should return s3:CreateBucket, s3:PutBucketVersioning and s3:PutBucketObjectLockConfiguration' + - ' with object-lock headers', () => { - assert.deepStrictEqual( - putBucketApiMethods({ 'x-amz-bucket-object-lock-enabled': 'true' }), - ['bucketPut', 'bucketPutObjectLock', 'bucketPutVersioning'], - ); - }); + it( + 'should return s3:CreateBucket, s3:PutBucketVersioning and s3:PutBucketObjectLockConfiguration' + + ' with object-lock headers', + () => { + assert.deepStrictEqual(putBucketApiMethods({ 'x-amz-bucket-object-lock-enabled': 'true' }), [ + 'bucketPut', + 'bucketPutObjectLock', + 'bucketPutVersioning', + ]); + } + ); - it('should return s3:CreateBucket and s3:PutBucketAcl' + - ' with ACL headers', () => { - assert.deepStrictEqual( - putBucketApiMethods({ 'x-amz-grant-read': 'private' }), - ['bucketPut', 'bucketPutACL'], - ); + it('should return s3:CreateBucket and s3:PutBucketAcl' + ' with ACL headers', () => { + assert.deepStrictEqual(putBucketApiMethods({ 'x-amz-grant-read': 'private' }), [ + 'bucketPut', + 'bucketPutACL', + ]); }); }); }); diff --git a/tests/unit/quotas/scuba/wrapper.js b/tests/unit/quotas/scuba/wrapper.js index 81a9a83a8f..dd36ca333e 100644 --- a/tests/unit/quotas/scuba/wrapper.js +++ b/tests/unit/quotas/scuba/wrapper.js @@ -44,7 +44,7 @@ describe('ScubaClientImpl', () => { }); it('should disable Scuba if health check returns non-stale data', async () => { - sinon.stub(client, 'healthCheck').resolves({ date: Date.now() - (12 * 60 * 60 * 1000) }); + sinon.stub(client, 'healthCheck').resolves({ date: Date.now() - 12 * 60 * 60 * 1000 }); await client._healthCheck(); @@ -52,7 +52,7 @@ describe('ScubaClientImpl', () => { }); it('should disable Scuba if health check returns stale data', async () => { - sinon.stub(client, 'healthCheck').resolves({ date: Date.now() - (48 * 60 * 60 * 1000) }); + sinon.stub(client, 'healthCheck').resolves({ date: Date.now() - 48 * 60 * 60 * 1000 }); await client._healthCheck(); diff --git a/tests/unit/routes/routeBackbeat.js b/tests/unit/routes/routeBackbeat.js index 034d976bd9..fff9927a65 100644 --- a/tests/unit/routes/routeBackbeat.js +++ b/tests/unit/routes/routeBackbeat.js @@ -14,19 +14,22 @@ const quotaUtils = require('../../../lib/api/apiUtils/quotas/quotaUtils'); const log = new DummyRequestLogger(); function prepareDummyRequest(headers = {}, body = '') { - const request = new DummyRequest({ - hostname: 'localhost', - method: 'PUT', - url: '/_/backbeat/metadata/bucket0/key0', - port: 80, - headers, - socket: { - remoteAddress: '0.0.0.0', - destroy: () => {}, - on: () => {}, - removeListener: () => {}, + const request = new DummyRequest( + { + hostname: 'localhost', + method: 'PUT', + url: '/_/backbeat/metadata/bucket0/key0', + port: 80, + headers, + socket: { + 
remoteAddress: '0.0.0.0', + destroy: () => {}, + on: () => {}, + removeListener: () => {}, + }, }, - }, body || '{"replicationInfo":"{}"}'); + body || '{"replicationInfo":"{}"}' + ); return request; } @@ -42,7 +45,9 @@ describe('routeBackbeat', () => { sandbox = sinon.createSandbox(); // create a Promise that resolves when response.end is called - endPromise = new Promise(resolve => { resolveEnd = resolve; }); + endPromise = new Promise(resolve => { + resolveEnd = resolve; + }); mockResponse = { statusCode: null, @@ -104,7 +109,7 @@ describe('routeBackbeat', () => { routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 409); assert.strictEqual(mockResponse.body.code, 'InvalidBucketState'); @@ -124,7 +129,7 @@ describe('routeBackbeat', () => { routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, { Body: '{}' }); @@ -151,14 +156,15 @@ describe('routeBackbeat', () => { const objMd = {}; callback(null, bucketInfo, objMd); }); - storeObject.dataStore.callsFake((objectContext, cipherBundle, stream, size, - streamingV4Params, backendInfo, log, callback) => { - callback(null, {}, md5); - }); + storeObject.dataStore.callsFake( + (objectContext, cipherBundle, stream, size, streamingV4Params, backendInfo, log, callback) => { + callback(null, {}, md5); + } + ); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, [{}]); @@ -172,15 +178,18 @@ describe('routeBackbeat', () => { let dataDeleteSpy; function preparePutMetadataRequest(body = {}) { - const req = prepareDummyRequest({ - 'x-scal-versioning-required': 'true', - }, JSON.stringify({ - replicationInfo: {}, - ...body, - })); + const req = prepareDummyRequest( + { + 'x-scal-versioning-required': 'true', + }, + JSON.stringify({ + replicationInfo: {}, + ...body, + }) + ); req.method = 'PUT'; req.url = '/_/backbeat/metadata/bucket0/key0'; - req.destroy = () => { }; + req.destroy = () => {}; return req; } @@ -200,7 +209,7 @@ describe('routeBackbeat', () => { }); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); }); @@ -211,7 +220,7 @@ describe('routeBackbeat', () => { }); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); @@ -222,13 +231,15 @@ describe('routeBackbeat', () => { sandbox.stub(metadata, 'putObjectMD').callsFake((bucketName, objectKey, omVal, options, logParam, cb) => { assert.strictEqual(omVal['owner-display-name'], 'Bart'); - assert.strictEqual(omVal['owner-id'], - '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be'); + assert.strictEqual( + omVal['owner-id'], + '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be' + ); cb(null, {}); }); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); @@ -238,14 +249,15 @@ describe('routeBackbeat', () => { 
mockRequest.url = '/_/backbeat/metadata/bucket0/key0?accountId=invalid'; routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 404); assert.deepStrictEqual(mockResponse.body.code, 'AccountNotFound'); }); it('should repair master when putting metadata of a new version', async () => { - mockRequest.url = '/_/backbeat/metadata/bucket0/key0' + + mockRequest.url = + '/_/backbeat/metadata/bucket0/key0' + '?accountId=123456789012&versionId=aIXVkw5Tw2Pd00000000001I4j3QKsvf'; sandbox.stub(metadata, 'putObjectMD').callsFake((bucketName, objectKey, omVal, options, logParam, cb) => { @@ -259,14 +271,15 @@ describe('routeBackbeat', () => { }); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); }); it('should not repair master when updating metadata of an existing version', async () => { - mockRequest.url = '/_/backbeat/metadata/bucket0/key0' + + mockRequest.url = + '/_/backbeat/metadata/bucket0/key0' + '?accountId=123456789012&versionId=aIXVkw5Tw2Pd00000000001I4j3QKsvf'; sandbox.stub(metadata, 'putObjectMD').callsFake((bucketName, objectKey, omVal, options, logParam, cb) => { @@ -275,7 +288,7 @@ describe('routeBackbeat', () => { }); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); }); @@ -286,7 +299,7 @@ describe('routeBackbeat', () => { }); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 500); }); @@ -295,17 +308,19 @@ describe('routeBackbeat', () => { mockRequest.url = '/_/backbeat/metadata/bucket0'; routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 405); }); it('should delete data when replacing with empty object', async () => { const existingMd = { - location: [{ - key: 'key0', - dataStoreName: 'location1', - size: 100, - }], + location: [ + { + key: 'key0', + dataStoreName: 'location1', + size: 100, + }, + ], }; metadataUtils.standardMetadataValidateBucketAndObj.callsFake((params, denies, log, callback) => { callback(null, bucketInfo, existingMd); @@ -322,7 +337,7 @@ describe('routeBackbeat', () => { mockRequest.url += '?versionId=aIXVkw5Tw2Pd00000000001I4j3QKsvf'; routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); @@ -331,11 +346,13 @@ describe('routeBackbeat', () => { }); it('should preserve existing locations when x-scal-replication-content is METADATA', async () => { - const existingLocations = [{ - key: 'key0', - dataStoreName: 'location1', - size: 100, - }]; + const existingLocations = [ + { + key: 'key0', + dataStoreName: 'location1', + size: 100, + }, + ]; metadataUtils.standardMetadataValidateBucketAndObj.callsFake((params, denies, log, callback) => { callback(null, bucketInfo, { location: existingLocations, @@ -361,7 +378,7 @@ describe('routeBackbeat', () => { mockRequest.url += '?versionId=aIXVkw5Tw2Pd00000000001I4j3QKsvf'; routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + 
void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); @@ -370,11 +387,13 @@ describe('routeBackbeat', () => { it('should delete data when no more locations', async () => { const existingMd = { - location: [{ - key: 'key0', - dataStoreName: 'location1', - size: 100, - }], + location: [ + { + key: 'key0', + dataStoreName: 'location1', + size: 100, + }, + ], }; metadataUtils.standardMetadataValidateBucketAndObj.callsFake((params, denies, log, callback) => { callback(null, bucketInfo, existingMd); @@ -390,7 +409,7 @@ describe('routeBackbeat', () => { mockRequest.url += '?versionId=aIXVkw5Tw2Pd00000000001I4j3QKsvf'; routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); @@ -400,11 +419,13 @@ describe('routeBackbeat', () => { it('should delete data when locations change', async () => { const existingMd = { - location: [{ - key: 'key0', - dataStoreName: 'location1', - size: 100, - }], + location: [ + { + key: 'key0', + dataStoreName: 'location1', + size: 100, + }, + ], 'content-length': 100, }; metadataUtils.standardMetadataValidateBucketAndObj.callsFake((params, denies, log, callback) => { @@ -413,11 +434,13 @@ describe('routeBackbeat', () => { // New metadata has different locations const reqBody = { - location: [{ - key: 'key1', - dataStoreName: 'location1', - size: 100, - }], + location: [ + { + key: 'key1', + dataStoreName: 'location1', + size: 100, + }, + ], 'content-length': 100, }; mockRequest = preparePutMetadataRequest(reqBody); @@ -430,7 +453,7 @@ describe('routeBackbeat', () => { }); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); @@ -440,15 +463,18 @@ describe('routeBackbeat', () => { it('should not delete data when some keys are still used', async () => { const existingMd = { - location: [{ - key: 'key0', - dataStoreName: 'location1', - size: 100, - }, { - key: 'key1', - dataStoreName: 'location1', - size: 100, - }], + location: [ + { + key: 'key0', + dataStoreName: 'location1', + size: 100, + }, + { + key: 'key1', + dataStoreName: 'location1', + size: 100, + }, + ], 'content-length': 100, }; metadataUtils.standardMetadataValidateBucketAndObj.callsFake((params, denies, log, callback) => { @@ -457,15 +483,18 @@ describe('routeBackbeat', () => { // New metadata has different locations const reqBody = { - location: [{ - key: 'key1', - dataStoreName: 'location1', - size: 100, - }, { - key: 'key2', - dataStoreName: 'location1', - size: 100, - }], + location: [ + { + key: 'key1', + dataStoreName: 'location1', + size: 100, + }, + { + key: 'key2', + dataStoreName: 'location1', + size: 100, + }, + ], 'content-length': 100, }; mockRequest = preparePutMetadataRequest(reqBody); @@ -478,7 +507,7 @@ describe('routeBackbeat', () => { }); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); @@ -487,11 +516,13 @@ describe('routeBackbeat', () => { it('should not delete data when object is archived', async () => { const existingMd = { - location: [{ - key: 'key0', - dataStoreName: 'location1', - size: 100, - }], + location: [ + { + key: 'key0', + dataStoreName: 
'location1', + size: 100, + }, + ], 'content-length': 100, }; metadataUtils.standardMetadataValidateBucketAndObj.callsFake((params, denies, log, callback) => { @@ -499,11 +530,14 @@ describe('routeBackbeat', () => { }); // New metadata has empty location array but keeps content-length (cold storage case) - mockRequest = prepareDummyRequest(mockRequest.headers, JSON.stringify({ - location: undefined, - 'content-length': 100, - replicationInfo: {}, - })); + mockRequest = prepareDummyRequest( + mockRequest.headers, + JSON.stringify({ + location: undefined, + 'content-length': 100, + replicationInfo: {}, + }) + ); mockRequest.url += '?versionId=aIXVkw5Tw2Pd00000000001I4j3QKsvf'; sandbox.stub(metadata, 'putObjectMD').callsFake((bucketName, objectKey, omVal, options, logParam, cb) => { @@ -515,7 +549,7 @@ describe('routeBackbeat', () => { }); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 200); assert.deepStrictEqual(mockResponse.body, {}); @@ -528,18 +562,23 @@ describe('routeBackbeat', () => { let validateQuotasSpy; const prepareBatchDeleteRequest = (locations = undefined) => { - const mockRequest = prepareDummyRequest({ - 'x-scal-versioning-required': 'true' - }, JSON.stringify({ - Locations: locations || [{ - key: 'key0', - bucket: 'bucket0', - size: 100, - }], - })); + const mockRequest = prepareDummyRequest( + { + 'x-scal-versioning-required': 'true', + }, + JSON.stringify({ + Locations: locations || [ + { + key: 'key0', + bucket: 'bucket0', + size: 100, + }, + ], + }) + ); mockRequest.method = 'POST'; mockRequest.url = '/_/backbeat/batchdelete/bucket0/key0'; - mockRequest.destroy = () => { }; + mockRequest.destroy = () => {}; return mockRequest; }; @@ -562,7 +601,7 @@ describe('routeBackbeat', () => { doAuthStub.callThrough(); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert.strictEqual(mockResponse.statusCode, 403); }); @@ -578,7 +617,8 @@ describe('routeBackbeat', () => { await endPromise; sinon.assert.calledOnce(validateQuotasSpy); - sinon.assert.calledWith(validateQuotasSpy, + sinon.assert.calledWith( + validateQuotasSpy, mockRequest, bucketMD, mockRequest.accountQuotas, @@ -587,7 +627,7 @@ describe('routeBackbeat', () => { -100, false, log, - sinon.match.any, + sinon.match.any ); assert.strictEqual(mockResponse.statusCode, 200); @@ -599,7 +639,7 @@ describe('routeBackbeat', () => { mockRequest.url = '/_/backbeat/batchdelete'; routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert(!validateQuotasSpy.called); @@ -611,7 +651,7 @@ describe('routeBackbeat', () => { sandbox.stub(config, 'isQuotaEnabled').returns(false); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert(!validateQuotasSpy.called); @@ -621,14 +661,16 @@ describe('routeBackbeat', () => { it('should skip quota updates when content length is 0', async () => { sandbox.stub(config, 'isQuotaEnabled').returns(true); - mockRequest = prepareBatchDeleteRequest([{ - key: 'key0', - bucket: 'bucket0', - size: 0, - }]); + mockRequest = prepareBatchDeleteRequest([ + { + key: 'key0', + bucket: 'bucket0', + size: 0, + }, + ]); routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + void (await endPromise); assert(!validateQuotasSpy.called); @@ -644,53 +686,58 @@ describe('routeBackbeat', () => { }; 
routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; - assert.strictEqual(mockResponse.statusCode, 200); - assert.deepStrictEqual(mockResponse.body, {}); - }); + void (await endPromise); + assert.strictEqual(mockResponse.statusCode, 200); + assert.deepStrictEqual(mockResponse.body, {}); + }); - it('should not batchDelete with conditions if "if-unmodified-since" header unset', async () => { - mockRequest.headers = { - 'x-scal-versioning-required': 'true', - 'x-scal-storage-class': 'azurebackend', - }; + it('should not batchDelete with conditions if "if-unmodified-since" header unset', async () => { + mockRequest.headers = { + 'x-scal-versioning-required': 'true', + 'x-scal-storage-class': 'azurebackend', + }; - routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); + void (await endPromise); - assert.strictEqual(mockResponse.statusCode, 200); - }); + assert.strictEqual(mockResponse.statusCode, 200); + }); - it('should batchDelete with conditions and non-azure location', async () => { - const putRequest = prepareDummyRequest({ - 'x-scal-versioning-required': 'true', - }, JSON.stringify({ - Locations: [ + it('should batchDelete with conditions and non-azure location', async () => { + const putRequest = prepareDummyRequest( { - key: 'key0', + 'x-scal-versioning-required': 'true', + }, + JSON.stringify({ + Locations: [ + { + key: 'key0', + bucket: 'bucket0', + lastModified: '2020-01-01T00:00:00.000Z', + }, + ], + }) + ); + await promisify(dataWrapper.client.put)(putRequest, 91, 1, 'reqUids'); + + mockRequest = prepareBatchDeleteRequest([ + { + key: '1', bucket: 'bucket0', - lastModified: '2020-01-01T00:00:00.000Z', }, - ], - })); - await promisify(dataWrapper.client.put)(putRequest, 91, 1, 'reqUids'); - - mockRequest = prepareBatchDeleteRequest([{ - key: '1', - bucket: 'bucket0', - }]); - mockRequest.headers = { - 'if-unmodified-since': '2000-01-01T00:00:00.000Z', - 'x-scal-versioning-required': 'true', - 'x-scal-storage-class': 'gcpbackend', - 'x-scal-tags': JSON.stringify({ key: 'value' }), - }; + ]); + mockRequest.headers = { + 'if-unmodified-since': '2000-01-01T00:00:00.000Z', + 'x-scal-versioning-required': 'true', + 'x-scal-storage-class': 'gcpbackend', + 'x-scal-tags': JSON.stringify({ key: 'value' }), + }; - routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); - void await endPromise; + routeBackbeat('127.0.0.1', mockRequest, mockResponse, log); + void (await endPromise); - assert.strictEqual(mockResponse.statusCode, 200); - assert.deepStrictEqual(mockResponse.body, null); - }); + assert.strictEqual(mockResponse.statusCode, 200); + assert.deepStrictEqual(mockResponse.body, null); + }); }); }); diff --git a/tests/unit/server.js b/tests/unit/server.js index e0d8604e30..197b457698 100644 --- a/tests/unit/server.js +++ b/tests/unit/server.js @@ -26,7 +26,7 @@ describe('S3Server', () => { internalPort: undefined, internalListenOn: [], metricsListenOn: [], - metricsPort: 8002 + metricsPort: 8002, }; server = new S3Server(config); @@ -38,14 +38,15 @@ describe('S3Server', () => { sinon.restore(); }); - const waitReady = () => new Promise(resolve => { - const interval = setInterval(() => { - if (server.started) { - clearInterval(interval); - resolve(); - } - }, 100); - }); + const waitReady = () => + new Promise(resolve => { + const interval = setInterval(() => { + if (server.started) { + clearInterval(interval); + resolve(); + } + }, 100); + }); 
describe('initiateStartup', () => { beforeEach(() => { @@ -56,12 +57,13 @@ describe('S3Server', () => { // `sinon` matcher to match when the callback argument actually invokes the expected // function - const wrapperFor = expected => sinon.match(actual => { - const req = uuid.v4(); - const res = uuid.v4(); - actual(req, res); - return expected.calledWith(req, res); - }); + const wrapperFor = expected => + sinon.match(actual => { + const req = uuid.v4(); + const res = uuid.v4(); + actual(req, res); + return expected.calledWith(req, res); + }); it('should start API server with default port if no listenOn is provided', async () => { config.port = 8000; @@ -73,13 +75,12 @@ describe('S3Server', () => { assert.strictEqual(startServerStub.callCount, 2); assert(startServerStub.calledWith(wrapperFor(server.routeRequest), 8000)); assert(startServerStub.calledWith(wrapperFor(server.routeAdminRequest))); - }); - + it('should start API servers from listenOn array', async () => { config.listenOn = [ { port: 8000, ip: '127.0.0.1' }, - { port: 8001, ip: '0.0.0.0' } + { port: 8001, ip: '0.0.0.0' }, ]; config.port = 9999; // Should be ignored since listenOn is provided @@ -93,7 +94,7 @@ describe('S3Server', () => { assert(startServerStub.calledWith(wrapperFor(server.routeAdminRequest))); assert.strictEqual(startServerStub.neverCalledWith(sinon.any, 9999), true); }); - + it('should start internal API server with internalPort if no internalListenOn is provided', async () => { config.internalPort = 9000; @@ -104,11 +105,11 @@ describe('S3Server', () => { assert.strictEqual(startServerStub.callCount, 2); assert(startServerStub.calledWith(wrapperFor(server.internalRouteRequest), 9000)); }); - + it('should start internal API servers from internalListenOn array', async () => { config.internalListenOn = [ { port: 9000, ip: '127.0.0.1' }, - { port: 9001, ip: '0.0.0.0' } + { port: 9001, ip: '0.0.0.0' }, ]; config.internalPort = 9999; // Should be ignored since internalListenOn is provided @@ -122,29 +123,29 @@ describe('S3Server', () => { assert(startServerStub.calledWith(wrapperFor(server.routeAdminRequest))); assert.strictEqual(startServerStub.neverCalledWith(sinon.any, 9999), true); }); - + it('should start metrics server with metricsPort if no metricsListenOn is provided', async () => { config.metricsPort = 8012; server.initiateStartup(log); await waitReady(); - + assert.strictEqual(startServerStub.callCount, 1); assert(startServerStub.calledWith(wrapperFor(server.routeAdminRequest), 8012)); }); - + it('should start metrics servers from metricsListenOn array', async () => { config.metricsListenOn = [ { port: 8002, ip: '127.0.0.1' }, - { port: 8003, ip: '0.0.0.0' } + { port: 8003, ip: '0.0.0.0' }, ]; config.metricsPort = 9999; // Should be ignored since metricsListenOn is provided server.initiateStartup(log); await waitReady(); - + assert.strictEqual(startServerStub.callCount, 2); assert(startServerStub.calledWith(wrapperFor(server.routeAdminRequest), 8002, '127.0.0.1')); assert(startServerStub.calledWith(wrapperFor(server.routeAdminRequest), 8003, '0.0.0.0')); @@ -169,10 +170,10 @@ describe('S3Server', () => { describe('internalRouteRequest', () => { const resp = { - on: () => { }, - setHeader: () => { }, - writeHead: () => { }, - end: () => { }, + on: () => {}, + setHeader: () => {}, + writeHead: () => {}, + end: () => {}, }; let req; @@ -181,7 +182,7 @@ describe('S3Server', () => { req = { headers: {}, socket: { - setNoDelay: () => { }, + setNoDelay: () => {}, }, url: 'http://localhost:8000', }; @@ -219,7 
+220,7 @@ describe('S3Server request timeout', () => { beforeEach(() => { sandbox = sinon.createSandbox(); - + // Create a mock server to capture the requestTimeout setting mockServer = { requestTimeout: null, @@ -227,7 +228,7 @@ describe('S3Server request timeout', () => { listen: sandbox.stub(), address: sandbox.stub().returns({ address: '127.0.0.1', port: 8000 }), }; - + // Mock server creation to return our mock sandbox.stub(http, 'createServer').returns(mockServer); sandbox.stub(https, 'createServer').returns(mockServer); @@ -240,12 +241,12 @@ describe('S3Server request timeout', () => { it('should set server.requestTimeout to 0 when starting server', () => { const server = new S3Server({ ...defaultConfig, - https: false + https: false, }); - + // Call _startServer which should set requestTimeout = 0 server._startServer(() => {}, 8000, '127.0.0.1'); - + // Verify that requestTimeout was set to 0 assert.strictEqual(mockServer.requestTimeout, 0); }); diff --git a/tests/unit/testConfigs/allOptsConfig/config.json b/tests/unit/testConfigs/allOptsConfig/config.json index 2c453c9889..6b313735a1 100644 --- a/tests/unit/testConfigs/allOptsConfig/config.json +++ b/tests/unit/testConfigs/allOptsConfig/config.json @@ -10,29 +10,34 @@ "127.0.0.2": "us-east-1", "s3.amazonaws.com": "us-east-1" }, - "websiteEndpoints": ["s3-website-us-east-1.amazonaws.com", - "s3-website.us-east-2.amazonaws.com", - "s3-website-us-west-1.amazonaws.com", - "s3-website-us-west-2.amazonaws.com", - "s3-website.ap-south-1.amazonaws.com", - "s3-website.ap-northeast-2.amazonaws.com", - "s3-website-ap-southeast-1.amazonaws.com", - "s3-website-ap-southeast-2.amazonaws.com", - "s3-website-ap-northeast-1.amazonaws.com", - "s3-website.eu-central-1.amazonaws.com", - "s3-website-eu-west-1.amazonaws.com", - "s3-website-sa-east-1.amazonaws.com", - "s3-website.localhost", - "s3-website.scality.test", - "zenkoazuretest.blob.core.windows.net"], - "replicationEndpoints": [{ - "site": "zenko", - "servers": ["127.0.0.1:8000"], - "default": true - }, { - "site": "us-east-2", - "type": "aws_s3" - }], + "websiteEndpoints": [ + "s3-website-us-east-1.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ap-northeast-2.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + "s3-website.localhost", + "s3-website.scality.test", + "zenkoazuretest.blob.core.windows.net" + ], + "replicationEndpoints": [ + { + "site": "zenko", + "servers": ["127.0.0.1:8000"], + "default": true + }, + { + "site": "us-east-2", + "type": "aws_s3" + } + ], "cdmi": { "host": "localhost", "port": 81, @@ -75,11 +80,11 @@ "recordLogName": "s3-recordlog" }, "mongodb": { - "replicaSetHosts": "localhost:27017,localhost:27018,localhost:27019", - "writeConcern": "majority", - "replicaSet": "rs0", - "readPreference": "primary", - "database": "metadata" + "replicaSetHosts": "localhost:27017,localhost:27018,localhost:27019", + "writeConcern": "majority", + "replicaSet": "rs0", + "readPreference": "primary", + "database": "metadata" }, "certFilePaths": { "key": "tests/unit/testConfigs/allOptsConfig/key.txt", diff --git a/tests/unit/testConfigs/bucketNotifConfigTest.js b/tests/unit/testConfigs/bucketNotifConfigTest.js index 
9feded3dac..38118dee44 100644 --- a/tests/unit/testConfigs/bucketNotifConfigTest.js +++ b/tests/unit/testConfigs/bucketNotifConfigTest.js @@ -3,13 +3,15 @@ const { bucketNotifAssert } = require('../../../lib/Config'); describe('bucketNotifAssert', () => { it('should not throw an error if bucket notification config is valid', () => { - bucketNotifAssert([{ - resource: 'target1', - type: 'kafka', - host: 'localhost', - port: 8000, - auth: { user: 'user', password: 'password' }, - }]); + bucketNotifAssert([ + { + resource: 'target1', + type: 'kafka', + host: 'localhost', + port: 8000, + auth: { user: 'user', password: 'password' }, + }, + ]); }); it('should throw an error if bucket notification config is not an array', () => { assert.throws(() => { @@ -20,51 +22,58 @@ describe('bucketNotifAssert', () => { port: 8000, auth: { user: 'user', password: 'password' }, }); - }, - '/bad config: bucket notification configuration must be an array/'); + }, '/bad config: bucket notification configuration must be an array/'); }); it('should throw an error if resource is not a string', () => { assert.throws(() => { - bucketNotifAssert([{ - resource: 12345, - type: 'kafka', - host: 'localhost', - port: 8000, - auth: { user: 'user', password: 'password' }, - }]); + bucketNotifAssert([ + { + resource: 12345, + type: 'kafka', + host: 'localhost', + port: 8000, + auth: { user: 'user', password: 'password' }, + }, + ]); }, '/bad config: bucket notification configuration resource must be a string/'); }); it('should throw an error if type is not a string', () => { assert.throws(() => { - bucketNotifAssert([{ - resource: 'target1', - type: 12345, - host: 'localhost', - port: 8000, - auth: { user: 'user', password: 'password' }, - }]); + bucketNotifAssert([ + { + resource: 'target1', + type: 12345, + host: 'localhost', + port: 8000, + auth: { user: 'user', password: 'password' }, + }, + ]); }, '/bad config: bucket notification configuration type must be a string/'); }); it('should throw an error if host is not a string', () => { assert.throws(() => { - bucketNotifAssert([{ - resource: 'target1', - type: 'kafka', - host: 127.0, - port: 8000, - auth: { user: 'user', password: 'password' }, - }]); + bucketNotifAssert([ + { + resource: 'target1', + type: 'kafka', + host: 127.0, + port: 8000, + auth: { user: 'user', password: 'password' }, + }, + ]); }, '/bad config: bucket notification configuration type must be a string/'); }); it('should throw an error if port is not an integer', () => { assert.throws(() => { - bucketNotifAssert([{ - resource: 'target1', - type: 'kafka', - host: 'localhost', - port: '8000', - auth: { user: 'user', password: 'password' }, - }]); + bucketNotifAssert([ + { + resource: 'target1', + type: 'kafka', + host: 'localhost', + port: '8000', + auth: { user: 'user', password: 'password' }, + }, + ]); }, '/bad config: port must be a positive integer/'); }); // TODO: currently auth is fluid and once a concrete structure is diff --git a/tests/unit/testConfigs/configTest.js b/tests/unit/testConfigs/configTest.js index 881b5f9e1f..bffc832073 100644 --- a/tests/unit/testConfigs/configTest.js +++ b/tests/unit/testConfigs/configTest.js @@ -6,144 +6,185 @@ const { config } = require('../../../lib/Config'); const userBucketOwner = 'Bart'; const creationDate = new Date().toJSON(); -const serverSideEncryption = { cryptoScheme: 123, algorithm: 'algo', -masterKeyId: 'masterKeyId', mandatory: false }; -const bucketOne = new BucketInfo('bucketone', - userBucketOwner, userBucketOwner, creationDate, - 
BucketInfo.currentModelVersion()); -const bucketTwo = new BucketInfo('buckettwo', - userBucketOwner, userBucketOwner, creationDate, - BucketInfo.currentModelVersion()); -const bucketOnetWithEncryption = new BucketInfo('bucketone', - userBucketOwner, userBucketOwner, creationDate, - BucketInfo.currentModelVersion(), undefined, undefined, undefined, - serverSideEncryption); -const bucketTwoWithEncryption = new BucketInfo('buckettwo', - userBucketOwner, userBucketOwner, creationDate, - BucketInfo.currentModelVersion(), undefined, undefined, undefined, - serverSideEncryption); +const serverSideEncryption = { cryptoScheme: 123, algorithm: 'algo', masterKeyId: 'masterKeyId', mandatory: false }; +const bucketOne = new BucketInfo( + 'bucketone', + userBucketOwner, + userBucketOwner, + creationDate, + BucketInfo.currentModelVersion() +); +const bucketTwo = new BucketInfo( + 'buckettwo', + userBucketOwner, + userBucketOwner, + creationDate, + BucketInfo.currentModelVersion() +); +const bucketOnetWithEncryption = new BucketInfo( + 'bucketone', + userBucketOwner, + userBucketOwner, + creationDate, + BucketInfo.currentModelVersion(), + undefined, + undefined, + undefined, + serverSideEncryption +); +const bucketTwoWithEncryption = new BucketInfo( + 'buckettwo', + userBucketOwner, + userBucketOwner, + creationDate, + BucketInfo.currentModelVersion(), + undefined, + undefined, + undefined, + serverSideEncryption +); const results = [ - { sourceLocationConstraintName: 'azurebackend', - destLocationConstraintName: 'azurebackend', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: true, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'azurebackend2', - destLocationConstraintName: 'azurebackend2', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: true, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'awsbackend', - destLocationConstraintName: 'awsbackend', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: true, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'awsbackend', - destLocationConstraintName: 'awsbackend2', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: true, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'awsbackend2', - destLocationConstraintName: 'awsbackend2', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: true, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'scality-internal-mem', - destLocationConstraintName: 'scality-internal-mem', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: false, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'scality-internal-mem', - destLocationConstraintName: 'azurebackend', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: false, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'azurebackend', - destLocationConstraintName: 'scality-internal-mem', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: false, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'awsbackend', - destLocationConstraintName: 'scality-internal-mem', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: false, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'scality-internal-mem', - destLocationConstraintName: 'awsbackend', - 
sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: false, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'azurebackend', - destLocationConstraintName: 'awsbackend', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: false, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'azurebackend', - destLocationConstraintName: 'azurebackend2', - sourceBucketMD: bucketOne, - destBucketMD: bucketOne, - boolExpected: false, - description: 'same bucket metadata', - }, - { sourceLocationConstraintName: 'azurebackend', - destLocationConstraintName: 'azurebackend', - sourceBucketMD: bucketOne, - destBucketMD: bucketTwo, - boolExpected: true, - description: 'different non-encrypted bucket metadata', - }, - { sourceLocationConstraintName: 'azurebackend', - destLocationConstraintName: 'azurebackend', - sourceBucketMD: bucketOnetWithEncryption, - destBucketMD: bucketOnetWithEncryption, - boolExpected: true, - description: 'same encrypted bucket metadata', - }, - { sourceLocationConstraintName: 'azurebackend', - destLocationConstraintName: 'azurebackend', - sourceBucketMD: bucketOnetWithEncryption, - destBucketMD: bucketTwoWithEncryption, - boolExpected: false, - description: 'different encrypted bucket metadata', - }, + { + sourceLocationConstraintName: 'azurebackend', + destLocationConstraintName: 'azurebackend', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: true, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'azurebackend2', + destLocationConstraintName: 'azurebackend2', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: true, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'awsbackend', + destLocationConstraintName: 'awsbackend', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: true, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'awsbackend', + destLocationConstraintName: 'awsbackend2', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: true, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'awsbackend2', + destLocationConstraintName: 'awsbackend2', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: true, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'scality-internal-mem', + destLocationConstraintName: 'scality-internal-mem', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: false, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'scality-internal-mem', + destLocationConstraintName: 'azurebackend', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: false, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'azurebackend', + destLocationConstraintName: 'scality-internal-mem', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: false, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'awsbackend', + destLocationConstraintName: 'scality-internal-mem', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: false, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'scality-internal-mem', + destLocationConstraintName: 'awsbackend', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: false, + description: 'same bucket metadata', + 
}, + { + sourceLocationConstraintName: 'azurebackend', + destLocationConstraintName: 'awsbackend', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: false, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'azurebackend', + destLocationConstraintName: 'azurebackend2', + sourceBucketMD: bucketOne, + destBucketMD: bucketOne, + boolExpected: false, + description: 'same bucket metadata', + }, + { + sourceLocationConstraintName: 'azurebackend', + destLocationConstraintName: 'azurebackend', + sourceBucketMD: bucketOne, + destBucketMD: bucketTwo, + boolExpected: true, + description: 'different non-encrypted bucket metadata', + }, + { + sourceLocationConstraintName: 'azurebackend', + destLocationConstraintName: 'azurebackend', + sourceBucketMD: bucketOnetWithEncryption, + destBucketMD: bucketOnetWithEncryption, + boolExpected: true, + description: 'same encrypted bucket metadata', + }, + { + sourceLocationConstraintName: 'azurebackend', + destLocationConstraintName: 'azurebackend', + sourceBucketMD: bucketOnetWithEncryption, + destBucketMD: bucketTwoWithEncryption, + boolExpected: false, + description: 'different encrypted bucket metadata', + }, ]; describe('Testing Config.js function: ', () => { results.forEach(result => { - it(`should return ${result.boolExpected} if source location ` + - `constraint === ${result.sourceLocationConstraintName} ` + - 'and destination location constraint ===' + - ` ${result.destLocationConstraintName} and ${result.description}`, - done => { - const isCopy = utils.externalBackendCopy(config, - result.sourceLocationConstraintName, - result.destLocationConstraintName, result.sourceBucketMD, - result.destBucketMD); - assert.strictEqual(isCopy, result.boolExpected); - done(); - }); + it( + `should return ${result.boolExpected} if source location ` + + `constraint === ${result.sourceLocationConstraintName} ` + + 'and destination location constraint ===' + + ` ${result.destLocationConstraintName} and ${result.description}`, + done => { + const isCopy = utils.externalBackendCopy( + config, + result.sourceLocationConstraintName, + result.destLocationConstraintName, + result.sourceBucketMD, + result.destBucketMD + ); + assert.strictEqual(isCopy, result.boolExpected); + done(); + } + ); }); }); diff --git a/tests/unit/testConfigs/locConstraintAssert.js b/tests/unit/testConfigs/locConstraintAssert.js index a82dc0ca15..4ba8d60b6c 100644 --- a/tests/unit/testConfigs/locConstraintAssert.js +++ b/tests/unit/testConfigs/locConstraintAssert.js @@ -7,23 +7,30 @@ class LocationConstraint { this.objectId = objectId; this.legacyAwsBehavior = legacyAwsBehavior || false; this.sizeLimitGB = sizeLimit || undefined; - this.details = Object.assign({}, { - awsEndpoint: 's3.amazonaws.com', - bucketName: 'tester', - credentialsProfile: 'default', - region: 'us-west-1', - }, details || {}); + this.details = Object.assign( + {}, + { + awsEndpoint: 's3.amazonaws.com', + bucketName: 'tester', + credentialsProfile: 'default', + region: 'us-west-1', + }, + details || {} + ); } } function getAzureDetails(replaceParams) { - return Object.assign({ - azureStorageEndpoint: 'https://fakeaccountname.blob.core.fake.net/', - azureStorageAccountName: 'fakeaccountname', - azureStorageAccessKey: 'Fake00Key123', - bucketMatch: false, - azureContainerName: 'test', - }, replaceParams); + return Object.assign( + { + azureStorageEndpoint: 'https://fakeaccountname.blob.core.fake.net/', + azureStorageAccountName: 'fakeaccountname', + azureStorageAccessKey: 
'Fake00Key123', + bucketMatch: false, + azureContainerName: 'test', + }, + replaceParams + ); } // FIXME: most of tests using a line-wrapped regexp are broken, @@ -35,331 +42,301 @@ describe('locationConstraintAssert', () => { it('should throw error if locationConstraints is not an object', () => { assert.throws(() => { locationConstraintAssert(''); - }, - /bad config: locationConstraints must be an object/); + }, /bad config: locationConstraints must be an object/); }); it('should throw error if any location constraint is not an object', () => { - assert.throws(() => { - locationConstraintAssert({ notObject: '' }); - }, - err => { - assert.strictEqual(err.message, 'bad config: ' + - 'locationConstraints[region] must be an object'); - return true; - }); + assert.throws( + () => { + locationConstraintAssert({ notObject: '' }); + }, + err => { + assert.strictEqual(err.message, 'bad config: ' + 'locationConstraints[region] must be an object'); + return true; + } + ); }); it('should throw error if type is not a string', () => { const locationConstraint = new LocationConstraint(42, 'locId'); - assert.throws(() => { - locationConstraintAssert({ 'scality-east': locationConstraint }); - }, - /bad config: locationConstraints[region].type is mandatory/ + - /and must be a string/); + assert.throws( + () => { + locationConstraintAssert({ 'scality-east': locationConstraint }); + }, + /bad config: locationConstraints[region].type is mandatory/ + /and must be a string/ + ); }); it('should throw error if type is not mem/file/scality/dmf', () => { - const locationConstraint = new LocationConstraint( - 'notSupportedType', 'locId'); - assert.throws(() => { - locationConstraintAssert({ 'scality-east': locationConstraint }); - }, - /bad config: locationConstraints[region].type must be/ + - /one of mem,file,scality,tlp/); + const locationConstraint = new LocationConstraint('notSupportedType', 'locId'); + assert.throws( + () => { + locationConstraintAssert({ 'scality-east': locationConstraint }); + }, + /bad config: locationConstraints[region].type must be/ + /one of mem,file,scality,tlp/ + ); }); it('should throw error if legacyAwsBehavior is not a boolean', () => { - const locationConstraint = new LocationConstraint( - 'scality', 'locId', 42); - assert.throws(() => { - locationConstraintAssert({ 'scality-east': locationConstraint }); - }, - /bad config: locationConstraints[region].legacyAwsBehavior / + - /is mandatory and must be a boolean/); + const locationConstraint = new LocationConstraint('scality', 'locId', 42); + assert.throws( + () => { + locationConstraintAssert({ 'scality-east': locationConstraint }); + }, + /bad config: locationConstraints[region].legacyAwsBehavior / + /is mandatory and must be a boolean/ + ); }); it('should throw error if details is not an object', () => { - const locationConstraint = - new LocationConstraint('scality', 'locId', false, 42); - assert.throws(() => { - locationConstraintAssert({ 'scality-east': locationConstraint }); - }, - /bad config: locationConstraints[region].details is / + - /mandatory and must be an object/); + const locationConstraint = new LocationConstraint('scality', 'locId', false, 42); + assert.throws( + () => { + locationConstraintAssert({ 'scality-east': locationConstraint }); + }, + /bad config: locationConstraints[region].details is / + /mandatory and must be an object/ + ); }); it('should throw error if awsEndpoint is not a string', () => { - const locationConstraint = new LocationConstraint( - 'scality', 'locId', false, - { - awsEndpoint: 42, - }); 
+ const locationConstraint = new LocationConstraint('scality', 'locId', false, { + awsEndpoint: 42, + }); assert.throws(() => { locationConstraintAssert({ 'scality-east': locationConstraint }); - }, - /bad config: awsEndpoint must be a string/); + }, /bad config: awsEndpoint must be a string/); }); it('should throw error if bucketName is not a string', () => { - const locationConstraint = new LocationConstraint( - 'scality', 'locId', false, - { - awsEndpoint: 's3.amazonaws.com', - bucketName: 42, - }); + const locationConstraint = new LocationConstraint('scality', 'locId', false, { + awsEndpoint: 's3.amazonaws.com', + bucketName: 42, + }); assert.throws(() => { locationConstraintAssert({ 'scality-east': locationConstraint }); - }, - /bad config: bucketName must be a string/); + }, /bad config: bucketName must be a string/); }); it('should throw error if credentialsProfile is not a string', () => { - const locationConstraint = new LocationConstraint( - 'scality', 'locId', false, - { - awsEndpoint: 's3.amazonaws.com', - bucketName: 'premadebucket', - credentialsProfile: 42, - }); + const locationConstraint = new LocationConstraint('scality', 'locId', false, { + awsEndpoint: 's3.amazonaws.com', + bucketName: 'premadebucket', + credentialsProfile: 42, + }); assert.throws(() => { locationConstraintAssert({ 'scality-east': locationConstraint }); - }, - /bad config: credentialsProfile must be a string/); + }, /bad config: credentialsProfile must be a string/); }); it('should throw error if region is not a string', () => { - const locationConstraint = new LocationConstraint( - 'scality', 'locId', false, - { - awsEndpoint: 's3.amazonaws.com', - bucketName: 'premadebucket', - credentialsProfile: 'zenko', - region: 42, - }); + const locationConstraint = new LocationConstraint('scality', 'locId', false, { + awsEndpoint: 's3.amazonaws.com', + bucketName: 'premadebucket', + credentialsProfile: 'zenko', + region: 42, + }); assert.throws(() => { locationConstraintAssert({ 'scality-east': locationConstraint }); - }, - /bad config: region must be a string/); + }, /bad config: region must be a string/); }); it('should throw error if us-east-1 not specified', () => { const locationConstraint = new LocationConstraint(); assert.throws(() => { locationConstraintAssert({ 'not-us-east-1': locationConstraint }); - }, - '/bad locationConfig: must ' + - 'include us-east-1 as a locationConstraint/'); + }, '/bad locationConfig: must ' + 'include us-east-1 as a locationConstraint/'); }); it('should not throw error for a valid azure location constraint', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); - const locationConstraint = new LocationConstraint( - 'azure', 'locId2', true, - getAzureDetails()); + const locationConstraint = new LocationConstraint('azure', 'locId2', true, getAzureDetails()); assert.doesNotThrow(() => { - locationConstraintAssert({ 'azurefaketest': locationConstraint, - 'us-east-1': usEast1 }); - }, - '/should not throw for a valid azure location constraint/'); + locationConstraintAssert({ azurefaketest: locationConstraint, 'us-east-1': usEast1 }); + }, '/should not throw for a valid azure location constraint/'); }); - it('should throw error if type is azure and azureContainerName is ' + - 'not specified', () => { + it('should throw error if type is azure and azureContainerName is ' + 'not specified', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); const locationConstraint = new LocationConstraint( - 'azure', 'locId2', true, - getAzureDetails({ 
azureContainerName: undefined })); + 'azure', + 'locId2', + true, + getAzureDetails({ azureContainerName: undefined }) + ); assert.throws(() => { locationConstraintAssert({ 'us-east-1': usEast1, - 'azurefaketest': locationConstraint, + azurefaketest: locationConstraint, }); - }, - '/bad location constraint: ' + - '"azurefaketest" azureContainerName must be defined/'); + }, '/bad location constraint: ' + '"azurefaketest" azureContainerName must be defined/'); }); - it('should throw error if type is azure and azureContainerName is ' + - 'invalid value', () => { + it('should throw error if type is azure and azureContainerName is ' + 'invalid value', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); const locationConstraint = new LocationConstraint( - 'azure', 'locId2', true, - getAzureDetails({ azureContainerName: '.' })); + 'azure', + 'locId2', + true, + getAzureDetails({ azureContainerName: '.' }) + ); assert.throws(() => { locationConstraintAssert({ 'us-east-1': usEast1, - 'azurefaketest': locationConstraint, + azurefaketest: locationConstraint, }); - }, - '/bad location constraint: "azurefaketest" ' + - 'azureContainerName is an invalid container name/'); + }, '/bad location constraint: "azurefaketest" ' + 'azureContainerName is an invalid container name/'); }); - it('should throw error if type is azure and azureStorageEndpoint ' + - 'is not specified', () => { + it('should throw error if type is azure and azureStorageEndpoint ' + 'is not specified', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); const locationConstraint = new LocationConstraint( - 'azure', 'locId2', true, - getAzureDetails({ azureStorageEndpoint: undefined })); - assert.throws(() => { - locationConstraintAssert({ - 'us-east-1': usEast1, - 'azurefaketest': locationConstraint, - }); - }, - '/bad location constraint: "azurefaketest" ' + - 'azureStorageEndpoint must be set in locationConfig ' + - 'or environment variable/'); + 'azure', + 'locId2', + true, + getAzureDetails({ azureStorageEndpoint: undefined }) + ); + assert.throws( + () => { + locationConstraintAssert({ + 'us-east-1': usEast1, + azurefaketest: locationConstraint, + }); + }, + '/bad location constraint: "azurefaketest" ' + + 'azureStorageEndpoint must be set in locationConfig ' + + 'or environment variable/' + ); }); - it('should throw error if type is azure and azureStorageAccountName ' + - 'is not specified', () => { + it('should throw error if type is azure and azureStorageAccountName ' + 'is not specified', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); const locationConstraint = new LocationConstraint( - 'azure', 'locId2', true, - getAzureDetails({ azureStorageAccountName: undefined })); - assert.throws(() => { - locationConstraintAssert({ - 'us-east-1': usEast1, - 'azurefaketest': locationConstraint, - }); - }, - '/bad location constraint: "azurefaketest" ' + - 'azureStorageAccountName must be set in locationConfig ' + - 'or environment variable/'); + 'azure', + 'locId2', + true, + getAzureDetails({ azureStorageAccountName: undefined }) + ); + assert.throws( + () => { + locationConstraintAssert({ + 'us-east-1': usEast1, + azurefaketest: locationConstraint, + }); + }, + '/bad location constraint: "azurefaketest" ' + + 'azureStorageAccountName must be set in locationConfig ' + + 'or environment variable/' + ); }); - it('should throw error if type is azure and azureStorageAccountName ' + - 'is invalid value', () => { + it('should throw error if type is azure and azureStorageAccountName ' + 'is 
invalid value', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); const locationConstraint = new LocationConstraint( - 'azure', 'locId2', true, - getAzureDetails({ azureStorageAccountName: 'invalid!!!' })); + 'azure', + 'locId2', + true, + getAzureDetails({ azureStorageAccountName: 'invalid!!!' }) + ); assert.throws(() => { locationConstraintAssert({ 'us-east-1': usEast1, - 'azurefaketest': locationConstraint, + azurefaketest: locationConstraint, }); - }, - '/bad location constraint: "azurefaketest" ' + - 'azureStorageAccountName "invalid!!!" is an invalid value/'); + }, '/bad location constraint: "azurefaketest" ' + 'azureStorageAccountName "invalid!!!" is an invalid value/'); }); - it('should throw error if type is azure and azureStorageAccessKey ' + - 'is not specified', () => { + it('should throw error if type is azure and azureStorageAccessKey ' + 'is not specified', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); const locationConstraint = new LocationConstraint( - 'azure', 'locId2', true, - getAzureDetails({ azureStorageAccessKey: undefined })); - assert.throws(() => { - locationConstraintAssert({ - 'us-east-1': usEast1, - 'azurefaketest': locationConstraint, - }); - }, - '/bad location constraint: "azurefaketest" ' + - 'azureStorageAccessKey must be set in locationConfig ' + - 'or environment variable/'); + 'azure', + 'locId2', + true, + getAzureDetails({ azureStorageAccessKey: undefined }) + ); + assert.throws( + () => { + locationConstraintAssert({ + 'us-east-1': usEast1, + azurefaketest: locationConstraint, + }); + }, + '/bad location constraint: "azurefaketest" ' + + 'azureStorageAccessKey must be set in locationConfig ' + + 'or environment variable/' + ); }); - it('should throw error if type is azure and azureStorageAccessKey ' + - 'is not a valid base64 string', () => { + it('should throw error if type is azure and azureStorageAccessKey ' + 'is not a valid base64 string', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); const locationConstraint = new LocationConstraint( - 'azure', 'locId2', true, - getAzureDetails({ azureStorageAccessKey: 'invalid!!!' })); + 'azure', + 'locId2', + true, + getAzureDetails({ azureStorageAccessKey: 'invalid!!!' 
}) + ); assert.throws(() => { locationConstraintAssert({ 'us-east-1': usEast1, - 'azurefaketest': locationConstraint, + azurefaketest: locationConstraint, }); - }, - '/bad location constraint: "azurefaketest" ' + - 'azureStorageAccessKey is not a valid base64 string/'); + }, '/bad location constraint: "azurefaketest" ' + 'azureStorageAccessKey is not a valid base64 string/'); }); it('should set https to true by default', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); - const locationConstraint = new LocationConstraint( - 'aws_s3', 'locId2', true); + const locationConstraint = new LocationConstraint('aws_s3', 'locId2', true); assert.doesNotThrow(() => { locationConstraintAssert({ 'us-east-1': usEast1, - 'awshttpsDefault': locationConstraint, + awshttpsDefault: locationConstraint, }); - }, '/bad location constraint awshttpsDefault,' + - 'incorrect default config for https'); - assert.strictEqual(locationConstraint.details.https, true, - 'https config should be true'); + }, '/bad location constraint awshttpsDefault,' + 'incorrect default config for https'); + assert.strictEqual(locationConstraint.details.https, true, 'https config should be true'); }); it('should override default if https is set to false', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); - const locationConstraint = new LocationConstraint( - 'aws_s3', 'locId2', true, { - https: false, - }); + const locationConstraint = new LocationConstraint('aws_s3', 'locId2', true, { + https: false, + }); assert.doesNotThrow(() => { locationConstraintAssert({ 'us-east-1': usEast1, - 'awshttpsFalse': locationConstraint, + awshttpsFalse: locationConstraint, }); - }, '/bad location constraint awshttpsFalse,' + - 'incorrect config for https'); - assert.strictEqual(locationConstraint.details.https, false, - 'https config should be false'); + }, '/bad location constraint awshttpsFalse,' + 'incorrect config for https'); + assert.strictEqual(locationConstraint.details.https, false, 'https config should be false'); }); it('should set pathStyle config option to false by default', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); - const locationConstraint = new LocationConstraint( - 'aws_s3', 'locId2', true); + const locationConstraint = new LocationConstraint('aws_s3', 'locId2', true); assert.doesNotThrow(() => { locationConstraintAssert({ 'us-east-1': usEast1, - 'awsdefaultstyle': locationConstraint, + awsdefaultstyle: locationConstraint, }); }, '/bad location constraint, unable to set default config'); - assert.strictEqual(locationConstraint.details.pathStyle, false, - 'pathstyle config should be false'); + assert.strictEqual(locationConstraint.details.pathStyle, false, 'pathstyle config should be false'); }); it('should override default if pathStyle is set to true', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); - const locationConstraint = new LocationConstraint( - 'aws_s3', 'locId2', true, - { pathStyle: true }); + const locationConstraint = new LocationConstraint('aws_s3', 'locId2', true, { pathStyle: true }); assert.doesNotThrow(() => { locationConstraintAssert({ 'us-east-1': usEast1, - 'awspathstyle': locationConstraint, + awspathstyle: locationConstraint, }); }, '/bad location constraint, unable to set pathSytle config'); - assert.strictEqual(locationConstraint.details.pathStyle, true, - 'pathstyle config should be true'); + assert.strictEqual(locationConstraint.details.pathStyle, true, 'pathstyle config should be true'); }); it('should throw error if 
sizeLimitGB is not a number', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); - const locationConstraint = new LocationConstraint( - 'aws_s3', 'locId2', true, - null, true); + const locationConstraint = new LocationConstraint('aws_s3', 'locId2', true, null, true); assert.throws(() => { locationConstraintAssert({ 'us-east-1': usEast1, - 'awsstoragesizelimit': locationConstraint, + awsstoragesizelimit: locationConstraint, }); - }, - '/bad config: locationConstraints[region].sizeLimitGB ' + - 'must be a number (in gigabytes)'); + }, '/bad config: locationConstraints[region].sizeLimitGB ' + 'must be a number (in gigabytes)'); }); it('should throw error if objectId is not set', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); - const locationConstraint = new LocationConstraint( - 'azure', undefined, true, - getAzureDetails()); + const locationConstraint = new LocationConstraint('azure', undefined, true, getAzureDetails()); assert.throws(() => { - locationConstraintAssert({ 'azurefaketest': locationConstraint, - 'us-east-1': usEast1 }); - }, - '/bad config: locationConstraints[region].objectId is mandatory ' + - 'and must be a unique string across locations'); + locationConstraintAssert({ azurefaketest: locationConstraint, 'us-east-1': usEast1 }); + }, '/bad config: locationConstraints[region].objectId is mandatory ' + 'and must be a unique string across locations'); }); it('should throw error if objectId is duplicated', () => { const usEast1 = new LocationConstraint(undefined, 'locId1'); - const locationConstraint = new LocationConstraint( - 'azure', 'locId1', true, - getAzureDetails()); + const locationConstraint = new LocationConstraint('azure', 'locId1', true, getAzureDetails()); assert.throws(() => { - locationConstraintAssert({ 'azurefaketest': locationConstraint, - 'us-east-1': usEast1 }); - }, - '/bad config: location constraint objectId "locId1" is not unique ' + - 'across configured locations'); + locationConstraintAssert({ azurefaketest: locationConstraint, 'us-east-1': usEast1 }); + }, '/bad config: location constraint objectId "locId1" is not unique ' + 'across configured locations'); }); }); diff --git a/tests/unit/testConfigs/parseKmsAWS.js b/tests/unit/testConfigs/parseKmsAWS.js index c075375192..c466bbee77 100644 --- a/tests/unit/testConfigs/parseKmsAWS.js +++ b/tests/unit/testConfigs/parseKmsAWS.js @@ -338,6 +338,6 @@ describe('parseKmsAWS TLS section', () => { assert(readFileSyncStub.calledWith(path.join(basePath, caPath))); }); - assert(readFileSyncStub.callCount === (keyPaths.length + certPaths.length + caPaths.length)); + assert(readFileSyncStub.callCount === keyPaths.length + certPaths.length + caPaths.length); }); }); diff --git a/tests/unit/testConfigs/parseRedisConfig.spec.js b/tests/unit/testConfigs/parseRedisConfig.spec.js index f585bcec7a..2583cc266c 100644 --- a/tests/unit/testConfigs/parseRedisConfig.spec.js +++ b/tests/unit/testConfigs/parseRedisConfig.spec.js @@ -31,8 +31,7 @@ describe('parseRedisConfig', () => { input: { host: 'localhost', port: 6479, - retry: { - }, + retry: {}, }, }, { @@ -195,8 +194,7 @@ describe('parseRedisConfig', () => { host: 'localhost', port: 6479, retry: { - connectBackoff: { - }, + connectBackoff: {}, }, }, }, diff --git a/tests/unit/testConfigs/parseSproxydConfig.js b/tests/unit/testConfigs/parseSproxydConfig.js index ae22ba94c6..4f057a7771 100644 --- a/tests/unit/testConfigs/parseSproxydConfig.js +++ b/tests/unit/testConfigs/parseSproxydConfig.js @@ -19,16 +19,14 @@ function 
makeSproxydConf(bootstrap, chordCos, sproxydPath) { describe('parseSproxydConfig', () => { it('should return a parsed config if valid', () => { - const sproxydConf = parseSproxydConfig(makeSproxydConf( - ['localhost:8181'], null, '/arc')); + const sproxydConf = parseSproxydConfig(makeSproxydConf(['localhost:8181'], null, '/arc')); assert.deepStrictEqual(sproxydConf, { bootstrap: ['localhost:8181'], path: '/arc', }); }); it('should return a parsed config with chordCos if valid', () => { - const sproxydConf = parseSproxydConfig(makeSproxydConf( - ['localhost:8181'], '3', '/arc')); + const sproxydConf = parseSproxydConfig(makeSproxydConf(['localhost:8181'], '3', '/arc')); assert.deepStrictEqual(sproxydConf, { bootstrap: ['localhost:8181'], path: '/arc', @@ -40,8 +38,7 @@ describe('parseSproxydConfig', () => { parseSproxydConfig(makeSproxydConf('localhost:8181')); }); }); - it('should throw an error if bootstrap array does not contain strings', - () => { + it('should throw an error if bootstrap array does not contain strings', () => { assert.throws(() => { parseSproxydConfig(makeSproxydConf([8181])); }); diff --git a/tests/unit/testConfigs/requestsConfigTest.js b/tests/unit/testConfigs/requestsConfigTest.js index 1c91368af1..af59596d50 100644 --- a/tests/unit/testConfigs/requestsConfigTest.js +++ b/tests/unit/testConfigs/requestsConfigTest.js @@ -5,11 +5,9 @@ describe('requestsConfigAssert', () => { it('should not throw an error if there is no requests config', () => { assert.doesNotThrow(() => { requestsConfigAssert({}); - }, - 'should not throw an error if there is no requests config'); + }, 'should not throw an error if there is no requests config'); }); - it('should not throw an error if requests config via proxy is set to false', - () => { + it('should not throw an error if requests config via proxy is set to false', () => { assert.doesNotThrow(() => { requestsConfigAssert({ viaProxy: false, @@ -17,25 +15,24 @@ describe('requestsConfigAssert', () => { extractClientIPFromHeader: '', extractProtocolFromHeader: '', }); - }, - 'shouldnt throw an error if requests config via proxy is set to false'); + }, 'shouldnt throw an error if requests config via proxy is set to false'); }); - it('should not throw an error if requests config via proxy is true, ' + - 'trustedProxyCIDRs & extractClientIPFromHeader & ' + - 'extractProtocolFromHeader are set', () => { - assert.doesNotThrow(() => { - requestsConfigAssert({ - viaProxy: true, - trustedProxyCIDRs: ['123.123.123.123'], - extractClientIPFromHeader: 'x-forwarded-for', - extractProtocolFromHeader: 'x-forwarded-proto', - }); - }, - 'should not throw an error if requests config ' + - 'via proxy is set correctly'); - }); - it('should throw an error if requests.viaProxy is not a boolean', - () => { + it( + 'should not throw an error if requests config via proxy is true, ' + + 'trustedProxyCIDRs & extractClientIPFromHeader & ' + + 'extractProtocolFromHeader are set', + () => { + assert.doesNotThrow(() => { + requestsConfigAssert({ + viaProxy: true, + trustedProxyCIDRs: ['123.123.123.123'], + extractClientIPFromHeader: 'x-forwarded-for', + extractProtocolFromHeader: 'x-forwarded-proto', + }); + }, 'should not throw an error if requests config ' + 'via proxy is set correctly'); + } + ); + it('should throw an error if requests.viaProxy is not a boolean', () => { assert.throws(() => { requestsConfigAssert({ viaProxy: 1, @@ -43,92 +40,96 @@ describe('requestsConfigAssert', () => { extractClientIPFromHeader: 'x-forwarded-for', extractProtocolFromHeader: 
'x-forwarded-proto', }); - }, - '/config: invalid requests configuration. viaProxy must be a ' + - 'boolean/'); + }, '/config: invalid requests configuration. viaProxy must be a ' + 'boolean/'); }); - it('should throw an error if requests.trustedProxyCIDRs is not an array', - () => { - assert.throws(() => { - requestsConfigAssert({ - viaProxy: true, - trustedProxyCIDRs: 1, - extractClientIPFromHeader: 'x-forwarded-for', - extractProtocolFromHeader: 'x-forwarded-proto', - }); - }, - '/config: invalid requests configuration. ' + - 'trustedProxyCIDRs must be set if viaProxy is set to true ' + - 'and must be an array/'); + it('should throw an error if requests.trustedProxyCIDRs is not an array', () => { + assert.throws( + () => { + requestsConfigAssert({ + viaProxy: true, + trustedProxyCIDRs: 1, + extractClientIPFromHeader: 'x-forwarded-for', + extractProtocolFromHeader: 'x-forwarded-proto', + }); + }, + '/config: invalid requests configuration. ' + + 'trustedProxyCIDRs must be set if viaProxy is set to true ' + + 'and must be an array/' + ); }); - it('should throw an error if requests.trustedProxyCIDRs array is empty', - () => { - assert.throws(() => { - requestsConfigAssert({ - viaProxy: true, - trustedProxyCIDRs: [], - extractClientIPFromHeader: 'x-forwarded-for', - extractProtocolFromHeader: 'x-forwarded-proto', - }); - }, - '/config: invalid requests configuration. ' + - 'trustedProxyCIDRs must be set if viaProxy is set to true ' + - 'and must be an array/'); + it('should throw an error if requests.trustedProxyCIDRs array is empty', () => { + assert.throws( + () => { + requestsConfigAssert({ + viaProxy: true, + trustedProxyCIDRs: [], + extractClientIPFromHeader: 'x-forwarded-for', + extractProtocolFromHeader: 'x-forwarded-proto', + }); + }, + '/config: invalid requests configuration. ' + + 'trustedProxyCIDRs must be set if viaProxy is set to true ' + + 'and must be an array/' + ); }); - it('should throw an error if requests.extractClientIPFromHeader ' + - 'is not a string', () => { - assert.throws(() => { - requestsConfigAssert({ - viaProxy: true, - trustedProxyCIDRs: [], - extractClientIPFromHeader: 1, - extractProtocolFromHeader: 'x-forwarded-proto', - }); - }, - '/config: invalid requests configuration. ' + - 'extractClientIPFromHeader must be set if viaProxy is ' + - 'set to true and must be a string/'); + it('should throw an error if requests.extractClientIPFromHeader ' + 'is not a string', () => { + assert.throws( + () => { + requestsConfigAssert({ + viaProxy: true, + trustedProxyCIDRs: [], + extractClientIPFromHeader: 1, + extractProtocolFromHeader: 'x-forwarded-proto', + }); + }, + '/config: invalid requests configuration. ' + + 'extractClientIPFromHeader must be set if viaProxy is ' + + 'set to true and must be a string/' + ); }); - it('should throw an error if requests.extractProtocolFromHeader ' + - 'is not a string', () => { - assert.throws(() => { - requestsConfigAssert({ - viaProxy: true, - trustedProxyCIDRs: [], - extractClientIPFromHeader: 'x-forwarded-for', - extractProtocolFromHeader: 1, - }); - }, - '/config: invalid requests configuration. ' + - 'extractProtocolFromHeader must be set if viaProxy is ' + - 'set to true and must be a string/'); + it('should throw an error if requests.extractProtocolFromHeader ' + 'is not a string', () => { + assert.throws( + () => { + requestsConfigAssert({ + viaProxy: true, + trustedProxyCIDRs: [], + extractClientIPFromHeader: 'x-forwarded-for', + extractProtocolFromHeader: 1, + }); + }, + '/config: invalid requests configuration. 
' + + 'extractProtocolFromHeader must be set if viaProxy is ' + + 'set to true and must be a string/' + ); }); - it('should throw an error if requests.extractClientIPFromHeader ' + - 'is empty', () => { - assert.throws(() => { - requestsConfigAssert({ - viaProxy: true, - trustedProxyCIDRs: [], - extractClientIPFromHeader: '', - extractProtocolFromHeader: 'x-forwarded-proto', - }); - }, - '/config: invalid requests configuration. ' + - 'extractClientIPFromHeader must be set if viaProxy is ' + - 'set to true and must be a string/'); + it('should throw an error if requests.extractClientIPFromHeader ' + 'is empty', () => { + assert.throws( + () => { + requestsConfigAssert({ + viaProxy: true, + trustedProxyCIDRs: [], + extractClientIPFromHeader: '', + extractProtocolFromHeader: 'x-forwarded-proto', + }); + }, + '/config: invalid requests configuration. ' + + 'extractClientIPFromHeader must be set if viaProxy is ' + + 'set to true and must be a string/' + ); }); - it('should throw an error if requests.extractProtocolFromHeader ' + - 'is empty', () => { - assert.throws(() => { - requestsConfigAssert({ - viaProxy: true, - trustedProxyCIDRs: [], - extractClientIPFromHeader: 'x-forwarded-for', - extractProtocolFromHeader: '', - }); - }, - '/config: invalid requests configuration. ' + - 'extractProtocolFromHeader must be set if viaProxy is ' + - 'set to true and must be a string/'); + it('should throw an error if requests.extractProtocolFromHeader ' + 'is empty', () => { + assert.throws( + () => { + requestsConfigAssert({ + viaProxy: true, + trustedProxyCIDRs: [], + extractClientIPFromHeader: 'x-forwarded-for', + extractProtocolFromHeader: '', + }); + }, + '/config: invalid requests configuration. ' + + 'extractProtocolFromHeader must be set if viaProxy is ' + + 'set to true and must be a string/' + ); }); }); diff --git a/tests/unit/utils/aclUtils.js b/tests/unit/utils/aclUtils.js index 9985ca8af1..c0b397b2a4 100644 --- a/tests/unit/utils/aclUtils.js +++ b/tests/unit/utils/aclUtils.js @@ -1,39 +1,33 @@ const assert = require('assert'); const aclUtils = require('../../../lib/utilities/aclUtils'); - describe('checkGrantHeaderValidity for acls', () => { const tests = [ { it: 'should allow valid x-amz-grant-read grant', headers: { - 'x-amz-grant-read': - 'uri=http://acs.amazonaws.com/groups/global/AllUsers', + 'x-amz-grant-read': 'uri=http://acs.amazonaws.com/groups/global/AllUsers', }, result: true, }, { it: 'should allow valid x-amz-grant-write grant', headers: { - 'x-amz-grant-write': - 'emailaddress=user2@example.com', + 'x-amz-grant-write': 'emailaddress=user2@example.com', }, result: true, }, { it: 'should allow valid x-amz-grant-read-acp grant', headers: { - 'x-amz-grant-read-acp': - 'emailaddress=superuser@example.com', + 'x-amz-grant-read-acp': 'emailaddress=superuser@example.com', }, result: true, }, { it: 'should allow valid x-amz-grant-write-acp grant', headers: { - 'x-amz-grant-write-acp': - 'id=79a59df900b949e55d96a1e6' + - '98fbacedfd6e09d98eacf8f8d5218e7cd47ef2be', + 'x-amz-grant-write-acp': 'id=79a59df900b949e55d96a1e6' + '98fbacedfd6e09d98eacf8f8d5218e7cd47ef2be', }, result: true, }, @@ -41,50 +35,44 @@ describe('checkGrantHeaderValidity for acls', () => { it: 'should allow valid x-amz-grant-full-control grant', headers: { 'x-amz-grant-full-control': - 'id=79a59df900b949e55d96a1e6' + - '98fbacedfd6e09d98eacf8f8d5218e7cd47ef2be,' + - 'emailaddress=foo@bar.com', + 'id=79a59df900b949e55d96a1e6' + + '98fbacedfd6e09d98eacf8f8d5218e7cd47ef2be,' + + 'emailaddress=foo@bar.com', }, result: 
true, }, { it: 'should deny grant without equal sign', headers: { - 'x-amz-grant-full-control': - 'id79a59df900b949e55d96a1e6' + - '98fbacedfd6e09d98eacf8f8d5218e7cd47ef2be', + 'x-amz-grant-full-control': 'id79a59df900b949e55d96a1e6' + '98fbacedfd6e09d98eacf8f8d5218e7cd47ef2be', }, result: false, }, { it: 'should deny grant with bad uri', headers: { - 'x-amz-grant-full-control': - 'uri=http://totallymadeup', + 'x-amz-grant-full-control': 'uri=http://totallymadeup', }, result: false, }, { it: 'should deny grant with bad emailaddress', headers: { - 'x-amz-grant-read': - 'emailaddress=invalidemail.com', + 'x-amz-grant-read': 'emailaddress=invalidemail.com', }, result: false, }, { it: 'should deny grant with bad canonicalID', headers: { - 'x-amz-grant-write': - 'id=123', + 'x-amz-grant-write': 'id=123', }, result: false, }, { it: 'should deny grant with bad type of identifier', headers: { - 'x-amz-grant-write': - 'madeupidentifier=123', + 'x-amz-grant-write': 'madeupidentifier=123', }, result: false, }, @@ -92,8 +80,7 @@ describe('checkGrantHeaderValidity for acls', () => { tests.forEach(test => { it(test.it, () => { - const actualResult = - aclUtils.checkGrantHeaderValidity(test.headers); + const actualResult = aclUtils.checkGrantHeaderValidity(test.headers); assert.strictEqual(actualResult, test.result); }); }); diff --git a/tests/unit/utils/bucketEncryption.js b/tests/unit/utils/bucketEncryption.js index 63cb33df9a..230f18f891 100644 --- a/tests/unit/utils/bucketEncryption.js +++ b/tests/unit/utils/bucketEncryption.js @@ -1,14 +1,12 @@ const metadata = require('../../../lib/metadata/wrapper'); - function templateSSEConfig({ algorithm, keyId }) { const xml = []; xml.push(` - ` - ); + `); if (algorithm) { xml.push(`${algorithm}`); diff --git a/tests/unit/utils/checkReadLocation.js b/tests/unit/utils/checkReadLocation.js index 6756f9bff4..34ce3fd712 100644 --- a/tests/unit/utils/checkReadLocation.js +++ b/tests/unit/utils/checkReadLocation.js @@ -1,8 +1,7 @@ const assert = require('assert'); const { ConfigObject } = require('../../../lib/Config'); -const checkReadLocation = - require('../../../lib/api/apiUtils/object/checkReadLocation'); +const checkReadLocation = require('../../../lib/api/apiUtils/object/checkReadLocation'); const locationConstraints = { bucketmatch: { @@ -38,14 +37,12 @@ describe('Testing checkReadLocation', () => { }); it('should return null if location does not exist', () => { - const testResult = checkReadLocation( - config, 'nonexistloc', key, bucket); + const testResult = checkReadLocation(config, 'nonexistloc', key, bucket); assert.deepStrictEqual(testResult, null); }); it('should return correct results for bucketMatch true location', () => { - const testResult = checkReadLocation( - config, 'bucketmatch', key, bucket); + const testResult = checkReadLocation(config, 'bucketmatch', key, bucket); const expectedResult = { location: 'bucketmatch', key, @@ -55,8 +52,7 @@ describe('Testing checkReadLocation', () => { }); it('should return correct results for bucketMatch false location', () => { - const testResult = checkReadLocation( - config, 'nobucketmatch', key, bucket); + const testResult = checkReadLocation(config, 'nobucketmatch', key, bucket); const expectedResult = { location: 'nobucketmatch', key: `${bucket}/${key}`, diff --git a/tests/unit/utils/collectResponseHeaders.js b/tests/unit/utils/collectResponseHeaders.js index 4bb44ba6bf..755596289a 100644 --- a/tests/unit/utils/collectResponseHeaders.js +++ b/tests/unit/utils/collectResponseHeaders.js @@ -1,6 +1,5 @@ 
const assert = require('assert'); -const collectResponseHeaders = - require('../../../lib/utilities/collectResponseHeaders'); +const collectResponseHeaders = require('../../../lib/utilities/collectResponseHeaders'); describe('Middleware: Collect Response Headers', () => { it('should be able to set replication status when config is set', () => { @@ -15,8 +14,7 @@ describe('Middleware: Collect Response Headers', () => { ].forEach(item => { it(`should skip replication header ${item.test}`, () => { const headers = collectResponseHeaders(item.md); - assert.deepStrictEqual(headers['x-amz-replication-status'], - undefined); + assert.deepStrictEqual(headers['x-amz-replication-status'], undefined); }); }); @@ -25,19 +23,16 @@ describe('Middleware: Collect Response Headers', () => { assert.strictEqual(headers['Accept-Ranges'], 'bytes'); }); - it('should return an undefined value when x-amz-website-redirect-location' + - ' is empty', () => { + it('should return an undefined value when x-amz-website-redirect-location' + ' is empty', () => { const objectMD = { 'x-amz-website-redirect-location': '' }; const headers = collectResponseHeaders(objectMD); - assert.strictEqual(headers['x-amz-website-redirect-location'], - undefined); + assert.strictEqual(headers['x-amz-website-redirect-location'], undefined); }); it('should return the (nonempty) value of WebsiteRedirectLocation', () => { const obj = { 'x-amz-website-redirect-location': 'google.com' }; const headers = collectResponseHeaders(obj); - assert.strictEqual(headers['x-amz-website-redirect-location'], - 'google.com'); + assert.strictEqual(headers['x-amz-website-redirect-location'], 'google.com'); }); it('should not set flag when transition not in progress', () => { diff --git a/tests/unit/utils/gcpMpuHelpers.js b/tests/unit/utils/gcpMpuHelpers.js index 99496a6a3d..6820dda281 100644 --- a/tests/unit/utils/gcpMpuHelpers.js +++ b/tests/unit/utils/gcpMpuHelpers.js @@ -39,8 +39,7 @@ describe('GcpUtils MPU Helper Functions:', () => { tests.forEach(test => { it(test.it, () => { const { partNumber, phase } = test.input; - assert.strictEqual(createMpuKey( - key, uploadId, partNumber, phase), test.output); + assert.strictEqual(createMpuKey(key, uploadId, partNumber, phase), test.output); }); }); }); @@ -56,9 +55,7 @@ describe('GcpUtils MPU Helper Functions:', () => { tests.forEach(test => { it(test.it, () => { const { phase, size } = test.input; - assert.deepStrictEqual(createMpuList( - { Key: key, UploadId: uploadId }, phase, size), - test.output); + assert.deepStrictEqual(createMpuList({ Key: key, UploadId: uploadId }, phase, size), test.output); }); }); }); diff --git a/tests/unit/utils/gcpTaggingHelpers.js b/tests/unit/utils/gcpTaggingHelpers.js index d2169156d0..04d81a67c7 100644 --- a/tests/unit/utils/gcpTaggingHelpers.js +++ b/tests/unit/utils/gcpTaggingHelpers.js @@ -1,10 +1,8 @@ const assert = require('assert'); const { errorInstances, storage } = require('arsenal'); const { gcpTaggingPrefix } = require('../../../constants'); -const { genPutTagObj } = - require('../../../tests/functional/raw-node/utils/gcpUtils'); -const { processTagSet, stripTags, retrieveTags, getPutTagsMetadata } = - storage.data.external.GcpUtils; +const { genPutTagObj } = require('../../../tests/functional/raw-node/utils/gcpUtils'); +const { processTagSet, stripTags, retrieveTags, getPutTagsMetadata } = storage.data.external.GcpUtils; const maxTagSize = 10; const validTagSet = genPutTagObj(2); @@ -36,26 +34,24 @@ describe('GcpUtils Tagging Helper Functions:', () => { { it: 'should 
return error for invalid tag set size', input: invalidSizeTagSet, - output: errorInstances.BadRequest.customizeDescription( - 'Object tags cannot be greater than 10'), + output: errorInstances.BadRequest.customizeDescription('Object tags cannot be greater than 10'), }, { it: 'should return error for duplicate tag keys', input: invalidDuplicateTagSet, output: errorInstances.InvalidTag.customizeDescription( - 'Cannot provide multiple Tags with the same key'), + 'Cannot provide multiple Tags with the same key' + ), }, { it: 'should return error for invalid "key" value', input: invalidKeyTagSet, - output: errorInstances.InvalidTag.customizeDescription( - 'The TagKey provided is too long, 129'), + output: errorInstances.InvalidTag.customizeDescription('The TagKey provided is too long, 129'), }, { it: 'should return error for invalid "value" value', input: invalidValueTagSet, - output: errorInstances.InvalidTag.customizeDescription( - 'The TagValue provided is too long, 257'), + output: errorInstances.InvalidTag.customizeDescription('The TagValue provided is too long, 257'), }, { it: 'should return empty tag object when input is undefined', @@ -123,8 +119,7 @@ describe('GcpUtils Tagging Helper Functions:', () => { describe('getPutTagsMetadata', () => { const tests = [ { - it: 'should return correct object when' + - ' given a tag query string and a metadata obj', + it: 'should return correct object when' + ' given a tag query string and a metadata obj', input: { metadata: Object.assign({}, onlyMetadata), tagQuery }, output: tagMetadata, }, @@ -139,7 +134,8 @@ describe('GcpUtils Tagging Helper Functions:', () => { output: onlyMetadata, }, { - it: 'should return metadata with correct tag properties ' + + it: + 'should return metadata with correct tag properties ' + 'if given a metdata with prior tags and query string', input: { metadata: Object.assign({}, withPriorTags), tagQuery }, output: tagMetadata, @@ -148,8 +144,7 @@ describe('GcpUtils Tagging Helper Functions:', () => { tests.forEach(test => { it(test.it, () => { const { metadata, tagQuery } = test.input; - assert.deepStrictEqual( - getPutTagsMetadata(metadata, tagQuery), test.output); + assert.deepStrictEqual(getPutTagsMetadata(metadata, tagQuery), test.output); }); }); }); diff --git a/tests/unit/utils/lifecycleHelpers.js b/tests/unit/utils/lifecycleHelpers.js index b9399b00ac..a35cff8e4d 100644 --- a/tests/unit/utils/lifecycleHelpers.js +++ b/tests/unit/utils/lifecycleHelpers.js @@ -33,7 +33,8 @@ function getLifecycleXml() { const days2 = 1; const action3 = 'AbortIncompleteMultipartUpload'; const days3 = 30; - return '' + '' + `${id1}` + @@ -61,7 +62,8 @@ function getLifecycleXml() { `${tags[0].value}` + `<${action1}>${days1}` + '' + - ''; + '' + ); } module.exports = { diff --git a/tests/unit/utils/monitoring.js b/tests/unit/utils/monitoring.js index 96ef99070b..3f8ba9aa7f 100644 --- a/tests/unit/utils/monitoring.js +++ b/tests/unit/utils/monitoring.js @@ -7,8 +7,12 @@ const monitoring = require('../../../lib/utilities/monitoringHandler'); describe('Monitoring: endpoint', () => { const sandbox = sinon.createSandbox(); const res = { - writeHead(/* result, headers */) { return this; }, - write(/* body */) { return this; }, + writeHead(/* result, headers */) { + return this; + }, + write(/* body */) { + return this; + }, end(/* body */) {}, }; monitoring.collectDefaultMetrics(); @@ -23,9 +27,20 @@ describe('Monitoring: endpoint', () => { }); async function fetchMetrics(req, res) { - await new Promise(resolve => 
monitoring.monitoringHandler(null, req, { - ...res, end: (...body) => { res.end(...body); resolve(); } - }, null)); + await new Promise(resolve => + monitoring.monitoringHandler( + null, + req, + { + ...res, + end: (...body) => { + res.end(...body); + resolve(); + }, + }, + null + ) + ); } it('should return an error is method is not GET', async () => { @@ -80,20 +95,28 @@ describe('Monitoring: endpoint', () => { }); function parseMetric(metrics, name, labels) { - const labelsString = Object.entries(labels).map(e => `${e[0]}="${e[1]}"`).join(','); + const labelsString = Object.entries(labels) + .map(e => `${e[0]}="${e[1]}"`) + .join(','); const metric = metrics.match(new RegExp(`^${name}{${labelsString}} (.*)$`, 'm')); return metric ? metric[1] : null; } function parseHttpRequestSize(metrics, action = 'putObject') { - const value = parseMetric(metrics, 's3_cloudserver_http_request_size_bytes_sum', - { method: 'PUT', action, code: '200' }); + const value = parseMetric(metrics, 's3_cloudserver_http_request_size_bytes_sum', { + method: 'PUT', + action, + code: '200', + }); return value ? parseInt(value, 10) : 0; } function parseHttpResponseSize(metrics, action = 'getObject') { - const value = parseMetric(metrics, 's3_cloudserver_http_response_size_bytes_sum', - { method: 'GET', action, code: '200' }); + const value = parseMetric(metrics, 's3_cloudserver_http_response_size_bytes_sum', { + method: 'GET', + action, + code: '200', + }); return value ? parseInt(value, 10) : 0; } @@ -101,8 +124,7 @@ describe('Monitoring: endpoint', () => { await fetchMetrics({ method: 'GET', url: '/metrics' }, res); const requestSize = parseHttpRequestSize(res.end.args[0][0]); - monitoring.promMetrics('PUT', 'stuff', '200', - 'putObject', 2357, 3572, false, null, 5723); + monitoring.promMetrics('PUT', 'stuff', '200', 'putObject', 2357, 3572, false, null, 5723); await fetchMetrics({ method: 'GET', url: '/metrics' }, res); assert(parseHttpRequestSize(res.end.args[1][0]) === requestSize + 2357); @@ -112,8 +134,7 @@ describe('Monitoring: endpoint', () => { await fetchMetrics({ method: 'GET', url: '/metrics' }, res); const responseSize = parseHttpResponseSize(res.end.args[0][0]); - monitoring.promMetrics('GET', 'stuff', '200', - 'getObject', 7532); + monitoring.promMetrics('GET', 'stuff', '200', 'getObject', 7532); await fetchMetrics({ method: 'GET', url: '/metrics' }, res); assert(parseHttpResponseSize(res.end.args[1][0]) === responseSize + 7532); diff --git a/tests/unit/utils/mpuUtils.js b/tests/unit/utils/mpuUtils.js index 397b07c5b9..1c73fa9004 100644 --- a/tests/unit/utils/mpuUtils.js +++ b/tests/unit/utils/mpuUtils.js @@ -4,14 +4,11 @@ const crypto = require('crypto'); const xml2js = require('xml2js'); const DummyRequest = require('../DummyRequest'); -const initiateMultipartUpload - = require('../../../lib/api/initiateMultipartUpload'); +const initiateMultipartUpload = require('../../../lib/api/initiateMultipartUpload'); const objectPutPart = require('../../../lib/api/objectPutPart'); -const completeMultipartUpload - = require('../../../lib/api/completeMultipartUpload'); +const completeMultipartUpload = require('../../../lib/api/completeMultipartUpload'); -const { makeAuthInfo } - = require('../helpers'); +const { makeAuthInfo } = require('../helpers'); const canonicalID = 'accessKey1'; const authInfo = makeAuthInfo(canonicalID); @@ -35,31 +32,35 @@ function createinitiateMPURequest(namespace, bucketName, objectKey) { } function createPutPartRequest(namespace, bucketName, objectKey, partNumber, testUploadId) { - 
const request = new DummyRequest({ - bucketName, - namespace, - objectKey, - headers: { host: `${bucketName}.s3.amazonaws.com` }, - url: `/${objectKey}?partNumber=${partNumber}&uploadId=${testUploadId}`, - query: { - partNumber, - uploadId: testUploadId, + const request = new DummyRequest( + { + bucketName, + namespace, + objectKey, + headers: { host: `${bucketName}.s3.amazonaws.com` }, + url: `/${objectKey}?partNumber=${partNumber}&uploadId=${testUploadId}`, + query: { + partNumber, + uploadId: testUploadId, + }, + calculatedHash, + actionImplicitDenies: false, }, - calculatedHash, - actionImplicitDenies: false, - }, partBody); + partBody + ); return request; } function createCompleteRequest(namespace, bucketName, objectKey, testUploadId) { - // only suports a single part for now - const completeBody = '<CompleteMultipartUpload>' + - '<Part>' + - '<PartNumber>1</PartNumber>' + - `<ETag>"${calculatedHash}"</ETag>` + - '</Part>' + - '</CompleteMultipartUpload>'; + // only supports a single part for now + const completeBody = + '<CompleteMultipartUpload>' + + '<Part>' + + '<PartNumber>1</PartNumber>' + + `<ETag>"${calculatedHash}"</ETag>` + + '</Part>' + + '</CompleteMultipartUpload>'; const request = { bucketName, @@ -78,34 +79,32 @@ function createCompleteRequest(namespace, bucketName, objectKey, testUploadId) { function createMPU(namespace, bucketName, objectKey, logger, cb) { let testUploadId; - async.waterfall([ - next => { - const initiateMPURequest = createinitiateMPURequest(namespace, - bucketName, - objectKey); - initiateMultipartUpload(authInfo, initiateMPURequest, logger, next); - }, - (result, corsHeaders, next) => xml2js.parseString(result, next), - (json, next) => { - testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; - const partRequest = - createPutPartRequest(namespace, bucketName, objectKey, 1, testUploadId); - objectPutPart(authInfo, partRequest, undefined, logger, next); - }, - (hexDigest, corsHeaders, next) => { - const completeRequest = - createCompleteRequest(namespace, bucketName, objectKey, testUploadId); - completeMultipartUpload(authInfo, completeRequest, logger, next); - }, - ], err => { - assert.ifError(err); - cb(null, testUploadId); - }); + async.waterfall( + [ + next => { + const initiateMPURequest = createinitiateMPURequest(namespace, bucketName, objectKey); + initiateMultipartUpload(authInfo, initiateMPURequest, logger, next); + }, + (result, corsHeaders, next) => xml2js.parseString(result, next), + (json, next) => { + testUploadId = json.InitiateMultipartUploadResult.UploadId[0]; + const partRequest = createPutPartRequest(namespace, bucketName, objectKey, 1, testUploadId); + objectPutPart(authInfo, partRequest, undefined, logger, next); + }, + (hexDigest, corsHeaders, next) => { + const completeRequest = createCompleteRequest(namespace, bucketName, objectKey, testUploadId); + completeMultipartUpload(authInfo, completeRequest, logger, next); + }, + ], + err => { + assert.ifError(err); + cb(null, testUploadId); + } + ); return testUploadId; } - module.exports = { createPutPartRequest, createCompleteRequest, diff --git a/tests/unit/utils/multipleBackendGateway.js b/tests/unit/utils/multipleBackendGateway.js index c62b89bff6..eab293744f 100644 --- a/tests/unit/utils/multipleBackendGateway.js +++ b/tests/unit/utils/multipleBackendGateway.js @@ -1,9 +1,7 @@ const assert = require('assert'); const { checkExternalBackend } = require('arsenal').storage.data.external.backendUtils; const sinon = require('sinon'); -const awsLocations = [ - 'awsbackend', -]; +const awsLocations = ['awsbackend']; const statusSuccess = { versioningStatus: 'Enabled', @@ -32,37 +30,46 @@ describe('Testing _checkExternalBackend', function describeF() { beforeEach(done => {
this.clock = sinon.useFakeTimers({ shouldAdvanceTime: true }); const clients = getClients(true); - return checkExternalBackend(clients, awsLocations, 'aws_s3', false, - externalBackendHealthCheckInterval, done); + return checkExternalBackend(clients, awsLocations, 'aws_s3', false, externalBackendHealthCheckInterval, done); }); afterEach(() => { this.clock.restore(); }); - it('should not refresh response before externalBackendHealthCheckInterval', - done => { + it('should not refresh response before externalBackendHealthCheckInterval', done => { const clients = getClients(false); - return checkExternalBackend(clients, awsLocations, 'aws_s3', - false, externalBackendHealthCheckInterval, (err, res) => { - if (err) { - return done(err); + return checkExternalBackend( + clients, + awsLocations, + 'aws_s3', + false, + externalBackendHealthCheckInterval, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res[0].awsbackend, statusSuccess); + return done(); } - assert.strictEqual(res[0].awsbackend, statusSuccess); - return done(); - }); + ); }); - it('should refresh response after externalBackendHealthCheckInterval', - done => { + it('should refresh response after externalBackendHealthCheckInterval', done => { const clients = getClients(false); setTimeout(() => { - checkExternalBackend(clients, awsLocations, 'aws_s3', - false, externalBackendHealthCheckInterval, (err, res) => { - if (err) { - return done(err); + checkExternalBackend( + clients, + awsLocations, + 'aws_s3', + false, + externalBackendHealthCheckInterval, + (err, res) => { + if (err) { + return done(err); + } + assert.strictEqual(res[0].awsbackend, statusFailure); + return done(); } - assert.strictEqual(res[0].awsbackend, statusFailure); - return done(); - }); + ); }, externalBackendHealthCheckInterval + 1); this.clock.next(); // test faster }); diff --git a/tests/unit/utils/pushReplicationMetric.js b/tests/unit/utils/pushReplicationMetric.js index 02b9c944e6..7e818f1d77 100644 --- a/tests/unit/utils/pushReplicationMetric.js +++ b/tests/unit/utils/pushReplicationMetric.js @@ -1,31 +1,25 @@ const assert = require('assert'); const { ObjectMD } = require('arsenal').models; -const { getMetricToPush } = - require('../../../lib/routes/utilities/pushReplicationMetric'); +const { getMetricToPush } = require('../../../lib/routes/utilities/pushReplicationMetric'); describe('getMetricToPush', () => { it('should push metrics when putting a new replica version', () => { - const prevObjectMD = new ObjectMD() - .setVersionId('1'); - const objectMD = new ObjectMD() - .setVersionId('2') - .setReplicationStatus('REPLICA'); + const prevObjectMD = new ObjectMD().setVersionId('1'); + const objectMD = new ObjectMD().setVersionId('2').setReplicationStatus('REPLICA'); const result = getMetricToPush(prevObjectMD, objectMD); assert.strictEqual(result, 'replicateObject'); }); it('should not push metrics for non-replica operations', () => { const prevObjectMD = new ObjectMD(); - const objectMD = new ObjectMD() - .setReplicationStatus('COMPLETED'); + const objectMD = new ObjectMD().setReplicationStatus('COMPLETED'); const result = getMetricToPush(prevObjectMD, objectMD); assert.strictEqual(result, null); }); it('should push metrics for replica operations with tagging', () => { - const prevObjectMD = new ObjectMD() - .setVersionId('1'); + const prevObjectMD = new ObjectMD().setVersionId('1'); const objectMD = new ObjectMD() .setVersionId('1') .setReplicationStatus('REPLICA') @@ -34,80 +28,53 @@ describe('getMetricToPush', () => { 
assert.strictEqual(result, 'replicateTags'); }); - it('should push metrics for replica operations when deleting tagging', - () => { - const prevObjectMD = new ObjectMD() - .setTags({ 'object-tag-key': 'object-tag-value' }); + it('should push metrics for replica operations when deleting tagging', () => { + const prevObjectMD = new ObjectMD().setTags({ 'object-tag-key': 'object-tag-value' }); const objectMD = new ObjectMD().setReplicationStatus('REPLICA'); const result = getMetricToPush(prevObjectMD, objectMD); assert.strictEqual(result, 'replicateTags'); }); - it('should not push metrics for replica operations with tagging ' + - 'if tags are equal', - () => { - const prevObjectMD = new ObjectMD() - .setVersionId('1') - .setTags({ 'object-tag-key': 'object-tag-value' }); - const objectMD = new ObjectMD() - .setVersionId('1') - .setReplicationStatus('REPLICA') - .setTags({ 'object-tag-key': 'object-tag-value' }); - const result = getMetricToPush(prevObjectMD, objectMD); - assert.strictEqual(result, null); - } - ); + it('should not push metrics for replica operations with tagging ' + 'if tags are equal', () => { + const prevObjectMD = new ObjectMD().setVersionId('1').setTags({ 'object-tag-key': 'object-tag-value' }); + const objectMD = new ObjectMD() + .setVersionId('1') + .setReplicationStatus('REPLICA') + .setTags({ 'object-tag-key': 'object-tag-value' }); + const result = getMetricToPush(prevObjectMD, objectMD); + assert.strictEqual(result, null); + }); it('should push metrics for replica operations with acl', () => { - const prevObjectMD = new ObjectMD() - .setVersionId('1'); + const prevObjectMD = new ObjectMD().setVersionId('1'); const objectMD = new ObjectMD(); const publicACL = objectMD.getAcl(); publicACL.Canned = 'public-read'; - objectMD - .setReplicationStatus('REPLICA') - .setAcl(publicACL) - .setVersionId('1'); + objectMD.setReplicationStatus('REPLICA').setAcl(publicACL).setVersionId('1'); const result = getMetricToPush(prevObjectMD, objectMD); assert.strictEqual(result, 'replicateTags'); }); - - it('should push metrics for replica operations when resetting acl', - () => { + it('should push metrics for replica operations when resetting acl', () => { const prevObjectMD = new ObjectMD(); const publicACL = prevObjectMD.getAcl(); publicACL.Canned = 'public-read'; - prevObjectMD - .setReplicationStatus('REPLICA') - .setAcl(publicACL) - .setVersionId('1'); + prevObjectMD.setReplicationStatus('REPLICA').setAcl(publicACL).setVersionId('1'); const objectMD = new ObjectMD(); const privateACL = objectMD.getAcl(); privateACL.Canned = 'private'; - objectMD - .setReplicationStatus('REPLICA') - .setAcl(privateACL) - .setVersionId('1'); + objectMD.setReplicationStatus('REPLICA').setAcl(privateACL).setVersionId('1'); const result = getMetricToPush(prevObjectMD, objectMD); assert.strictEqual(result, 'replicateTags'); }); - it('should not push metrics for replica operations with acl ' + - 'when they are equal', - () => { - const objectMD = new ObjectMD(); - const publicACL = objectMD.getAcl(); - publicACL.Canned = 'public-read'; - objectMD - .setReplicationStatus('REPLICA') - .setAcl(publicACL) - .setVersionId('1'); - const prevObjectMD = new ObjectMD() - .setAcl(publicACL) - .setVersionId('1'); - const result = getMetricToPush(prevObjectMD, objectMD); - assert.strictEqual(result, null); - } - ); + it('should not push metrics for replica operations with acl ' + 'when they are equal', () => { + const objectMD = new ObjectMD(); + const publicACL = objectMD.getAcl(); + publicACL.Canned = 
'public-read'; + objectMD.setReplicationStatus('REPLICA').setAcl(publicACL).setVersionId('1'); + const prevObjectMD = new ObjectMD().setAcl(publicACL).setVersionId('1'); + const result = getMetricToPush(prevObjectMD, objectMD); + assert.strictEqual(result, null); + }); }); diff --git a/tests/unit/utils/request.js b/tests/unit/utils/request.js index d403660e27..677a4d0045 100644 --- a/tests/unit/utils/request.js +++ b/tests/unit/utils/request.js @@ -102,10 +102,7 @@ function testHandler(req, res) { case '/raw': return respondWithValue(req, res, ['bitsandbytes']); case '/json': - return respondWithValue(req, res, [ - postJsonStringified.slice(0, 3), - postJsonStringified.slice(3) - ]); + return respondWithValue(req, res, [postJsonStringified.slice(0, 3), postJsonStringified.slice(3)]); case '/post': if (req.method !== 'POST') { return respondWithError(req, res, 405); @@ -124,7 +121,7 @@ function testHandler(req, res) { } function createProxyServer(proto, targetHost, hostname, port, callback) { - const target = new URL(targetHost); + const target = new URL(targetHost); let options = {}; let serverType = http; if (proto === 'https') { @@ -138,10 +135,7 @@ function createProxyServer(proto, targetHost, hostname, port, callback) { proxy.on('connect', (req, clnt) => { const svr = net.connect(target.port, target.hostname, () => { // handle http -> https - clnt.write( - `HTTP/${req.httpVersion} 200 Connection Established\r\n` + - '\r\n' - ); + clnt.write(`HTTP/${req.httpVersion} 200 Connection Established\r\n` + '\r\n'); svr.pipe(clnt); clnt.pipe(svr); }); @@ -157,8 +151,7 @@ function createTestServer(proto, hostname, port, handler, callback) { options = { key: testKey, cert: testCert }; serverType = https; } - const server = serverType.createServer(options, - handler); + const server = serverType.createServer(options, handler); server.on('error', err => { process.stdout.write(`https server: ${err.stack}\n`); process.exit(1); @@ -167,10 +160,7 @@ function createTestServer(proto, hostname, port, handler, callback) { return server; } -[ - 'http', - 'https', -].forEach(protocol => { +['http', 'https'].forEach(protocol => { describe(`test against ${protocol} server`, () => { const hostname = 'localhost'; const testPort = 4242; @@ -185,37 +175,45 @@ function createTestServer(proto, hostname, port, handler, callback) { before(done => { process.env.NODE_TLS_REJECT_UNAUTHORIZED = 0; - async.series([ - next => { - server = createTestServer( - protocol, hostname, testPort, testHandler, next); - }, - next => { - proxyTarget = createTestServer(protocol, hostname, 8081, - (req, res) => res.end('proxyTarget'), next); - }, - next => { - proxyServer = createProxyServer('http', - targetHost, hostname, proxyPort, next); - }, - next => { - sproxyServer = createProxyServer('https', - targetHost, hostname, sproxyPort, next); - }, - ], done); + async.series( + [ + next => { + server = createTestServer(protocol, hostname, testPort, testHandler, next); + }, + next => { + proxyTarget = createTestServer( + protocol, + hostname, + 8081, + (req, res) => res.end('proxyTarget'), + next + ); + }, + next => { + proxyServer = createProxyServer('http', targetHost, hostname, proxyPort, next); + }, + next => { + sproxyServer = createProxyServer('https', targetHost, hostname, sproxyPort, next); + }, + ], + done + ); }); after(done => { process.env.NODE_TLS_REJECT_UNAUTHORIZED = 1; - async.series([ - next => server.close(next), - next => proxyTarget.close(next), - next => { - proxyServer.close(); - sproxyServer.close(); - next(); - }, - 
], done); + async.series( + [ + next => server.close(next), + next => proxyTarget.close(next), + next => { + proxyServer.close(); + sproxyServer.close(); + next(); + }, + ], + done + ); }); afterEach(() => { @@ -251,35 +249,34 @@ function createTestServer(proto, hostname, port, handler, callback) { }); it('should return data', done => { - request.request(`${host}/raw`, { json: false }, - (err, res, body) => { - assert.ifError(err); - assert.equal(body, 'bitsandbytes'); - done(); - }); + request.request(`${host}/raw`, { json: false }, (err, res, body) => { + assert.ifError(err); + assert.equal(body, 'bitsandbytes'); + done(); + }); }); it('should convert output to json if "json" flag is set', done => { - request.request(`${host}/json`, { json: true }, - (err, res, body) => { - assert.ifError(err); - assert.deepStrictEqual(body, postJson); - done(); - }); + request.request(`${host}/json`, { json: true }, (err, res, body) => { + assert.ifError(err); + assert.deepStrictEqual(body, postJson); + done(); + }); }); it('should set method to GET if it is missing', done => { - const req = request.request(`${host}`, - (err, res) => { - assert.ifError(err); - assert.equal(res.statusCode, 200); - assert.equal(req.method, 'GET'); - done(); - }); + const req = request.request(`${host}`, (err, res) => { + assert.ifError(err); + assert.equal(res.statusCode, 200); + assert.equal(req.method, 'GET'); + done(); + }); }); it('should set headers', done => { - const req = request.request(`${host}`, { + const req = request.request( + `${host}`, + { headers: { 'TEST-HEADERS-ONE': 'test-value-one', 'TEST-HEADERS-TWO': 'test-value-two', @@ -293,75 +290,74 @@ function createTestServer(proto, hostname, port, handler, callback) { 'test-headers-two': 'test-value-two', }); done(); - }); + } + ); }); }); describe('post', () => { it('should post data', done => { - request.post(`${host}/post`, { body: postData }, - (err, res, body) => { - assert.ifError(err); - assert.equal(res.statusCode, 200); - assert.equal(body, '{"body": "post completed"}'); - done(); - }); + request.post(`${host}/post`, { body: postData }, (err, res, body) => { + assert.ifError(err); + assert.equal(res.statusCode, 200); + assert.equal(body, '{"body": "post completed"}'); + done(); + }); }); it('should post with json data', done => { - request.post(`${host}/postjson`, { body: { key: 'value' } }, - (err, res, body) => { - assert.ifError(err); - assert.equal(res.statusCode, 200); - assert.equal(body, '{"body": "post completed"}'); - done(); - }); + request.post(`${host}/postjson`, { body: { key: 'value' } }, (err, res, body) => { + assert.ifError(err); + assert.equal(res.statusCode, 200); + assert.equal(body, '{"body": "post completed"}'); + done(); + }); }); it('should post with empty body', done => { - request.post(`${host}/postempty`, - (err, res, body) => { - assert.ifError(err); - assert.equal(res.statusCode, 200); - assert.equal(body, '{"body": "post completed"}'); - done(); - }); + request.post(`${host}/postempty`, (err, res, body) => { + assert.ifError(err); + assert.equal(res.statusCode, 200); + assert.equal(body, '{"body": "post completed"}'); + done(); + }); }); it('should post with json data (json response)', done => { - request.post(`${host}/postjson`, - { body: { key: 'value' }, json: true }, - (err, res, body) => { - assert.ifError(err); - assert.equal(res.statusCode, 200); - assert.deepStrictEqual(body, { - body: 'post completed', - }); - done(); + request.post(`${host}/postjson`, { body: { key: 'value' }, json: true }, (err, res, body) => 
{ + assert.ifError(err); + assert.equal(res.statusCode, 200); + assert.deepStrictEqual(body, { + body: 'post completed', }); + done(); + }); }); it('should set content-type JSON if missing', done => { - const req = request.post(`${host}`, { + const req = request.post( + `${host}`, + { body: postJson, - headers: { 'EXTRA': 'header' }, + headers: { EXTRA: 'header' }, }, (err, res) => { assert.ifError(err); assert.equal(res.statusCode, 200); checkForHeaders(req.getHeaders(), { 'content-type': 'application/json', - 'content-length': - Buffer.byteLength(postJsonStringified), - 'extra': 'header', + 'content-length': Buffer.byteLength(postJsonStringified), + extra: 'header', }); done(); - }); + } + ); }); - it('should not overwrite existing content-type header value', - done => { - const req = request.post(`${host}`, { + it('should not overwrite existing content-type header value', done => { + const req = request.post( + `${host}`, + { body: postJson, headers: { 'Content-Type': 'text/plain' }, }, @@ -370,22 +366,22 @@ function createTestServer(proto, hostname, port, handler, callback) { assert.equal(res.statusCode, 200); checkForHeaders(req.getHeaders(), { 'content-type': 'text/plain', - 'content-length': - Buffer.byteLength(postJsonStringified), + 'content-length': Buffer.byteLength(postJsonStringified), }); done(); - }); - }); + } + ); + }); }); }); }); describe('utilities::request error handling', () => { - it('should throw an error if arguments are missing', () => { + it('should throw an error if arguments are missing', () => { assert.throws(request.request); }); - it('should throw an error if callback argument is missing', () => { + it('should throw an error if callback argument is missing', () => { assert.throws(() => request.request('http://test')); }); diff --git a/tests/unit/utils/responseStreamData.js b/tests/unit/utils/responseStreamData.js index 10066bfc42..1f6acd2268 100644 --- a/tests/unit/utils/responseStreamData.js +++ b/tests/unit/utils/responseStreamData.js @@ -8,8 +8,7 @@ const { config } = require('../../../lib/Config'); const { client, implName, data } = require('../../../lib/data/wrapper'); const kms = require('../../../lib/kms/wrapper'); const vault = require('../../../lib/auth/vault'); -const locationStorageCheck = - require('../../../lib/api/apiUtils/object/locationStorageCheck'); +const locationStorageCheck = require('../../../lib/api/apiUtils/object/locationStorageCheck'); const metadata = require('../../../lib/metadata/wrapper'); const routesUtils = s3routes.routesUtils; @@ -51,10 +50,12 @@ describe.skip('responseStreamData:', () => { it('should stream full requested object data for one part object', done => { ds.push(null, dataStoreEntry); - const dataLocations = [{ - key: 1, - dataStore: 'mem', - }]; + const dataLocations = [ + { + key: 1, + dataStore: 'mem', + }, + ]; const response = httpMocks.createResponse({ eventEmitter: EventEmitter, }); @@ -63,8 +64,16 @@ describe.skip('responseStreamData:', () => { assert.strictEqual(data, postBody.toString()); done(); }); - return responseStreamData(errCode, overrideHeaders, resHeaders, - dataLocations, dataRetrievalParams, response, null, log); + return responseStreamData( + errCode, + overrideHeaders, + resHeaders, + dataLocations, + dataRetrievalParams, + response, + null, + log + ); }); it('should stream full requested object data for two part object', done => { @@ -81,7 +90,8 @@ describe.skip('responseStreamData:', () => { dataStore: 'mem', start: 11, size: 11, - }]; + }, + ]; const response = httpMocks.createResponse({ 
eventEmitter: EventEmitter, }); @@ -91,17 +101,27 @@ describe.skip('responseStreamData:', () => { assert.strictEqual(data, doublePostBody); done(); }); - return responseStreamData(errCode, overrideHeaders, resHeaders, - dataLocations, dataRetrievalParams, response, null, log); + return responseStreamData( + errCode, + overrideHeaders, + resHeaders, + dataLocations, + dataRetrievalParams, + response, + null, + log + ); }); it('#334 non-regression test, destroy connection on error', done => { - const dataLocations = [{ - key: 1, - dataStore: 'mem', - start: 0, - size: 11, - }]; + const dataLocations = [ + { + key: 1, + dataStore: 'mem', + start: 0, + size: 11, + }, + ]; const prev = data.get; data.get = (objectGetInfo, response, log, cb) => { setTimeout(() => cb(errors.InternalError), 1000); @@ -117,12 +137,19 @@ describe.skip('responseStreamData:', () => { response.on('end', () => { data.get = prev; if (!destroyed) { - return done(new Error('end reached instead of destroying ' + - 'connection')); + return done(new Error('end reached instead of destroying ' + 'connection')); } return done(); }); - return responseStreamData(errCode, overrideHeaders, resHeaders, - dataLocations, dataRetrievalParams, response, null, log); + return responseStreamData( + errCode, + overrideHeaders, + resHeaders, + dataLocations, + dataRetrievalParams, + response, + null, + log + ); }); }); diff --git a/tests/unit/utils/setPartRanges.js b/tests/unit/utils/setPartRanges.js index 52c261e4e1..139318b484 100644 --- a/tests/unit/utils/setPartRanges.js +++ b/tests/unit/utils/setPartRanges.js @@ -6,76 +6,85 @@ describe('setPartRanges function', () => { it('should set range on a one part object', () => { const dataLocations = [{ key: '1' }]; const outerRange = [2, 8]; - const actual = - setPartRanges(dataLocations, outerRange); - assert.deepStrictEqual(actual, [{ - key: '1', - range: [2, 8], - }]); + const actual = setPartRanges(dataLocations, outerRange); + assert.deepStrictEqual(actual, [ + { + key: '1', + range: [2, 8], + }, + ]); }); - it('for a 3-part object, should include full first part, set range on ' + - 'middle part and exclude last part if range request starts at 0' + - 'and ends in the middle of the second part', + it( + 'for a 3-part object, should include full first part, set range on ' + + 'middle part and exclude last part if range request starts at 0' + + 'and ends in the middle of the second part', () => { - const dataLocations = [{ key: '1', size: '4', start: '0' }, + const dataLocations = [ + { key: '1', size: '4', start: '0' }, { key: '2', size: '10', start: '4' }, { key: '3', size: '20', start: '14' }, ]; const outerRange = [0, 10]; - const actual = - setPartRanges(dataLocations, outerRange); - assert.deepStrictEqual(actual, [{ key: '1', size: '4', start: '0' }, - { key: '2', size: '7', start: '4', range: [0, 6] }]); - }); + const actual = setPartRanges(dataLocations, outerRange); + assert.deepStrictEqual(actual, [ + { key: '1', size: '4', start: '0' }, + { key: '2', size: '7', start: '4', range: [0, 6] }, + ]); + } + ); - it('for a 3-part object, should include part of first part, all of ' + - 'second part and part of third part if range request starts within ' + - 'first part and ends before end of last part', + it( + 'for a 3-part object, should include part of first part, all of ' + + 'second part and part of third part if range request starts within ' + + 'first part and ends before end of last part', () => { - const dataLocations = [{ key: '1', size: '4', start: '0' }, + const dataLocations = 
[ + { key: '1', size: '4', start: '0' }, { key: '2', size: '10', start: '4' }, { key: '3', size: '20', start: '14' }, ]; const outerRange = [2, 18]; - const actual = - setPartRanges(dataLocations, outerRange); - assert.deepStrictEqual(actual, [{ key: '1', size: '2', start: '0', - range: [2, 3] }, - { key: '2', size: '10', start: '4' }, - { key: '3', size: '5', start: '14', range: [0, 4] }, + const actual = setPartRanges(dataLocations, outerRange); + assert.deepStrictEqual(actual, [ + { key: '1', size: '2', start: '0', range: [2, 3] }, + { key: '2', size: '10', start: '4' }, + { key: '3', size: '5', start: '14', range: [0, 4] }, ]); - }); + } + ); - it('for a 3-part object, should include only a range of the middle part ' + - 'if the range excludes both the beginning and the end', + it( + 'for a 3-part object, should include only a range of the middle part ' + + 'if the range excludes both the beginning and the end', () => { - const dataLocations = [{ key: '1', size: '4', start: '0' }, + const dataLocations = [ + { key: '1', size: '4', start: '0' }, { key: '2', size: '10', start: '4' }, { key: '3', size: '20', start: '14' }, ]; const outerRange = [5, 7]; - const actual = - setPartRanges(dataLocations, outerRange); - assert.deepStrictEqual(actual, [{ key: '2', size: '3', start: '4', - range: [1, 3] }, - ]); - }); + const actual = setPartRanges(dataLocations, outerRange); + assert.deepStrictEqual(actual, [{ key: '2', size: '3', start: '4', range: [1, 3] }]); + } + ); - it('for a 3-part object, should include only a range of the middle part ' + - 'and all of the third part if the range excludes a portion of the ' + - 'beginning', + it( + 'for a 3-part object, should include only a range of the middle part ' + + 'and all of the third part if the range excludes a portion of the ' + + 'beginning', () => { - const dataLocations = [{ key: '1', size: '4', start: '0' }, + const dataLocations = [ + { key: '1', size: '4', start: '0' }, { key: '2', size: '10', start: '4' }, { key: '3', size: '20', start: '14' }, ]; const outerRange = [5, 34]; - const actual = - setPartRanges(dataLocations, outerRange); - assert.deepStrictEqual(actual, [{ key: '2', size: '9', start: '4', - range: [1, 9] }, + const actual = setPartRanges(dataLocations, outerRange); + assert.deepStrictEqual(actual, [ + { key: '2', size: '9', start: '4', range: [1, 9] }, { key: '3', size: '20', start: '14' }, ]); - }); + } + ); }); diff --git a/tests/unit/utils/validateSearch.js b/tests/unit/utils/validateSearch.js index 4b803278aa..318d0c5e23 100644 --- a/tests/unit/utils/validateSearch.js +++ b/tests/unit/utils/validateSearch.js @@ -1,8 +1,6 @@ const assert = require('assert'); const { errorInstances } = require('arsenal'); -const validateSearch = - require('../../../lib/api/apiUtils/bucket/validateSearch'); - +const validateSearch = require('../../../lib/api/apiUtils/bucket/validateSearch'); describe('validate search where clause', () => { const tests = [ @@ -12,33 +10,30 @@ describe('validate search where clause', () => { result: undefined, }, { - it: 'should allow a simple search with known ' + - 'column attribute', + it: 'should allow a simple search with known ' + 'column attribute', searchParams: '`content-length`="10"', result: undefined, }, { it: 'should allow valid search with AND', - searchParams: '`x-amz-meta-dog`="labrador" ' + - 'AND `x-amz-meta-age`="5"', + searchParams: '`x-amz-meta-dog`="labrador" ' + 'AND `x-amz-meta-age`="5"', result: undefined, }, { it: 'should allow valid search with OR', - searchParams: 
'`x-amz-meta-dog`="labrador" ' + - 'OR `x-amz-meta-age`="5"', + searchParams: '`x-amz-meta-dog`="labrador" ' + 'OR `x-amz-meta-age`="5"', result: undefined, }, { it: 'should allow valid search with double AND', - searchParams: '`x-amz-meta-dog`="labrador" ' + - 'AND `x-amz-meta-age`="5" ' + - 'AND `x-amz-meta-whatever`="ok"', + searchParams: + '`x-amz-meta-dog`="labrador" ' + 'AND `x-amz-meta-age`="5" ' + 'AND `x-amz-meta-whatever`="ok"', result: undefined, }, { it: 'should allow valid chained search with tables and columns', - searchParams: '`x-amz-meta-dog`="labrador" ' + + searchParams: + '`x-amz-meta-dog`="labrador" ' + 'AND `x-amz-meta-age`="5" ' + 'AND `content-length`="10"' + 'OR isDeleteMarker="true"' + @@ -47,70 +42,70 @@ describe('validate search where clause', () => { }, { it: 'should allow valid LIKE search', - searchParams: '`x-amz-meta-dog` LIKE "lab%" ' + - 'AND `x-amz-meta-age` LIKE "5%" ' + - 'AND `content-length`="10"', + searchParams: + '`x-amz-meta-dog` LIKE "lab%" ' + 'AND `x-amz-meta-age` LIKE "5%" ' + 'AND `content-length`="10"', result: undefined, }, { it: 'should disallow a LIKE search with invalid attribute', searchParams: '`x-zma-meta-dog` LIKE "labrador"', - result: errorInstances.InvalidArgument.customizeDescription('Search ' + - 'param contains unknown attribute: x-zma-meta-dog'), + result: errorInstances.InvalidArgument.customizeDescription( + 'Search ' + 'param contains unknown attribute: x-zma-meta-dog' + ), }, { it: 'should disallow a simple search with unknown attribute', searchParams: '`x-zma-meta-dog`="labrador"', - result: errorInstances.InvalidArgument.customizeDescription('Search ' + - 'param contains unknown attribute: x-zma-meta-dog'), + result: errorInstances.InvalidArgument.customizeDescription( + 'Search ' + 'param contains unknown attribute: x-zma-meta-dog' + ), }, { - it: 'should disallow a compound search with unknown ' + - 'attribute on right', - searchParams: '`x-amz-meta-dog`="labrador" AND ' + - '`x-zma-meta-dog`="labrador"', - result: errorInstances.InvalidArgument.customizeDescription('Search ' + - 'param contains unknown attribute: x-zma-meta-dog'), + it: 'should disallow a compound search with unknown ' + 'attribute on right', + searchParams: '`x-amz-meta-dog`="labrador" AND ' + '`x-zma-meta-dog`="labrador"', + result: errorInstances.InvalidArgument.customizeDescription( + 'Search ' + 'param contains unknown attribute: x-zma-meta-dog' + ), }, { - it: 'should disallow a compound search with unknown ' + - 'attribute on left', - searchParams: '`x-zma-meta-dog`="labrador" AND ' + - '`x-amz-meta-dog`="labrador"', - result: errorInstances.InvalidArgument.customizeDescription('Search ' + - 'param contains unknown attribute: x-zma-meta-dog'), + it: 'should disallow a compound search with unknown ' + 'attribute on left', + searchParams: '`x-zma-meta-dog`="labrador" AND ' + '`x-amz-meta-dog`="labrador"', + result: errorInstances.InvalidArgument.customizeDescription( + 'Search ' + 'param contains unknown attribute: x-zma-meta-dog' + ), }, { - it: 'should disallow a chained search with one invalid ' + - 'table attribute', - searchParams: '`x-amz-meta-dog`="labrador" ' + - 'AND `x-amz-meta-age`="5" ' + - 'OR `x-zma-meta-whatever`="ok"', - result: errorInstances.InvalidArgument.customizeDescription('Search ' + - 'param contains unknown attribute: x-zma-meta-whatever'), + it: 'should disallow a chained search with one invalid ' + 'table attribute', + searchParams: + '`x-amz-meta-dog`="labrador" ' + 'AND `x-amz-meta-age`="5" ' + 'OR 
`x-zma-meta-whatever`="ok"', + result: errorInstances.InvalidArgument.customizeDescription( + 'Search ' + 'param contains unknown attribute: x-zma-meta-whatever' + ), }, { - it: 'should disallow a simple search with unknown ' + - 'column attribute', + it: 'should disallow a simple search with unknown ' + 'column attribute', searchParams: 'whatever="labrador"', - result: errorInstances.InvalidArgument.customizeDescription('Search ' + - 'param contains unknown attribute: whatever'), + result: errorInstances.InvalidArgument.customizeDescription( + 'Search ' + 'param contains unknown attribute: whatever' + ), }, { - it: 'should disallow a chained search with one invalid ' + - 'column attribute', - searchParams: '`x-amz-meta-dog`="labrador" ' + + it: 'should disallow a chained search with one invalid ' + 'column attribute', + searchParams: + '`x-amz-meta-dog`="labrador" ' + 'AND `x-amz-meta-age`="5" ' + 'OR madeUp="something"' + 'OR `x-amz-meta-whatever`="ok"', - result: errorInstances.InvalidArgument.customizeDescription('Search ' + - 'param contains unknown attribute: madeUp'), + result: errorInstances.InvalidArgument.customizeDescription( + 'Search ' + 'param contains unknown attribute: madeUp' + ), }, { it: 'should disallow unsupported query operators', searchParams: 'x-amz-meta-dog BETWEEN "labrador"', result: errorInstances.InvalidArgument.customizeDescription( - 'Invalid sql where clause sent as search query'), + 'Invalid sql where clause sent as search query' + ), }, { it: 'should allow a simple search with tag query', @@ -126,8 +121,7 @@ describe('validate search where clause', () => { tests.forEach(test => { it(test.it, () => { - const actualResult = - validateSearch(test.searchParams); + const actualResult = validateSearch(test.searchParams); if (test.result === undefined) { assert(typeof actualResult.ast === 'object'); } else { diff --git a/tests/utapi/awsNodeSdk.js b/tests/utapi/awsNodeSdk.js index debaa17a78..c33de7cea1 100644 --- a/tests/utapi/awsNodeSdk.js +++ b/tests/utapi/awsNodeSdk.js @@ -11,39 +11,51 @@ function wait(timeoutMs, cb) { setTimeout(cb, timeoutMs); } function createBucket(bucket, cb) { - return s3Client.createBucket({ - Bucket: bucket, - }, (err, data) => { - assert.ifError(err); - return cb(err, data); - }); + return s3Client.createBucket( + { + Bucket: bucket, + }, + (err, data) => { + assert.ifError(err); + return cb(err, data); + } + ); } function deleteBucket(bucket, cb) { - return s3Client.deleteBucket({ - Bucket: bucket, - }, err => { - assert.ifError(err); - return cb(err); - }); + return s3Client.deleteBucket( + { + Bucket: bucket, + }, + err => { + assert.ifError(err); + return cb(err); + } + ); } function putObject(bucket, key, size, cb) { - return s3Client.putObject({ - Bucket: bucket, - Key: key, - Body: Buffer.alloc(size), - }, (err, data) => { - assert.ifError(err); - return cb(err, data); - }); + return s3Client.putObject( + { + Bucket: bucket, + Key: key, + Body: Buffer.alloc(size), + }, + (err, data) => { + assert.ifError(err); + return cb(err, data); + } + ); } function deleteObject(bucket, key, cb) { - return s3Client.deleteObject({ - Bucket: bucket, - Key: key, - }, err => { - assert.ifError(err); - return cb(err); - }); + return s3Client.deleteObject( + { + Bucket: bucket, + Key: key, + }, + err => { + assert.ifError(err); + return cb(err); + } + ); } function deleteObjects(bucket, keys, cb) { const objects = keys.map(key => { @@ -65,26 +77,32 @@ function deleteObjects(bucket, keys, cb) { }); } function copyObject(bucket, key, cb) { - return 
s3Client.copyObject({ - Bucket: bucket, - CopySource: `/${bucket}/${key}`, - Key: `${key}-copy`, - }, err => { - assert.ifError(err); - return cb(err); - }); + return s3Client.copyObject( + { + Bucket: bucket, + CopySource: `/${bucket}/${key}`, + Key: `${key}-copy`, + }, + err => { + assert.ifError(err); + return cb(err); + } + ); } function enableVersioning(bucket, enable, cb) { const versioningStatus = { Status: enable ? 'Enabled' : 'Disabled', }; - return s3Client.putBucketVersioning({ - Bucket: bucket, - VersioningConfiguration: versioningStatus, - }, err => { - assert.ifError(err); - return cb(err); - }); + return s3Client.putBucketVersioning( + { + Bucket: bucket, + VersioningConfiguration: versioningStatus, + }, + err => { + assert.ifError(err); + return cb(err); + } + ); } function deleteVersionList(versionList, bucket, callback) { if (versionList === undefined || versionList.length === 0) { @@ -93,7 +111,8 @@ function deleteVersionList(versionList, bucket, callback) { const params = { Bucket: bucket, Delete: { Objects: [] } }; versionList.forEach(version => { params.Delete.Objects.push({ - Key: version.Key, VersionId: version.VersionId, + Key: version.Key, + VersionId: version.VersionId, }); }); @@ -101,24 +120,25 @@ function deleteVersionList(versionList, bucket, callback) { } function removeAllVersions(params, callback) { const bucket = params.Bucket; - async.waterfall([ - cb => s3Client.listObjectVersions(params, cb), - (data, cb) => deleteVersionList(data.DeleteMarkers, bucket, - err => cb(err, data)), - (data, cb) => deleteVersionList(data.Versions, bucket, - err => cb(err, data)), - (data, cb) => { - if (data.IsTruncated) { - const params = { - Bucket: bucket, - KeyMarker: data.NextKeyMarker, - VersionIdMarker: data.NextVersionIdMarker, - }; - return removeAllVersions(params, cb); - } - return cb(); - }, - ], callback); + async.waterfall( + [ + cb => s3Client.listObjectVersions(params, cb), + (data, cb) => deleteVersionList(data.DeleteMarkers, bucket, err => cb(err, data)), + (data, cb) => deleteVersionList(data.Versions, bucket, err => cb(err, data)), + (data, cb) => { + if (data.IsTruncated) { + const params = { + Bucket: bucket, + KeyMarker: data.NextKeyMarker, + VersionIdMarker: data.NextVersionIdMarker, + }; + return removeAllVersions(params, cb); + } + return cb(); + }, + ], + callback + ); } function objectMPU(bucket, key, parts, partSize, callback) { let ETags = []; @@ -128,67 +148,76 @@ function objectMPU(bucket, key, parts, partSize, callback) { Bucket: bucket, Key: key, }; - return async.waterfall([ - next => s3Client.createMultipartUpload(initiateMPUParams, - (err, data) => { - if (err) { - return next(err); - } - uploadId = data.UploadId; - return next(); - }), - next => - async.mapLimit(partNumbers, 1, (partNumber, callback) => { - const uploadPartParams = { + return async.waterfall( + [ + next => + s3Client.createMultipartUpload(initiateMPUParams, (err, data) => { + if (err) { + return next(err); + } + uploadId = data.UploadId; + return next(); + }), + next => + async.mapLimit( + partNumbers, + 1, + (partNumber, callback) => { + const uploadPartParams = { + Bucket: bucket, + Key: key, + PartNumber: partNumber + 1, + UploadId: uploadId, + Body: Buffer.alloc(partSize), + }; + + return s3Client.uploadPart(uploadPartParams, (err, data) => { + if (err) { + return callback(err); + } + return callback(null, data.ETag); + }); + }, + (err, results) => { + if (err) { + return next(err); + } + ETags = results; + return next(); + } + ), + next => { + const params = { 
Bucket: bucket, Key: key, - PartNumber: partNumber + 1, + MultipartUpload: { + Parts: partNumbers.map(n => ({ + ETag: ETags[n], + PartNumber: n + 1, + })), + }, UploadId: uploadId, - Body: Buffer.alloc(partSize), }; - - return s3Client.uploadPart(uploadPartParams, - (err, data) => { - if (err) { - return callback(err); - } - return callback(null, data.ETag); - }); - }, (err, results) => { - if (err) { - return next(err); - } - ETags = results; - return next(); - }), - next => { - const params = { - Bucket: bucket, - Key: key, - MultipartUpload: { - Parts: partNumbers.map(n => ({ - ETag: ETags[n], - PartNumber: n + 1, - })), - }, - UploadId: uploadId, - }; - return s3Client.completeMultipartUpload(params, next); - }, - ], callback); + return s3Client.completeMultipartUpload(params, next); + }, + ], + callback + ); } function removeVersions(buckets, cb) { - return async.each(buckets, - (bucket, done) => removeAllVersions({ Bucket: bucket }, done), cb); + return async.each(buckets, (bucket, done) => removeAllVersions({ Bucket: bucket }, done), cb); } function getObject(bucket, key, cb) { - return s3Client.getObject({ - Bucket: bucket, - Key: key, - }, (err, data) => { - assert.ifError(err); - return cb(err, data); - }); + return s3Client.getObject( + { + Bucket: bucket, + Key: key, + }, + (err, data) => { + assert.ifError(err); + return cb(err, data); + } + ); } describe('utapi v2 metrics incoming and outgoing bytes', function t() { @@ -216,18 +245,23 @@ describe('utapi v2 metrics incoming and outgoing bytes', function t() { }); it('should set metrics for createBucket and deleteBucket', done => { const bucket = 'bucket1'; - async.series([ - next => createBucket(bucket, next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + ], + done + ); }); it('should set metrics for putObject and deleteObject', done => { const bucket = 'bucket2'; @@ -236,75 +270,93 @@ describe('utapi v2 metrics incoming and outgoing bytes', function t() { const obj2Size = objectSize * 2; const key1 = '1.txt'; const key2 = '2.txt'; - async.series([ - next => createBucket(bucket, next), - next => putObject(bucket, key1, obj1Size, next), - next => wait(WAIT_MS, () => { - checkMetrics(obj1Size, 0, 1); - next(); - }), - next => putObject(bucket, key2, obj2Size, next), - next => wait(WAIT_MS, () => { - checkMetrics(obj1Size + obj2Size, 0, 2); - next(); - }), - next => deleteObject(bucket, key1, next), - next => wait(WAIT_MS, () => { - checkMetrics(obj2Size, 0, 1); - next(); - }), - next => deleteObject(bucket, key2, next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => putObject(bucket, key1, obj1Size, next), + next => + wait(WAIT_MS, () => { + checkMetrics(obj1Size, 0, 1); + next(); + }), + next => putObject(bucket, key2, obj2Size, next), + next => + wait(WAIT_MS, () => { + checkMetrics(obj1Size + obj2Size, 0, 2); + next(); + }), + next => deleteObject(bucket, key1, next), + next => + wait(WAIT_MS, () => { + checkMetrics(obj2Size, 0, 1); + next(); + }), + next => deleteObject(bucket, key2, next), + 
next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); it('should set metrics for copyObject', done => { const bucket = 'bucket3'; const objectSize = 1024 * 1024 * 2; const key = '3.txt'; - async.series([ - next => createBucket(bucket, next), - next => putObject(bucket, key, objectSize, next), - next => copyObject(bucket, key, next), - next => wait(WAIT_MS, () => { - checkMetrics(objectSize * 2, 0, 2); - next(); - }), - next => deleteObject(bucket, key, next), - next => wait(WAIT_MS, () => { - checkMetrics(objectSize, 0, 1); - next(); - }), - next => deleteObject(bucket, `${key}-copy`, next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => putObject(bucket, key, objectSize, next), + next => copyObject(bucket, key, next), + next => + wait(WAIT_MS, () => { + checkMetrics(objectSize * 2, 0, 2); + next(); + }), + next => deleteObject(bucket, key, next), + next => + wait(WAIT_MS, () => { + checkMetrics(objectSize, 0, 1); + next(); + }), + next => deleteObject(bucket, `${key}-copy`, next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); it('should set metrics for getObject', done => { const bucket = 'bucket4'; const objectSize = 1024 * 1024 * 2; const key = '4.txt'; - async.series([ - next => createBucket(bucket, next), - next => putObject(bucket, key, objectSize, next), - next => getObject(bucket, key, next), - next => wait(WAIT_MS, () => { - checkMetrics(objectSize, objectSize, 1); - next(); - }), - next => deleteObject(bucket, key, next), - next => wait(WAIT_MS, () => { - checkMetrics(0, objectSize, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => putObject(bucket, key, objectSize, next), + next => getObject(bucket, key, next), + next => + wait(WAIT_MS, () => { + checkMetrics(objectSize, objectSize, 1); + next(); + }), + next => deleteObject(bucket, key, next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, objectSize, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); it('should set metrics for multiObjectDelete', done => { const bucket = 'bucket5'; @@ -313,133 +365,164 @@ describe('utapi v2 metrics incoming and outgoing bytes', function t() { const obj2Size = objectSize * 1; const key1 = '1.txt'; const key2 = '2.txt'; - async.series([ - next => createBucket(bucket, next), - next => putObject(bucket, key1, obj1Size, next), - next => wait(WAIT_MS, next), - next => putObject(bucket, key2, obj2Size, next), - next => wait(WAIT_MS, () => { - checkMetrics(obj1Size + obj2Size, 0, 2); - next(); - }), - next => deleteObjects(bucket, [key1, key2], next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => putObject(bucket, key1, obj1Size, next), + next => wait(WAIT_MS, next), + next => putObject(bucket, key2, obj2Size, next), + next => + wait(WAIT_MS, () => { + checkMetrics(obj1Size + obj2Size, 0, 2); + next(); + }), + next => deleteObjects(bucket, [key1, key2], next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); it('should set metrics for multiPartUpload', 
done => { const bucket = 'bucket6'; const partSize = 1024 * 1024 * 6; const parts = 2; const key = '6.txt'; - async.series([ - next => createBucket(bucket, next), - next => objectMPU(bucket, key, parts, partSize, next), - next => wait(WAIT_MS, () => { - checkMetrics(partSize * parts, 0, 1); - next(); - }), - next => deleteObject(bucket, key, next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => objectMPU(bucket, key, parts, partSize, next), + next => + wait(WAIT_MS, () => { + checkMetrics(partSize * parts, 0, 1); + next(); + }), + next => deleteObject(bucket, key, next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); it('should set metrics in versioned bucket', done => { const bucket = 'bucket7'; const objectSize = 1024 * 1024; const key = '7.txt'; - async.series([ - next => createBucket(bucket, next), - next => enableVersioning(bucket, true, next), - next => putObject(bucket, key, objectSize, next), - next => wait(WAIT_MS, next), - next => putObject(bucket, key, objectSize, next), - next => wait(WAIT_MS, () => { - checkMetrics(objectSize * 2, 0, 2); - next(); - }), - next => deleteObject(bucket, key, next), - next => wait(WAIT_MS, () => { - checkMetrics(objectSize * 2, 0, 3); - next(); - }), - next => removeVersions([bucket], next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => enableVersioning(bucket, true, next), + next => putObject(bucket, key, objectSize, next), + next => wait(WAIT_MS, next), + next => putObject(bucket, key, objectSize, next), + next => + wait(WAIT_MS, () => { + checkMetrics(objectSize * 2, 0, 2); + next(); + }), + next => deleteObject(bucket, key, next), + next => + wait(WAIT_MS, () => { + checkMetrics(objectSize * 2, 0, 3); + next(); + }), + next => removeVersions([bucket], next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); it('should set metrics for multipartUpload in a versioned bucket', done => { const bucket = 'bucket8'; const partSize = 1024 * 1024 * 6; const parts = 2; const key = '8.txt'; - async.series([ - next => createBucket(bucket, next), - next => enableVersioning(bucket, true, next), - next => objectMPU(bucket, key, parts, partSize, next), - next => wait(WAIT_MS, () => { - checkMetrics(partSize * parts, 0, 1); - next(); - }), - next => removeVersions([bucket], next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => enableVersioning(bucket, true, next), + next => objectMPU(bucket, key, parts, partSize, next), + next => + wait(WAIT_MS, () => { + checkMetrics(partSize * parts, 0, 1); + next(); + }), + next => removeVersions([bucket], next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); it('should set metrics for multipartUpload overwrite in a versioned bucket', done => { const bucket = 'bucket9'; const partSize = 1024 * 1024 * 6; const parts = 2; const key = '9.txt'; - async.series([ - next => createBucket(bucket, next), - next => enableVersioning(bucket, true, next), - 
next => objectMPU(bucket, key, parts, partSize, next), - next => objectMPU(bucket, key, parts, partSize, next), - next => wait(WAIT_MS, () => { - checkMetrics(partSize * parts * 2, 0, 2); - next(); - }), - next => removeVersions([bucket], next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => enableVersioning(bucket, true, next), + next => objectMPU(bucket, key, parts, partSize, next), + next => objectMPU(bucket, key, parts, partSize, next), + next => + wait(WAIT_MS, () => { + checkMetrics(partSize * parts * 2, 0, 2); + next(); + }), + next => removeVersions([bucket], next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); it('should set metrics for multiPartUpload overwrite', done => { const bucket = 'bucket10'; const partSize = 1024 * 1024 * 6; const parts = 2; const key = '10.txt'; - async.series([ - next => createBucket(bucket, next), - next => objectMPU(bucket, key, parts, partSize, next), - next => objectMPU(bucket, key, parts, partSize, next), - next => wait(WAIT_MS, () => { - checkMetrics(partSize * parts, 0, 1); - next(); - }), - next => deleteObject(bucket, key, next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => objectMPU(bucket, key, parts, partSize, next), + next => objectMPU(bucket, key, parts, partSize, next), + next => + wait(WAIT_MS, () => { + checkMetrics(partSize * parts, 0, 1); + next(); + }), + next => deleteObject(bucket, key, next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); it('should set metrics for multiObjectDelete in a versioned bucket', done => { const bucket = 'bucket11'; @@ -448,47 +531,58 @@ describe('utapi v2 metrics incoming and outgoing bytes', function t() { const obj2Size = objectSize * 1; const key1 = '1.txt'; const key2 = '2.txt'; - async.series([ - next => createBucket(bucket, next), - next => enableVersioning(bucket, true, next), - next => putObject(bucket, key1, obj1Size, next), - next => wait(WAIT_MS, next), - next => putObject(bucket, key2, obj2Size, next), - next => wait(WAIT_MS, () => { - checkMetrics(obj1Size + obj2Size, 0, 2); - next(); - }), - next => deleteObjects(bucket, [key1, key2], next), - next => wait(WAIT_MS, () => { - checkMetrics(obj1Size + obj2Size, 0, 4); - next(); - }), - next => removeVersions([bucket], next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => enableVersioning(bucket, true, next), + next => putObject(bucket, key1, obj1Size, next), + next => wait(WAIT_MS, next), + next => putObject(bucket, key2, obj2Size, next), + next => + wait(WAIT_MS, () => { + checkMetrics(obj1Size + obj2Size, 0, 2); + next(); + }), + next => deleteObjects(bucket, [key1, key2], next), + next => + wait(WAIT_MS, () => { + checkMetrics(obj1Size + obj2Size, 0, 4); + next(); + }), + next => removeVersions([bucket], next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); it('should not push a metric for a filtered bucket', done => { const bucket = 
'utapi-event-filter-deny-bucket'; const objSize = 2 * 1024 * 1024; const key = '1.txt'; - async.series([ - next => createBucket(bucket, next), - next => putObject(bucket, key, objSize, next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteObject(bucket, key, next), - next => wait(WAIT_MS, () => { - checkMetrics(0, 0, 0); - next(); - }), - next => deleteBucket(bucket, next), - ], done); + async.series( + [ + next => createBucket(bucket, next), + next => putObject(bucket, key, objSize, next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteObject(bucket, key, next), + next => + wait(WAIT_MS, () => { + checkMetrics(0, 0, 0); + next(); + }), + next => deleteBucket(bucket, next), + ], + done + ); }); }); diff --git a/tests/utapi/utilities.js b/tests/utapi/utilities.js index b8b7295f9c..5046e029b9 100644 --- a/tests/utapi/utilities.js +++ b/tests/utapi/utilities.js @@ -4,481 +4,520 @@ const werelogs = require('werelogs'); const _config = require('../../lib/Config').config; const { makeAuthInfo } = require('../unit/helpers'); -const testEvents = [{ - action: 'getObject', - metrics: { - bucket: 'bucket1', - keys: ['1.txt'], - newByteLength: 2, - oldByteLength: null, - versionId: undefined, - location: 'us-west-1', - numberOfObjects: 1, - byteLength: null, - isDelete: false, - }, - expected: { - objectDelta: 1, - sizeDelta: 0, - incomingBytes: 0, - outgoingBytes: 2, - }, -}, { - action: 'deleteObject', - metrics: { - bucket: 'bucket1', - keys: ['1.txt'], - byteLength: 2, - numberOfObjects: 1, - location: 'us-west-1', - isDelete: true, - }, - expected: { - objectDelta: -1, - sizeDelta: -2, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'listBucket', - metrics: { - bucket: 'bucket1', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'putObject', - metrics: { - bucket: 'bucket1', - keys: ['2.txt'], - newByteLength: 2, - oldByteLength: null, - versionId: undefined, - location: 'us-west-1', - numberOfObjects: 1, - }, - expected: { - objectDelta: 1, - sizeDelta: 2, - incomingBytes: 2, - outgoingBytes: 0, - }, -}, { - action: 'listBucket', - metrics: { - bucket: 'bucket1', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'headObject', - metrics: { - bucket: 'bucket1', - keys: ['1.txt'], - versionId: undefined, - location: 'us-west-1', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'abortMultipartUpload', - metrics: { - bucket: 'destinationbucket815502017', - keys: ['copycatobject'], - byteLength: 26, - location: 'us-east-1', - }, - expected: { - objectDelta: undefined, - sizeDelta: -26, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'completeMultipartUpload', - metrics: { - oldByteLength: null, - bucket: 'destinationbucket815502017', - keys: ['copycatobject'], - versionId: undefined, - numberOfObjects: 1, - location: 'us-east-1', - }, - expected: { - objectDelta: 1, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'createBucket', - metrics: { - bucket: 'deletebucketpolicy-test-bucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'deleteBucket', - metrics: { - bucket: 
'deletebucketpolicy-test-bucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'deleteBucketCors', - metrics: { - bucket: 'testdeletecorsbucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'deleteBucketReplication', - metrics: { - bucket: 'source-bucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'deleteBucketWebsite', - metrics: { - bucket: 'testdeletewebsitebucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'getBucketAcl', - metrics: { - bucket: 'putbucketaclfttest', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'getBucketCors', - metrics: { - bucket: 'testgetcorsbucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'getBucketLocation', - metrics: { - bucket: 'testgetlocationbucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'getBucketNotification', - metrics: { - bucket: 'notificationtestbucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'getBucketObjectLock', - metrics: { - bucket: 'mock-bucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'getBucketReplication', - metrics: { - bucket: 'source-bucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'getBucketVersioning', - metrics: { - bucket: 'bucket-with-object-lock', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'getBucketWebsite', - metrics: { - bucket: 'testgetwebsitetestbucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'getObjectTagging', - metrics: { - bucket: 'completempu1615102906771', - keys: ['keywithtags'], - versionId: undefined, - location: 'us-east-1', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'headObject', - metrics: { - bucket: 'supersourcebucket81033016532', - keys: ['supersourceobject'], - versionId: undefined, - location: 'us-east-1', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'initiateMultipartUpload', - metrics: { - bucket: 'destinationbucket815502017', - keys: ['copycatobject'], - location: 'us-east-1', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'listMultipartUploadParts', - metrics: { - bucket: 'ftest-mybucket-74', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'multiObjectDelete', - metrics: { - bucket: 'completempu1615102906771', - 
keys: [undefined], - byteLength: 3, - numberOfObjects: 1, - removedDeleteMarkers: 1, - isDelete: true, - }, - expected: { - objectDelta: -2, - sizeDelta: -3, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'putBucketAcl', - metrics: { - bucket: 'putbucketaclfttest', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'putBucketCors', - metrics: { - bucket: 'testcorsbucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'putBucketNotification', - metrics: { - bucket: 'notificationtestbucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'putBucketObjectLock', - metrics: { - bucket: 'mock-bucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'putBucketReplication', - metrics: { - bucket: 'source-bucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'putBucketVersioning', - metrics: { - bucket: 'source-bucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'putBucketWebsite', - metrics: { - bucket: 'testgetwebsitetestbucket', - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'uploadPart', - metrics: { - bucket: 'ftest-mybucket-74', - keys: ['toAbort&<>"\''], - newByteLength: 5242880, - oldByteLength: null, - location: 'us-east-1', - }, - expected: { - objectDelta: undefined, - sizeDelta: 5242880, - incomingBytes: 5242880, - outgoingBytes: 0, - }, -}, { - action: 'uploadPartCopy', - metrics: { - bucket: 'destinationbucket815502017', - keys: ['copycatobject'], - newByteLength: 26, - oldByteLength: null, - location: 'us-east-1', - }, - expected: { - objectDelta: undefined, - sizeDelta: 26, - incomingBytes: 26, - outgoingBytes: 0, - }, -}, { - action: 'replicateObject', - metrics: { - bucket: 'source-bucket', - keys: ['mykey'], - newByteLength: 26, - oldByteLength: null, - }, - expected: { - objectDelta: 1, - sizeDelta: 26, - incomingBytes: 26, - outgoingBytes: 0, - }, -}, { - action: 'replicateDelete', - metrics: { - bucket: 'source-bucket', - keys: ['mykey'], - }, - expected: { - objectDelta: 1, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}, { - action: 'replicateTags', - metrics: { - bucket: 'source-bucket', - keys: ['mykey'], - }, - expected: { - objectDelta: undefined, - sizeDelta: undefined, - incomingBytes: undefined, - outgoingBytes: 0, - }, -}]; +const testEvents = [ + { + action: 'getObject', + metrics: { + bucket: 'bucket1', + keys: ['1.txt'], + newByteLength: 2, + oldByteLength: null, + versionId: undefined, + location: 'us-west-1', + numberOfObjects: 1, + byteLength: null, + isDelete: false, + }, + expected: { + objectDelta: 1, + sizeDelta: 0, + incomingBytes: 0, + outgoingBytes: 2, + }, + }, + { + action: 'deleteObject', + metrics: { + bucket: 'bucket1', + keys: ['1.txt'], + byteLength: 2, + numberOfObjects: 1, + location: 'us-west-1', + isDelete: true, + }, + expected: { + objectDelta: -1, + sizeDelta: -2, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'listBucket', + metrics: { 
+ bucket: 'bucket1', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'putObject', + metrics: { + bucket: 'bucket1', + keys: ['2.txt'], + newByteLength: 2, + oldByteLength: null, + versionId: undefined, + location: 'us-west-1', + numberOfObjects: 1, + }, + expected: { + objectDelta: 1, + sizeDelta: 2, + incomingBytes: 2, + outgoingBytes: 0, + }, + }, + { + action: 'listBucket', + metrics: { + bucket: 'bucket1', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'headObject', + metrics: { + bucket: 'bucket1', + keys: ['1.txt'], + versionId: undefined, + location: 'us-west-1', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'abortMultipartUpload', + metrics: { + bucket: 'destinationbucket815502017', + keys: ['copycatobject'], + byteLength: 26, + location: 'us-east-1', + }, + expected: { + objectDelta: undefined, + sizeDelta: -26, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'completeMultipartUpload', + metrics: { + oldByteLength: null, + bucket: 'destinationbucket815502017', + keys: ['copycatobject'], + versionId: undefined, + numberOfObjects: 1, + location: 'us-east-1', + }, + expected: { + objectDelta: 1, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'createBucket', + metrics: { + bucket: 'deletebucketpolicy-test-bucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'deleteBucket', + metrics: { + bucket: 'deletebucketpolicy-test-bucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'deleteBucketCors', + metrics: { + bucket: 'testdeletecorsbucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'deleteBucketReplication', + metrics: { + bucket: 'source-bucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'deleteBucketWebsite', + metrics: { + bucket: 'testdeletewebsitebucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'getBucketAcl', + metrics: { + bucket: 'putbucketaclfttest', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'getBucketCors', + metrics: { + bucket: 'testgetcorsbucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'getBucketLocation', + metrics: { + bucket: 'testgetlocationbucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'getBucketNotification', + metrics: { + bucket: 'notificationtestbucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'getBucketObjectLock', + metrics: { + bucket: 'mock-bucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: 
undefined, + outgoingBytes: 0, + }, + }, + { + action: 'getBucketReplication', + metrics: { + bucket: 'source-bucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'getBucketVersioning', + metrics: { + bucket: 'bucket-with-object-lock', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'getBucketWebsite', + metrics: { + bucket: 'testgetwebsitetestbucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'getObjectTagging', + metrics: { + bucket: 'completempu1615102906771', + keys: ['keywithtags'], + versionId: undefined, + location: 'us-east-1', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'headObject', + metrics: { + bucket: 'supersourcebucket81033016532', + keys: ['supersourceobject'], + versionId: undefined, + location: 'us-east-1', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'initiateMultipartUpload', + metrics: { + bucket: 'destinationbucket815502017', + keys: ['copycatobject'], + location: 'us-east-1', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'listMultipartUploadParts', + metrics: { + bucket: 'ftest-mybucket-74', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'multiObjectDelete', + metrics: { + bucket: 'completempu1615102906771', + keys: [undefined], + byteLength: 3, + numberOfObjects: 1, + removedDeleteMarkers: 1, + isDelete: true, + }, + expected: { + objectDelta: -2, + sizeDelta: -3, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'putBucketAcl', + metrics: { + bucket: 'putbucketaclfttest', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'putBucketCors', + metrics: { + bucket: 'testcorsbucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'putBucketNotification', + metrics: { + bucket: 'notificationtestbucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'putBucketObjectLock', + metrics: { + bucket: 'mock-bucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'putBucketReplication', + metrics: { + bucket: 'source-bucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'putBucketVersioning', + metrics: { + bucket: 'source-bucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'putBucketWebsite', + metrics: { + bucket: 'testgetwebsitetestbucket', + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'uploadPart', + metrics: { + bucket: 'ftest-mybucket-74', + 
keys: ['toAbort&<>"\''], + newByteLength: 5242880, + oldByteLength: null, + location: 'us-east-1', + }, + expected: { + objectDelta: undefined, + sizeDelta: 5242880, + incomingBytes: 5242880, + outgoingBytes: 0, + }, + }, + { + action: 'uploadPartCopy', + metrics: { + bucket: 'destinationbucket815502017', + keys: ['copycatobject'], + newByteLength: 26, + oldByteLength: null, + location: 'us-east-1', + }, + expected: { + objectDelta: undefined, + sizeDelta: 26, + incomingBytes: 26, + outgoingBytes: 0, + }, + }, + { + action: 'replicateObject', + metrics: { + bucket: 'source-bucket', + keys: ['mykey'], + newByteLength: 26, + oldByteLength: null, + }, + expected: { + objectDelta: 1, + sizeDelta: 26, + incomingBytes: 26, + outgoingBytes: 0, + }, + }, + { + action: 'replicateDelete', + metrics: { + bucket: 'source-bucket', + keys: ['mykey'], + }, + expected: { + objectDelta: 1, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, + { + action: 'replicateTags', + metrics: { + bucket: 'source-bucket', + keys: ['mykey'], + }, + expected: { + objectDelta: undefined, + sizeDelta: undefined, + incomingBytes: undefined, + outgoingBytes: 0, + }, + }, +]; describe('utapi v2 pushmetrics utility', () => { const log = new werelogs.Logger('utapi-utility'); @@ -491,8 +530,7 @@ describe('utapi v2 pushmetrics utility', () => { before(() => { assert.strictEqual(utapiVersion, 2); - sinon.stub(UtapiClient.prototype, 'pushMetric') - .callsFake(pushMetricStub); + sinon.stub(UtapiClient.prototype, 'pushMetric').callsFake(pushMetricStub); pushMetric = require('../../lib/utapi/utilities').pushMetric; }); @@ -530,34 +568,34 @@ describe('utapi v2 pushmetrics utility', () => { ]); testEvents - .map(event => { - const modifiedEvent = event; - const authInfo = makeAuthInfo('accesskey1', 'Bart'); - authInfo.arn = `foo:assumed-role/${_config.lifecycleRoleName}/backbeat-lifecycle`; - modifiedEvent.metrics.authInfo = authInfo; - modifiedEvent.metrics.canonicalID = 'accesskey1'; - return modifiedEvent; - }) - .map(event => { - if (eventFilterList.has(event.action)) { - it(`should skip action ${event.action}`, () => { - _config.lifecycleRoleName = 'lifecycleTestRoleName'; - const eventPushed = pushMetric(event.action, log, event.metrics); - assert.strictEqual(eventPushed, undefined); - }); - } - return event; - }) - .forEach(event => { - if (!eventFilterList.has(event.action)) { - it(`should compute and push metrics for ${event.action}`, () => { - const eventPushed = pushMetric(event.action, log, event.metrics); - assert(eventPushed); - Object.keys(event.expected).forEach(key => { - assert.strictEqual(eventPushed[key], event.expected[key]); + .map(event => { + const modifiedEvent = event; + const authInfo = makeAuthInfo('accesskey1', 'Bart'); + authInfo.arn = `foo:assumed-role/${_config.lifecycleRoleName}/backbeat-lifecycle`; + modifiedEvent.metrics.authInfo = authInfo; + modifiedEvent.metrics.canonicalID = 'accesskey1'; + return modifiedEvent; + }) + .map(event => { + if (eventFilterList.has(event.action)) { + it(`should skip action ${event.action}`, () => { + _config.lifecycleRoleName = 'lifecycleTestRoleName'; + const eventPushed = pushMetric(event.action, log, event.metrics); + assert.strictEqual(eventPushed, undefined); }); - }); - } - }); + } + return event; + }) + .forEach(event => { + if (!eventFilterList.has(event.action)) { + it(`should compute and push metrics for ${event.action}`, () => { + const eventPushed = pushMetric(event.action, log, event.metrics); + assert(eventPushed); + 
Object.keys(event.expected).forEach(key => { + assert.strictEqual(eventPushed[key], event.expected[key]); + }); + }); + } + }); }); }); diff --git a/tests/utilities/bucketTagging-util.js b/tests/utilities/bucketTagging-util.js index fe4978510e..8cbe707bab 100644 --- a/tests/utilities/bucketTagging-util.js +++ b/tests/utilities/bucketTagging-util.js @@ -5,11 +5,16 @@ function assertError(err, expectedErr) { if (expectedErr === null) { assert.strictEqual(err, null, `expected no error but got '${err}'`); } else { - assert.strictEqual(err.code, expectedErr, 'incorrect error response ' + - `code: should be '${expectedErr}' but got '${err.code}'`); - assert.strictEqual(err.statusCode, errors[expectedErr].code, - 'incorrect error status code: should be ' + - `${errors[expectedErr].code}, but got '${err.statusCode}'`); + assert.strictEqual( + err.code, + expectedErr, + 'incorrect error response ' + `code: should be '${expectedErr}' but got '${err.code}'` + ); + assert.strictEqual( + err.statusCode, + errors[expectedErr].code, + 'incorrect error status code: should be ' + `${errors[expectedErr].code}, but got '${err.statusCode}'` + ); } } diff --git a/tests/utilities/mock/Scuba.js b/tests/utilities/mock/Scuba.js index 7dbfa49c72..a2996029ff 100644 --- a/tests/utilities/mock/Scuba.js +++ b/tests/utilities/mock/Scuba.js @@ -42,7 +42,8 @@ class Scuba { }); const immediateInflights = req.body?.action === 'objectRestore' ? 0 : inflight; return res.json({ - bytesTotal: (this._data.bucket.get(bucketName)?.current || 0) + + bytesTotal: + (this._data.bucket.get(bucketName)?.current || 0) + (this._data.bucket.get(bucketName)?.nonCurrent || 0) + (this._data.bucket.get(bucketName)?.inflight || 0) + immediateInflights, @@ -116,7 +117,7 @@ class Scuba { let inflightCount = 0; this._data.bucket.forEach((value, key) => { if (!this.supportsInflight && key === bucketName) { - inflightCount += (value.current + value.nonCurrent); + inflightCount += value.current + value.nonCurrent; } else if (this.supportsInflight && key.startsWith(`${bucketName}_`)) { inflightCount += value.inflight; } diff --git a/tests/utilities/objectLock-util.js b/tests/utilities/objectLock-util.js index 92a223e6cd..7237c69fd9 100644 --- a/tests/utilities/objectLock-util.js +++ b/tests/utilities/objectLock-util.js @@ -10,22 +10,26 @@ const versionIdUtils = versioning.VersionID; const log = new DummyRequestLogger(); function changeObjectLock(objects, newConfig, cb) { - async.each(objects, (object, next) => { - const { bucket, key, versionId } = object; - metadataGetObject(bucket, key, versionIdUtils.decode(versionId), null, log, (err, objMD) => { - assert.ifError(err); - // set newConfig as empty string to remove object lock - /* eslint-disable no-param-reassign */ - objMD.retentionMode = newConfig.mode; - objMD.retentionDate = newConfig.date; - objMD.legalHold = false; - const params = { versionId: objMD.versionId, isNull: false }; - metadata.putObjectMD(bucket, key, objMD, params, log, err => { + async.each( + objects, + (object, next) => { + const { bucket, key, versionId } = object; + metadataGetObject(bucket, key, versionIdUtils.decode(versionId), null, log, (err, objMD) => { assert.ifError(err); - next(); + // set newConfig as empty string to remove object lock + /* eslint-disable no-param-reassign */ + objMD.retentionMode = newConfig.mode; + objMD.retentionDate = newConfig.date; + objMD.legalHold = false; + const params = { versionId: objMD.versionId, isNull: false }; + metadata.putObjectMD(bucket, key, objMD, params, log, err => { + 
assert.ifError(err); + next(); + }); }); - }); - }, cb); + }, + cb + ); } module.exports = changeObjectLock; diff --git a/yarn.lock b/yarn.lock index b0fc2f6f58..cf5fdfc9de 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2561,6 +2561,11 @@ escodegen@1.8.x: optionalDependencies: source-map "~0.2.0" +eslint-config-prettier@^9.1.0: + version "9.1.2" + resolved "https://registry.yarnpkg.com/eslint-config-prettier/-/eslint-config-prettier-9.1.2.tgz#90deb4fa0259592df774b600dbd1d2249a78ce91" + integrity sha512-iI1f+D2ViGn+uvv5HuHVUamg8ll4tN+JRHGc6IJi4TP9Kl976C57fzPXgseXNs8v0iA8aSJpHsTWjDb9QJamGQ== + eslint-import-resolver-node@^0.3.9: version "0.3.9" resolved "https://registry.yarnpkg.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz#d4eaac52b8a2e7c3cd1903eb00f7e053356118ac" @@ -5523,6 +5528,11 @@ prelude-ls@~1.1.2: resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" integrity sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w== +prettier@^3.3.3: + version "3.6.2" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.6.2.tgz#ccda02a1003ebbb2bfda6f83a074978f608b9393" + integrity sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ== + proc-log@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/proc-log/-/proc-log-5.0.0.tgz#e6c93cf37aef33f835c53485f314f50ea906a9d8" @@ -6339,7 +6349,16 @@ stream-to-pull-stream@^1.7.1: looper "^3.0.0" pull-stream "^3.2.3" -"string-width-cjs@npm:string-width@^4.2.0", string-width@4.2.3, string-width@^4.1.0, string-width@^4.2.0, string-width@^5.0.1, string-width@^5.1.2: +"string-width-cjs@npm:string-width@^4.2.0": + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + +string-width@4.2.3, string-width@^4.1.0, string-width@^4.2.0, string-width@^5.0.1, string-width@^5.1.2: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -6402,7 +6421,14 @@ string_decoder@~0.10.x: resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" integrity sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ== -"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: +"strip-ansi-cjs@npm:strip-ansi@^6.0.1": + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -7114,8 +7140,7 @@ workerpool@^6.5.1: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.5.1.tgz#060f73b39d0caf97c6db64da004cd01b4c099544" 
integrity sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA== -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: - name wrap-ansi-cjs +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== @@ -7133,6 +7158,15 @@ wrap-ansi@^6.2.0: string-width "^4.1.0" strip-ansi "^6.0.0" +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + wrap-ansi@^8.1.0: version "8.1.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214"