Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
79c6c57
FT: Less verbose report handler
rachedbenmustapha Sep 6, 2017
e8e9287
Support target host header
rachedbenmustapha Nov 30, 2017
f474e7e
Pre-provision instance ID
rachedbenmustapha Oct 31, 2017
e976c4d
Update default configuration for Zenko
rachedbenmustapha Aug 22, 2017
36ff274
Default to S3DATA=multiple
rachedbenmustapha Oct 6, 2017
1264067
Initial management implementation
rachedbenmustapha Jul 25, 2017
fe129c7
Ignore data/metadata dirs
rachedbenmustapha Oct 26, 2017
1d191f4
bf: fix linter errors
Jan 9, 2018
2870477
chore: additional lint fixes
LaurenSpiegel Jan 16, 2018
7828c0d
FT: Add changes for clueso
LaurenSpiegel Dec 15, 2017
a95018a
TEMP: Special treatment for clueso
LaurenSpiegel Jan 16, 2018
798c42d
TEMP: Disable remote mgmt for mem
LaurenSpiegel Jan 30, 2018
337b049
FT: Switch out mongo search for spark
LaurenSpiegel Feb 1, 2018
343b658
use hosts instead of host and port
vrancurel Feb 12, 2018
b443b92
FT: ZENKO-118: Add support for Wasabi data backend
Feb 22, 2018
ebf1316
Fix: limit exported config items
rachedbenmustapha Feb 25, 2018
06d2dbc
FT: Adds support for GCP data backend
alexanderchan-scality Mar 13, 2018
c760229
FT: Add CRR statistics for Orbit (#1162)
Mar 16, 2018
e54318d
Multiple endpoints
Mar 19, 2018
8fe22c0
fix mongodb hosts entrypoint
Mar 19, 2018
bf811ec
S3C-1348 FT: Integrating 1-many locations replication into Orbit
nicolas2bert Mar 20, 2018
8bf35f7
Tested and completed prom-client for S3
anurag4DSB Mar 20, 2018
a612735
finished S3 prom client integration
anurag4DSB Mar 20, 2018
af3ea03
changed PR as adviced in the review
anurag4DSB Mar 22, 2018
9d1cd39
Disable remote management in tests
rachedbenmustapha Mar 29, 2018
61bb309
Make mongodb database configurable
rachedbenmustapha Mar 30, 2018
b505656
FX: functional tests (#1190)
nicolas2bert Apr 3, 2018
ed3ee6d
S3C-1354 Grant replication user permission to read/write buckets/objects
nicolas2bert Mar 28, 2018
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
node_modules
localData/*
localMetadata/*
100 changes: 100 additions & 0 deletions bin/search_bucket.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
#!/bin/sh
// 2>/dev/null ; exec "$(which nodejs 2>/dev/null || which node)" "$0" "$@"
'use strict'; // eslint-disable-line strict

const { auth } = require('arsenal');
const commander = require('commander');

const http = require('http');
const https = require('https');
const logger = require('../lib/utilities/logger');

/**
 * Send a signed `?search=` GET request to the S3 server and print the
 * response body to stdout. Terminates the process: exit 0 on a 2xx
 * response, exit 1 otherwise.
 *
 * @param {string} host - server hostname
 * @param {string|number} port - server port
 * @param {string} bucketName - name of the bucket to search
 * @param {string} query - raw search query (URI-encoded here)
 * @param {string} accessKey - access key id used to sign the request
 * @param {string} secretKey - secret access key used to sign the request
 * @param {boolean} verbose - if truthy, log request/response details
 * @param {boolean} ssl - if truthy, use https instead of http
 * @return {undefined}
 */
function _performSearch(host,
    port,
    bucketName,
    query,
    accessKey,
    secretKey,
    verbose, ssl) {
    const escapedSearch = encodeURIComponent(query);
    const options = {
        host,
        port,
        method: 'GET',
        path: `/${bucketName}/?search=${escapedSearch}`,
        headers: {
            'Content-Length': 0,
        },
        // accept self-signed certificates (dev/test deployments)
        rejectUnauthorized: false,
    };
    const transport = ssl ? https : http;
    const request = transport.request(options, response => {
        if (verbose) {
            logger.info('response status code', {
                statusCode: response.statusCode,
            });
            logger.info('response headers', { headers: response.headers });
        }
        // accumulate body chunks, then report success/failure on 'end'
        const body = [];
        response.setEncoding('utf8');
        response.on('data', chunk => body.push(chunk));
        response.on('end', () => {
            if (response.statusCode >= 200 && response.statusCode < 300) {
                logger.info('Success');
                process.stdout.write(body.join(''));
                process.exit(0);
            } else {
                logger.error('request failed with HTTP Status ', {
                    statusCode: response.statusCode,
                    body: body.join(''),
                });
                process.exit(1);
            }
        });
    });
    // generateV4Headers expects request object with path that does not
    // include query: temporarily strip the query, sign, then restore a
    // path containing it.
    // NOTE(review): the restored path lacks the trailing slash present
    // in `options.path`, and Node's ClientRequest appears to fix the
    // request line at construction time, so this final reassignment
    // likely only affects signing/logging -- confirm against the http
    // client version in use.
    request.path = `/${bucketName}`;
    auth.client.generateV4Headers(request, { search: query },
        accessKey, secretKey, 's3');
    request.path = `/${bucketName}?search=${escapedSearch}`;
    if (verbose) {
        // NOTE(review): `_headers` is a private Node API -- assumes it
        // is still populated by the installed Node version.
        logger.info('request headers', { headers: request._headers });
    }
    request.end();
}

/**
* This function is used as a binary to send a request to S3 to perform a
* search on the objects in a bucket
*
* @return {undefined}
*/
/**
 * CLI entry point: parse command-line options and issue a bucket
 * search request via `_performSearch`. Exits with status 1 (after
 * printing usage) when a required option is missing.
 *
 * @return {undefined}
 */
function searchBucket() {
    // TODO: Include other bucket listing possible query params?
    commander
        .version('0.0.1')
        .option('-a, --access-key <accessKey>', 'Access key id')
        .option('-k, --secret-key <secretKey>', 'Secret access key')
        .option('-b, --bucket <bucket>', 'Name of the bucket')
        .option('-q, --query <query>', 'Search query')
        .option('-h, --host <host>', 'Host of the server')
        .option('-p, --port <port>', 'Port of the server')
        // BUGFIX: short and long flags must be a single comma-separated
        // string; the previous `.option('-s', '--ssl', 'Enable ssl')`
        // registered only `-s` with '--ssl' as its description, so
        // `commander.ssl` was always undefined and SSL could never be
        // enabled from the command line.
        .option('-s, --ssl', 'Enable ssl')
        .option('-v, --verbose')
        .parse(process.argv);

    const { host, port, accessKey, secretKey, bucket, query, verbose, ssl } =
        commander;

    // verbose and ssl are optional; everything else is mandatory
    if (!host || !port || !accessKey || !secretKey || !bucket || !query) {
        logger.error('missing parameter');
        commander.outputHelp();
        process.exit(1);
    }

    _performSearch(host, port, bucket, query, accessKey, secretKey, verbose,
        ssl);
}

searchBucket();
1 change: 1 addition & 0 deletions circle.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ machine:
CXX: g++-4.9
ENABLE_LOCAL_CACHE: true
REPORT_TOKEN: report-token-1
REMOTE_MANAGEMENT_DISABLE: 1
hosts:
bucketwebsitetester.s3-website-us-east-1.amazonaws.com: 127.0.0.1
post:
Expand Down
22 changes: 22 additions & 0 deletions conf/authdata.json
Original file line number Diff line number Diff line change
Expand Up @@ -19,5 +19,27 @@
"access": "accessKey2",
"secret": "verySecretKey2"
}]
},
{
"name": "Clueso",
"email": "inspector@clueso.info",
"arn": "arn:aws:iam::123456789014:root",
"canonicalID": "http://acs.zenko.io/accounts/service/clueso",
"shortid": "123456789014",
"keys": [{
"access": "cluesoKey1",
"secret": "cluesoSecretKey1"
}]
},
{
"name": "Replication",
"email": "inspector@replication.info",
"arn": "arn:aws:iam::123456789015:root",
"canonicalID": "http://acs.zenko.io/accounts/service/replication",
"shortid": "123456789015",
"keys": [{
"access": "replicationKey1",
"secret": "replicationSecretKey1"
}]
}]
}
12 changes: 9 additions & 3 deletions config.json
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,9 @@
"cloudserver-front": "us-east-1",
"s3.docker.test": "us-east-1",
"127.0.0.2": "us-east-1",
"s3.amazonaws.com": "us-east-1"
"s3.amazonaws.com": "us-east-1",
"zenko-cloudserver-replicator": "us-east-1",
"lb": "us-east-1"
},
"websiteEndpoints": ["s3-website-us-east-1.amazonaws.com",
"s3-website.us-east-2.amazonaws.com",
Expand All @@ -33,6 +35,10 @@
"site": "us-east-2",
"type": "aws_s3"
}],
"backbeat": {
"host": "localhost",
"port": 8900
},
"cdmi": {
"host": "localhost",
"port": 81,
Expand All @@ -46,7 +52,7 @@
"host": "localhost",
"port": 8500
},
"clusters": 10,
"clusters": 1,
"log": {
"logLevel": "info",
"dumpLevel": "error"
Expand All @@ -71,7 +77,7 @@
"port": 9991
},
"recordLog": {
"enabled": false,
"enabled": true,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No sure it is useful for people using DMD. And Mongo does not use this param.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it works explicitly for dmd

"recordLogName": "s3-recordlog"
},
"mongodb": {
Expand Down
1 change: 1 addition & 0 deletions constants.js
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,7 @@ const constants = {
legacyLocations: ['sproxyd', 'legacy'],
/* eslint-disable camelcase */
externalBackends: { aws_s3: true, azure: true, gcp: true },
replicationBackends: { aws_s3: true, azure: true, gcp: true },
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
Expand Down
48 changes: 30 additions & 18 deletions docker-entrypoint.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,15 @@ set -e
# modifying config.json
JQ_FILTERS_CONFIG="."

# ENDPOINT var can accept comma separated values
# for multiple endpoint locations
if [[ "$ENDPOINT" ]]; then
HOST_NAME="$ENDPOINT"
fi

if [[ "$HOST_NAME" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$HOST_NAME\"]=\"us-east-1\""
echo "Host name has been modified to $HOST_NAME"
echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with $HOST_NAME"
IFS="," read -ra HOST_NAMES <<< "$ENDPOINT"
for host in "${HOST_NAMES[@]}"; do
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$host\"]=\"us-east-1\""
done
echo "Host name has been modified to ${HOST_NAMES[@]}"
echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with ${HOST_NAMES[@]}"
fi

if [[ "$LOG_LEVEL" ]]; then
Expand All @@ -25,7 +26,7 @@ if [[ "$LOG_LEVEL" ]]; then
fi
fi

if [[ "$SSL" && "$HOST_NAME" ]]; then
if [[ "$SSL" && "$HOST_NAMES" ]]; then
# This condition makes sure that the certificates are not generated twice. (for docker restart)
if [ ! -f ./ca.key ] || [ ! -f ./ca.crt ] || [ ! -f ./server.key ] || [ ! -f ./server.crt ] ; then
# Compute config for utapi tests
Expand All @@ -36,15 +37,15 @@ prompt = no
req_extensions = s3_req

[req_distinguished_name]
CN = ${HOST_NAME}
CN = ${HOST_NAMES[0]}

[s3_req]
subjectAltName = @alt_names
extendedKeyUsage = serverAuth, clientAuth

[alt_names]
DNS.1 = *.${HOST_NAME}
DNS.2 = ${HOST_NAME}
DNS.1 = *.${HOST_NAMES[0]}
DNS.2 = ${HOST_NAMES[0]}

EOF

Expand Down Expand Up @@ -81,19 +82,18 @@ if [[ "$METADATA_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataClient.host=\"$METADATA_HOST\""
fi

if [[ "$MONGODB_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.host=\"$MONGODB_HOST\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.port=27017"
fi

if [[ "$MONGODB_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.port=$MONGODB_PORT"
if [[ "$MONGODB_HOSTS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSetHosts=\"$MONGODB_HOSTS\""
fi

if [[ "$MONGODB_RS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSet=\"$MONGODB_RS\""
fi

if [[ "$MONGODB_DATABASE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.database=\"$MONGODB_DATABASE\""
fi

if [[ "$REDIS_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.host=\"$REDIS_HOST\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.port=6379"
Expand All @@ -107,11 +107,23 @@ if [[ "$RECORDLOG_ENABLED" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .recordLog.enabled=true"
fi

if [[ "$CRR_METRICS_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.host=\"$CRR_METRICS_HOST\""
fi

if [[ "$CRR_METRICS_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.port=$CRR_METRICS_PORT"
fi

if [[ $JQ_FILTERS_CONFIG != "." ]]; then
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
mv config.json.tmp config.json
fi

if test -v INITIAL_INSTANCE_ID && test -v S3METADATAPATH && ! test -f ${S3METADATAPATH}/uuid ; then
echo -n ${INITIAL_INSTANCE_ID} > ${S3METADATAPATH}/uuid
fi

# s3 secret credentials for Zenko
if [ -r /run/secrets/s3-credentials ] ; then
. /run/secrets/s3-credentials
Expand Down
20 changes: 18 additions & 2 deletions lib/Config.js
Original file line number Diff line number Diff line change
Expand Up @@ -383,7 +383,7 @@ class Config extends EventEmitter {
assert(externalBackends[type], 'bad config: `type` ' +
'property of `replicationEndpoints` object must be ' +
'a valid external backend (one of: "' +
`${Object.keys(externalBackends).join('", "')}")`);
`${Object.keys(externalBackends).join('", "')})`);
} else {
assert.notStrictEqual(servers, undefined, 'bad config: ' +
'each object of `replicationEndpoints` array that is ' +
Expand All @@ -402,6 +402,15 @@ class Config extends EventEmitter {
this.replicationEndpoints = replicationEndpoints;
}

if (config.backbeat) {
const { backbeat } = config;
assert.strictEqual(typeof backbeat.host, 'string',
'bad config: backbeat host must be a string');
assert(Number.isInteger(backbeat.port) && backbeat.port > 0,
'bad config: backbeat port must be a positive integer');
this.backbeat = backbeat;
}

// legacy
if (config.regions !== undefined) {
throw new Error('bad config: regions key is deprecated. ' +
Expand Down Expand Up @@ -825,14 +834,15 @@ class Config extends EventEmitter {
process.env.REPORT_TOKEN ||
config.reportToken ||
uuid.v4().toString();
this.reportEndpoint = process.env.REPORT_ENDPOINT;
}

_configureBackends() {
/**
* Configure the backends for Authentication, Data and Metadata.
*/
let auth = 'mem';
let data = 'file';
let data = 'multiple';
let metadata = 'file';
let kms = 'file';
if (process.env.S3BACKEND) {
Expand Down Expand Up @@ -938,6 +948,12 @@ class Config extends EventEmitter {
this.emit('location-constraints-update');
}

setReplicationEndpoints(locationConstraints) {
this.replicationEndpoints =
Object.keys(locationConstraints)
.map(key => ({ site: key, type: locationConstraints[key].type }));
}

getAzureEndpoint(locationConstraint) {
let azureStorageEndpoint =
process.env[`${locationConstraint}_AZURE_STORAGE_ENDPOINT`] ||
Expand Down
12 changes: 11 additions & 1 deletion lib/api/apiUtils/authorization/aclChecks.js
Original file line number Diff line number Diff line change
@@ -1,10 +1,15 @@
const constants = require('../../../../constants');

/**
 * Determine whether a canonical ID belongs to the replication service
 * account, which is identified by a canonical ID whose last
 * slash-separated component is 'replication'.
 *
 * @param {string} canonicalID - canonical ID to inspect
 * @return {boolean} true if the ID denotes the replication user
 */
function isReplicationUser(canonicalID) {
    const lastSegment = canonicalID.split('/').pop();
    return lastSegment === 'replication';
}

function isBucketAuthorized(bucket, requestType, canonicalID) {
// Check to see if user is authorized to perform a
// particular action on bucket based on ACLs.
// TODO: Add IAM checks and bucket policy checks.
if (bucket.getOwner() === canonicalID) {
if (bucket.getOwner() === canonicalID || isReplicationUser(canonicalID)) {
return true;
} else if (requestType === 'bucketOwnerAction') {
// only bucket owner can modify or retrieve this property of a bucket
Expand Down Expand Up @@ -69,6 +74,10 @@ function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
if (objectMD['owner-id'] === canonicalID) {
return true;
}

if (isReplicationUser(canonicalID)) {
return true;
}
// account is authorized if:
// - requesttype is "bucketOwnerAction" (example: for objectTagging) and
// - account is the bucket owner
Expand Down Expand Up @@ -121,4 +130,5 @@ function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
module.exports = {
isBucketAuthorized,
isObjAuthorized,
isReplicationUser,
};
4 changes: 3 additions & 1 deletion lib/api/apiUtils/bucket/bucketCreation.js
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ const createKeyForUserBucket = require('./createKeyForUserBucket');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior');
const { isReplicationUser } = require('../authorization/aclChecks');

const usersBucket = constants.usersBucket;
const oldUsersBucket = constants.oldUsersBucket;
Expand Down Expand Up @@ -210,7 +211,8 @@ function createBucket(authInfo, bucketName, headers,
}
const existingBucketMD = results.getAnyExistingBucketInfo;
if (existingBucketMD instanceof BucketInfo &&
existingBucketMD.getOwner() !== canonicalID) {
existingBucketMD.getOwner() !== canonicalID &&
!isReplicationUser(canonicalID)) {
// return existingBucketMD to collect cors headers
return cb(errors.BucketAlreadyExists, existingBucketMD);
}
Expand Down
Loading