improvement: ZENKO-760 prepare setup to work without cluster
To be in line with the Kubernetes sentiment of running one process per pod, the cluster
module is no longer used when the worker count is configured to be 1. Another change moves
the metadata setup so that the connection to MongoDB is in place before any requests are
accepted.
The code related to Orbit's management has been adapted to work in non-cluster mode.
rahulreddy committed Jul 23, 2018
1 parent fada992 commit 4c138ef
Showing 4 changed files with 48 additions and 31 deletions.
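
The gist of the change, as a hedged sketch (illustrative only, not the exact cloudserver code; launchServer is a hypothetical placeholder for the real server startup): with a single worker the server runs in-process and the cluster module is never involved, which matches the one-process-per-pod model.

// Illustrative sketch of the single-process-vs-cluster decision described
// in the commit message; `launchServer` is a hypothetical callback standing
// in for the real server startup.
const cluster = require('cluster');

function start(workers, launchServer) {
    if (workers <= 1) {
        // One process per pod: skip the cluster module entirely.
        launchServer();
        return;
    }
    if (cluster.isMaster) {
        for (let n = 0; n < workers; n += 1) {
            cluster.fork();
        }
        return;
    }
    launchServer(); // forked worker process
}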
3 changes: 0 additions & 3 deletions lib/metadata/wrapper.js
@@ -40,7 +40,4 @@ if (clientName === 'mem') {
 
 const metadata = new MetadataWrapper(config.backends.metadata, params,
     bucketclient, logger);
-// call setup
-metadata.setup(() => {});
-
 module.exports = metadata;
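
The deleted lines above had kicked off metadata.setup() as a fire-and-forget call at require() time. After this change the wrapper is exported uninitialized and the caller drives setup before accepting requests. A minimal sketch of the consuming side (the path and retry delay are illustrative; the 2000 ms delay mirrors the one used in lib/server.js):

// Sketch of the consumer-driven initialization this change enables.
const metadata = require('./lib/metadata/wrapper');

function initMetadata(onReady) {
    metadata.setup(err => {
        if (err) {
            // Do not accept requests until MongoDB is reachable; retry.
            setTimeout(() => initMetadata(onReady), 2000);
            return;
        }
        onReady();
    });
}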
60 changes: 40 additions & 20 deletions lib/server.js
@@ -1,6 +1,7 @@
 const http = require('http');
 const https = require('https');
 const cluster = require('cluster');
+const { series } = require('async');
 const arsenal = require('arsenal');
 const { RedisClient, StatsClient } = require('arsenal').metrics;
 const monitoringClient = require('./utilities/monitoringHandler');
@@ -12,6 +13,7 @@ const _config = require('./Config').config;
 const { blacklistedPrefixes } = require('../constants');
 const api = require('./api/api');
 const data = require('./data/wrapper');
+const metadata = require('./metadata/wrapper');
 const { initManagement } = require('./management');
 
 const routes = arsenal.s3routes.routes;
@@ -48,6 +50,7 @@ class S3Server {
      */
     constructor(worker) {
         this.worker = worker;
+        this.cluster = true;
         http.globalAgent.keepAlive = true;
 
         process.on('SIGINT', this.cleanUp.bind(this));
@@ -67,6 +70,7 @@ });
             });
             this.caughtExceptionShutdown();
         });
+        this.started = false;
     }
 
     routeRequest(req, res) {
@@ -165,47 +169,62 @@ class S3Server {
     }
 
     caughtExceptionShutdown() {
+        if (!this.cluster) {
+            process.exit(1);
+        }
         logger.error('shutdown of worker due to exception', {
             workerId: this.worker ? this.worker.id : undefined,
             workerPid: this.worker ? this.worker.process.pid : undefined,
         });
         // Will close all servers, cause disconnect event on master and kill
         // worker process with 'SIGTERM'.
-        this.worker.kill();
+        if (this.worker) {
+            this.worker.kill();
+        }
     }
 
     initiateStartup(log) {
-        clientCheck(true, log, (err, results) => {
+        series([
+            next => metadata.setup(next),
+            next => clientCheck(true, log, next),
+        ], (err, results) => {
             if (err) {
                 log.info('initial health check failed, delaying startup', {
                     error: err,
                     healthStatus: results,
                 });
                 setTimeout(() => this.initiateStartup(log), 2000);
-            } else {
-                log.debug('initial health check succeeded');
-                if (_config.listenOn.length > 0) {
-                    _config.listenOn.forEach(item => {
-                        this.startup(item.port, item.ip);
-                    });
-                } else {
-                    this.startup(_config.port);
-                }
+                return;
             }
+            log.debug('initial health check succeeded');
+            if (_config.listenOn.length > 0) {
+                _config.listenOn.forEach(item => {
+                    this.startup(item.port, item.ip);
+                });
+                return;
+            }
+            if (!this.started) {
+                this.startup(_config.port);
+                this.started = true;
+            }
         });
     }
 }
 
 function main() {
-    let clusters = _config.clusters || 1;
+    // TODO: change config to use workers prop. name for clarity
+    let workers = _config.clusters || 1;
     if (process.env.S3BACKEND === 'mem') {
-        clusters = 1;
+        workers = 1;
     }
-    if (cluster.isMaster) {
-        // Make sure all workers use the same report token
+    this.cluster = workers > 1;
+    if (!this.cluster) {
         process.env.REPORT_TOKEN = _config.reportToken;
-
-        for (let n = 0; n < clusters; n++) {
+        const server = new S3Server();
+        server.initiateStartup(logger.newRequestLogger());
+    }
+    if (this.cluster && cluster.isMaster) {
+        for (let n = 0; n < workers; n++) {
             const worker = cluster.fork();
             logger.info('new worker forked', {
                 workerId: worker.id,
@@ -214,8 +233,8 @@ function main() {
         }
         setInterval(() => {
             const len = Object.keys(cluster.workers).length;
-            if (len < clusters) {
-                for (let i = len; i < clusters; i++) {
+            if (len < workers) {
+                for (let i = len; i < workers; i++) {
                     const newWorker = cluster.fork();
                     logger.info('new worker forked', {
                         workerId: newWorker.id,
@@ -245,7 +264,8 @@ function main() {
                 workerPid: worker.process.pid,
             });
         });
-    } else {
+    }
+    if (this.cluster && cluster.isWorker) {
         const server = new S3Server(cluster.worker);
         server.initiateStartup(logger.newRequestLogger());
     }
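
The reworked initiateStartup() runs metadata setup and the health check in order via async's series, retries the whole sequence on failure, and uses the started flag so a retry never binds the default port twice. A standalone sketch of that pattern (the function parameters here are placeholders, not the real signatures):

const { series } = require('async');

let started = false;

function initiateStartup(setup, healthCheck, listen) {
    series([
        next => setup(next),       // e.g. metadata.setup: connect to MongoDB
        next => healthCheck(next), // e.g. clientCheck: probe the backends
    ], err => {
        if (err) {
            // Back off and retry the whole sequence instead of serving
            // requests before the backends are ready.
            setTimeout(() => initiateStartup(setup, healthCheck, listen), 2000);
            return;
        }
        if (!started) {
            started = true; // guard against double-binding on retries
            listen();
        }
    });
}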
14 changes: 7 additions & 7 deletions package-lock.json

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion package.json
@@ -19,7 +19,7 @@
   },
   "homepage": "https://github.com/scality/S3#readme",
   "dependencies": {
-    "arsenal": "github:scality/Arsenal#241338b",
+    "arsenal": "github:scality/Arsenal#dfcdea4",
     "async": "~2.5.0",
     "aws-sdk": "2.28.0",
     "azure-storage": "^2.1.0",
