
Use dyno hostname to enable dynos to live on separate servers

Conflicts:
	apiserver/api/deployer.js
1 parent dc9b8f6 · commit 8784e4434c17d6adad490d87c182f269b93290d2 · @tombh committed Mar 28, 2013
2 apiserver/api/run.js
@@ -17,13 +17,15 @@ module.exports = {
var result;
var self = this;
+ // Query the Job Giver 15 times with the job ID created via the run_command PGSQL function.
@Filirom1 · Mar 29, 2013

Query the Job Giver every 200ms but no more than 15 times

@tombh · Mar 30, 2013

I'll add that to the comment.

async.whilst(function() {
return !(result || timesQueried > 15);
}, function(callback) {
dbfacade.exec('getJob', { jobId: jobId }, function(err, dbResult) {
timesQueried++;
if(err) return callback(err);
+ // Return a socket URI that connects to the dyno that has picked up the job.
var job = dbResult.rows[0];
if(job.distributed_to) {
result = {
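
The review thread above settles on "query every 200ms, at most 15 times". As a hedged illustration, here is a self-contained sketch of the whole loop this hunk belongs to; `dbfacade`, `getJob`, and the counter logic follow the excerpt, while the 200ms `setTimeout` and the wrapping function are assumptions drawn from the discussion.

```js
// Sketch only: poll the Job Giver every 200ms, up to the query limit, until
// the job has been distributed to a dyno. The 200ms delay is assumed from
// the review comment; the rest mirrors the excerpt above.
var async = require('async');

function waitForDistribution(dbfacade, jobId, cb) {
  var result;
  var timesQueried = 0;
  async.whilst(function() {
    return !(result || timesQueried > 15);
  }, function(callback) {
    dbfacade.exec('getJob', { jobId: jobId }, function(err, dbResult) {
      timesQueried++;
      if (err) return callback(err);
      var job = dbResult.rows[0];
      if (job.distributed_to) {
        // Hostname of the dyno that picked up the job; run.js uses this to
        // build the socket URI returned to the API client.
        result = { host: job.distributed_to };
        return callback();
      }
      setTimeout(callback, 200); // wait 200ms before the next query
    });
  }, function(err) {
    cb(err, result);
  });
}
```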
20 apiserver/jobgiver.js
@@ -1,3 +1,12 @@
+/*
+ * The basic idea of how jobs work in Openruko is that jobs are first created by adding a job
+ * to the provision_job table. Take, for example, the very simple `openruko run ls`; this triggers
+ * the run_command.pgsql function, which in turn creates a job that waits to be picked up by a dyno.
+ * The dynohost server (only one runs inside each dyno server; N.B. many dynos can run inside a
+ * dyno server) polls for jobs via the /internal/getjobs route below. If a dyno successfully picks up
+ * a job then it is marked as 'distributed' via the distributed_to field in the provision_job table.
+ */
+
var db = require('./apidb');
var dbfacade= require('./dbfacade')(db);
var _ = require('underscore');
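
To make the header comment concrete, here is a hypothetical sketch of the other side of the flow: a dynohost polling `/internal/getjobs`. Only the route name and the `https://:apiKey@host:5000` URL scheme appear in this commit (the latter in `test/mock/dynohost.js`); the polling interval, environment variable, and job handling are illustrative.

```js
// Hypothetical dynohost polling loop against the Job Giver's route.
var request = require('request');

var base = 'https://:' + process.env.API_KEY + '@localhost:5000';

function pollForJobs() {
  request.get({ url: base + '/internal/getjobs', json: true },
    function(err, res, jobs) {
      if (!err && Array.isArray(jobs)) {
        jobs.forEach(function(job) {
          // A real dynohost would start a dyno here; this stub just logs.
          console.log('picked up job for dyno', job.dyno_id);
        });
      }
      setTimeout(pollForJobs, 1000); // assumed 1s interval
    });
}

pollForJobs();
```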
@@ -139,7 +148,18 @@ tasks.distributeTasksOutstanding = function(cb) {
var leastBurdenedHost = _.min(self.awaitingAssignment,function(host) {
return host.runningDynos;
});
+
+
if(leastBurdenedHost) {
+
+ // Although markJobsDistributed adds the same hostname value to the distributed_to field (which
+ // then gets persisted), it doesn't do so until after the job object is returned to the API client.
+ // Returning the requesting dyno's hostname to itself (a little tautological I know, but saves the dyno
+ // doing awkward system lookups of its external interfaces) allows it to update its record in the
+ // instance table with its hostname, which is then used by the HTTP router to forward external web
+ // requests.
+ task.dyno_hostname = leastBurdenedHost.host;
+
leastBurdenedHost.tasks.push(task);
leastBurdenedHost.runningDynos++;
self.jobsOutstanding.provision.splice(self.jobsOutstanding.provision.indexOf(task), 1);
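
The long comment above says the dyno uses the returned hostname to update its row in the instance table. Here is a hedged sketch of that follow-up call, with JSON field names mirroring `test/mock/dynohost.js` further down; the base URL and all values are placeholders.

```js
// Sketch: the dyno echoes the hostname the Job Giver handed it back through
// /internal/updatestate, so the instance table (and hence the HTTP router)
// learns which host it lives on. All values are examples.
var request = require('request');

var base = 'https://:' + process.env.API_KEY + '@localhost:5000';

request.post({
  url: base + '/internal/updatestate',
  json: {
    appId: 'myApp',
    dynoId: 'dyno-123',
    dynoHostname: 'dyno-server-1.example', // task.dyno_hostname from above
    instanceId: 'instance-1',
    state: 'running'
  }
}, function(err) {
  if (err) console.error('updatestate failed', err);
});
```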
7 apiserver/rendezpass.js
@@ -39,8 +39,11 @@ var server = tls.createServer(options, function(s) {
s.write('\n');
- // TODO localhost should be replaced by the right hostname
- var secureClient = tls.connect({ host: 'localhost', port: conf.dynohost.rendezvous.port }, function() {
+ var secureClient = tls.connect({
+ host: payload.hostname,
+ port: conf.dynohost.rendezvous.port,
+ rejectUnauthorized: false
@Filirom1 · Mar 29, 2013

Good point, rejectUnauthorized is false by default but it's important to show it.

@tombh · Mar 30, 2013

This came about when I tried upgrading to node 0.10 as it defaults to erroring on bad SSL connections. On a side note, there were a lot of other very weird problems, especially with dynohost, on upgrading to 0.10 :(

+ }, function() {
secureClient.write('xyz\n' + payload.dyno_id + '\n');
// Pass on data from dyno to Rendezvous user
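
Since the explicit `rejectUnauthorized` flag is the crux of this hunk, here is a minimal standalone sketch of the same connection pattern; the hostname, port, and handshake values are illustrative.

```js
// Minimal sketch: connect to a dynohost presenting a self-signed certificate.
// rejectUnauthorized: false must be explicit because Node 0.10 started
// rejecting unverifiable certs by default. Host and port are examples.
var tls = require('tls');

var secureClient = tls.connect({
  host: 'dyno-server-1.example', // payload.hostname in the hunk above
  port: 4000,                    // stands in for conf.dynohost.rendezvous.port
  rejectUnauthorized: false      // accept the dynohost's self-signed cert
}, function() {
  secureClient.write('xyz\ndyno-123\n'); // handshake line, as in the hunk
  secureClient.pipe(process.stdout);     // pass dyno output through
});
```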
6 postgres/openruko_api/functions/update_state.pgsql
@@ -1,13 +1,13 @@
CREATE OR REPLACE FUNCTION update_state
-(p_instance_id text, p_dyno_id text, p_state text, p_port integer DEFAULT NULL)
+(p_instance_id text, p_dyno_id text, p_dyno_hostname text, p_state text, p_port integer DEFAULT NULL)
RETURNS integer AS
$BODY$
DECLARE
BEGIN
INSERT INTO instance_state
- (state, state_extra_info, transitioned_at, instance_id, dyno_id)
- VALUES (p_state, '', NOW(), p_instance_id, p_dyno_id);
+ (state, state_extra_info, transitioned_at, instance_id, dyno_id, dyno_hostname)
+ VALUES (p_state, '', NOW(), p_instance_id, p_dyno_id, p_dyno_hostname);
@Filirom1 · Mar 29, 2013

It's just magic, dyno_hostname was already there in the table :)

@tombh · Mar 30, 2013

Yeah, nonuby knew what he was doing!

IF (p_state = 'running') THEN
UPDATE instance SET port = p_port
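
For reference, a hypothetical example of calling the updated function from the API server through the `dbfacade` pattern seen in `jobgiver.js`; the `updateState` statement name, parameter mapping, and values are assumptions, though the arguments follow the new signature.

```js
// Hypothetical invocation of the extended update_state function. Only the
// require lines and the exec(name, params, cb) shape appear in this commit;
// the statement name and all values are assumptions.
var db = require('./apidb');
var dbfacade = require('./dbfacade')(db);

dbfacade.exec('updateState', {
  instanceId: 'instance-1',
  dynoId: 'dyno-123',
  dynoHostname: 'dyno-server-1.example', // the new p_dyno_hostname argument
  state: 'running',
  port: 5000
}, function(err) {
  if (err) return console.error('update_state failed:', err);
  console.log('instance state recorded');
});
```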
3 test/mock/dynohost.js
@@ -3,12 +3,13 @@ var common = require('../common');
var base = 'https://:' + common.superUser.apiKey + '@localhost:5000';
-exports.updateState = function(appId, dynoId, instanceId, state, cb){
+exports.updateState = function(appId, dynoId, dynoHostname, instanceId, state, cb){
request.post({
url: base + '/internal/updatestate',
json: {
appId: appId,
dynoId: dynoId,
+ dynoHostname: dynoHostname,
instanceId: instanceId,
state: state
}
4 test/ps.js
@@ -101,7 +101,7 @@ describe('ps API', function(){
// I need to have the instance_id of the first dyno in order to kill it
dynohostMock.getJobs(function(err, data){
preReceiveMock('myApp', function(err){
- dynohostMock.updateState('myApp', data[0].dyno_id, data[0].instance_id, 'completed', done);
+ dynohostMock.updateState('myApp', data[0].dyno_id, data[0].dyno_hostname, data[0].instance_id, 'completed', done);
});
});
});
@@ -146,7 +146,7 @@ describe('ps API', function(){
describe('updating the state to ' + state, function(){
beforeEach(function(done){
dynohostMock.getJobs(function(err, data){
- dynohostMock.updateState('myApp', data[0].dyno_id, data[0].instance_id, state, done);
+ dynohostMock.updateState('myApp', data[0].dyno_id, data[0].dyno_hostname, data[0].instance_id, state, done);
});
});
