Question: doesn't seem to work, though all set up according to the docs #16

Closed
max-degterev opened this issue Jul 5, 2014 · 6 comments


@max-degterev

Hi!

I have the following setup:

#=========================================================================================
# Application setup
#=========================================================================================
config = require('config')
cluster = require('cluster')
_ = require('underscore')
app = require('express')()
http = require('http').Server(app)
socket = require('socket.io')(http, serveClient: false)

env = require('env')
helpers = require('app/javascripts/shared/helpers')
log = helpers.log

passport = require('passport')
session = require('express-session')
mongoose = require('mongoose')
redis = require('socket.io-redis')
MongoStore = require('connect-mongostore')(session)
socket.adapter(redis(host: 'localhost', port: 6379))


#=========================================================================================
# Forking
#=========================================================================================
if cluster.isMaster
  for i in [1..config.workers]
    log("Starting worker #{i}")
    cluster.fork()

  cluster.on 'exit', (worker, code, signal)->
    log("Worker #{worker.process.pid} died")

    if config.debug
      process.exit()
    else
      cluster.fork()

else



  #=======================================================================================
  # Instantiate server
  #=======================================================================================
  domain = require('domain').create()
  domain.on 'error', (err)->
    log(err.stack || err, 'red')

    killtimer = setTimeout ->
      process.exit(1)
    , config.death_timeout
    killtimer.unref()

  domain.run ->
    server = require('./app/javascripts/server')
    unless config.debug
      assetsHashMap = {}
      for key, value of require('./public/assets/hashmap.json')
        assetsHashMap[key.replace('.min', '')] = value

    mongoose.connect("mongodb://#{config.mongodb.host}/#{config.mongodb.database}", server: socketOptions: keepAlive: 1)
    mongoose.connection.on('error', (e)-> log("MongoDB operation failed: #{e}"))


    #=====================================================================================
    # Template globals
    #=====================================================================================
    generateTemplateGlobals = ->
      app.locals.pretty = config.debug
      app.locals.config = _.omit(_.clone(config), 'server_only_keys', config.server_only_keys...)
      app.locals._ = _
      app.locals.helpers = helpers


    #=====================================================================================
    # Global middleware
    #=====================================================================================
    normalizeUrl = (req, res, next)->
      try
        decodeURIComponent(req.originalUrl)
      catch
        url = '/'
        log("malformed URL, redirecting to #{url}")
        return res.redirect(301, url)

      [href, qs...] = req.originalUrl.split('?')

      if qs.length > 1 # a valid URL has at most one '?', so the split yields at most one query part
        url = href + '?' + qs.join('&')
        log("malformed URL, redirecting to #{url}")
        return res.redirect(301, url)

      next()

    getAsset = (name)->
      name = assetsHashMap[name] unless config.debug
      "/assets/#{name}"

    injectGetAsset = (req, res, next)->
      req.app.locals.getAsset = getAsset
      next()

    generateEnv = (req, res, next)->
      res.locals.env.rendered = (new Date).toUTCString()
      res.locals.env.lang = require('./config/lang_en_us')

      next()

    updateUserSession = (req, res, next)->
      # env.csrf = req.csrfToken()
      req.session._updated = (new Date).toUTCString() # forcing cookie to refresh itself
      req.session.touch()
      next()

    preRouteMiddleware = ->
      morgan = require('morgan')

      if config.debug
        app.use(morgan('dev'))
      else
        app.use(morgan('default'))

      app.use(normalizeUrl)

      app.use(require('serve-favicon')(__dirname + '/public/favicon.ico'))
      app.use(require('serve-static')(__dirname + '/public', redirect: false))

      app.use(require('body-parser').json())
      app.use(require('cookie-parser')())

      app.use session
        key: 'sid'
        secret: config.session.secret
        cookie: maxAge: config.session.lifetime
        store: new MongoStore(mongooseConnection: mongoose.connection)

      app.use(passport.initialize())
      app.use(passport.session())

      app.use(injectGetAsset)
      app.use(env.create)
      app.use(generateEnv)
      app.use(updateUserSession)

    postRouteMiddleware = ->
      if config.debug
        app.use(require('errorhandler')(dumpExceptions: true, showStack: true))
      else
        app.use(require('compression')())


    #=====================================================================================
    # Start listening
    #=====================================================================================
    app.enable('trust proxy') # usually sitting behind nginx
    app.disable('x-powered-by')

    app.set('port', config.port)
    app.set('views', "#{__dirname}/app/templates")
    app.set('view engine', 'jade')
    app.set('json spaces', 2) if config.debug
    app.set('socket', socket)

    mongoose.connection.once 'open', ->
      log('MongoDB connection established', 'cyan')

      generateTemplateGlobals()

      preRouteMiddleware()
      server.use(app) # Fire up the server, all the routes go here
      postRouteMiddleware()

      app_root = "http://#{config.hostname}:#{config.port}"

      if config.ip
        http.listen(app.get('port'), config.ip, -> log("Server listening on #{app_root} (bound to ip: #{config.ip})", 'cyan'))
      else
        http.listen(app.get('port'), -> log("Server listening on #{app_root} (unbound)", 'cyan'))

With only 1 fork running it works perfectly, though socket.io seems to use polling instead of websocket as the transport.
[screenshot (2014-07-05, 2:00:27 pm): https://cloud.githubusercontent.com/assets/789031/3486477/1bb70bd8-043c-11e4-89d5-f1f81be65311.png]

Connection to redis is successful:
[screenshot (2014-07-05, 2:04:26 pm)]

When I start 4 forks, socket.io can't finish its handshakes anymore. What am I doing wrong?
[screenshot (2014-07-05, 1:56:37 pm): https://cloud.githubusercontent.com/assets/789031/3486476/185c2838-043c-11e4-8987-308bda965336.png]

@shantanuthatte

Hi,

You need to use sticky sessions. Refer to
http://socket.io/docs/using-multiple-nodes/

The problem is that with multiple listeners on the same port, consecutive requests from a client do not go back to the same worker they started with, hence the incomplete-handshake errors.

I am currently using nginx as a forwarding proxy in front of multiple Socket.IO servers (allowing me to load balance across multiple physical servers), which communicate with each other using redis pub/sub (socket.io-redis).
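
For illustration, a minimal version of that kind of setup could look like the sketch below. This is an assumption on my part, not config from this thread: the upstream name and ports are made up. ip_hash pins each client IP to one backend, so every request of a given handshake reaches the same process:

upstream socketio_backends {
    ip_hash;                      # same client IP always hits the same backend
    server 127.0.0.1:3000;        # assumed worker port
    server 127.0.0.1:3001;        # assumed worker port
}

server {
    listen 80;

    location / {
        proxy_pass http://socketio_backends;
        proxy_http_version 1.1;                  # needed for the websocket upgrade
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}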

I will reply with a detailed answer when I get to my PC tomorrow.

Regards,
Shantanu Thatte

@shantanuthatte

Also, can you run the server with debug output enabled:
DEBUG=* node <file.js>

@max-degterev
Author

@shantanuthatte I thought that the redis adapter does the same job as sticky sessions, just via redis. The problem is that sticky sessions don't do anything for me either :(

@shantanuthatte

@suprMax The Socket.IO client first connects to the server with a regular HTTP request and gets a session key. Say this was issued by worker 1. The client then makes an XHR polling request, which can land at any worker; the other workers will not have that session id, so the handshake fails on the client and it reconnects.

To avoid this you need shared state that holds all the client information, and even with redis pub/sub there are windows (when redis hasn't yet delivered the publish and the XHR request has already arrived) where synchronizing through an external service would fail.

The redis adapter is primarily used to send messages to multiple clients connected across multiple workers.
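
As a minimal sketch of what that means (an assumed standalone example, not code from this issue): with the adapter attached, a broadcast emitted in one worker also reaches clients whose connections live in other workers.

# hypothetical example: each cluster worker would run something like this
http = require('http').Server()
io = require('socket.io')(http, serveClient: false)
io.adapter(require('socket.io-redis')(host: 'localhost', port: 6379))

io.on 'connection', (client)->
  client.on 'shout', (msg)->
    # io.emit is fanned out through redis pub/sub, so clients held
    # by other workers receive the event too
    io.emit('announcement', "#{msg} (via worker #{process.pid})")

http.listen(3000)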

For details run redis-cli and then MONITOR. Now connect using multiple clients and you can see the data exchanged between the workers.
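
Something like this (a hypothetical session; the exact channel names and payloads depend on your setup):

$ redis-cli
127.0.0.1:6379> MONITOR
OK

Each PUBLISH command that scrolls by while clients exchange messages is one worker fanning a packet out to the others over pub/sub.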

@max-degterev
Author

@shantanuthatte thanks for your reply, but I'm still confused. From what you said I take it that the redis adapter will use redis to store sessions, and therefore using the cluster module shouldn't break socket.io anymore. But it does. So the question remains: what am I doing wrong? :D I know the redis connection is OK and redis is working...

@max-degterev
Author

Ok, I re-read the documentation and it seems that what I want is indeed sticky sessions. Unfortunately it's not maintained anymore, and I wanted a quick drop-in solution for now. I think I will use nginx load balancing for this project and sort out the clustering later. Thanks for your help!
