forked from tinode/chat
-
Notifications
You must be signed in to change notification settings - Fork 0
/
sessionstore.go
154 lines (126 loc) · 3.54 KB
/
sessionstore.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
/******************************************************************************
 *
 *  Description :
 *
 *    Management of live sessions: long polling, websocket, cluster and gRPC.
 *
 *****************************************************************************/
package main
import (
"container/list"
"log"
"net/http"
"sync"
"time"
"github.com/gorilla/websocket"
"github.com/tinode/chat/pbx"
"github.com/tinode/chat/server/store"
)
// SessionStore holds live sessions. Long polling sessions are stored in a linked list with
// most recent sessions on top. In addition all sessions are stored in a map indexed by session ID.
type SessionStore struct {
	// lock guards concurrent access to both lru and sessCache.
	lock sync.Mutex

	// Support for long polling sessions: a list of sessions sorted by last access time.
	// Needed for cleaning abandoned sessions.
	lru *list.List
	// lifeTime is how long an untouched long polling session is kept before eviction.
	lifeTime time.Duration

	// All sessions indexed by session ID
	sessCache map[string]*Session
}
// NewSession creates a new session of the type inferred from the concrete
// type of conn, registers it in the store, and returns the session together
// with the number of live sessions in the store after registration.
// If sid is empty a new session ID is generated.
func (ss *SessionStore) NewSession(conn interface{}, sid string) (*Session, int) {
	var s Session
	s.sid = sid

	// Determine session protocol from the concrete connection type.
	switch c := conn.(type) {
	case *websocket.Conn:
		s.proto = WEBSOCK
		s.ws = c
	case http.ResponseWriter:
		s.proto = LPOLL
		// No need to store c for long polling, it changes with every request.
	case *ClusterNode:
		s.proto = CLUSTER
		s.clnode = c
	case pbx.Node_MessageLoopServer:
		s.proto = GRPC
		s.grpcnode = c
	default:
		s.proto = NONE
	}

	if s.proto != NONE {
		s.subs = make(map[string]*Subscription)
		s.send = make(chan interface{}, 256) // buffered
		s.stop = make(chan interface{}, 1)   // Buffered by 1 just to make it non-blocking
		s.detach = make(chan string, 64)     // buffered
	}

	s.lastTouched = time.Now()

	if s.sid == "" {
		s.sid = store.GetUidString()
	}

	ss.lock.Lock()

	ss.sessCache[s.sid] = &s

	if s.proto == LPOLL {
		// Only LP sessions need to be sorted by last active.
		s.lpTracker = ss.lru.PushFront(&s)

		// Evict expired long polling sessions from the cold end of the LRU list.
		expire := s.lastTouched.Add(-ss.lifeTime)
		for elem := ss.lru.Back(); elem != nil; elem = ss.lru.Back() {
			sess := elem.Value.(*Session)
			if sess.lastTouched.Before(expire) {
				ss.lru.Remove(elem)
				delete(ss.sessCache, sess.sid)
				sess.cleanUp()
			} else {
				break // don't need to traverse further
			}
		}
	}

	// Count sessions only after expired ones have been evicted, so the
	// returned value reflects sessions that are actually live.
	count := len(ss.sessCache)

	ss.lock.Unlock()

	return &s, count
}
// Get returns the session with the given ID, or nil if no such session
// exists. Fetching a long polling session also refreshes its position in
// the LRU list and its last-touched timestamp.
func (ss *SessionStore) Get(sid string) *Session {
	ss.lock.Lock()
	defer ss.lock.Unlock()

	sess := ss.sessCache[sid]
	if sess == nil {
		return nil
	}

	if sess.proto == LPOLL {
		// Touch the session: promote it in the LRU and refresh its timestamp.
		ss.lru.MoveToFront(sess.lpTracker)
		sess.lastTouched = time.Now()
	}

	return sess
}
// Delete removes the given session from the store and returns the number
// of sessions remaining.
func (ss *SessionStore) Delete(s *Session) int {
	ss.lock.Lock()

	delete(ss.sessCache, s.sid)
	if s.proto == LPOLL {
		// Long polling sessions are additionally tracked in the LRU list.
		ss.lru.Remove(s.lpTracker)
	}
	remaining := len(ss.sessCache)

	ss.lock.Unlock()

	return remaining
}
// Shutdown terminates the session store by signalling every local session
// to stop. No per-session cleanup is needed. Cluster sessions are skipped:
// their home servers are not being shut down.
func (ss *SessionStore) Shutdown() {
	ss.lock.Lock()
	defer ss.lock.Unlock()

	shutdown := NoErrShutdown(time.Now().UTC().Round(time.Millisecond))
	for _, sess := range ss.sessCache {
		if sess.stop == nil || sess.proto == CLUSTER {
			continue
		}
		sess.stop <- sess.serialize(shutdown)
	}

	log.Printf("SessionStore shut down, sessions terminated: %d", len(ss.sessCache))
}
// NewSessionStore creates a session store whose long polling sessions
// expire after the given lifetime of inactivity.
func NewSessionStore(lifetime time.Duration) *SessionStore {
	return &SessionStore{
		lru:       list.New(),
		lifeTime:  lifetime,
		sessCache: make(map[string]*Session),
	}
}