-
Notifications
You must be signed in to change notification settings - Fork 0
/
tunnels.go
200 lines (164 loc) · 5.37 KB
/
tunnels.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
package core
/*
Sliver Implant Framework
Copyright (C) 2019 Bishop Fox
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
import (
"crypto/rand"
"encoding/binary"
"errors"
"sync"
"time"
"github.com/starkzarn/glod/protobuf/rpcpb"
"github.com/starkzarn/glod/protobuf/glodpb"
"google.golang.org/protobuf/proto"
)
var (
	// Tunnels - Singleton registry for interacting with duplex tunnels;
	// maps tunnel IDs to active *Tunnel instances under a mutex.
	Tunnels = tunnels{
		tunnels: map[uint64]*Tunnel{},
		mutex:   &sync.Mutex{},
	}

	// ErrInvalidTunnelID - Returned when a tunnel ID does not resolve to
	// an active tunnel (see tunnels.Close).
	ErrInvalidTunnelID = errors.New("invalid tunnel ID")
)
const (
	// delayBeforeClose - Quiet period required before a scheduled tunnel
	// close actually fires (see tunnels.ScheduleClose). 10 seconds may be
	// overkill for a good connection, but it is good enough for less
	// stable ones.
	delayBeforeClose = 10 * time.Second
)
// Tunnel - Essentially just a mapping between a specific client and sliver
// with an identifier, these tunnels are full duplex. The server doesn't really
// care what data gets passed back and forth it just facilitates the connection.
type Tunnel struct {
	ID        uint64 // Random 64-bit tunnel identifier (see NewTunnelID)
	SessionID string // ID of the session this tunnel is attached to

	ToImplant           chan []byte // Data queued for the implant side (unbuffered)
	ToImplantSequence   uint64      // NOTE(review): sequence counter; incremented outside this chunk — confirm usage
	FromImplant         chan *glodpb.TunnelData // Data received from the implant (unbuffered)
	FromImplantSequence uint64

	Client rpcpb.SliverRPC_TunnelDataServer // gRPC stream back to the client

	mutex               *sync.RWMutex // Guards lastDataMessageTime
	lastDataMessageTime time.Time     // Last time data crossed this tunnel; drives ScheduleClose
}
// NewTunnel - Construct a fresh duplex tunnel bound to the given session.
// The last-message timestamp is seeded with the current time so that the
// close scheduler has a valid baseline from the moment the tunnel exists.
func NewTunnel(id uint64, sessionID string) *Tunnel {
	tunnel := &Tunnel{
		ID:        id,
		SessionID: sessionID,
		// Unbuffered channels: producers block until the peer drains.
		ToImplant:   make(chan []byte),
		FromImplant: make(chan *glodpb.TunnelData),
		mutex:       &sync.RWMutex{},
		// Must be initialized, otherwise ScheduleClose would see a zero time.
		lastDataMessageTime: time.Now(),
	}
	return tunnel
}
// setLastMessageTime - Stamp the tunnel with the current time under the
// write lock; consumed by GetLastMessageTime / ScheduleClose.
func (t *Tunnel) setLastMessageTime() {
	t.mutex.Lock()
	t.lastDataMessageTime = time.Now()
	t.mutex.Unlock()
}
// GetLastMessageTime - Read the timestamp of the most recent tunnel data
// message under the read lock.
func (t *Tunnel) GetLastMessageTime() time.Time {
	t.mutex.RLock()
	last := t.lastDataMessageTime
	t.mutex.RUnlock()
	return last
}
// SendDataFromImplant - Push a data message received from the implant onto
// the FromImplant channel. The last-message timestamp is refreshed both
// before and after the send (via defer), because the unbuffered channel can
// block for an arbitrary amount of time and the close scheduler must not
// treat that blocked interval as silence.
func (t *Tunnel) SendDataFromImplant(data *glodpb.TunnelData) {
	t.setLastMessageTime()
	defer t.setLastMessageTime()
	t.FromImplant <- data
}
// tunnels - Registry of all active tunnels; access to the map is serialized
// through mutex. Use the package-level Tunnels singleton.
type tunnels struct {
	tunnels map[uint64]*Tunnel // Active tunnels keyed by tunnel ID
	mutex   *sync.Mutex        // Guards the tunnels map
}
// Create - Create a new tunnel for the given session, register it in the
// tunnel map, and return it. Returns nil if the session ID does not resolve
// to a live session (previously this dereferenced a nil session and panicked
// the server).
func (t *tunnels) Create(sessionID string) *Tunnel {
	session := Sessions.Get(sessionID)
	if session == nil {
		// Unknown or stale session ID - nothing to attach a tunnel to.
		return nil
	}
	tunnel := NewTunnel(NewTunnelID(), session.ID)

	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.tunnels[tunnel.ID] = tunnel
	return tunnel
}
// ScheduleClose - Schedule a deferred close for a tunnel; must be run on its
// own goroutine. The tunnel is only closed once it has been quiet for at
// least delayBeforeClose since the last data message; otherwise the check is
// re-armed. This is _necessary_ because messages are processed
// asynchronously: if the close handler fires before a pending data handler,
// data is lost (this is what happens for socks and portfwd). Avoiding the
// delay entirely would require implant-side changes (e.g. sequencing for
// close messages), which we skip to keep implant compatibility. Trade-off:
// more stability at the cost of speed — in practice only the `shell`
// command is affected, hanging ~10 seconds on exit. Not a big deal.
func (t *tunnels) ScheduleClose(tunnelID uint64) {
	for {
		tunnel := t.Get(tunnelID)
		if tunnel == nil {
			return // Already gone
		}
		elapsed := time.Since(tunnel.GetLastMessageTime())
		coreLog.Printf("Scheduled close for channel %d (delta: %v)", tunnelID, elapsed)
		if elapsed >= delayBeforeClose {
			coreLog.Printf("Closing channel %d", tunnelID)
			t.Close(tunnelID)
			return
		}
		// Not quiet for long enough yet: wait out the remainder of the
		// delay (plus a second of slack), then re-check.
		coreLog.Printf("Rescheduling closing channel %d", tunnelID)
		time.Sleep(delayBeforeClose - elapsed + time.Second)
	}
}
// Close - Close a tunnel: notify the implant in-band, unregister the tunnel,
// and close both data channels. Prefer ScheduleClose unless you are 100%
// sure there is no more data left to receive.
func (t *tunnels) Close(tunnelID uint64) error {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	tunnel := t.tunnels[tunnelID]
	if tunnel == nil {
		return ErrInvalidTunnelID
	}
	// Build the in-band close notification for the implant.
	tunnelClose, err := proto.Marshal(&glodpb.TunnelData{
		TunnelID:  tunnel.ID,
		SessionID: tunnel.SessionID,
		Closed:    true,
	})
	if err != nil {
		return err
	}
	data, err := proto.Marshal(&glodpb.Envelope{
		Type: glodpb.MsgTunnelClose,
		Data: tunnelClose,
	})
	if err != nil {
		return err
	}
	// NOTE(review): this send is on an unbuffered channel while t.mutex is
	// still held; if nothing is draining ToImplant it blocks every other
	// tunnel operation — confirm a reader is always attached at this point.
	tunnel.ToImplant <- data // Send an in-band close to implant
	// Unregister first, then close both channels; order matters so no new
	// sender can pick this tunnel up after the channels are closed.
	delete(t.tunnels, tunnelID)
	close(tunnel.ToImplant)
	close(tunnel.FromImplant)
	return nil
}
// Get - Look up a tunnel by ID; returns nil if no such tunnel is registered.
func (t *tunnels) Get(tunnelID uint64) *Tunnel {
	t.mutex.Lock()
	tunnel := t.tunnels[tunnelID]
	t.mutex.Unlock()
	return tunnel
}
// NewTunnelID - Generate a new cryptographically random 64-bit tunnel
// identifier.
func NewTunnelID() uint64 {
	randBuf := make([]byte, 8)
	// The error from crypto/rand.Read was previously ignored; a failed read
	// would silently produce a zero/partial ID. An entropy-source failure is
	// unrecoverable, so treat it as fatal.
	if _, err := rand.Read(randBuf); err != nil {
		panic(err)
	}
	return binary.LittleEndian.Uint64(randBuf)
}