forked from quantum1423-dustbin/kirisurf-legacy
-
Notifications
You must be signed in to change notification settings - Fork 0
/
e2e_client.go
172 lines (165 loc) · 3.5 KB
/
e2e_client.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
// e2e_client.go
package main
import (
"io"
"sync"
"sync/atomic"
)
// e2e_client_ctx holds the shared state of one multiplexed end-to-end wire.
// Every field is a pointer or reference type so that the struct can be
// copied by value (it is used with value receivers throughout this file)
// while all copies still share the same underlying state.
type e2e_client_ctx struct {
	connid_chan chan int                 // pool of free connection IDs (pre-filled 0..65535)
	chan_table  map[int]chan e2e_segment // connid -> per-connection delivery channel; guarded by lock
	wire        *gobwire                 // the shared multiplexed wire to the remote side
	lock        *sync.RWMutex            // guards chan_table
	valid       *bool                    // false once the wire is dead; NOTE(review): read/written without sync — confirm intended
	dying       *bool                    // set when the context should die once refcount reaches zero
	refcount    *int32                   // number of currently attached clients (manipulated via sync/atomic)
}
// make_e2e_client_ctx wraps conn in a gob-encoded wire, pre-fills the pool of
// 65536 connection IDs, and starts the demultiplexer goroutine that routes
// incoming segments to the per-connection channels registered in chan_table.
// The returned context is shared (by value-copy of its pointer fields) among
// all clients attached via AttachClient.
func make_e2e_client_ctx(conn io.ReadWriteCloser) e2e_client_ctx {
	wire := newGobWire(conn)
	lock := new(sync.RWMutex)
	valid := new(bool)
	*valid = true
	chan_table := make(map[int]chan e2e_segment)
	// Pre-fill the ID pool so <-connid_chan never blocks until 65536
	// connections are simultaneously (or cumulatively) in use.
	connid_chan := make(chan int, 65536)
	for i := 0; i < 65536; i++ {
		connid_chan <- i
	}
	// Demultiplexer: pushes data from the wire onto the attached clients.
	go func() {
		for {
			if !*valid {
				return
			}
			newpkt, err := wire.Receive()
			if err != nil {
				*valid = false
				wire.destroy()
				return
			}
			// Look the destination channel up under the read lock:
			// AttachClient mutates chan_table under the write lock, and an
			// unsynchronized map read concurrent with a write is a data race
			// that can crash the Go runtime.
			lock.RLock()
			ch := chan_table[newpkt.Connid]
			lock.RUnlock()
			if ch == nil {
				// No client registered under this ID; drop the segment.
				continue
			}
			// NOTE(review): the channel can be closed by detach between the
			// unlock above and this send (pre-existing race) — confirm
			// whether a recover/ack protocol is needed here.
			ch <- newpkt
		}
	}()
	dying := new(bool)
	*dying = false
	refcount := new(int32)
	*refcount = 0
	return e2e_client_ctx{connid_chan, chan_table, wire, lock, valid, dying, refcount}
}
// AttachClient services one local SOCKS5 client over the shared wire.
// It performs the SOCKS5 handshake, claims a connection ID, registers a
// delivery channel with the demultiplexer, and then pumps data both ways
// (upstream in this goroutine, downstream in a helper goroutine) until
// either side closes. The client connection is always closed on return.
func (ctx e2e_client_ctx) AttachClient(client io.ReadWriteCloser) {
	defer client.Close()
	if !*ctx.valid {
		CRITICAL("Possible race condition: AttachClient called on invalid wire!")
		return
	}
	atomic.AddInt32(ctx.refcount, 1)
	defer func() {
		// Use the value returned by AddInt32: decrementing and then reading
		// *ctx.refcount separately is a race that can miss (or double-fire)
		// the zero-crossing when two clients leave concurrently.
		if atomic.AddInt32(ctx.refcount, -1) == 0 && *ctx.dying {
			DEBUG("Killing a subcircuit context due to refcount")
			*ctx.valid = false
			ctx.lock.RLock()
			for _, e := range ctx.chan_table {
				if e != nil {
					close(e)
				}
			}
			ctx.lock.RUnlock()
			ctx.wire.destroy()
		}
	}()
	// SOCKS5 stuff! Yay! The handshake yields the destination address the
	// local client wants to reach.
	remaddr, err := socks5_handshake(client)
	if err != nil {
		WARNING("Error encountered while doing socks5: %s", err.Error())
		return
	}
	DEBUG("SOCKS5 request to %s", remaddr)
	defer func() {
		DEBUG("Closed connection to %s", remaddr)
	}()
	// Obtain a connection ID from the pool.
	// NOTE(review): IDs are never returned to connid_chan, so a long-lived
	// context exhausts the pool after 65536 connections and blocks here —
	// confirm whether reuse is safe against late segments from the remote.
	connid := <-ctx.connid_chan
	ch := make(chan e2e_segment, 256)
	ctr := 0
	// Register our delivery channel so the demultiplexer can route to us.
	ctx.lock.Lock()
	ctx.chan_table[connid] = ch
	ctx.lock.Unlock()
	// Detach exactly once: unregister from the table and close our channel.
	var once sync.Once
	detach := func() {
		once.Do(func() {
			ctx.lock.Lock()
			// delete instead of storing nil: lookups still yield nil, but the
			// table no longer grows one dead entry per connection ever made.
			delete(ctx.chan_table, connid)
			close(ch)
			ctx.lock.Unlock()
		})
	}
	// Downstream pump: segments from the wire -> local client.
	go func() {
		defer client.Close()
		defer detach()
		for {
			if !*ctx.valid {
				ctx.wire.destroy()
				return
			}
			pkt, ok := <-ch
			if !ok {
				return
			}
			if pkt.Flag == E2E_CLOSE {
				return
			}
			n, err := client.Write(pkt.Body)
			if err != nil {
				return
			}
			incr_down_bytes(n)
			// Flow control: after every 256 delivered segments, ask the
			// remote to send more.
			ctr = (ctr + 1) % 256
			if ctr == 0 {
				DEBUG("Bucket drained in subcircuit, sending SENDMORE to remote")
				err = ctx.wire.Send(e2e_segment{E2E_SENDMORE, connid, []byte("")})
				if err != nil {
					// A send failure means the shared wire is dead. Tear it
					// down like every other send-failure path in this
					// function instead of panicking, which would kill the
					// whole process over one broken subcircuit.
					DEBUG("Dying since cannot into sendings: %s", err.Error())
					*ctx.valid = false
					ctx.wire.destroy()
					return
				}
			}
		}
	}()
	defer detach()
	// Upstream pump: announce the connection, then relay client -> wire.
	err = ctx.wire.Send(e2e_segment{E2E_OPEN, connid, []byte(remaddr)})
	if err != nil {
		CRITICAL(err.Error())
		return
	}
	for {
		if !*ctx.valid {
			DEBUG("Dying since ctx not valid!!!")
			return
		}
		buf := make([]byte, 16384)
		n, err := client.Read(buf)
		if err != nil {
			// Local side is done; tell the remote, then exit.
			err := ctx.wire.Send(e2e_segment{E2E_CLOSE, connid, []byte("")})
			if err != nil {
				DEBUG("Dying since cannot into sendings: %s", err.Error())
				*ctx.valid = false
				ctx.wire.destroy()
			}
			return
		}
		err = ctx.wire.Send(e2e_segment{E2E_DATA, connid, buf[:n]})
		if err != nil {
			DEBUG("Dying since cannot into sendings: %s", err.Error())
			*ctx.valid = false
			ctx.wire.destroy()
			return
		}
		incr_up_bytes(n)
	}
}