forked from Psiphon-Labs/psiphon-tunnel-core
-
Notifications
You must be signed in to change notification settings - Fork 0
/
osl.go
1567 lines (1316 loc) · 48 KB
/
osl.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* Copyright (c) 2016, Psiphon Inc.
* All rights reserved.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
// Package osl implements the Obfuscated Server List (OSL) mechanism. This
// mechanism is a method of distributing server lists only to clients that
// demonstrate certain behavioral traits. Clients are seeded with Server
// List Obfuscation Keys (SLOKs) as they meet the configured criteria. These
// keys are stored and later combined to assemble keys to decrypt out-of-band
// distributed OSL files that contain server lists.
//
// This package contains the core routines used in psiphond (to track client
// traits and issue SLOKs), clients (to manage SLOKs and decrypt OSLs), and
// automation (to create OSLs for distribution).
package osl
import (
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/md5"
"crypto/sha256"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/url"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/crypto/nacl/secretbox"
"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/sss"
)
const (
	// KEY_LENGTH_BYTES is the required byte length of scheme master
	// keys and seed spec IDs (validated in LoadConfig).
	KEY_LENGTH_BYTES = 32

	// REGISTRY_FILENAME is the pave file name of the OSL registry.
	REGISTRY_FILENAME = "osl-registry"

	// OSL_FILENAME_FORMAT is the pave file name format for OSL files,
	// parameterized by the hex-encoded OSL ID.
	OSL_FILENAME_FORMAT = "osl-%s"
)
// Config is an OSL configuration, which consists of a list of schemes.
// The Reload function supports hot reloading of rules data while the
// process is running.
type Config struct {
	common.ReloadableFile

	// Schemes is the list of OSL seeding and distribution schemes.
	// It is replaced wholesale on config reload; see NewConfig.
	Schemes []*Scheme
}
// Scheme defines a OSL seeding and distribution strategy. SLOKs to
// decrypt OSLs are issued based on client network activity -- defined
// in the SeedSpecs -- and time. OSLs are created for periods of time
// and can be decrypted by clients that are seeded with a sufficient
// selection of SLOKs for that time period. Distribution of server
// entries to OSLs is delegated to automation.
type Scheme struct {

	// Epoch is the start time of the scheme, the start time of the
	// first OSL and when SLOKs will first be issued. It must be
	// specified in UTC and must be a multiple of SeedPeriodNanoseconds.
	Epoch string

	// Regions is a list of client country codes this scheme applies to.
	// If empty, the scheme applies to all regions.
	Regions []string

	// PropagationChannelIDs is a list of client propagation channel IDs
	// this scheme applies to. Propagation channel IDs are an input
	// to SLOK key derivation.
	PropagationChannelIDs []string

	// MasterKey is the base random key used for SLOK key derivation. It
	// must be unique for each scheme. It must be 32 random bytes, base64
	// encoded.
	MasterKey []byte

	// SeedSpecs is the set of different client network activity patterns
	// that will result in issuing SLOKs. For a given time period, a distinct
	// SLOK is issued for each SeedSpec.
	// Duplicate subnets may appear in multiple SeedSpecs.
	SeedSpecs []*SeedSpec

	// SeedSpecThreshold is the threshold scheme for combining SLOKs to
	// decrypt an OSL. For any fixed time period, at least K (threshold) of
	// N (total) SLOKs from the N SeedSpecs must be seeded for a client to be
	// able to reassemble the OSL key.
	// Limitation: thresholds must be at least 2.
	SeedSpecThreshold int

	// SeedPeriodNanoseconds is the time period granularity of SLOKs.
	// New SLOKs are issued every SeedPeriodNanoseconds. Client progress
	// towards activity levels is reset at the end of each period.
	SeedPeriodNanoseconds int64

	// KeySplits is the time period threshold scheme layered on top of the
	// SeedSpecThreshold scheme for combining SLOKs to decrypt an OSL.
	// There must be at least one level. For one level, any K (threshold) of
	// N (total) SeedSpec SLOK groups must be sufficiently seeded for a client
	// to be able to reassemble the OSL key. When an additional level is
	// specified, then K' of N' groups of N of K SeedSpec SLOK groups must be
	// sufficiently seeded. And so on. The first level in the list is the
	// lowest level. The time period for OSLs is determined by the totals in
	// the KeySplits.
	//
	// Example:
	//
	//   SeedSpecs = <3 specs>
	//   SeedSpecThreshold = 2
	//   SeedPeriodNanoseconds = 100,000,000 = 100 milliseconds
	//   SeedPeriodKeySplits = [{10, 7}, {60, 5}]
	//
	// In this scheme, up to 3 distinct SLOKs, one per spec, are issued
	// every 100 milliseconds.
	//
	// Distinct OSLs are paved for every minute (60 seconds). Each OSL
	// key is split such that, for those 60 seconds, a client must seed
	// 2/3 spec SLOKs for 7 of 10 consecutive 100 ms. time periods within
	// a second, for any 5 of 60 seconds within the minute.
	//
	SeedPeriodKeySplits []KeySplit

	// The following fields are ephemeral state.

	// epoch is the parsed Epoch; set by LoadConfig.
	epoch time.Time

	// subnetLookups holds one subnet lookup per SeedSpec, in the same
	// order as SeedSpecs; built by LoadConfig.
	subnetLookups []common.SubnetLookup

	// derivedSLOKCache caches derived SLOKs, keyed by slokReference, to
	// avoid repeating CPU-intensive key derivation. Guarded by
	// derivedSLOKCacheMutex.
	derivedSLOKCacheMutex sync.RWMutex
	derivedSLOKCache      map[slokReference]*SLOK
}
// SeedSpec defines a client traffic pattern that results in a seeded SLOK.
// For each time period, a unique SLOK is issued to a client that meets the
// traffic levels specified in Targets. All upstream port forward traffic to
// UpstreamSubnets is counted towards the targets.
//
// ID is a SLOK key derivation component and must be 32 random bytes, base64
// encoded. UpstreamSubnets is a list of CIDRs. Description is not used; it's
// for JSON config file comments.
type SeedSpec struct {
	// Description is a human-readable label; not used by the code.
	Description string

	// ID is the seed spec's key derivation component (32 bytes).
	ID []byte

	// UpstreamSubnets is the list of CIDRs whose traffic counts
	// towards Targets.
	UpstreamSubnets []string

	// Targets is the traffic threshold that must be met to seed a SLOK.
	Targets TrafficValues
}
// TrafficValues defines a client traffic level that seeds a SLOK.
// BytesRead and BytesWritten are the minimum bytes transferred counts to
// seed a SLOK. Both UDP and TCP data will be counted towards these totals.
// PortForwardDurationNanoseconds is the duration that a TCP or UDP port
// forward is active (not connected, in the UDP case). All threshold
// settings must be met to seed a SLOK; any threshold may be set to 0 to
// be trivially satisfied.
//
// Note: the fields are accessed with sync/atomic operations when a
// TrafficValues is used as a progress accumulator; they must remain
// int64 for 64-bit alignment on 32-bit platforms.
type TrafficValues struct {
	BytesRead                      int64
	BytesWritten                   int64
	PortForwardDurationNanoseconds int64
}
// KeySplit defines a secret key splitting scheme where the secret is split
// into N (total) shares and any K (threshold) of N shares must be known
// to reconstruct the split secret.
type KeySplit struct {
	Total     int
	Threshold int
}
// ClientSeedState tracks the progress of a client towards seeding SLOKs
// across all schemes the client qualifies for.
type ClientSeedState struct {
	// propagationChannelID is the client's propagation channel ID, an
	// input to SLOK derivation.
	propagationChannelID string

	// seedProgress holds one tracker per matching scheme; built once in
	// NewClientSeedState and read-only thereafter.
	seedProgress []*ClientSeedProgress

	// mutex guards signalIssueSLOKs, issuedSLOKs, and payloadSLOKs.
	mutex sync.Mutex

	// signalIssueSLOKs receives non-blocking signals when a new SLOK may
	// be issuable; cleared by Hibernate and restored by Resume.
	signalIssueSLOKs chan struct{}

	// issuedSLOKs records all SLOKs issued this session, keyed by SLOK
	// ID, so they are not re-added to the payload.
	issuedSLOKs map[string]*SLOK

	// payloadSLOKs accumulates SLOKs to send to the client; reset by
	// ClearSeedPayload.
	payloadSLOKs []*SLOK
}
// ClientSeedProgress tracks client progress towards seeding SLOKs for
// a particular scheme.
type ClientSeedProgress struct {
	// Note: 64-bit ints used with atomic operations are placed
	// at the start of struct to ensure 64-bit alignment.
	// (https://golang.org/pkg/sync/atomic/#pkg-note-BUG)

	// progressSLOKTime is the SLOK time period (UnixNano, truncated to
	// the seed period) that the current progress counts against;
	// accessed atomically.
	progressSLOKTime int64

	// scheme is the scheme this progress applies to.
	scheme *Scheme

	// trafficProgress holds one accumulator per SeedSpec, in the same
	// order as scheme.SeedSpecs.
	trafficProgress []*TrafficValues
}
// ClientSeedPortForward maps a client port forward, which is relaying
// traffic to a specific upstream address, to all seed state progress
// counters for SeedSpecs with subnets containing the upstream address.
// As traffic is relayed through the port forwards, the bytes transferred
// and duration count towards the progress of these SeedSpecs and
// associated SLOKs.
type ClientSeedPortForward struct {
	// state is the owning client's seed state.
	state *ClientSeedState

	// progressReferences indexes the progress accumulators to update
	// for this port forward's traffic.
	progressReferences []progressReference
}
// progressReference points to a particular ClientSeedProgress and
// TrafficValues to update with traffic events for a
// ClientSeedPortForward.
type progressReference struct {
	// seedProgressIndex indexes ClientSeedState.seedProgress.
	seedProgressIndex int

	// trafficProgressIndex indexes ClientSeedProgress.trafficProgress
	// (and, in parallel, scheme.SeedSpecs and scheme.subnetLookups).
	trafficProgressIndex int
}
// slokReference uniquely identifies a SLOK by specifying all the fields
// used to derive the SLOK secret key and ID.
// Note: SeedSpecID is not a []byte as slokReference is used as a map key.
type slokReference struct {
	PropagationChannelID string
	SeedSpecID           string
	Time                 time.Time
}
// SLOK is a seeded SLOK issued to a client. The client will store the
// SLOK in its local database; look it up by ID when checking which OSLs it
// can reassemble keys for; and use the key material to reassemble OSL
// file keys.
type SLOK struct {
	ID  []byte
	Key []byte
}
// SeedPayload is the list of seeded SLOKs sent to a client.
type SeedPayload struct {
	SLOKs []*SLOK
}
// NewConfig initializes a Config with the settings in the specified
// file. The returned Config supports hot reloading: on reload, the new
// file content is fully validated before the live schemes are replaced.
func NewConfig(filename string) (*Config, error) {

	config := &Config{}

	config.ReloadableFile = common.NewReloadableFile(
		filename,
		true,
		func(fileContent []byte, _ time.Time) error {
			// Parse and validate before touching the live config.
			parsed, err := LoadConfig(fileContent)
			if err != nil {
				return common.ContextError(err)
			}
			// Modify actual traffic rules only after validation
			config.Schemes = parsed.Schemes
			return nil
		})

	if _, err := config.Reload(); err != nil {
		return nil, common.ContextError(err)
	}

	return config, nil
}
// LoadConfig loads, validates, and initializes a JSON encoded OSL
// configuration. On success, every scheme's ephemeral state (parsed
// epoch, subnet lookups, SLOK cache) is initialized and ready for use.
func LoadConfig(configJSON []byte) (*Config, error) {
	var config Config
	err := json.Unmarshal(configJSON, &config)
	if err != nil {
		return nil, common.ContextError(err)
	}
	var previousEpoch time.Time
	for _, scheme := range config.Schemes {
		epoch, err := time.Parse(time.RFC3339, scheme.Epoch)
		if err != nil {
			return nil, common.ContextError(fmt.Errorf("invalid epoch format: %s", err))
		}
		// The epoch must already be expressed in UTC: converting to UTC
		// must be a no-op.
		if epoch.UTC() != epoch {
			return nil, common.ContextError(errors.New("invalid epoch timezone"))
		}
		// The epoch must fall exactly on a seed period boundary.
		if epoch.Round(time.Duration(scheme.SeedPeriodNanoseconds)) != epoch {
			return nil, common.ContextError(errors.New("invalid epoch period"))
		}
		// Schemes must be listed in ascending (non-decreasing) epoch order.
		if epoch.Before(previousEpoch) {
			return nil, common.ContextError(errors.New("invalid epoch order"))
		}
		previousEpoch = epoch
		// Initialize the scheme's ephemeral state.
		scheme.epoch = epoch
		scheme.subnetLookups = make([]common.SubnetLookup, len(scheme.SeedSpecs))
		scheme.derivedSLOKCache = make(map[slokReference]*SLOK)
		if len(scheme.MasterKey) != KEY_LENGTH_BYTES {
			return nil, common.ContextError(errors.New("invalid master key"))
		}
		for index, seedSpec := range scheme.SeedSpecs {
			if len(seedSpec.ID) != KEY_LENGTH_BYTES {
				return nil, common.ContextError(errors.New("invalid seed spec ID"))
			}
			// TODO: check that subnets do not overlap, as required by SubnetLookup
			subnetLookup, err := common.NewSubnetLookup(seedSpec.UpstreamSubnets)
			if err != nil {
				return nil, common.ContextError(fmt.Errorf("invalid upstream subnets: %s", err))
			}
			scheme.subnetLookups[index] = subnetLookup
		}
		// The seed spec threshold and every seed period key split must be
		// a viable split (see isValidShamirSplit).
		if !isValidShamirSplit(len(scheme.SeedSpecs), scheme.SeedSpecThreshold) {
			return nil, common.ContextError(errors.New("invalid seed spec key split"))
		}
		if len(scheme.SeedPeriodKeySplits) < 1 {
			return nil, common.ContextError(errors.New("invalid seed period key split count"))
		}
		for _, keySplit := range scheme.SeedPeriodKeySplits {
			if !isValidShamirSplit(keySplit.Total, keySplit.Threshold) {
				return nil, common.ContextError(errors.New("invalid seed period key split"))
			}
		}
	}
	return &config, nil
}
// NewClientSeedState creates a new client seed state to track
// client progress towards seeding SLOKs. psiphond maintains one
// ClientSeedState for each connected client.
//
// A signal is sent on signalIssueSLOKs when sufficient progress
// has been made that a new SLOK *may* be issued. psiphond will
// receive the signal and then call GetClientSeedPayload/IssueSLOKs
// to issue SLOKs, generate payload, and send to the client. The
// sender will not block sending to signalIssueSLOKs; the channel
// should be appropriately buffered.
func (config *Config) NewClientSeedState(
	clientRegion, propagationChannelID string,
	signalIssueSLOKs chan struct{}) *ClientSeedState {
	config.ReloadableFile.RLock()
	defer config.ReloadableFile.RUnlock()
	state := &ClientSeedState{
		propagationChannelID: propagationChannelID,
		signalIssueSLOKs:     signalIssueSLOKs,
		issuedSLOKs:          make(map[string]*SLOK),
		payloadSLOKs:         nil,
	}
	for _, scheme := range config.Schemes {
		// All matching schemes are selected.
		// Note: this implementation assumes a few simple schemes. For more
		// schemes with many propagation channel IDs or region filters, use
		// maps for more efficient lookup.
		// A scheme matches when its epoch has started, its propagation
		// channel list contains the client's ID, and its region filter is
		// empty or contains the client's region.
		if scheme.epoch.Before(time.Now().UTC()) &&
			common.Contains(scheme.PropagationChannelIDs, propagationChannelID) &&
			(len(scheme.Regions) == 0 || common.Contains(scheme.Regions, clientRegion)) {
			// Empty progress is initialized up front for all seed specs. Once
			// created, the progress structure is read-only (the slice, not the
			// TrafficValue fields); this permits lock-free operation.
			trafficProgress := make([]*TrafficValues, len(scheme.SeedSpecs))
			for index := 0; index < len(scheme.SeedSpecs); index++ {
				trafficProgress[index] = &TrafficValues{}
			}
			seedProgress := &ClientSeedProgress{
				scheme:           scheme,
				progressSLOKTime: getSLOKTime(scheme.SeedPeriodNanoseconds),
				trafficProgress:  trafficProgress,
			}
			state.seedProgress = append(state.seedProgress, seedProgress)
		}
	}
	return state
}
// Hibernate clears references to short-lived objects (currently,
// signalIssueSLOKs) so that a ClientSeedState can be stored for
// later resumption without blocking garbage collection of the
// short-lived objects.
//
// The ClientSeedState will still hold references to its Config;
// the caller is responsible for discarding hibernated seed states
// when the config changes.
//
// The caller should ensure that all ClientSeedPortForwards
// associated with this ClientSeedState are closed before
// hibernation.
func (state *ClientSeedState) Hibernate() {
	state.mutex.Lock()
	state.signalIssueSLOKs = nil
	state.mutex.Unlock()
}
// Resume resumes a hibernated ClientSeedState by resetting the required
// objects (currently, signalIssueSLOKs) cleared by Hibernate.
func (state *ClientSeedState) Resume(
	signalIssueSLOKs chan struct{}) {
	state.mutex.Lock()
	state.signalIssueSLOKs = signalIssueSLOKs
	state.mutex.Unlock()
}
// NewClientSeedPortForward creates a new client port forward
// traffic progress tracker. Port forward progress reported to the
// ClientSeedPortForward is added to seed state progress for all
// seed specs containing upstreamIPAddress in their subnets.
// The return value will be nil when activity for upstreamIPAddress
// does not count towards any progress.
// NewClientSeedPortForward may be invoked concurrently by many
// psiphond port forward establishment goroutines.
func (state *ClientSeedState) NewClientSeedPortForward(
	upstreamIPAddress net.IP) *ClientSeedPortForward {
	// Concurrency: access to ClientSeedState is unsynchronized
	// but references only read-only fields.
	// No matching schemes: this client seeds no SLOKs.
	if len(state.seedProgress) == 0 {
		return nil
	}
	var progressReferences []progressReference
	// Determine which seed spec subnets contain upstreamIPAddress
	// and point to the progress for each. When progress is reported,
	// it is added directly to all of these TrafficValues instances.
	// Assumes state.progress entries correspond 1-to-1 with
	// state.scheme.subnetLookups.
	// Note: this implementation assumes a small number of schemes and
	// seed specs. For larger numbers, instead of N SubnetLookups, create
	// a single SubnetLookup which returns, for a given IP address, all
	// matching subnets and associated seed specs.
	for seedProgressIndex, seedProgress := range state.seedProgress {
		for trafficProgressIndex, subnetLookup := range seedProgress.scheme.subnetLookups {
			if subnetLookup.ContainsIPAddress(upstreamIPAddress) {
				progressReferences = append(
					progressReferences,
					progressReference{
						seedProgressIndex:    seedProgressIndex,
						trafficProgressIndex: trafficProgressIndex,
					})
			}
		}
	}
	// nil means no seed spec subnet contains the upstream address.
	if progressReferences == nil {
		return nil
	}
	return &ClientSeedPortForward{
		state:              state,
		progressReferences: progressReferences,
	}
}
// sendIssueSLOKsSignal performs a non-blocking send on the client's
// signalIssueSLOKs channel, when one is set (it is cleared during
// hibernation). If the receiver is not ready, the signal is dropped
// rather than stalling the caller; the channel should be buffered by
// the receiver to mitigate this.
func (state *ClientSeedState) sendIssueSLOKsSignal() {
	state.mutex.Lock()
	defer state.mutex.Unlock()
	if state.signalIssueSLOKs != nil {
		// struct{}{} is the idiomatic empty-struct value
		// (replaces the original's *new(struct{})).
		select {
		case state.signalIssueSLOKs <- struct{}{}:
		default:
		}
	}
}
// UpdateProgress adds port forward bytes transferred and duration to
// all seed spec progresses associated with the port forward.
// If UpdateProgress is invoked after the SLOK time period has rolled
// over, any pending seeded SLOKs are issued and all progress is reset.
// UpdateProgress may be invoked concurrently by many psiphond port
// relay goroutines. The implementation of UpdateProgress prioritizes
// not blocking port forward relaying; a consequence of this lock-free
// design is that progress reported at the exact time of SLOK time period
// rollover may be dropped.
func (portForward *ClientSeedPortForward) UpdateProgress(
	bytesRead, bytesWritten int64, durationNanoseconds int64) {
	// Concurrency: non-blocking -- access to ClientSeedState is unsynchronized
	// to read-only fields, atomic, or channels, except in the case of a time
	// period rollover, in which case a mutex is acquired.
	for _, progressReference := range portForward.progressReferences {
		seedProgress := portForward.state.seedProgress[progressReference.seedProgressIndex]
		trafficProgress := seedProgress.trafficProgress[progressReference.trafficProgressIndex]
		slokTime := getSLOKTime(seedProgress.scheme.SeedPeriodNanoseconds)
		// If the SLOK time period has changed since progress was last recorded,
		// call issueSLOKs which will issue any SLOKs for that past time period
		// and then clear all progress. Progress will then be recorded for the
		// current time period.
		// As it acquires the state mutex, issueSLOKs may stall other port
		// forwards for this client. The delay is minimized by SLOK caching,
		// which avoids redundant crypto operations.
		if slokTime != atomic.LoadInt64(&seedProgress.progressSLOKTime) {
			portForward.state.mutex.Lock()
			portForward.state.issueSLOKs()
			portForward.state.mutex.Unlock()
			// Call to issueSLOKs may have issued new SLOKs. Note that
			// this will only happen if the time period rolls over with
			// sufficient progress pending while the signalIssueSLOKs
			// receiver did not call IssueSLOKs soon enough.
			portForward.state.sendIssueSLOKsSignal()
		}
		// Add directly to the permanent TrafficValues progress accumulators
		// for the state's seed specs. Concurrently, other port forwards may
		// be adding to the same accumulators. Also concurrently, another
		// goroutine may be invoking issueSLOKs, which zeros all the accumulators.
		// As a consequence, progress may be dropped at the exact time of
		// time period rollover.
		seedSpec := seedProgress.scheme.SeedSpecs[progressReference.trafficProgressIndex]
		// Record whether targets were already met before this update, so a
		// signal is sent only on the transition to "targets met".
		alreadyExceedsTargets := trafficProgress.exceeds(&seedSpec.Targets)
		atomic.AddInt64(&trafficProgress.BytesRead, bytesRead)
		atomic.AddInt64(&trafficProgress.BytesWritten, bytesWritten)
		atomic.AddInt64(&trafficProgress.PortForwardDurationNanoseconds, durationNanoseconds)
		// With the target newly met for a SeedSpec, a new
		// SLOK *may* be issued.
		if !alreadyExceedsTargets && trafficProgress.exceeds(&seedSpec.Targets) {
			portForward.state.sendIssueSLOKsSignal()
		}
	}
}
// exceeds reports whether every traffic counter in lhs meets or exceeds
// the corresponding counter in rhs. Counters are read with atomic loads,
// so exceeds may be called concurrently with progress updates.
func (lhs *TrafficValues) exceeds(rhs *TrafficValues) bool {
	if atomic.LoadInt64(&lhs.BytesRead) < atomic.LoadInt64(&rhs.BytesRead) {
		return false
	}
	if atomic.LoadInt64(&lhs.BytesWritten) < atomic.LoadInt64(&rhs.BytesWritten) {
		return false
	}
	return atomic.LoadInt64(&lhs.PortForwardDurationNanoseconds) >=
		atomic.LoadInt64(&rhs.PortForwardDurationNanoseconds)
}
// issueSLOKs checks client progress against each candidate seed spec
// and seeds SLOKs when the client traffic levels are achieved. After
// checking progress, and if the SLOK time period has changed since
// progress was last recorded, progress is reset. Partial, insufficient
// progress is intentionally dropped when the time period rolls over.
// Derived SLOKs are cached to avoid redundant CPU intensive operations.
// All issued SLOKs are retained in the client state for the duration
// of the client's session.
func (state *ClientSeedState) issueSLOKs() {
	// Concurrency: the caller must lock state.mutex.
	if len(state.seedProgress) == 0 {
		return
	}
	for _, seedProgress := range state.seedProgress {
		// The SLOK time period the current progress counts against.
		progressSLOKTime := time.Unix(0, seedProgress.progressSLOKTime)
		for index, trafficProgress := range seedProgress.trafficProgress {
			seedSpec := seedProgress.scheme.SeedSpecs[index]
			if trafficProgress.exceeds(&seedSpec.Targets) {
				ref := &slokReference{
					PropagationChannelID: state.propagationChannelID,
					SeedSpecID:           string(seedSpec.ID),
					Time:                 progressSLOKTime,
				}
				// Fast path: reuse a previously derived SLOK from the
				// scheme-level cache (shared across clients).
				seedProgress.scheme.derivedSLOKCacheMutex.RLock()
				slok, ok := seedProgress.scheme.derivedSLOKCache[*ref]
				seedProgress.scheme.derivedSLOKCacheMutex.RUnlock()
				if !ok {
					slok = seedProgress.scheme.deriveSLOK(ref)
					seedProgress.scheme.derivedSLOKCacheMutex.Lock()
					seedProgress.scheme.derivedSLOKCache[*ref] = slok
					seedProgress.scheme.derivedSLOKCacheMutex.Unlock()
				}
				// Previously issued SLOKs are not re-added to
				// the payload.
				if state.issuedSLOKs[string(slok.ID)] == nil {
					state.issuedSLOKs[string(slok.ID)] = slok
					state.payloadSLOKs = append(state.payloadSLOKs, slok)
				}
			}
		}
		slokTime := getSLOKTime(seedProgress.scheme.SeedPeriodNanoseconds)
		if slokTime != atomic.LoadInt64(&seedProgress.progressSLOKTime) {
			atomic.StoreInt64(&seedProgress.progressSLOKTime, slokTime)
			// The progress map structure is not reset or modified; instead
			// the mapped accumulator values are zeroed. Concurrently, port
			// forward relay goroutines continue to add to these accumulators.
			for _, trafficProgress := range seedProgress.trafficProgress {
				atomic.StoreInt64(&trafficProgress.BytesRead, 0)
				atomic.StoreInt64(&trafficProgress.BytesWritten, 0)
				atomic.StoreInt64(&trafficProgress.PortForwardDurationNanoseconds, 0)
			}
		}
	}
}
func getSLOKTime(seedPeriodNanoseconds int64) int64 {
return time.Now().UTC().Truncate(time.Duration(seedPeriodNanoseconds)).UnixNano()
}
// GetSeedPayload issues any pending SLOKs and returns the accumulated
// SLOKs for a given client. psiphond calls this when it receives
// signalIssueSLOKs, which is the trigger to check for new SLOKs.
// Note: caller must not modify the SLOKs in SeedPayload.SLOKs
// as these are shared data.
func (state *ClientSeedState) GetSeedPayload() *SeedPayload {
	state.mutex.Lock()
	defer state.mutex.Unlock()
	// Clients matching no scheme have no SLOK progress.
	if len(state.seedProgress) == 0 {
		return &SeedPayload{}
	}
	state.issueSLOKs()
	// Snapshot the payload slice with the built-in copy (replacing a
	// manual element-by-element loop) so later appends or a
	// ClearSeedPayload do not affect the returned payload. The SLOK
	// pointers themselves remain shared.
	sloks := make([]*SLOK, len(state.payloadSLOKs))
	copy(sloks, state.payloadSLOKs)
	return &SeedPayload{
		SLOKs: sloks,
	}
}
// ClearSeedPayload resets the accumulated SLOK payload (but not SLOK
// progress). psiphond calls this after the client has acknowledged
// receipt of a payload.
func (state *ClientSeedState) ClearSeedPayload() {
	state.mutex.Lock()
	state.payloadSLOKs = nil
	state.mutex.Unlock()
}
// deriveSLOK produces SLOK secret keys and IDs using HKDF-Expand
// defined in https://tools.ietf.org/html/rfc5869. The derivation
// inputs are the scheme master key, the propagation channel ID, the
// seed spec ID, and the SLOK time period.
func (scheme *Scheme) deriveSLOK(ref *slokReference) *SLOK {
	var timeBytes [8]byte
	binary.LittleEndian.PutUint64(timeBytes[:], uint64(ref.Time.UnixNano()))
	key := deriveKeyHKDF(
		scheme.MasterKey,
		[]byte(ref.PropagationChannelID),
		[]byte(ref.SeedSpecID),
		timeBytes[:])
	// TODO: is ID derivation cryptographically sound?
	id := deriveKeyHKDF(scheme.MasterKey, key)
	return &SLOK{ID: id, Key: key}
}
// GetOSLDuration returns the total time duration of an OSL,
// which is a function of the scheme's SeedPeriodNanoSeconds,
// the duration of a single SLOK, and the scheme's SeedPeriodKeySplits,
// the number of SLOKs associated with an OSL.
func (scheme *Scheme) GetOSLDuration() time.Duration {
	// One OSL spans the product of all key split totals, in SLOK
	// time periods.
	periodsPerOSL := 1
	for _, split := range scheme.SeedPeriodKeySplits {
		periodsPerOSL *= split.Total
	}
	return time.Duration(int64(periodsPerOSL) * scheme.SeedPeriodNanoseconds)
}
// PaveFile describes an OSL data file to be paved to an out-of-band
// distribution drop site. There are two types of files: a registry,
// which describes how to assemble keys for OSLs, and the encrypted
// OSL files.
type PaveFile struct {
	Name     string
	Contents []byte
}
// Registry describes a set of OSL files.
type Registry struct {
	FileSpecs []*OSLFileSpec
}
// An OSLFileSpec includes an ID which is used to reference the
// OSL file and describes the key splits used to divide the OSL
// file key along with the SLOKs required to reassemble those keys.
//
// The MD5Sum field is a checksum of the contents of the OSL file
// to be used to skip redownloading previously downloaded files.
// MD5 is not cryptographically secure and this checksum is not
// relied upon for OSL verification. MD5 is used for compatibility
// with out-of-band distribution hosts.
type OSLFileSpec struct {
	// ID references the OSL file; hex-encoded into the OSL file name.
	ID []byte

	// KeyShares describes how to reassemble the OSL file key.
	KeyShares *KeyShares

	// MD5Sum is the checksum of the paved OSL file contents; may be
	// omitted (see Pave's omitMD5SumsSchemes).
	MD5Sum []byte
}
// KeyShares is a tree data structure which describes the
// key splits used to divide a secret key. BoxedShares are encrypted
// shares of the key, and #Threshold amount of decrypted BoxedShares
// are required to reconstruct the secret key. The keys for BoxedShares
// are either SLOKs (referenced by SLOK ID) or random keys that are
// themselves split as described in child KeyShares.
type KeyShares struct {
	Threshold   int
	BoxedShares [][]byte
	SLOKIDs     [][]byte
	KeyShares   []*KeyShares
}
// PaveLogInfo describes one OSL file produced by Pave; passed to the
// optional logCallback for each paved OSL.
type PaveLogInfo struct {
	FileName             string
	SchemeIndex          int
	PropagationChannelID string
	OSLID                string
	OSLTime              time.Time
	OSLDuration          time.Duration
	ServerEntryCount     int
}
// Pave creates the full set of OSL files, for all schemes in the
// configuration, to be dropped in an out-of-band distribution site.
// Only OSLs for the propagation channel ID associated with the
// distribution site are paved. This function is used by automation.
//
// The Name component of each file relates to the values returned by
// the client functions GetRegistryURL and GetOSLFileURL.
//
// Pave returns a pave file for the entire registry of all OSLs from
// epoch to endTime, and a pave file for each OSL. paveServerEntries is
// a map from hex-encoded OSL IDs to server entries to pave into that OSL.
// When entries are found, OSL will contain those entries, newline
// separated. Otherwise the OSL will still be issued, but be empty (unless
// the scheme is in omitEmptyOSLsSchemes).
//
// As OSLs outside the epoch-endTime range will no longer appear in
// the registry, Pave is intended to be used to create the full set
// of OSLs for a distribution site; i.e., not incrementally.
//
// Automation is responsible for consistently distributing server entries
// to OSLs in the case where OSLs are repaved in subsequent calls.
func (config *Config) Pave(
	endTime time.Time,
	propagationChannelID string,
	signingPublicKey string,
	signingPrivateKey string,
	paveServerEntries map[string][]string,
	omitMD5SumsSchemes []int,
	omitEmptyOSLsSchemes []int,
	logCallback func(*PaveLogInfo)) ([]*PaveFile, error) {
	config.ReloadableFile.RLock()
	defer config.ReloadableFile.RUnlock()
	var paveFiles []*PaveFile
	registry := &Registry{}
	for schemeIndex, scheme := range config.Schemes {
		if common.Contains(scheme.PropagationChannelIDs, propagationChannelID) {
			omitMD5Sums := common.ContainsInt(omitMD5SumsSchemes, schemeIndex)
			omitEmptyOSLs := common.ContainsInt(omitEmptyOSLsSchemes, schemeIndex)
			oslDuration := scheme.GetOSLDuration()
			// Walk OSL time periods from the scheme epoch up to endTime.
			oslTime := scheme.epoch
			for !oslTime.After(endTime) {
				firstSLOKTime := oslTime
				fileKey, fileSpec, err := makeOSLFileSpec(
					scheme, propagationChannelID, firstSLOKTime)
				if err != nil {
					return nil, common.ContextError(err)
				}
				hexEncodedOSLID := hex.EncodeToString(fileSpec.ID)
				serverEntryCount := len(paveServerEntries[hexEncodedOSLID])
				// Empty OSLs are skipped entirely (no registry entry, no
				// file) when the scheme is in omitEmptyOSLsSchemes.
				if serverEntryCount > 0 || !omitEmptyOSLs {
					registry.FileSpecs = append(registry.FileSpecs, fileSpec)
					// serverEntries will be "" when nothing is found in paveServerEntries
					serverEntries := strings.Join(paveServerEntries[hexEncodedOSLID], "\n")
					serverEntriesPackage, err := common.WriteAuthenticatedDataPackage(
						serverEntries,
						signingPublicKey,
						signingPrivateKey)
					if err != nil {
						return nil, common.ContextError(err)
					}
					// Encrypt the signed package with the OSL file key.
					boxedServerEntries, err := box(fileKey, serverEntriesPackage)
					if err != nil {
						return nil, common.ContextError(err)
					}
					if !omitMD5Sums {
						md5sum := md5.Sum(boxedServerEntries)
						fileSpec.MD5Sum = md5sum[:]
					}
					fileName := fmt.Sprintf(
						OSL_FILENAME_FORMAT, hexEncodedOSLID)
					paveFiles = append(paveFiles, &PaveFile{
						Name:     fileName,
						Contents: boxedServerEntries,
					})
					if logCallback != nil {
						logCallback(&PaveLogInfo{
							FileName:             fileName,
							SchemeIndex:          schemeIndex,
							PropagationChannelID: propagationChannelID,
							OSLID:                hexEncodedOSLID,
							OSLTime:              oslTime,
							OSLDuration:          oslDuration,
							ServerEntryCount:     serverEntryCount,
						})
					}
				}
				oslTime = oslTime.Add(oslDuration)
			}
		}
	}
	registryJSON, err := json.Marshal(registry)
	if err != nil {
		return nil, common.ContextError(err)
	}
	// The registry itself is signed and paved alongside the OSL files.
	registryPackage, err := common.WriteAuthenticatedDataPackage(
		base64.StdEncoding.EncodeToString(registryJSON),
		signingPublicKey,
		signingPrivateKey)
	if err != nil {
		return nil, common.ContextError(err)
	}
	paveFiles = append(paveFiles, &PaveFile{
		Name:     REGISTRY_FILENAME,
		Contents: registryPackage,
	})
	return paveFiles, nil
}
// CurrentOSLIDs returns a mapping from each propagation channel ID in the
// specified scheme to the hex-encoded OSL ID for the current time period.
func (config *Config) CurrentOSLIDs(schemeIndex int) (map[string]string, error) {

	config.ReloadableFile.RLock()
	defer config.ReloadableFile.RUnlock()

	if schemeIndex < 0 || schemeIndex >= len(config.Schemes) {
		return nil, common.ContextError(errors.New("invalid scheme index"))
	}

	scheme := config.Schemes[schemeIndex]

	// Truncate the time elapsed since the scheme epoch down to the start
	// of the current OSL period.
	oslDuration := scheme.GetOSLDuration()
	elapsed := time.Now().UTC().Sub(scheme.epoch)
	periodStart := scheme.epoch.Add((elapsed / oslDuration) * oslDuration)

	currentIDs := make(map[string]string)
	for _, propagationChannelID := range scheme.PropagationChannelIDs {
		_, fileSpec, err := makeOSLFileSpec(scheme, propagationChannelID, periodStart)
		if err != nil {
			return nil, common.ContextError(err)
		}
		currentIDs[propagationChannelID] = hex.EncodeToString(fileSpec.ID)
	}

	return currentIDs, nil
}
// makeOSLFileSpec creates an OSL file key, splits it according to the
// scheme's key splits, and sets the OSL ID as its first SLOK ID. The
// returned key is used to encrypt the OSL payload and then discarded;
// the key may be reassembled using the data in the KeyShares tree,
// given sufficient SLOKs.
func makeOSLFileSpec(
scheme *Scheme,
propagationChannelID string,
firstSLOKTime time.Time) ([]byte, *OSLFileSpec, error) {
ref := &slokReference{
PropagationChannelID: propagationChannelID,
SeedSpecID: string(scheme.SeedSpecs[0].ID),
Time: firstSLOKTime,
}
firstSLOK := scheme.deriveSLOK(ref)
oslID := firstSLOK.ID
// Note: previously, fileKey was a random key. Now, the key
// is derived from the master key and OSL ID. This deterministic
// derivation ensures that repeated paves of the same OSL
// with the same ID and same content yields the same MD5Sum
// to avoid wasteful downloads.
//
// Similarly, the shareKeys generated in divideKey and the Shamir
	// key splitting random polynomials are now both deterministically
// generated from a seeded CSPRNG. This ensures that the OSL
// registry remains identical for repeated paves of the same config
// and parameters.
//
// The split structure is added to the deterministic key
// derivation so that changes to the split configuration will not
// expose the same key material to different SLOK combinations.
splitStructure := make([]byte, 16*(1+len(scheme.SeedPeriodKeySplits)))
i := 0
binary.LittleEndian.PutUint64(splitStructure[i:], uint64(len(scheme.SeedSpecs)))
binary.LittleEndian.PutUint64(splitStructure[i+8:], uint64(scheme.SeedSpecThreshold))
i += 16
for _, keySplit := range scheme.SeedPeriodKeySplits {
binary.LittleEndian.PutUint64(splitStructure[i:], uint64(keySplit.Total))
binary.LittleEndian.PutUint64(splitStructure[i+8:], uint64(keySplit.Threshold))
i += 16
}
fileKey := deriveKeyHKDF(
scheme.MasterKey,
splitStructure,
[]byte("osl-file-key"),
oslID)
splitKeyMaterialSeed := deriveKeyHKDF(
scheme.MasterKey,
splitStructure,
[]byte("osl-file-split-key-material-seed"),
oslID)
keyMaterialReader, err := newSeededKeyMaterialReader(splitKeyMaterialSeed)
if err != nil {
return nil, nil, common.ContextError(err)
}
keyShares, err := divideKey(
scheme,
keyMaterialReader,
fileKey,