-
Notifications
You must be signed in to change notification settings - Fork 33
/
web.go
2280 lines (1979 loc) · 66.4 KB
/
web.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
This file supports the DVID REST API, breaking down URLs into
commands and massaging attached data into appropriate data types.
*/
package server
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"sort"
"strings"
"sync"
"time"
"github.com/janelia-flyem/dvid/datastore"
"github.com/janelia-flyem/dvid/dvid"
"github.com/janelia-flyem/dvid/storage"
"github.com/janelia-flyem/go/nrsc"
"github.com/rs/cors"
"github.com/zenazn/goji/web"
"github.com/zenazn/goji/web/middleware"
)
const webHelp = `
<!DOCTYPE html>
<html>
<head>
<meta charset='utf-8' />
<meta http-equiv="X-UA-Compatible" content="chrome=1" />
<meta name="description" content="DVID Web Server Home Page" />
<title>Embedded DVID Web Server</title>
</head>
<body>
<!-- HEADER -->
<div id="header_wrap" class="outer">
<header class="inner">
<h2 id="project_tagline">Stock help page for DVID server currently running on %s</h2>
</header>
</div>
<!-- MAIN CONTENT -->
<div id="main_content_wrap" class="outer">
<section id="main_content" class="inner">
<h3>Welcome to DVID</h3>
<p>This page provides an introduction to the currently running DVID server.
Developers can visit the <a href="https://github.com/janelia-flyem/dvid">Github repo</a>
for more documentation and code.
The <a href="/">DVID admin console</a> may be available if you have downloaded the
<a href="https://github.com/janelia-flyem/dvid-console">DVID console web client repo</a>
and included <i>-webclient=/path/to/console</i> when running the
<code>dvid serve</code> command.</p>
<h3>HTTP API and command line use</h3>
<p>All API endpoints (except for memory profiling) follow this layout:
<pre>
/serverhost:someport/api/...
</pre>
The online documentation doesn't show the server host prefixed to the "/api/..." URL,
but it is required.
<h4>General commands</h4>
<pre>
GET /api/help
The current page that lists all general and type-specific commands/HTTP API.
GET /api/help/{typename}
Returns help for the given datatype.
GET /api/load
Returns a JSON of server load statistics.
GET /api/storage
Returns a JSON object for each backend store where the key is the backend store name.
The store object has local instance ID keys with the following object value:
{
"Name": "grayscale",
"DataType": "uint8blk",
"DataUUID": ...,
"RootUUID": ..., // this is the UUID of this data's root
"Bytes": ...
}
GET /api/heartbeat[?u=<username>]
Preferred method to test whether server is alive. If a username is provided, the
time it takes to respond to the request (including transmission to remote client)
is recorded. A compilation of user latencies is available through the
/api/user-latencies endpoint.
GET /api/user-latencies[?u=<username]
Returns JSON for the all user latencies or a particular user if one is specified.
The format is:
[
{
"User": "Name",
"LastPing": <string timestamp in RFC3339 format>,
"LastLatency": <duration in milliseconds>,
"MinLatency": <duration in milliseconds>,
"MaxLatency": <duration in milliseconds>
},
...
]
GET /api/server/info
Returns JSON for server properties.
GET /api/server/note
Returns any value of [server.note] from the configuration TOML.
GET /api/server/types
Returns JSON with the datatypes of currently stored data instances. Datatypes are represented
by a name and the URL of the reference implementation. To see all possible datatypes, i.e., the
list of compiled datatypes, use the "compiled-types" endpoint.
GET /api/server/compiled-types
Returns JSON of all possible datatypes for this server, i.e., the list of compiled datatypes.
GET /api/server/groupcache
Returns JSON for groupcache statistics for this server. See github.com/golang/groupcache package
Stats and CacheStats for MainCache and HotCache.
POST /api/server/settings
Sets server parameters. Expects JSON to be posted with optional keys denoting parameters:
{
"gc": 500,
"throttle": 2
}
Possible keys:
gc Garbage collection target percentage. This is a low-level server tuning
request that can affect overall request latency.
See: https://golang.org/pkg/runtime/debug/#SetGCPercent
throttle Maximum number of CPU-intensive requests that can be executed under throttle mode.
See imageblk and labelblk GET 3d voxels and POST voxels.
Default = 1.
GET /api/server/blobstore/{reference}
GETs data with the given reference string from this server's blobstore. The blobstore is
populated as part of mutation logging and is read-only. The reference is a URL-friendly
content hash (FNV-128) of the blob data.
POST /api/server/reload-auth
Reloads any authorization file as configured in the TOML file.
-------------------------
Memory Profiler endpoints
-------------------------
GET /profiler/start
Starts a memory profiler and redirects to a web page where you can see real-time
memory usage graphs.
GET /profiler/stop
Stops the memory profiler.
GET /profiler/info.html
Shows real-time memory usage graphs.
GET /profiler/info
Returns JSON of memory usage data.
-------------------------
Repo-Level REST endpoints
-------------------------
POST /api/repos
Creates a new repository. Expects configuration data in JSON as the body of the POST.
Configuration is a JSON object with optional "alias", "description", "root" (the desired
UUID of the root), and "passcode" properties. Returns the root UUID of the newly created
repo in JSON object: {"root": uuid}
GET /api/repos/info
Returns JSON for the repositories under management by this server.
Note that any versioned properties for image-based data instances (e.g., extents)
will be drawn from the leaf of the master branch.
HEAD /api/repo/{uuid}
Returns 200 if a repo with given UUID is available.
GET /api/repo/{uuid}/info
Returns JSON for just the repository with given root UUID. The UUID string can be
shortened as long as it is uniquely identifiable across the managed repositories.
Note that any versioned properties for image-based data instances (e.g., extents)
will be drawn from the leaf of the master branch.
POST /api/repo/{uuid}/info
Allows changing of some repository properties by POSTing of a JSON similar to what
you'd use in posting a new repo. The "alias" and "description" properties can be
optionally modified using this endpoint by POST of a JSON like:
{
"alias": "myrepo",
"description": "This is the best repository in the universe"
}
Leaving out a property will keep it unchanged.
POST /api/repo/{uuid}/instance
Creates a new instance of the given data type. Expects configuration data in JSON
as the body of the POST. Configuration data is a JSON object with each property
corresponding to a configuration keyword for the particular data type.
JSON name/value pairs:
REQUIRED "typename" Type name of the new instance,
REQUIRED "dataname" Name of the new instance
OPTIONAL "versioned" If "false" or "0", the data is unversioned and acts as if
all UUIDs within a repo become the root repo UUID. (True by default.)
OPTIONAL "Compression" Specify the compression format to use when serializing values to disk.
(Applies to most instance types, but not all.)
Choices are: “none”, “snappy”, “lz4”, “gzip”, “jpeg”.
Where applicable, the compression level can be appended, e.g. "jpeg:80".
OPTIONAL "Tags" Can send list of tags as a series of equal statements separated by
commas, e.g., "type=meshes,stuff=something-something". This will
create a tag "type" set to "meshes" and a tag "stuff" set to
"something-something". Note that the formatting prevents spaces
and commas from being part of a tag.
A JSON message will be sent to any associated Kafka system with the following format:
{
"Action": "newinstance",
"UUID": <UUID>,
"Typename": <typename>,
"Dataname": <dataname>
}
GET /api/repo/{uuid}/log
POST /api/repo/{uuid}/log
GETs or POSTs log data to the repo with given UUID. The get or post body should
be JSON of the following format:
{ "log": [ "provenance data...", "provenance data...", ...] }
The log is a list of strings that will be appended to the repo's log. They should be
descriptions for the entire repo and not just one node. For particular versions, use
node-level logging (below).
GET /api/repo/{uuid}/branch-versions/{branch name}
Returns a JSON list of version UUIDs for the given branch name, starting with the
current leaf and working back to the root. Use "master" for the default branch.
If a repository predates the introduction of branch names and has multiple paths
for the given branch name, the endpoint will return a Bad Request Error (400).
POST /api/repo/{uuid}/merge
Creates a conflict-free merge of a set of committed parent UUIDs into a child. Note
the merge will not necessarily create an error immediately, but later GETs that
detect conflicts will produce an error at that time. These can be resolved by
doing a POST on the "resolve" endpoint below.
The post body should be JSON of the following format:
{
"mergeType": "conflict-free",
"parents": [ "parent-uuid1", "parent-uuid2", ... ],
"note": "this is a description of what I did on this commit"
}
The elements of the JSON object are:
mergeType: must be "conflict-free". We will introduce other merge options in future.
parents: a list of the parent UUIDs to be merged.
note: any note that should be set for the child version.
A JSON response will be sent with the following format:
{ "child": "3f01a8856" }
The response includes the UUID of the new merged, child node.
POST /api/repo/{uuid}/resolve
Forces a merge of a set of committed parent UUIDs into a child by specifying a
UUID order that establishes priorities in case of conflicts (see "parents" description
below.
Unlike the very fast but lazily-enforced 'merge' endpoint, this request spawns an
asynchronous routine that checks all data for the given data instances (see "data" in
JSON post), creates versions to delete conflicts, and then performs the conflict-free
merge to a final child.
The post body should be JSON of the following format:
{
"data": [ "instance-name-1", "instance-name2", ... ],
"parents": [ "parent-uuid1", "parent-uuid2", ... ],
"note": "this is a description of what I did on this commit"
}
The elements of the JSON object are:
data: A list of the data instance names to be scanned for possible conflicts.
parents: A list of the parent UUIDs to be merged in order of priority. If
there is a conflict between the second and third UUID, the conflicting
data in the third UUID will be deleted in favor of the second UUID.
note: Any note that should be set for the child version.
A JSON response will be sent with the following format:
{ "child": "3f01a8856" }
The response includes the UUID of the new merged, child node.
-------------------------
Node-Level REST endpoints
-------------------------
Note: UUIDs referenced below are strings that may either be a unique prefix of a
hexadecimal UUID string (e.g., 3FA22) or a branch leaf specification that adds
a colon (":") followed by the case-dependent branch name. In the case of a
branch leaf specification, the unique UUID prefix just identifies the repo of
the branch, and the UUID referenced is really the leaf of the branch name.
For example, if we have a DAG with root A -> B -> C where C is the current
HEAD or leaf of the "master" (default) branch, then asking for "B:master" is
the same as asking for "C". If we add another version so A -> B -> C -> D, then
references to "B:master" now return the data from "D".
GET /api/node/{uuid}/note
POST /api/node/{uuid}/note
GETs or POSTs note data to the node (version) with given UUID. The get or post body should
be JSON of the following format:
{ "note": "this is a description of what I did on this version" }
The log is a list of strings that will be appended to the node's log. They should be
data usable by clients to reconstruct the types of operation done to that version
of data.
A JSON message will be sent to any associated Kafka system with the following format:
{
"Action": "nodenote",
"UUID": <UUID>,
"Note": <note>
}
GET /api/node/{uuid}/log
POST /api/node/{uuid}/log
GETs or POSTs log data to the node (version) with given UUID. The get or post body should
be JSON of the following format:
{ "log": [ "provenance data...", "provenance data...", ...] }
The log is a list of strings that will be appended to the node's log. They should be
data usable by clients to reconstruct the types of operation done to that version
of data.
A JSON message will be sent to any associated Kafka system with the following format:
{
"Action": "nodelog",
"UUID": <UUID>,
"Log": [<msg 1>, <msg2>, ...]
}
GET /api/node/{uuid}/status
Returns the commit or lock status of the node with given UUID in JSON format:
{ "Locked": true }
POST /api/node/{uuid}/commit
Commits (locks) the node/version with given UUID. This is required before a version can
be branched or pushed to a remote server. The post body should be JSON of the
following format:
{
"note": "this is a description of what I did on this commit",
"log": [ "provenance data...", "provenance data...", ...]
}
The note is a human-readable commit message. The log is a slice of strings that may
be computer-readable.
If successful, a valid JSON response will be sent with the following format:
{ "committed": "3f01a8856" }
A JSON message will be sent to any associated Kafka system with the following format:
{
"Action": "commit",
"UUID": <UUID of committed node>
"Note": <string>,
"Log": [<msg 1>, <msg2>, ...]
}
POST /api/node/{uuid}/branch
Creates a new branch child node (version) of the node with given UUID.
The branch name must be unique across the DAG.
The post body should be in JSON format, where "note" and "uuid" are optional:
{
"branch": "unique name of new branch",
"note": "this is what we'll be doing on this version",
"uuid": <desired UUID>
}
A JSON response will be sent with the following format:
{ "child": "3f01a8856" }
The response includes the UUID of the new child node.
A JSON message will be sent to any associated Kafka system with the following format:
{
"Action": "branch",
"Parent": <UUID of parent>
"Child": <UUID of new child>,
"Branch": <branch designation as string>,
"Note": <string>
}
POST /api/node/{uuid}/newversion
Creates a new child node (version) of the node with given
UUID if no open node exists.
An optional post body should be in JSON format:
{
"note": "this is what we'll be doing on this version",
"uuid": <desired UUID>
}
A JSON response will be sent with the following format:
{ "child": "3f01a8856" }
The response includes the UUID of the new child node.
A JSON message will be sent to any associated Kafka system with the following format:
{
"Action": "newinstance",
"Parent": <UUID of parent>
"Child": <UUID of new child>,
"Note": <string>
}
GET /api/node/{uuid}/{data name}/blobstore/{reference}
POST /api/node/{uuid}/{data name}/blobstore
GETs or POSTs data into a blob store associated with the given data instance.
The reference is a URL-friendly content hash (FNV-128) of the blob data.
The POST will store data and return the following JSON:
{ "reference": "<the content hash of blob data>" }
Note that POST /blobstore will not be logged in any associated kafka system.
</pre>
<h4>Data type commands</h4>
<p>This server has compiled in the following data types, each of which have a HTTP API.
Click on the links below to explore each data type's command line and HTTP API.</p>
%s
<p>Background batch processes like generation of tiles, sparse volumes, and various
indices, will be paused if a single server receives more than a few data type API
requests over a 5 minute moving window. You can mark your API request as
non-interactive (i.e., you don't mind if it's delayed) by appending a query string
<code>interactive=false</code>.
<h3>Licensing</h3>
<p><a href="https://github.com/janelia-flyem/dvid">DVID</a> is released under the
<a href="http://janelia-flyem.github.com/janelia_farm_license.html">Janelia Farm license</a>, a
<a href="http://en.wikipedia.org/wiki/BSD_license#3-clause_license_.28.22New_BSD_License.22_or_.22Modified_BSD_License.22.29">
3-clause BSD license</a>.
</p>
</section>
</div>
<!-- FOOTER -->
<div id="footer_wrap" class="outer">
<footer class="inner">
</footer>
</div>
</body>
</html>
`
const (
	// WebAPIVersion is the string version of the API. Once DVID is somewhat stable,
	// this will be "v1/", "v2/", etc.
	WebAPIVersion = ""

	// WebAPIPath is the relative URL path to our Level 2 REST API.
	WebAPIPath = "/api/" + WebAPIVersion

	// WriteTimeout is the maximum time in seconds DVID will wait to write data down HTTP connection.
	WriteTimeout = 600 * time.Second

	// ReadTimeout is the maximum time in seconds DVID will wait to read data from HTTP connection.
	ReadTimeout = 600 * time.Second
)
var (
	// webMux is the package-wide goji Mux plus a flag recording whether
	// initRoutes() has already populated it.
	webMux struct {
		*web.Mux
		routesSetup bool
	}
	webMuxMu sync.Mutex // Can Lock() to prevent any kind of web requests from initiating actions.

	// userLatencies accumulates per-user round-trip latency stats gathered by
	// the /api/heartbeat and /api/user-latencies endpoints.
	userLatencies userLatencyMap

	adminToken string // if set in environment variable DVID_ADMIN_TOKEN, requests have admin rights.
)
// userLatencyMap is a mutex-guarded map of user name -> latency statistics.
// Callers must hold the lock (Lock/RLock) around access to data.
type userLatencyMap struct {
	sync.RWMutex
	data map[string]userLatency
}

// userLatency records ping/latency statistics for a single user; all
// latencies are in milliseconds (see latencyHandler).
type userLatency struct {
	User        string
	LastPing    string // timestamp of last ping in RFC3339 format
	LastLatency int64
	MinLatency  int64
	MaxLatency  int64
}
// init creates the package-level mux, installs request-ID middleware, and
// reads the optional admin token from the environment.
func init() {
	webMux.Mux = web.New()
	webMux.Use(middleware.RequestID)
	userLatencies.data = make(map[string]userLatency)
	// Requests carrying this token (if non-empty) are granted admin rights.
	adminToken = os.Getenv("DVID_ADMIN_TOKEN")
}
// ThrottledHTTP checks if a request can continue under throttling. If so, it returns
// false. If it cannot (throttled state), it sends an http.StatusServiceUnavailable
// response and returns true. Throttling is controlled by the package-level
// maxThrottledOps, adjustable via POST /api/server/settings ("throttle" key).
func ThrottledHTTP(w http.ResponseWriter) bool {
	curThrottleMu.Lock()
	if curThrottledOps < maxThrottledOps {
		curThrottledOps++
		curThrottleMu.Unlock()
		return false
	}
	// Snapshot the counters while still holding the lock: the original code read
	// curThrottledOps/maxThrottledOps inside fmt.Sprintf after Unlock(), racing
	// with concurrent increments/decrements.
	cur, max := curThrottledOps, maxThrottledOps
	curThrottleMu.Unlock()
	msg := fmt.Sprintf("Server already running %d throttled operations (max = %d)\n", cur, max)
	http.Error(w, msg, http.StatusServiceUnavailable)
	return true
}
// ThrottledOpDone marks the end of a throttled operation, allowing another op blocked
// by ThrottledHTTP() to succeed.
func ThrottledOpDone() {
	curThrottleMu.Lock()
	defer curThrottleMu.Unlock()
	curThrottledOps--
}
// ServeSingleHTTP fulfills one request using the default web Mux.
// Routes are lazily initialized on first use.
// NOTE(review): the routesSetup check here is not under webMuxMu; concurrent
// first requests could both call initRoutes() — confirm initRoutes is
// re-entrant safe enough for this (it re-checks the flag).
func ServeSingleHTTP(w http.ResponseWriter, r *http.Request) {
	if !webMux.routesSetup {
		initRoutes()
	}
	// Allow cross-origin resource sharing.
	w.Header().Add("Access-Control-Allow-Origin", "*")
	webMux.ServeHTTP(w, r)
}
// Listen and serve HTTP requests using address and don't let stay-alive
// connections hog goroutines for more than an hour.
// See for discussion:
// http://stackoverflow.com/questions/10971800/golang-http-server-leaving-open-goroutines
func serveHTTP() {
	// Annotate the startup log line with the server's write mode.
	var mode string
	if readonly {
		mode = " (read-only mode)"
	} else if fullwrite {
		mode = " (full write mode)"
	}
	dvid.Infof("Web server listening at %s%s ...\n", HTTPAddress(), mode)
	if !webMux.routesSetup {
		initRoutes()
	}
	// Install our handler at the root of the standard net/http default mux.
	// This allows packages like expvar to continue working as expected. (From goji.go)
	http.Handle("/", webMux)
	// TODO: Could have used "graceful" goji package but doesn't allow tailoring of timeouts
	// unless package is modified. Not sure graceful features needed whereas tailoring
	// of server is more important.
	s := &http.Server{
		Addr:         HTTPAddress(),
		WriteTimeout: WriteTimeout,
		ReadTimeout:  ReadTimeout,
	}
	// Blocks until the listener fails; any failure is fatal to the process.
	log.Fatal(s.ListenAndServe())
	// graceful.HandleSignals()
	// if err := graceful.ListenAndServe(address, http.DefaultServeMux); err != nil {
	// 	log.Fatal(err)
	// }
	// graceful.Wait()
}
// High-level switchboard for DVID HTTP API.
// Wires every endpoint into the package-level webMux. Sub-muxes partition the
// URL space so middleware (CORS, auth, mutation logging, activity logging) can
// be applied selectively. Safe to call more than once: a no-op after the first
// successful setup.
func initRoutes() {
	if webMux.routesSetup {
		return
	}

	// --- CORS configuration -------------------------------------------------
	// Three possible outcomes: a *cors.Cors handler (c != nil), a bare wildcard
	// Access-Control-Allow-Origin middleware (wildcardOrigin), or no CORS at all.
	var wildcardOrigin bool
	var c *cors.Cors
	authorizationOn := (len(tc.Auth.ProxyAddress) != 0)
	if len(corsDomains) > 0 {
		copts := cors.Options{
			AllowedMethods: []string{"GET", "POST", "DELETE", "HEAD"},
		}
		if authorizationOn {
			// With auth on, origins are validated per-request and credentials
			// (Authorization header) must be allowed through.
			copts.AllowOriginFunc = corsValidator
			copts.AllowedHeaders = []string{"Authorization", "authorization"}
			copts.AllowCredentials = true
			c = cors.New(copts)
		} else {
			// Without auth, a "*" entry short-circuits to wildcard middleware;
			// otherwise the explicit domain list is used.
			var allowed []string
			for domain := range corsDomains {
				if domain == "*" {
					dvid.Infof("setting allowed origins to wildcard *\n")
					wildcardOrigin = true
					break
				}
				allowed = append(allowed, domain)
			}
			if !wildcardOrigin {
				copts.AllowedOrigins = allowed
				dvid.Infof("setting allowed origins to %v\n", allowed)
				c = cors.New(copts)
			}
		}
	}

	webMuxMu.Lock()

	// --- Silent mux: high-frequency polling endpoints, no request logging ---
	silentMux := web.New()
	webMux.Handle("/api/load", silentMux)
	webMux.Handle("/api/heartbeat", silentMux)
	webMux.Handle("/api/user-latencies", silentMux)
	if c != nil {
		silentMux.Use(c.Handler)
	} else if wildcardOrigin {
		silentMux.Use(wildcardAccessHandler)
	}
	silentMux.Use(latencyHandler)
	silentMux.Get("/api/load", loadHandler)
	silentMux.Get("/api/heartbeat", heartbeatHandler)
	silentMux.Get("/api/user-latencies", latenciesHandler)

	// --- Main mux: everything else, with logging/recovery/availability ------
	mainMux := web.New()
	webMux.Handle("/*", mainMux)
	mainMux.Use(middleware.Logger)
	mainMux.Use(middleware.AutomaticOptions)
	mainMux.Use(httpAvailHandler)
	mainMux.Use(recoverHandler)
	mainMux.Use(adminPrivHandler)
	if c != nil {
		mainMux.Use(c.Handler)
	} else if wildcardOrigin {
		mainMux.Use(wildcardAccessHandler)
	}

	mainMux.Get("/interface", interfaceHandler)
	mainMux.Get("/interface/version", versionHandler)

	mainMux.Get("/api/help", helpHandler)
	mainMux.Get("/api/help/", helpHandler)
	mainMux.Get("/api/help/:typename", typehelpHandler)

	mainMux.Get("/api/storage", serverStorageHandler)

	// --- Server-level endpoints ---------------------------------------------
	serverMux := web.New()
	mainMux.Handle("/api/server/:action", serverMux)
	serverMux.Use(activityLogHandler)
	serverMux.Get("/api/server/info", serverInfoHandler)
	serverMux.Get("/api/server/info/", serverInfoHandler)
	serverMux.Get("/api/server/note", serverNoteHandler)
	serverMux.Get("/api/server/note/", serverNoteHandler)
	serverMux.Get("/api/server/types", serverTypesHandler)
	serverMux.Get("/api/server/types/", serverTypesHandler)
	serverMux.Get("/api/server/compiled-types", serverCompiledTypesHandler)
	serverMux.Get("/api/server/compiled-types/", serverCompiledTypesHandler)
	serverMux.Get("/api/server/groupcache", serverGroupcacheHandler)
	serverMux.Get("/api/server/groupcache/", serverGroupcacheHandler)
	serverMux.Post("/api/server/settings", serverSettingsHandler)
	serverMux.Get("/api/server/blobstore/:ref", blobstoreHandler)
	serverMux.Get("/api/server/token", serverTokenHandler)
	serverMux.Get("/api/server/token/", serverTokenHandler)
	serverMux.Post("/api/server/reload-auth", serverReloadAuthHandler)
	serverMux.Post("/api/server/reload-auth/", serverReloadAuthHandler)

	// --- Repo-level endpoints -----------------------------------------------
	mainMux.Post("/api/repos", reposPostHandler)
	mainMux.Get("/api/repos/info", reposInfoHandler)

	// repoRawMux handles the bare /api/repo/{uuid} (HEAD existence check) with
	// only raw UUID resolution — no auth or mutation logging needed.
	repoRawMux := web.New()
	mainMux.Handle("/api/repo/:uuid", repoRawMux)
	repoRawMux.Use(activityLogHandler)
	repoRawMux.Use(repoRawSelector)
	repoRawMux.Head("/api/repo/:uuid", repoHeadHandler)

	repoMux := web.New()
	mainMux.Handle("/api/repo/:uuid/:action", repoMux)
	mainMux.Handle("/api/repo/:uuid/:action/:name", repoMux)
	repoMux.Use(repoRawSelector)
	if authorizationOn {
		repoMux.Use(isAuthorized)
	}
	repoMux.Use(mutationsHandler)
	repoMux.Use(activityLogHandler)
	repoMux.Use(repoSelector)
	repoMux.Get("/api/repo/:uuid/info", repoInfoHandler)
	repoMux.Post("/api/repo/:uuid/info", repoPostInfoHandler)
	repoMux.Post("/api/repo/:uuid/instance", repoNewDataHandler)
	repoMux.Get("/api/repo/:uuid/branch-versions/:name", repoBranchVersionsHandler)
	repoMux.Get("/api/repo/:uuid/log", getRepoLogHandler)
	repoMux.Post("/api/repo/:uuid/log", postRepoLogHandler)
	repoMux.Post("/api/repo/:uuid/merge", repoMergeHandler)
	repoMux.Post("/api/repo/:uuid/resolve", repoResolveHandler)

	// --- Node-level endpoints -----------------------------------------------
	nodeMux := web.New()
	mainMux.Handle("/api/node/:uuid", nodeMux)
	mainMux.Handle("/api/node/:uuid/:action", nodeMux)
	nodeMux.Use(repoRawSelector)
	if authorizationOn {
		nodeMux.Use(isAuthorized)
	}
	nodeMux.Use(mutationsHandler)
	nodeMux.Use(activityLogHandler)
	nodeMux.Use(nodeSelector)
	nodeMux.Get("/api/node/:uuid/note", getNodeNoteHandler)
	nodeMux.Post("/api/node/:uuid/note", postNodeNoteHandler)
	nodeMux.Get("/api/node/:uuid/log", getNodeLogHandler)
	nodeMux.Post("/api/node/:uuid/log", postNodeLogHandler)
	nodeMux.Get("/api/node/:uuid/commit", repoCommitStateHandler)
	nodeMux.Get("/api/node/:uuid/status", repoCommitStateHandler)
	nodeMux.Post("/api/node/:uuid/commit", repoCommitHandler)
	nodeMux.Post("/api/node/:uuid/branch", repoBranchHandler)
	nodeMux.Post("/api/node/:uuid/newversion", repoNewVersionHandler)

	// --- Data-instance endpoints: dispatched to the datatype's own API ------
	instanceMux := web.New()
	mainMux.Handle("/api/node/:uuid/:dataname/:keyword", instanceMux)
	mainMux.Handle("/api/node/:uuid/:dataname/:keyword/*", instanceMux)
	instanceMux.Use(repoRawSelector)
	if authorizationOn {
		instanceMux.Use(isAuthorized)
	}
	instanceMux.Use(mutationsHandler)
	instanceMux.Use(instanceSelector)
	instanceMux.NotFound(notFound)

	mainMux.Get("/*", mainHandler)

	webMux.routesSetup = true
	webMuxMu.Unlock()
}
// httpUnavailable reports whether the server is refusing requests; when it is,
// a 503 (Service Unavailable) response is written and true is returned.
func httpUnavailable(w http.ResponseWriter) bool {
	if !dvid.RequestsOK() {
		http.Error(w, "DVID server is unavailable.", http.StatusServiceUnavailable)
		return true
	}
	return false
}
type wrappedResponseWriter struct {
http.ResponseWriter
wroteHeader bool
status int
bytes int
}
func (w *wrappedResponseWriter) WriteHeader(code int) {
if !w.wroteHeader {
w.status = code
w.wroteHeader = true
w.ResponseWriter.WriteHeader(code)
}
}
func (w *wrappedResponseWriter) Write(buf []byte) (int, error) {
w.WriteHeader(http.StatusOK)
n, err := w.ResponseWriter.Write(buf)
w.bytes += n
return n, err
}
func wrapResponseWriter(w http.ResponseWriter) *wrappedResponseWriter {
wr := wrappedResponseWriter{
ResponseWriter: w,
}
return &wr
}
// Middleware that prevents any web requests if httpAvail is false. Allows draconian
// shutdown of server when doing critical reorg of internals.
func httpAvailHandler(c *web.C, h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if httpUnavailable(w) {
			return // 503 already written
		}
		h.ServeHTTP(w, r)
	})
}
// Middleware that recovers from panics, sends email if a notification email
// has been provided, and log issues.
func recoverHandler(c *web.C, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		reqID := middleware.GetReqID(*c)
		defer func() {
			if e := recover(); e != nil {
				// Include the request ID, client address, and URL path so the panic
				// can be correlated with the request logs.
				msg := fmt.Sprintf("Panic detected on request %s:\n%+v\nIP: %v, URL: %s\n",
					reqID, e, r.RemoteAddr, r.URL.Path)
				dvid.ReportPanic(msg, WebServer())
				http.Error(w, msg, 500)
			}
		}()
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
// Middleware that allows any server to access to prevent CORS issues.
func wildcardAccessHandler(c *web.C, h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Add("Access-Control-Allow-Origin", "*")
		h.ServeHTTP(w, r)
	})
}
// Middleware that logs all mutations to any configured mutation log.
// When an HTTP store is configured, the request body is read (and restored via
// a copy so downstream handlers can re-read it) and forwarded to the mutation
// log with the target UUID and, if present, the data instance's UUID.
func mutationsHandler(c *web.C, h http.Handler) http.Handler {
	mutConfig := MutationLogSpec()
	fn := func(w http.ResponseWriter, r *http.Request) {
		if mutConfig.Httpstore != "" {
			buf, err := ioutil.ReadAll(r.Body)
			if err != nil {
				BadRequest(w, r, "unable to read POST for mirroring: %v", err)
				return
			}
			// Replace the consumed body with a fresh reader over a copy.
			dup := make([]byte, len(buf))
			copy(dup, buf)
			r.Body = ioutil.NopCloser(bytes.NewBuffer(dup))
			uuid, ok := c.Env["uuid"].(dvid.UUID)
			if !ok {
				msg := fmt.Sprintf("Bad format for UUID %q\n", c.Env["uuid"])
				BadRequest(w, r, msg)
				return
			}
			var dataID dvid.UUID
			dataname := c.Env["dataname"]
			if dataname != nil {
				instancename, ok := dataname.(dvid.InstanceName)
				if !ok {
					BadRequest(w, r, "bad data instance name")
					// BUGFIX: previously fell through after the error response and
					// called GetDataByUUIDName with a zero-value instance name.
					return
				}
				data, err := datastore.GetDataByUUIDName(uuid, instancename)
				if err != nil {
					BadRequest(w, r, err)
					return
				}
				dataID = data.DataUUID()
			}
			if err := LogHTTPMutation(uuid, dataID, r, buf); err != nil {
				BadRequest(w, r, err)
				return
			}
		}
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(fn)
}
// Middleware that logs activity to kafka if available.
// Wraps the response writer so the final status code and bytes written can be
// reported, then emits one activity record per request after it completes.
func activityLogHandler(c *web.C, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		t0 := time.Now()
		myw := wrapResponseWriter(w)
		h.ServeHTTP(myw, r)
		if KafkaAvailable() {
			// Optional identifiers supplied by clients as query strings.
			user := r.URL.Query().Get("u")
			app := r.URL.Query().Get("app")
			t := time.Since(t0)
			activity := map[string]interface{}{
				"time":        t0.Unix(),
				"duration":    t.Seconds() * 1000.0, // elapsed time in milliseconds
				"status":      myw.status,
				"user":        user,
				"client":      app,
				"method":      r.Method,
				"uri":         r.RequestURI,
				"bytes_in":    r.ContentLength,
				"bytes_out":   myw.bytes,
				"remote_addr": r.RemoteAddr,
			}
			storage.LogActivityToKafka(activity)
		}
	}
	return http.HandlerFunc(fn)
}
// Middleware that measures latency for a request
func latencyHandler(c *web.C, h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
t0 := time.Now()
myw := wrapResponseWriter(w)
h.ServeHTTP(myw, r)
user := r.URL.Query().Get("u")
if user != "" {
t := time.Since(t0)
ms := int64(t.Seconds() * 1000.0)
userLatencies.Lock()
data, found := userLatencies.data[user]
if found {
data.LastPing = t0.Format(time.RFC3339)
if data.MinLatency > ms {
data.MinLatency = ms
}
if data.MaxLatency < ms {
data.MaxLatency = ms
}
} else {
data = userLatency{
User: user,
LastPing: t0.Format(time.RFC3339),
LastLatency: ms,
MinLatency: ms,
MaxLatency: ms,
}
}
userLatencies.data[user] = data
userLatencies.Unlock()