forked from wundergraph/graphql-go-tools
-
Notifications
You must be signed in to change notification settings - Fork 0
/
doc.go
604 lines (502 loc) · 18.9 KB
/
doc.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
// Package graphql-go-tools is a library for creating GraphQL services using the Go programming language.
//
// # About GraphQL
//
// GraphQL is a query language for APIs and a runtime for fulfilling those queries with your existing data. GraphQL provides a complete and understandable description of the data in your API, gives clients the power to ask for exactly what they need and nothing more, makes it easier to evolve APIs over time, and enables powerful developer tools.
//
// Source: https://graphql.org
//
// # About this library
//
// This library is intended to be a set of low level building blocks to write high performance and secure GraphQL applications.
// Use cases could range from writing layer seven GraphQL proxies, firewalls, caches etc..
// You would usually not use this library to write a GraphQL server yourself but to build tools for the GraphQL ecosystem.
//
// To achieve this goal the library has zero dependencies at its core functionality.
// It has a full implementation of the GraphQL AST and supports lexing, parsing, validation, normalization, introspection, query planning as well as query execution etc.
//
// With the execution package it's possible to write a fully functional GraphQL server that is capable of mediating between various protocols and formats.
// In its current state you can use the following DataSources to resolve fields:
// - Static data (embed static data into a schema to extend a field in a simple way)
// - HTTP JSON APIs (combine multiple Restful APIs into one single GraphQL Endpoint, nesting is possible)
// - GraphQL APIs (you can combine multiple GraphQL APIs into one single GraphQL Endpoint, nesting is possible)
// - Webassembly/WASM Lambdas (e.g. resolve a field using a Rust lambda)
//
// If you're looking for a ready to use solution that has all this functionality packaged as a Gateway have a look at: https://matthewmcneely.com
//
// Created by Jens Neuse
package main
import (
"bytes"
"context"
"fmt"
"github.com/cespare/xxhash/v2"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/ast"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/astnormalization"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/astparser"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/astprinter"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/asttransform"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/astvalidation"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/astvisitor"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/engine/datasource/staticdatasource"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/engine/plan"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/engine/resolve"
"github.com/matthewmcneely/graphql-go-tools/v2/pkg/operationreport"
)
/*
ExampleParsePrintDocument shows you the most basic usage of the library.
It parses a GraphQL document and prints it back to a writer.
*/
// ExampleParsePrintDocument is the "hello world" of the library: it parses a
// GraphQL document from raw bytes and prints it straight back out to a buffer.
func ExampleParsePrintDocument() {
	query := []byte(`query { hello }`)
	report := &operationreport.Report{}
	doc := ast.NewSmallDocument()
	doc.Input.ResetInputBytes(query)
	astparser.NewParser().Parse(doc, report)
	if report.HasErrors() {
		panic(report.Error())
	}
	var buf bytes.Buffer
	printer := &astprinter.Printer{}
	if err := printer.Print(doc, nil, &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // Output: query { hello }
}
/*
Okay, that was easy, but also not very useful.
Let's try to parse a more complex document and print it back to a writer.
*/
// ExampleParseComplexDocument shows a special feature of the printer:
// all insignificant whitespace and newlines are collapsed on output.
func ExampleParseComplexDocument() {
	input := []byte(`
query {
hello
foo {
bar
}
}
`)
	report := &operationreport.Report{}
	doc := ast.NewSmallDocument()
	doc.Input.ResetInputBytes(input)
	astparser.NewParser().Parse(doc, report)
	if report.HasErrors() {
		panic(report.Error())
	}
	var buf bytes.Buffer
	printer := &astprinter.Printer{}
	if err := printer.Print(doc, nil, &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // Output: query { hello foo { bar } }
}
/*
You'll notice that the printer removes all whitespace and newlines.
But what if we wanted to print the document with indentation?
*/
// ExamplePrintWithIndentation prints a parsed document with a caller-supplied
// indentation string instead of the compact single-line form.
func ExamplePrintWithIndentation() {
	input := []byte(`
query {
hello
foo {
bar
}
}
`)
	report := &operationreport.Report{}
	doc := ast.NewSmallDocument()
	doc.Input.ResetInputBytes(input)
	astparser.NewParser().Parse(doc, report)
	if report.HasErrors() {
		panic(report.Error())
	}
	indented, err := astprinter.PrintStringIndent(doc, nil, " ")
	if err != nil {
		panic(err)
	}
	fmt.Println(indented)
	// Output: query {
	// hello
	// foo {
	// bar
	// }
	// }
}
/*
Okay, fantastic. We can parse and print GraphQL documents.
As a next step, we could analyze the document and extract some information from it.
What if we wanted to know the name of the operation in the document, if any?
And what if we wanted to know about the Operation type?
*/
// ExampleParseOperationNameAndType parses a document and reports, for each
// operation definition it contains, the operation's name and operation type
// (query, mutation, or subscription).
func ExampleParseOperationNameAndType() {
	input := []byte(`
query MyQuery {
hello
foo {
bar
}
}
`)
	report := &operationreport.Report{}
	document := ast.NewSmallDocument()
	parser := astparser.NewParser()
	document.Input.ResetInputBytes(input)
	parser.Parse(document, report)
	if report.HasErrors() {
		panic(report.Error())
	}
	operationCount := 0
	var (
		operationNames []string
		operationTypes []ast.OperationType
	)
	for _, node := range document.RootNodes {
		if node.Kind != ast.NodeKindOperationDefinition {
			continue
		}
		operationCount++
		// node.Ref of a NodeKindOperationDefinition node indexes into
		// document.OperationDefinitions — NOT RootOperationTypeDefinitions,
		// which holds the schema's root-type mapping (`schema { query: Query }`).
		// Using the wrong slice here would read an unrelated (or out-of-range) entry.
		name := document.OperationDefinitionNameString(node.Ref)
		operationNames = append(operationNames, name)
		operationType := document.OperationDefinitions[node.Ref].OperationType
		operationTypes = append(operationTypes, operationType)
	}
	fmt.Println(operationCount) // Output: 1
	fmt.Println(operationNames) // Output: [MyQuery]
	fmt.Println(operationTypes) // Output: [query]
}
/*
We've now seen how to analyze the document and learn a bit about it.
We could now add some validation to our application,
e.g. we could check for the number of operations in the document,
and return an error if there are multiple anonymous operations.
We could also validate the Operation content against a schema.
But before we do this, we need to normalize the document.
This is important because validation relies on the document being normalized.
It was much easier to build the validation and many other features on top of a normalized document.
Normalization is the process of transforming the document into a canonical form.
This means that the document is transformed in a way that makes it easier to reason about it.
We inline fragments, we remove unused fragments,
we remove duplicate fields, we remove unused variables,
we remove unused operations etc...
So, let's normalize the document!
*/
// ExampleNormalizeDocument normalizes an operation against a schema:
// duplicate fields are removed, the fragment spread is inlined, and the
// now-unused fragment definition is dropped, yielding the canonical form
// that validation and planning rely on.
func ExampleNormalizeDocument() {
input := []byte(`
query MyQuery {
hello
hello
foo {
bar
bar
}
...MyFragment
}
fragment MyFragment on Query {
hello
foo {
bar
}
}
`)
schema := []byte(`
type Query {
hello: String
foo: Foo
}
type Foo {
bar: String
}
`)
report := &operationreport.Report{}
document := ast.NewSmallDocument()
parser := astparser.NewParser()
document.Input.ResetInputBytes(input)
parser.Parse(document, report)
if report.HasErrors() {
panic(report.Error())
}
// the schema gets its own document and parser; the report is shared
schemaDocument := ast.NewSmallDocument()
schemaParser := astparser.NewParser()
schemaDocument.Input.ResetInputBytes(schema)
schemaParser.Parse(schemaDocument, report)
if report.HasErrors() {
panic(report.Error())
}
// graphql-go-tools is very strict about the schema
// the above GraphQL Schema is not fully valid, e.g. the `schema { query: Query }` part is missing
// we can fix this automatically by merging the schema with a base schema
err := asttransform.MergeDefinitionWithBaseSchema(schemaDocument)
if err != nil {
panic(err)
}
// you can customize what rules the normalizer should apply
normalizer := astnormalization.NewWithOpts(
astnormalization.WithExtractVariables(),
astnormalization.WithInlineFragmentSpreads(),
astnormalization.WithRemoveFragmentDefinitions(),
astnormalization.WithRemoveNotMatchingOperationDefinitions(),
)
// It's generally recommended to always give your operation a name
// If it doesn't have a name, just add one to the AST before normalizing it
// This is not strictly necessary, but ensures that all normalization rules work as expected
normalizer.NormalizeNamedOperation(document, schemaDocument, []byte("MyQuery"), report)
if report.HasErrors() {
panic(report.Error())
}
out, err := astprinter.PrintStringIndent(document, nil, " ")
if err != nil {
panic(err)
}
fmt.Println(out)
// Output: query MyQuery {
// hello
// foo {
// bar
// }
// }
}
/*
Okay, that was a lot of work, but now we have a normalized document.
As you can see, all the duplicate fields have been removed and the fragment has been inlined.
What can we do with it?
Well, the possibilities are endless,
but why don't we start with validating the document against a schema?
Alright. Let's do it!
*/
// ExampleValidateDocument validates an operation document against a schema
// document using the library's default operation validation rules.
// (Both documents are empty here; the example only shows the call shape.)
func ExampleValidateDocument() {
	schemaDocument := ast.NewSmallDocument()
	operationDocument := ast.NewSmallDocument()
	report := &operationreport.Report{}
	astvalidation.DefaultOperationValidator().Validate(schemaDocument, operationDocument, report)
	if report.HasErrors() {
		panic(report.Error())
	}
}
/*
Fantastic, we've now got a GraphQL document that is valid against a schema.
As a next step, we could generate a cache key for the document.
This is very useful if we want to start doing expensive operations afterward that could be de-duplicated or cached.
At the same time, generating a cache key from a normalized document is not as trivial as it sounds.
Let's take a look!
*/
// ExampleGenerateCacheKey derives an xxhash-based cache key from a normalized
// operation plus its extracted variables. Such a key can de-duplicate
// expensive downstream work like query planning.
func ExampleGenerateCacheKey() {
operationDocument := ast.NewSmallDocument()
schemaDocument := ast.NewSmallDocument()
report := &operationreport.Report{}
normalizer := astnormalization.NewWithOpts(
astnormalization.WithExtractVariables(),
astnormalization.WithInlineFragmentSpreads(),
astnormalization.WithRemoveFragmentDefinitions(),
astnormalization.WithRemoveNotMatchingOperationDefinitions(),
)
normalizer.NormalizeNamedOperation(operationDocument, schemaDocument, []byte("MyQuery"), report)
printer := &astprinter.Printer{}
// xxhash.Digest implements io.Writer, so the printer can stream the
// canonical operation text directly into the hash state.
keyGen := xxhash.New()
err := printer.Print(operationDocument, schemaDocument, keyGen)
if err != nil {
panic(err)
}
// you might be thinking that we're done now, but we're not
// we've extracted the variables, so we need to add them to the cache key
_, err = keyGen.Write(operationDocument.Input.Variables)
if err != nil {
panic(err)
}
key := keyGen.Sum64()
fmt.Printf("%x", key) // Output: {cache key}
}
/*
Good job! We now have a correct cache key for the document.
We're using this ourselves in production to de-duplicate e.g. planning the execution of a GraphQL Operation.
There's just one problem with the above code.
An attacker could easily send the same document with a different Operation name and get a different cache key.
This could quite easily fill up our cache with duplicate entries.
To prevent this, we can make the operation name static.
Let's change our code to account for this.
*/
// ExampleGenerateCacheKeyWithStaticOperationName is the hardened variant of
// ExampleGenerateCacheKey: before hashing, every matching operation is renamed
// to a fixed name ("O") so that two documents differing only in operation name
// hash to the same cache key and cannot be used to flood the cache.
func ExampleGenerateCacheKeyWithStaticOperationName() {
staticOperationName := []byte("O")
operationDocument := ast.NewSmallDocument()
schemaDocument := ast.NewSmallDocument()
report := &operationreport.Report{}
normalizer := astnormalization.NewWithOpts(
astnormalization.WithExtractVariables(),
astnormalization.WithInlineFragmentSpreads(),
astnormalization.WithRemoveFragmentDefinitions(),
astnormalization.WithRemoveNotMatchingOperationDefinitions(),
)
// First, we add the static operation name to the document and get an "address" to the byte slice (string) in the document
// We cannot just add a string to an AST because the AST only stores references to byte slices
// Storing strings in AST nodes would be very inefficient and would require a lot of allocations
nameRef := operationDocument.Input.AppendInputBytes(staticOperationName)
for _, node := range operationDocument.RootNodes {
if node.Kind != ast.NodeKindOperationDefinition {
continue
}
name := operationDocument.OperationDefinitionNameString(node.Ref)
if name != "MyQuery" {
continue
}
// Then we set the name of the operation to the address of the static operation name
// Now we have renamed MyQuery to O
operationDocument.OperationDefinitions[node.Ref].Name = nameRef
}
// Now we can normalize the modified document
// All Operations that don't have the name O will be removed
normalizer.NormalizeNamedOperation(operationDocument, schemaDocument, staticOperationName, report)
printer := &astprinter.Printer{}
keyGen := xxhash.New()
// stream the canonical operation text into the hash, then mix in the
// extracted variables — both are part of the identity of the request
err := printer.Print(operationDocument, schemaDocument, keyGen)
if err != nil {
panic(err)
}
_, err = keyGen.Write(operationDocument.Input.Variables)
if err != nil {
panic(err)
}
key := keyGen.Sum64()
fmt.Printf("%x", key) // Output: {cache key}
}
/*
With these changes, the name of the operation doesn't matter anymore.
Independent of the name, the cache key will always be the same.
As a next step, we could start planning the execution of the operation.
This is a very complex topic, so we'll just show you how to plan the operation.
Going into detail would be beyond the scope of this example.
It took us years to get this right, so we won't be able to explain it in a few lines of code.
graphql-go-tools is not a GraphQL server by itself.
It's a library that you can use to build Routers, Gateways, or even GraphQL Server frameworks on top of it.
What this means is that there's no built-in support to define "resolvers".
Instead, you have to define DataSources that are used to resolve fields.
A DataSource can be anything, e.g. a static value, a HTTP JSON API, a GraphQL API, a WASM Lambda, a Database etc.
It's up to you to implement the DataSource interface.
The simplest DataSource is the StaticDataSource.
It's a DataSource that returns a static value for a field.
Let's see how to use it!
You have to attach the DataSource to one or more fields in the schema,
and you have to provide a config and a factory for the DataSource,
so that the planner knows how to create an execution plan for the DataSource and an "instance" of the DataSource.
*/
// ExamplePlanOperation builds an execution plan for an operation using a
// StaticDataSource attached to Query.hello. The plan.Configuration tells the
// planner which DataSource resolves which fields and how to map the result.
func ExamplePlanOperation() {
config := plan.Configuration{
DataSources: []plan.DataSourceConfiguration{
{
// RootNodes declares which (type, field) pairs this DataSource can resolve
RootNodes: []plan.TypeField{
{
TypeName: "Query",
FieldNames: []string{"hello"},
},
},
// Custom carries DataSource-specific JSON config; here the static payload itself
Custom: staticdatasource.ConfigJSON(staticdatasource.Configuration{
Data: `{"hello":"world"}`,
}),
// Factory creates the DataSource "instance" during planning
Factory: &staticdatasource.Factory{},
},
},
Fields: []plan.FieldConfiguration{
{
TypeName: "Query", // attach this config to the Query type and the field hello
FieldName: "hello",
DisableDefaultMapping: true, // disable the default mapping for this field which only applies to GraphQL APIs
Path: []string{"hello"}, // returns the value of the field "hello" from the JSON data
},
},
IncludeInfo: true,
}
// NOTE(review): both documents are empty placeholders here — a runnable
// version would parse the query/schema shown in the comments below.
operationDocument := ast.NewSmallDocument() // containing the following query: query O { hello }
schemaDocument := ast.NewSmallDocument()
report := &operationreport.Report{}
operationName := "O"
planner := plan.NewPlanner(context.Background(), config)
executionPlan := planner.Plan(operationDocument, schemaDocument, operationName, report)
if report.HasErrors() {
panic(report.Error())
}
fmt.Printf("%+v", executionPlan) // Output: Plan...
}
/*
As you can see, the planner has created a plan for us.
This plan can now be executed by using the Resolver.
*/
// ExampleExecuteOperation resolves a previously planned operation with the
// Resolver. Synchronous (query/mutation) plans are written to an in-memory
// buffer; subscription plans are acknowledged but not handled here.
func ExampleExecuteOperation() {
// NOTE(review): preparedPlan is left as the zero value (nil) for
// illustration — in real code it comes from the planner (see ExamplePlanOperation),
// so with a nil plan neither switch case fires.
var preparedPlan plan.Plan
resolver := resolve.New(context.Background(), resolve.ResolverOptions{
MaxConcurrency: 1024,
})
ctx := resolve.NewContext(context.Background())
switch p := preparedPlan.(type) {
case *plan.SynchronousResponsePlan:
out := &bytes.Buffer{}
err := resolver.ResolveGraphQLResponse(ctx, p.Response, nil, out)
if err != nil {
panic(err)
}
fmt.Println(out.String()) // Output: {"data":{"hello":"world"}}
case *plan.SubscriptionResponsePlan:
// this is a Query, so we ignore Subscriptions for now, but they are supported
}
}
/*
Well done! You've now seen how to parse, print, validate, normalize, plan and execute a GraphQL document.
You've built a complete GraphQL API Gateway from scratch.
That said, this was really just the tip of the iceberg.
When you look under the hood of graphql-go-tools, you'll notice that a lot of its functionality is built on top of the AST,
more specifically on top of the "astvisitor" package.
It comes with a lot of useful bells and whistles that help you to solve complex problems.
You'll notice that almost everything, from normalization to printing, planning, validation, etc.
is built on top of the AST and the astvisitor package.
Let's take a look at a basic example of how to use the astvisitor package to build higher level functionality.
Here's a simple use case:
Let's walk through the AST of a GraphQL document and extract all tuples of (TypeName, FieldName).
This is useful, e.g. when you want to extract information about the fields that are used in a document.
*/
// visitor collects, for every field visited while walking an operation against
// its schema, a [enclosingTypeName, fieldName, fieldTypeName] tuple.
type visitor struct {
walker *astvisitor.Walker // the walker driving the traversal; provides context such as the enclosing type
operation, definition *ast.Document // the operation being walked and the schema it is walked against
typeFields [][]string // accumulated tuples, appended in visit order
}
// EnterField is the astvisitor callback invoked for each field in the
// operation. It records the enclosing type name, the field name, and the
// field's type name as resolved against the schema definition.
func (v *visitor) EnterField(ref int) {
// get the name of the enclosing type (Query)
enclosingTypeName := v.walker.EnclosingTypeDefinition.NameString(v.definition)
// get the name of the field (hello)
fieldName := v.operation.FieldNameString(ref)
// get the type definition of the field (String)
definitionRef, exists := v.walker.FieldDefinition(ref)
if !exists {
// the field has no definition in the schema; record nothing
return
}
// get the name of the field type (String)
fieldTypeName := v.definition.FieldDefinitionTypeNameString(definitionRef)
v.typeFields = append(v.typeFields, []string{enclosingTypeName, fieldName, fieldTypeName})
}
// ExampleWalkAST wires the visitor above into a Walker and walks an operation
// against a schema, printing all (TypeName, FieldName, FieldTypeName) tuples.
func ExampleWalkAST() {
// NOTE(review): both documents are empty placeholders — the comments below
// show the query/schema a runnable version would parse into them.
operationDocument := ast.NewSmallDocument() // containing the following query: query O { hello }
schemaDocument := ast.NewSmallDocument() // containing the following schema: type Query { hello: String }
report := &operationreport.Report{}
walker := astvisitor.NewWalker(24)
vis := &visitor{
walker: &walker,
operation: operationDocument,
definition: schemaDocument,
}
// only the EnterField callback is registered; all other AST nodes are skipped
walker.RegisterEnterFieldVisitor(vis)
walker.Walk(operationDocument, schemaDocument, report)
if report.HasErrors() {
panic(report.Error())
}
fmt.Printf("%+v", vis.typeFields) // Output: [[Query hello String]]
}
/*
This is just a very basic example of what you can do with the astvisitor package,
but you can see that it's very powerful and flexible.
You can register callbacks for every AST node and do whatever you want with it.
In addition, the walker helps you to keep track of the current position in the AST,
and it can help you to figure out the enclosing type of a field, or the ancestors of a node.
*/