From f46e13656ea45d974e92702fcb24c1f08e67c1a9 Mon Sep 17 00:00:00 2001 From: Paris Date: Tue, 9 Oct 2018 11:50:33 -0700 Subject: [PATCH 01/10] Initial GraphQL docs --- Gemfile.lock | 2 +- _config.yml | 6 + js/graphql.md | 1346 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1353 insertions(+), 1 deletion(-) create mode 100644 js/graphql.md diff --git a/Gemfile.lock b/Gemfile.lock index d266eb96bec..d3d2951e086 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -241,4 +241,4 @@ DEPENDENCIES tzinfo-data BUNDLED WITH - 1.16.5 + 1.16.6 diff --git a/_config.yml b/_config.yml index ad4edddd3e1..3683d0c54f1 100644 --- a/_config.yml +++ b/_config.yml @@ -169,6 +169,12 @@ category_list: class: aws-icon-ps-60-network-nodes url_external: 'https://marketplace.visualstudio.com/items?itemName=aws-amplify.aws-amplify-vscode' icon: '/images/icons/Misc/Present.svg' + - title: GraphQL Transform + excerpt: Transform simple GraphQL schema files into full application backends. + cta: Read more + class: aws-icon-ps-60-network-nodes + icon: '/images/icons/Misc/Tool.svg' + url: '/js/graphql' - category: Categories title: API Guides excerpt: Add cloud features to your app quickly using our toolchain and declarative APIs. diff --git a/js/graphql.md b/js/graphql.md new file mode 100644 index 00000000000..2bdcb1cfc57 --- /dev/null +++ b/js/graphql.md @@ -0,0 +1,1346 @@ +--- +--- +# The GraphQL Transform + +The GraphQL Transform is a library that simplifies the process of developing, deploying, and maintaining GraphQL APIs. With it, you define your API using the GraphQL Schema Definition Language (SDL) and can then use this library where to transform it into a fully descriptive cloudformation template that implements the API's data model. + +The Transform can be run as an independent library, however it is integrated into the Amplify CLI via the `API` category for you to use immediately. + +## Quick Start + +Navigate into the root of a JavaScript, iOS, or Android project and run: + +```bash +amplify init +``` + +Follow the wizard to create a new app. After finishing the wizard run: + +```bash +amplify add api + +# Select the graphql option and when asked if you +# have a schema, say No. +# Select one of the default samples. You can change it later. +# Choose to edit the schema and it will open your schema.graphql in your editor. +``` + +You can leave the sample as is or try this schema. + +``` +type Blog @model { + id: ID! + name: String! + posts: [Post] @connection(name: "BlogPosts") +} +type Post @model { + id: ID! + title: String! + blog: Blog @connection(name: "BlogPosts") + comments: [Comment] @connection(name: "PostComments") +} +type Comment @model { + id: ID! + content: String + post: Post @connection(name: "PostComments") +} +``` + +Once you are happy with your schema, save the file and click enter in your +terminal window. If now error messages are thrown then you are good to go +and can deploy your new API. + +```bash +amplify push +``` + +Go to AWS CloudFormation to view it. You can also find your project assets in the amplify/backend folder under your API. + +Once the API is finsihed deploying, try going to the AWS AppSync console and +running some of these queries in your new API's query page. + +``` +# Create a blog. Remember the returned id. +# Provide the returned id as the "blogId" variable. +mutation CreateBlog { + createBlog(input: { + name: "My New Blog!" + }) { + id + name + } +} + +# Create a post and associate it with the blog via the "postBlogId" input field. 
+# Provide the returned id as the "postId" variable. +mutation CreatePost($blogId:ID!) { + createPost(input:{title:"My Post!", postBlogId: $blogId}) { + id + title + blog { + id + name + } + } +} + +# Create a comment and associate it with the post via the "commentPostId" input field. +mutation CreateComment($postId:ID!) { + createComment(input:{content:"A comment!", commentPostId:$postId}) { + id + content + post { + id + title + blog { + id + name + } + } + } +} + +# Get a blog, its posts, and its posts comments. +query GetBlog($blogId:ID!) { + getBlog(id:$blogId) { + id + name + posts(filter: { + title: { + eq: "My Post!" + } + }) { + items { + id + title + comments { + items { + id + content + } + } + } + } + } +} + +# List all blogs, their posts, and their posts comments. +query ListBlogs { + listBlogs { # Try adding: listBlog(filter: { name: { eq: "My New Blog!" } }) + items { + id + name + posts { # or try adding: posts(filter: { title: { eq: "My Post!" } }) + items { + id + title + comments { # and so on ... + items { + id + content + } + } + } + } + } + } +} +``` + +If you want to update your API, open your project's `backend/api/~apiname~/schema.graphql` file (NOT the one in the `backend/api/~apiname~/build` folder) and edit it in your favorite code editor. You can compile the `backend/api/~apiname~/schema.graphql` by running: + +``` +amplify api gql-compile +``` + +and view the compiled schema output in `backend/api/~apiname~/build/schema.graphql`. + +You can then push updated changes with: + +``` +amplify push +``` + +## Directives + +### @model + +Object types that are annotated with `@model` are top-level entities in the +generated API. Objects annotated with `@model` are stored in DynamoDB and are +capable of being protected via `@auth`, related to other objects via `@connection`, +and streamed into Elasticsearch via `@searchable`. + +#### Definition + +``` +directive @model( + queries: ModelQueryMap, + mutations: ModelMutationMap +) on OBJECT +input ModelMutationMap { create: String, update: String, delete: String } +input ModelQueryMap { get: String, list: String } +``` + +#### Usage + +Define a GraphQL object type and annotate it with the `@model` directive to store +objects of that type in DynamoDB and automatically configure CRUDL queries and +mutations. + +``` +type Post @model { + id: ID! # id: ID! is a required attribute. + title: String! + tags: [String!]! +} +``` + +You may also override the names of any generated queries and mutations, or remove operations entirely. + +``` +type Post @model(queries: { get: "post" }, mutations: null) { + id: ID! + title: String! + tags: [String!]! +} +``` + +This would create and configure a single query field `post(id: ID!): Post` and +no mutation fields. + +#### Generates + +A single `@model` directive configures the following AWS resources: + +- An Amazon DynamoDB table with 5 r/w units. Support for auto-scaling and encryption at rest coming soon. +- An AWS AppSync DataSource configured to access the table above. +- An AWS IAM role attached to the DataSource that allows AWS AppSync to call the above table on your behalf. +- Up to 8 resolvers (create, update, delete, get, list, onCreate, onUpdate, onDelete) but this is configurable via the `query`, `mutation`, and `subscription` arguments on the `@model` directive. + +This input schema document + +``` +type Post @model { + id: ID! 
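  metadata: MetaData # implied by the Post.metadata field and MetaDataInput in the generated output below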
+ title: String +} +type MetaData { + category: Category +} +enum Category { comedy news } +``` + +would generate the following schema parts + +``` +type Post { + id: ID! + title: String! + metadata: MetaData +} + +type MetaData { + category: Category +} + +enum Category { + comedy + news +} + +input MetaDataInput { + category: Category +} + +enum ModelSortDirection { + ASC + DESC +} + +type ModelPostConnection { + items: [Post] + nextToken: String +} + +input ModelStringFilterInput { + ne: String + eq: String + le: String + lt: String + ge: String + gt: String + contains: String + notContains: String + between: [String] + beginsWith: String +} + +input ModelIDFilterInput { + ne: ID + eq: ID + le: ID + lt: ID + ge: ID + gt: ID + contains: ID + notContains: ID + between: [ID] + beginsWith: ID +} + +input ModelIntFilterInput { + ne: Int + eq: Int + le: Int + lt: Int + ge: Int + gt: Int + contains: Int + notContains: Int + between: [Int] +} + +input ModelFloatFilterInput { + ne: Float + eq: Float + le: Float + lt: Float + ge: Float + gt: Float + contains: Float + notContains: Float + between: [Float] +} + +input ModelBooleanFilterInput { + ne: Boolean + eq: Boolean +} + +input ModelPostFilterInput { + id: ModelIDFilterInput + title: ModelStringFilterInput + and: [ModelPostFilterInput] + or: [ModelPostFilterInput] + not: ModelPostFilterInput +} + +type Query { + getPost(id: ID!): Post + listPosts(filter: ModelPostFilterInput, limit: Int, nextToken: String): ModelPostConnection +} + +input CreatePostInput { + title: String! + metadata: MetaDataInput +} + +input UpdatePostInput { + id: ID! + title: String + metadata: MetaDataInput +} + +input DeletePostInput { + id: ID +} + +type Mutation { + createPost(input: CreatePostInput!): Post + updatePost(input: UpdatePostInput!): Post + deletePost(input: DeletePostInput!): Post +} + +type Subscription { + onCreatePost: Post @aws_subscribe(mutations: ["createPost"]) + onUpdatePost: Post @aws_subscribe(mutations: ["updatePost"]) + onDeletePost: Post @aws_subscribe(mutations: ["deletePost"]) +} +``` + +### @auth + +Object types that are annotated with `@auth` are protected by one of the +supported authorization strategies. Types that are annotated with `@auth` +must also be annotated with `@model`. Currently, Amazon Cognito user pools +is the only supported authorization mode. + +#### Definition + +``` +# When applied to a type, augments the application with +# owner and group-based authorization rules. +directive @auth(rules: [AuthRule!]!) on OBJECT +input AuthRule { + allow: AuthStrategy! + ownerField: String # defaults to "owner" + identityField: String # defaults to "username" + groupsField: String + groups: [String] + queries: [ModelQuery] + mutations: [ModelMutation] +} +enum AuthStrategy { owner groups } +enum ModelQuery { get list } +enum ModelMutation { create update delete } +``` + +#### Usage + +**Owner Authorization** + +``` +# The simplest case +type Post @model @auth(rules: [{allow: owner}]) { + id: ID! + title: String! +} + +# The long form way +type Post + @model + @auth( + rules: [ + {allow: owner, ownerField: "owner", mutations: [create, update, delete], queries: [get, list]} + ]) +{ + id: ID! + title: String! + owner: String +} +``` + +Owner authorization specifies that a user (and soon to be set of users) can access an object. To +do so, each object has an *ownerField* (by default "owner") that stores ownership information +and is verified in various ways during resolver execution. 
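For instance, with the long form rule above (where `Post` declares an `owner: String` field), a create mutation needs no explicit owner input because the caller's username is injected automatically; a minimal sketch:

```
mutation CreateOwnedPost {
  createPost(input: { title: "Only the owner can update or delete this" }) {
    id
    title
    owner # automatically set to $ctx.identity.username
  }
}
```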
+ +You can use the *queries* and *mutations* arguments to specify which operations are augmented as follows: + +**get**: If the record's owner is not the same as the logged in user (via `$ctx.identity.username`), throw `$util.unauthorized()`. +**list**: Filter `$ctx.result.items` for owned items. +**create**: Inject the logged in user's `$ctx.identity.username` as the *ownerField* automatically. +**update**: Add conditional update that checks the stored *ownerField* is the same as `$ctx.identity.username`. +**delete**: Add conditional update that checks the stored *ownerField* is the same as `$ctx.identity.username`. + +**Multi Owner Authorization (Coming Soon)** + +In the future, we will support multiple owners: + +``` +type Post @model @auth(rules: [{allow: owner, ownerField: "owners"}]) { + id: ID! + title: String! + owners: [String] +} +``` + +**Static Group Authorization** + +``` +# Static group auth +type Post @model @auth(rules: [{allow: groups, groups: ["Admin"]}]) { + id: ID! + title: String +} +``` + +If the user credential (as specified by the resolver's `$ctx.identity`) is not +enrolled in the *Admin* group, throw an unauthorized error using `$util.unauthorized()`. + +**Dynamic Group Auth** + +``` +# Dynamic group authorization with multiple groups +type Post @model @auth(rules: [{allow: groups, groupsField: "groups"}]) { + id: ID! + title: String + groups: [String] +} + +# Dynamic group authorization with a single group +type Post @model @auth(rules: [{allow: groups, groupsField: "group"}]) { + id: ID! + title: String + group: String +} +``` + +With dynamic group authorization, each record contains an attribute specifying +what groups should be able to access it. Use the *groupsField* argument to +specify which attribute in the underlying data store holds this group +information. To specify that a single group should have access, use a field of +type `String`. To specify that multiple groups should have access, use a field of +type `[String]`. + +#### Generates + +The `@auth` directive will add authorization snippets to any relevant resolver +mapping templates at compile time. Different operations use different methods +of authorization. + +**Owner Authorization** + +``` +type Post @model @auth(rules: [{allow: owner}]) { + id: ID! + title: String! +} +``` + +the generated resolvers would be protected like so: + +- `Mutation.createX`: Verify the requesting user has a valid credential and automatically set the **owner** attribute to equal `$ctx.identity.username`. +- `Mutation.updateX`: Update the condition expression so that the DynamoDB `UpdateItem` operation only succeeds if the record's **owner** attribute equals the caller's `$ctx.identity.username`. +- `Mutation.deleteX`: Update the condition expression so that the DynamoDB `DeleteItem` operation only succeeds if the record's **owner** attribute equals the caller's `$ctx.identity.username`. +- `Query.getX`: In the response mapping template verify that the result's **owner** attribute is the same as the `$ctx.identity.username`. If it is not return null. +- `Query.listX`: In the response mapping template filter the result's **items** such that only items with an **owner** attribute that is the same as the `$ctx.identity.username` are returned. + +**Multie Owner Authorization** + +Work in progress. + +**Static Group Authorization** + +``` +type Post @model @auth(rules: [{allow: groups, groups: ["Admin"]}]) { + id: ID! + title: String! + groups: String +} +``` + +Static group auth is simpler than the others. 
The generated resolvers would be protected like so: + +- `Mutation.createX`: Verify the requesting user has a valid credential and that `ctx.identity.claims.get("cognito:groups")` contains the **Admin** group. If it does not, fail`. +- `Mutation.updateX`: Verify the requesting user has a valid credential and that `ctx.identity.claims.get("cognito:groups")` contains the **Admin** group. If it does not, fail. +- `Mutation.deleteX`: Verify the requesting user has a valid credential and that `ctx.identity.claims.get("cognito:groups")` contains the **Admin** group. If it does not, fail. +- `Query.getX`: Verify the requesting user has a valid credential and that `ctx.identity.claims.get("cognito:groups")` contains the **Admin** group. If it does not, fail. +- `Query.listX`: Verify the requesting user has a valid credential and that `ctx.identity.claims.get("cognito:groups")` contains the **Admin** group. If it does not, fail. + +**Dynamic Group Authorization** + +``` +type Post @model @auth(rules: [{allow: groups, groupsField: "groups"}]) { + id: ID! + title: String! + groups: String +} +``` + +the generated resolvers would be protected like so: + +- `Mutation.createX`: Verify the requesting user has a valid credential and that it contains a claim to atleast one group passed to the query in the `$ctx.args.input.groups` argument. +- `Mutation.updateX`: Update the condition expression so that the DynamoDB `UpdateItem` operation only succeeds if the record's **groups** attribute contains at least one of the caller's claimed groups via `ctx.identity.claims.get("cognito:groups")`. +- `Mutation.deleteX`: Update the condition expression so that the DynamoDB `DeleteItem` operation only succeeds if the record's **groups** attribute contains at least one of the caller's claimed groups via `ctx.identity.claims.get("cognito:groups")` +- `Query.getX`: In the response mapping template verify that the result's **groups** attribute contains at least one of the caller's claimed groups via `ctx.identity.claims.get("cognito:groups")`. +- `Query.listX`: In the response mapping template filter the result's **items** such that only items with a **groups** attribute that contains at least one of the caller's claimed groups via `ctx.identity.claims.get("cognito:groups")`. + + +### @connection + +The `@connection` directive enables you to specify relationships between `@model` object types. +Currently, this supports one-to-one, one-to-many, and many-to-one relationships. An error +is thrown if you try to configure a many-to-many relationship. + +#### Definition + +``` +directive @connection(name: String) on FIELD_DEFINITION +``` + +#### Usage + +Relationships are specified by annotating fields on an `@model` object type with +the `@connection` directive. + +**Unnamed Connections** + +In the simplest case, you can define a one-to-one connection: + +``` +type Project @model { + id: ID! + name: String + team: Team @connection +} +type Team @model { + id: ID! + name: String! +} +``` + +After it's transformed, you can create projects with a team as follows: + +``` +mutation CreateProject { + createProject(input: { name: "New Project", projectTeamId: "a-team-id"}) { + id + name + team { + id + name + } + } +} +``` + +> **Note** The **Project.team** resolver is preconfigured to work with the defined connection. + +Likewise, you can make a simple one-to-many connection as follows: + +``` +type Post { + id: ID! + title: String! + comments: [Comment] @connection +} +type Comment { + id: ID! + content: String! 
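  # Note: the transform adds a "postCommentsId" input field (used in the
  # mutation below) that links each Comment to its Post.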
+} +``` + +After it's transformed, you can create comments with a post as follows: + +``` +mutation CreateCommentOnPost { + createComment(input: { content: "A comment", postCommentsId: "a-post-id"}) { + id + content + } +} +``` + +> **Note** The postCommentsId field on the input may seem unusual. In the one-to-many case without a provided `name` argument there is only partial information to work with, which results in the unusual name. To fix this, provide a value for the @connection's *name* argument and complete the bi-directional relationship by adding a corresponding @connection field to the **Comment** type. + +**Named Connections** + +The **name** argument specifies a name for the +connection and it's used to create bi-directional relationships that reference +the same underlying foreign key. + +For example, if you wanted your `Post.comments` +and `Comment.post` fields to refer to opposite sides of the same relationship, +you need to provide a name. + +``` +type Post { + id: ID! + title: String! + comments: [Comment] @connection(name: "PostComments") +} +type Comment { + id: ID! + content: String! + post: Post @connection(name: "PostComments") +} +``` + +After it's transformed, create comments with a post as follows: + +``` +mutation CreateCommentOnPost { + createComment(input: { content: "A comment", commentPostId: "a-post-id"}) { + id + content + post { + id + title + comments { + id + # and so on... + } + } + } +} +``` + +#### Generates + +In order to keep connection queries fast and efficient, the GraphQL transform manages +global secondary indexes (GSIs) on the generated tables on your behalf. In the future we +are investigating using adjacency lists along side GSIs for different use cases that are +connection heavy. + +TODO: Finish docs + + +### @versioned + +The `@versioned` directive adds object versioning and conflict resolution to a type. + +#### Definition + +``` +directive @versioned(versionField: String = "version", versionInput: String = "expectedVersion") on OBJECT +``` + +#### Usage + +Annotate a `@model` type with the `@versioned` directive to add object versioning and conflict detection to a type. + +``` +type Post @model @versioned { + id: ID! + title: String! + version: Int! # <- If not provided, it is added for you. +} +``` + +**Creating a Post automatically sets the version to 1** + +``` +mutation Create { + createPost(input:{ + title:"Conflict detection in the cloud!" + }) { + id + title + version # will be 1 + } +} +``` + +**Updating a Post requires passing the "expectedVersion" which is the object's last saved version** + +> Note: When updating an object, the version number will automatically increment. + +``` +mutation Update($postId: ID!) { + updatePost( + input:{ + id: $postId, + title: "Conflict detection in the cloud is great!", + expectedVersion: 1 + } + ) { + id + title + version # will be 2 + } +} +``` + +**Deleting a Post requires passing the "expectedVersion" which is the object's last saved version** + +``` +mutation Delete($postId: ID!) { + deletePost( + input: { + id: $postId, + expectedVersion: 2 + } + ) { + id + title + version + } +} +``` + +Update and delete operations will fail if the **expectedVersion** does not match the version +stored in DynamoDB. You may change the default name of the version field on the type as well as the name +of the input field via the **versionField** and **versionInput** arguments on the `@versioned` directive. 
+ +#### Generates + +The `@versioned` directive manipulates resolver mapping templates and will store a `version` field in versioned objects. + +### @searchable + +The `@searchable` directive handles streaming the data of an `@model` object type to +Amazon Elasticsearch Service and configures search resolvers that search that information. + +> Note: Support for adding the `@searchable` directive does not yet provide automatic indexing for any existing data to Elasticsearch. View the feature request [here](https://github.com/aws-amplify/amplify-cli/issues/98). + +#### Definition + +``` +# Streams data from DynamoDB to Elasticsearch and exposes search capabilities. +directive @searchable(queries: SearchableQueryMap) on OBJECT +input SearchableQueryMap { search: String } +``` + +#### Usage + +Store posts in DynamoDB and automatically stream them to ElasticSearch +via lambda and connect a searchQueryField resolver. + +``` +type Post @model @searchable { + id: ID! + title: String! + createdAt: String! + updatedAt: String! + upvotes: Int +} +``` + +You may then create objects in DynamoDB that will automatically streamed to lambda +using the normal `createPost` mutation. + +``` +mutation CreatePost { + createPost(input: { title: "Stream me to Elasticsearch!" }) { + id + title + createdAt + updatedAt + upvotes + } +} +``` + +And then search for posts using a `match` query: + +``` +query SearchPosts { + searchPost(filter: { title: { match: "Stream" }}) { + items { + id + title + } + } +} +``` + +There are multiple `SearchableTypes` generated in the schema, based on the datatype of the fields you specify in the Post type. + +The `filter` parameter in the search queery has a searchable type field that corresponds to the field listed in the Post type. For example, the `title` field of the `filter` object, has the following properties (containing the operators that are applicable to the `string` type): + +* `eq` - which uses the Elasticsearch keyword type to match for the exact term. +* `ne` - this is an iverse operation of `eq`. +* `matchPhrase` - searches using the Elasticsearch's [Match Phrase Query](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/query-dsl-match-query-phrase.html) to filter the documents in the search query. +* `matchPhrasePrefix` - This uses the Elasticsearch's [Match Phrase Prefix Query](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/query-dsl-match-query-phrase-prefix.html) to filter the documents in the search query. +* `multiMatch` - Corresponds to the Elasticsearch [Multi Match Query](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/query-dsl-multi-match-query.html). +* `exists` - Corresponds to the Elasticsearch [Exists Query](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/query-dsl-exists-query.html). +* `wildcard` - Corresponds to the Elasticsearch [Wildcard Query](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/query-dsl-wildcard-query.html). +* `regexp` - Corresponds to the Elasticsearch [Regexp Query](https://www.elastic.co/guide/en/elasticsearch/reference/6.2/query-dsl-regexp-query.html). + + +For example, you can filter using the wildcard expression to search for posts using the following `wildcard` query: + +``` +query SearchPosts { + searchPost(filter: { title: { wildcard: "S*Elasticsearch!" }}) { + items { + id + title + } + } +} +``` + +The above query returns all documents whose `title` begins with `S` and ends with `Elasticsearch!`. 
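The other operators listed above use the same filter shape. For instance, a similar search written with `regexp` might look like the following sketch (exact matching behavior depends on how the field is analyzed in Elasticsearch):

```
query SearchPosts {
  searchPost(filter: { title: { regexp: "S.*Elasticsearch!" }}) {
    items {
      id
      title
    }
  }
}
```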
+ +Moreover you can use the `filter` parameter to pass a nested `and`/`or`/`not` conditions. By default, every operation in the filter properties is *AND* ed. You can use the `or` or `not` properties in the `filter` parameter of the search query to override this behavior. Each of these operators (`and`, `or`, `not` properties in the filter object) accepts an array of SearchableTypes which are in turn joined by the corresponding operator. For example, consider the following search query: + +``` +query SearchPosts { + searchPost(filter: { + title: { wildcard: "S*" } + or: [ + { createdAt: { eq: "08/20/2018" } }, + { updatedAt: { eq: "08/20/2018" } } + ] + }) { + items { + id + title + } + } +} +``` + +Assuming, you used the `createPost` mutation to create new posts with `title`, `createdAt` and `updatedAt` values, the above search query will return you a list of all `Posts`, whose `title` starts with `S` _and_ have `createdAt` _or_ `updatedAt` value as `08/20/2018`. + +Here is a complete list of searchable operations per GraphQL type supported as of today: + +| GraphQL Type | Searchable Operation | +|-------------:|:-------------| +| String | `ne`, `eq`, `match`, `matchPhrase`, `matchPhrasePrefix`, `multiMatch`, `exists`, `wildcard`, `regexp` | +| Int | `ne`, `gt`, `lt`, `gte`, `lte`, `eq`, `range` | +| Float | `ne`, `gt`, `lt`, `gte`, `lte`, `eq`, `range` | +| Boolean | `eq`, `ne` | + +#### Generates + +Todo: What does searchable generate. + +## Examples + +### Simple Todo + +``` +type Todo @model { + id: ID! + name: String! + description: String +} +``` + +### Blog + +``` +type Blog @model { + id: ID! + name: String! + posts: [Post] @connection(name: "BlogPosts") +} +type Post @model { + id: ID! + title: String! + blog: Blog @connection(name: "BlogPosts") + comments: [Comment] @connection(name: "PostComments") +} +type Comment @model { + id: ID! + content: String + post: Post @connection(name: "PostComments") +} +``` + +#### Blog Queries + +``` +# Create a blog. Remember the returned id. +# Provide the returned id as the "blogId" variable. +mutation CreateBlog { + createBlog(input: { + name: "My New Blog!" + }) { + id + name + } +} + +# Create a post and associate it with the blog via the "postBlogId" input field. +# Provide the returned id as the "postId" variable. +mutation CreatePost($blogId:ID!) { + createPost(input:{title:"My Post!", postBlogId: $blogId}) { + id + title + blog { + id + name + } + } +} + +# Create a comment and associate it with the post via the "commentPostId" input field. +mutation CreateComment($postId:ID!) { + createComment(input:{content:"A comment!", commentPostId:$postId}) { + id + content + post { + id + title + blog { + id + name + } + } + } +} + +# Get a blog, its posts, and its posts comments. +query GetBlog($blogId:ID!) { + getBlog(id:$blogId) { + id + name + posts(filter: { + title: { + eq: "My Post!" + } + }) { + items { + id + title + comments { + items { + id + content + } + } + } + } + } +} + +# List all blogs, their posts, and their posts comments. +query ListBlogs { + listBlogs { # Try adding: listBlog(filter: { name: { eq: "My New Blog!" } }) + items { + id + name + posts { # or try adding: posts(filter: { title: { eq: "My Post!" } }) + items { + id + title + comments { # and so on ... + items { + id + content + } + } + } + } + } + } +} +``` + +### Task App + +**Note: To use the @auth directive, the API must be configured to use Amazon Cognito user pools.** + +**There is currently a bug with the user pool creation. 
To make this work you need to have your own user pool and then pass the id via a CloudFormation parameter. The Amplify CLI user pool with the default setting doesn't work (a fix is in the works).** + +``` +type Task + @model + @auth(rules: [ + {allow: groups, groups: ["Managers"], mutations: [create, update, delete], queries: null}, + {allow: groups, groups: ["Employees"], mutations: null, queries: [get, list]} + ]) +{ + id: ID! + title: String! + description: String + status: String +} +type PrivateNote + @model + @auth(rules: [{allow: owner}]) +{ + id: ID! + content: String! +} +``` + +#### Task Queries + +``` +# Create a task. Only allowed if a manager. +mutation M { + createTask(input:{ + title:"A task", + description:"A task description", + status: "pending" + }) { + id + title + description + } +} + +# Get a task. Allowed if an employee. +query GetTask($taskId:ID!) { + getTask(id:$taskId) { + id + title + description + } +} + +# Automatically inject the username as owner attribute. +mutation CreatePrivateNote { + createPrivateNote(input:{content:"A private note of user 1"}) { + id + content + } +} + +# Unauthorized error if not owner. +query GetPrivateNote($privateNoteId:ID!) { + getPrivateNote(id:$privateNoteId) { + id + content + } +} + +# Return only my own private notes. +query ListPrivateNote { + listPrivateNote { + items { + id + content + } + } +} +``` + +### Conflict Detection + +``` +type Note @model @versioned { + id: ID! + content: String! + version: Int! # You can leave this out. Validation fails if this is not a int like type (Int/BigInt) and is always coerced to non-null. +} +``` + +#### Conflict Detection Queries + +``` +mutation Create { + createNote(input:{ + content:"A note" + }) { + id + content + version + } +} + +mutation Update($noteId: ID!) { + updateNote(input:{ + id: $noteId, + content:"A second version", + expectedVersion: 1 + }) { + id + content + version + } +} + +mutation Delete($noteId: ID!) { + deleteNote(input:{ + id: $noteId, + expectedVersion: 2 + }) { + id + content + version + } +} +``` + +## Writing Custom Transformers + +This document outlines the process of writing custom GraphQL transformers. The `graphql-transform` package serves as a lightweight framework that takes as input a GraphQL SDL document +and a list of **GraphQL Transformers** and returns a cloudformation document that fully implements the data model defined by the input schema. A GraphQL Transformer is a class the defines a directive and a set of functions that manipulate a context and are called whenever that directive is found in an input schema. + +For example, the AWS Amplify CLI calls the GraphQL Transform like this: + +```javascript +import GraphQLTransform from 'graphql-transformer-core' +import DynamoDBModelTransformer from 'graphql-dynamodb-transformer' +import ModelConnectionTransformer from 'graphql-connection-transformer' +import ModelAuthTransformer from 'graphql-auth-transformer' +import AppSyncTransformer from 'graphql-appsync-transformer' +import VersionedModelTransformer from 'graphql-versioned-transformer' + +// Note: This is not exact as we are omitting the @searchable transformer. +const transformer = new GraphQLTransform({ + transformers: [ + new AppSyncTransformer(), + new DynamoDBModelTransformer(), + new ModelAuthTransformer(), + new ModelConnectionTransformer(), + new VersionedModelTransformer() + ] +}) +const schema = ` +type Post @model { + id: ID! + title: String! + comments: [Comment] @connection(name: "PostComments") +} +type Comment @model { + id: ID! 
+ content: String! + post: Post @connection(name: "PostComments") +} +` +const cfdoc = transformer.transform(schema); +const out = await createStack(cfdoc, name, region) +console.log('Application creation successfully started. It may take a few minutes to finish.') +``` + +As shown above the `GraphQLTransform` class takes a list of transformers and later is able to transform +GraphQL SDL documents into CloudFormation documents. + +### The transform lifecycle + +At a high level the `GraphQLTransform` takes the input SDL, parses it, validates the schema +is complete and satisfies the directive definitions, calls each transformers `.before()` method +if one exists, walks the parsed AST and called the relevant methods if they exists (e.g. `object()`, `field()`, `interface()` etc), +in reverse order calls each transformer's `.after()` method if one exists, and finally returns the context's finished template. + +Here is pseudo code for how `const cfdoc = transformer.transform(schema);` works. + +```javascript +function transform(schema: string): Template { + + // ... + + for (const transformer of this.transformers) { + // Run the before function one time per transformer. + if (isFunction(transformer.before)) { + transformer.before(context) + } + // Transform each definition in the input document. + for (const def of context.inputDocument.definitions as TypeDefinitionNode[]) { + switch (def.kind) { + case 'ObjectTypeDefinition': + this.transformObject(transformer, def, context) + // Walk the fields and call field transformers. + break + case 'InterfaceTypeDefinition': + this.transformInterface(transformer, def, context) + // Walk the fields and call field transformers. + break; + case 'ScalarTypeDefinition': + this.transformScalar(transformer, def, context) + break; + case 'UnionTypeDefinition': + this.transformUnion(transformer, def, context) + break; + case 'EnumTypeDefinition': + this.transformEnum(transformer, def, context) + break; + case 'InputObjectTypeDefinition': + this.transformInputObject(transformer, def, context) + break; + default: + continue + } + } + } + // transform() is meant to behave like a composition so the after + // functions are called in the reverse order (as if they were popping off a stack + let reverseThroughTransformers = this.transformers.length - 1; + while (reverseThroughTransformers >= 0) { + const transformer = this.transformers[reverseThroughTransformers] + if (isFunction(transformer.after)) { + transformer.after(context) + } + reverseThroughTransformers -= 1 + } + // Write the schema. + return context.template +} +``` + +### The TransformerContext + +The transformer context serves like an accumulator that is manipulated by transformers. See the code to see what methods are available +to you. + +[https://github.com/aws-amplify/amplify-cli/blob/7f0cb11915fa945ad9d518e8f9a8f74378fef5de/packages/graphql-transformer-core/src/TransformerContext.ts](https://github.com/aws-amplify/amplify-cli/blob/7f0cb11915fa945ad9d518e8f9a8f74378fef5de/packages/graphql-transformer-core/src/TransformerContext.ts) + +> For now, the transform only support cloudformation and uses a library called `cloudform` to create cloudformation resources in code. In the future we would like to support alternative deployment mechanisms like terraform. + +### Example + +As an example let's walk through how we implemented the @versioned transformer. The first thing to do is to define a directive for our transformer. 
+ +```javascript +const VERSIONED_DIRECTIVE = ` + directive @versioned(versionField: String = "version", versionInput: String = "expectedVersion") on OBJECT +` +``` + +Our `@versioned` directive can be applied to `OBJECT` type definitions and automatically adds object versioning and conflict detection to an APIs mutations. For example, we might write + +``` +# Any mutations that deal with the Post type will ask for an `expectedVersion` +# input that will be checked using DynamoDB condition expressions. +type Post @model @versioned { + id: ID! + title: String! + version: Int! +} +``` + +> Note: @versioned depends on @model so we must pass `new new DynamoDBModelTransformer()` before `new new VersionedModelTransformer()`. Also note that `new AppSyncTransformer()` must go first for now. In the future we can add a dependency mechanism and topologically sort it outselves. + +The next step after defining the directive is to implement the transformer's business logic. The `graphql-transformer-core` package makes this a little easier +by exporting a common class through which we may define transformers. User's extend the `Transformer` class and implement the required functions. + +```javascript +export class Transformer { + before?: (acc: TransformerContext) => void + after?: (acc: TransformerContext) => void + object?: (definition: ObjectTypeDefinitionNode, directive: DirectiveNode, acc: TransformerContext) => void + interface?: (definition: InterfaceTypeDefinitionNode, directive: DirectiveNode, acc: TransformerContext) => void + field?: ( + parent: ObjectTypeDefinitionNode | InterfaceTypeDefinitionNode, + definition: FieldDefinitionNode, + directive: DirectiveNode, + acc: TransformerContext) => void + argument?: (definition: InputValueDefinitionNode, directive: DirectiveNode, acc: TransformerContext) => void + union?: (definition: UnionTypeDefinitionNode, directive: DirectiveNode, acc: TransformerContext) => void + enum?: (definition: EnumTypeDefinitionNode, directive: DirectiveNode, acc: TransformerContext) => void + enumValue?: (definition: EnumValueDefinitionNode, directive: DirectiveNode, acc: TransformerContext) => void + scalar?: (definition: ScalarTypeDefinitionNode, directive: DirectiveNode, acc: TransformerContext) => void + input?: (definition: InputObjectTypeDefinitionNode, directive: DirectiveNode, acc: TransformerContext) => void + inputValue?: (definition: InputValueDefinitionNode, directive: DirectiveNode, acc: TransformerContext) => void +} +``` + +Since our `VERSIONED_DIRECTIVE` only specifies `OBJECT` in its **on** condition, we only **NEED* to implement the `object` function. You may also +implement the `before` and `after` functions which will be called once at the beginning and end respectively of the transformation process. + +```javascript +/** + * Users extend the Transformer class and implement the relevant functions. + */ +export class VersionedModelTransformer extends Transformer { + + constructor() { + super( + 'VersionedModelTransformer', + VERSIONED_DIRECTIVE + ) + } + + /** + * When a type is annotated with @versioned enable conflict resolution for the type. + * + * Usage: + * + * type Post @model @versioned(versionField: "version", versionInput: "expectedVersion") { + * id: ID! + * title: String + * version: Int! 
+ * } + * + * Enabling conflict resolution automatically manages a "version" attribute in + * the @model type's DynamoDB table and injects a conditional expression into + * the types mutations that actually perform the conflict resolutions by + * checking the "version" attribute in the table with the "expectedVersion" passed + * by the user. + */ + public object = (def: ObjectTypeDefinitionNode, directive: DirectiveNode, ctx: TransformerContext): void => { + // @versioned may only be used on types that are also @model + const modelDirective = def.directives.find((dir) => dir.name.value === 'model') + if (!modelDirective) { + throw new InvalidDirectiveError('Types annotated with @auth must also be annotated with @model.') + } + + const isArg = (s: string) => (arg: ArgumentNode) => arg.name.value === s + const getArg = (arg: string, dflt?: any) => { + const argument = directive.arguments.find(isArg(arg)) + return argument ? valueFromASTUntyped(argument.value) : dflt + } + + const versionField = getArg('versionField', "version") + const versionInput = getArg('versionInput', "expectedVersion") + const typeName = def.name.value + + // Make the necessary changes to the context + this.augmentCreateMutation(ctx, typeName, versionField, versionInput) + this.augmentUpdateMutation(ctx, typeName, versionField, versionInput) + this.augmentDeleteMutation(ctx, typeName, versionField, versionInput) + this.stripCreateInputVersionedField(ctx, typeName, versionField) + this.addVersionedInputToDeleteInput(ctx, typeName, versionInput) + this.addVersionedInputToUpdateInput(ctx, typeName, versionInput) + this.enforceVersionedFieldOnType(ctx, typeName, versionField) + } + + // ... Implement the functions that do the real work by calling the context methods. +} +``` From 71b096246a5d98d948e84dc9560a45a7a4456492 Mon Sep 17 00:00:00 2001 From: Paris Date: Tue, 9 Oct 2018 12:17:23 -0700 Subject: [PATCH 02/10] How to write a transformer updates --- js/graphql.md | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/js/graphql.md b/js/graphql.md index 2bdcb1cfc57..88e48e6282e 100644 --- a/js/graphql.md +++ b/js/graphql.md @@ -1162,12 +1162,13 @@ console.log('Application creation successfully started. It may take a few minute As shown above the `GraphQLTransform` class takes a list of transformers and later is able to transform GraphQL SDL documents into CloudFormation documents. -### The transform lifecycle +### The Transform Lifecycle -At a high level the `GraphQLTransform` takes the input SDL, parses it, validates the schema -is complete and satisfies the directive definitions, calls each transformers `.before()` method -if one exists, walks the parsed AST and called the relevant methods if they exists (e.g. `object()`, `field()`, `interface()` etc), -in reverse order calls each transformer's `.after()` method if one exists, and finally returns the context's finished template. +At a high level the `GraphQLTransform` takes the input SDL, parses it, and validates the schema +is complete and satisfies the directive definitions. It then iterates through the list of transformers +passed to the transform when it was created and calls `.before()` if it exists. It then walks the parsed AST +and calls the relevant transformer methods (e.g. `object()`, `field()`, `interface()` etc) as directive matches are found. +In reverse order it then calls each transformer's `.after()` method if it exists, and finally returns the context's finished template. 
Here is pseudo code for how `const cfdoc = transformer.transform(schema);` works. @@ -1204,13 +1205,13 @@ function transform(schema: string): Template { case 'InputObjectTypeDefinition': this.transformInputObject(transformer, def, context) break; + // Note: Extension and operation definition nodes are not supported. default: continue } } } - // transform() is meant to behave like a composition so the after - // functions are called in the reverse order (as if they were popping off a stack + // After is called in the reverse order as if they were popping off a stack. let reverseThroughTransformers = this.transformers.length - 1; while (reverseThroughTransformers >= 0) { const transformer = this.transformers[reverseThroughTransformers] @@ -1219,12 +1220,13 @@ function transform(schema: string): Template { } reverseThroughTransformers -= 1 } - // Write the schema. + // Return the template. + // In the future there will likely be a formatter concept here. return context.template } ``` -### The TransformerContext +### The Transformer Context The transformer context serves like an accumulator that is manipulated by transformers. See the code to see what methods are available to you. From 47e01e4291f8ee9090411a6018e9cd0d1dc53cb4 Mon Sep 17 00:00:00 2001 From: Paris Date: Tue, 9 Oct 2018 12:25:51 -0700 Subject: [PATCH 03/10] Updating intro --- js/graphql.md | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/js/graphql.md b/js/graphql.md index 88e48e6282e..d760f12e5e3 100644 --- a/js/graphql.md +++ b/js/graphql.md @@ -2,9 +2,35 @@ --- # The GraphQL Transform -The GraphQL Transform is a library that simplifies the process of developing, deploying, and maintaining GraphQL APIs. With it, you define your API using the GraphQL Schema Definition Language (SDL) and can then use this library where to transform it into a fully descriptive cloudformation template that implements the API's data model. +The GraphQL Transform is a library that simplifies the process of designing GraphQL APIs. +In short, it allows a developer to design application backends with a simple `schema.graphql` +which is then *transformed* into a AWS CloudFormation that implements the data model +defined in `schema.graphql`. For example you might create the backend for a blog like this: -The Transform can be run as an independent library, however it is integrated into the Amplify CLI via the `API` category for you to use immediately. +``` +type Blog @model { + id: ID! + name: String! + posts: [Post] @connection(name: "BlogPosts") +} +type Post @model { + id: ID! + title: String! + blog: Blog @connection(name: "BlogPosts") + comments: [Comment] @connection(name: "PostComments") +} +type Comment @model { + id: ID! + content: String + post: Post @connection(name: "PostComments") +} +``` + +When used along with tools like the Amplify CLI, the GraphQL Transform simplifies the process of +developing, deploying, and maintaining GraphQL APIs. With it, you define your API using the +GraphQL Schema Definition Language (SDL) and can then use automation to transform it into a fully +descriptive cloudformation template that implements the spec. The transform also provides a framework +through which you can define you own transformers as `@directives` for custom workflows. 
## Quick Start From c8ed768268e5dc35227dc070d8ef9d84a5d1361a Mon Sep 17 00:00:00 2001 From: Paris Date: Tue, 9 Oct 2018 13:55:11 -0700 Subject: [PATCH 04/10] Updating for comments --- js/graphql.md | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/js/graphql.md b/js/graphql.md index d760f12e5e3..0ac00270cb5 100644 --- a/js/graphql.md +++ b/js/graphql.md @@ -73,8 +73,8 @@ type Comment @model { ``` Once you are happy with your schema, save the file and click enter in your -terminal window. If now error messages are thrown then you are good to go -and can deploy your new API. +terminal window. if no error messages are thrown this means the transformation +was successful and you can deploy your new API. ```bash amplify push @@ -110,6 +110,12 @@ mutation CreatePost($blogId:ID!) { } } +# Provide the returned id from the CreateBlog mutation as the "blogId" variable +# in the "variables" pane (bottom left pane) of the query editor: +{ + "blogId": "returned-id-goes-here" +} + # Create a comment and associate it with the post via the "commentPostId" input field. mutation CreateComment($postId:ID!) { createComment(input:{content:"A comment!", commentPostId:$postId}) { @@ -126,7 +132,13 @@ mutation CreateComment($postId:ID!) { } } -# Get a blog, its posts, and its posts comments. +# Provide the returned id from the CreatePost mutation as the "postId" variable +# in the "variables" pane (bottom left pane) of the query editor: +{ + "postId": "returned-id-goes-here" +} + +# Get a blog, its posts, and its posts' comments. query GetBlog($blogId:ID!) { getBlog(id:$blogId) { id @@ -150,7 +162,7 @@ query GetBlog($blogId:ID!) { } } -# List all blogs, their posts, and their posts comments. +# List all blogs, their posts, and their posts' comments. query ListBlogs { listBlogs { # Try adding: listBlog(filter: { name: { eq: "My New Blog!" } }) items { @@ -537,7 +549,7 @@ type Post @model @auth(rules: [{allow: groups, groups: ["Admin"]}]) { Static group auth is simpler than the others. The generated resolvers would be protected like so: -- `Mutation.createX`: Verify the requesting user has a valid credential and that `ctx.identity.claims.get("cognito:groups")` contains the **Admin** group. If it does not, fail`. +- `Mutation.createX`: Verify the requesting user has a valid credential and that `ctx.identity.claims.get("cognito:groups")` contains the **Admin** group. If it does not, fail. - `Mutation.updateX`: Verify the requesting user has a valid credential and that `ctx.identity.claims.get("cognito:groups")` contains the **Admin** group. If it does not, fail. - `Mutation.deleteX`: Verify the requesting user has a valid credential and that `ctx.identity.claims.get("cognito:groups")` contains the **Admin** group. If it does not, fail. - `Query.getX`: Verify the requesting user has a valid credential and that `ctx.identity.claims.get("cognito:groups")` contains the **Admin** group. If it does not, fail. @@ -789,8 +801,8 @@ input SearchableQueryMap { search: String } #### Usage -Store posts in DynamoDB and automatically stream them to ElasticSearch -via lambda and connect a searchQueryField resolver. +Store posts in Amazon DynamoDB and automatically stream them to Amazon ElasticSearch +via AWS Lambda and connect a searchQueryField resolver. 
``` type Post @model @searchable { @@ -802,7 +814,7 @@ type Post @model @searchable { } ``` -You may then create objects in DynamoDB that will automatically streamed to lambda +You may then create objects in DynamoDB that will be automatically streamed to lambda using the normal `createPost` mutation. ``` @@ -859,7 +871,7 @@ query SearchPosts { The above query returns all documents whose `title` begins with `S` and ends with `Elasticsearch!`. -Moreover you can use the `filter` parameter to pass a nested `and`/`or`/`not` conditions. By default, every operation in the filter properties is *AND* ed. You can use the `or` or `not` properties in the `filter` parameter of the search query to override this behavior. Each of these operators (`and`, `or`, `not` properties in the filter object) accepts an array of SearchableTypes which are in turn joined by the corresponding operator. For example, consider the following search query: +Moreover you can use the `filter` parameter to pass a nested `and`/`or`/`not` condition. By default, every operation in the filter properties is *AND* ed. You can use the `or` or `not` properties in the `filter` parameter of the search query to override this behavior. Each of these operators (`and`, `or`, `not` properties in the filter object) accepts an array of SearchableTypes which are in turn joined by the corresponding operator. For example, consider the following search query: ``` query SearchPosts { @@ -878,7 +890,7 @@ query SearchPosts { } ``` -Assuming, you used the `createPost` mutation to create new posts with `title`, `createdAt` and `updatedAt` values, the above search query will return you a list of all `Posts`, whose `title` starts with `S` _and_ have `createdAt` _or_ `updatedAt` value as `08/20/2018`. +Assuming you used the `createPost` mutation to create new posts with `title`, `createdAt` and `updatedAt` values, the above search query will return you a list of all `Posts`, whose `title` starts with `S` _and_ have `createdAt` _or_ `updatedAt` value as `08/20/2018`. Here is a complete list of searchable operations per GraphQL type supported as of today: From a1ac538aa0e4906b227d1663738a17b6d953731f Mon Sep 17 00:00:00 2001 From: Paris Date: Tue, 9 Oct 2018 14:23:28 -0700 Subject: [PATCH 05/10] Wording update --- js/graphql.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/js/graphql.md b/js/graphql.md index 0ac00270cb5..018cf040aa6 100644 --- a/js/graphql.md +++ b/js/graphql.md @@ -2,10 +2,11 @@ --- # The GraphQL Transform -The GraphQL Transform is a library that simplifies the process of designing GraphQL APIs. -In short, it allows a developer to design application backends with a simple `schema.graphql` -which is then *transformed* into a AWS CloudFormation that implements the data model -defined in `schema.graphql`. For example you might create the backend for a blog like this: +After defining your API using the GraphQL Schema Definition Language (SDL), +you can then use this library to transform it into a fully descriptive +CloudFormation template that implements the API's data model. 
+ +For example you might create the backend for a blog like this: ``` type Blog @model { From ae8281bd65add7e6f336d177667dc7572ddc57d0 Mon Sep 17 00:00:00 2001 From: Paris Date: Fri, 12 Oct 2018 09:34:35 -0700 Subject: [PATCH 06/10] Updating docs for comments --- js/graphql.md | 50 +++++++++++++++++++++++--------------------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/js/graphql.md b/js/graphql.md index 018cf040aa6..eab3ccbc01e 100644 --- a/js/graphql.md +++ b/js/graphql.md @@ -27,9 +27,11 @@ type Comment @model { } ``` +> This is just an example. The transform defines more directives such as @auth and @searchable below. + When used along with tools like the Amplify CLI, the GraphQL Transform simplifies the process of developing, deploying, and maintaining GraphQL APIs. With it, you define your API using the -GraphQL Schema Definition Language (SDL) and can then use automation to transform it into a fully +[GraphQL Schema Definition Language (SDL)](https://facebook.github.io/graphql/June2018/) and can then use automation to transform it into a fully descriptive cloudformation template that implements the spec. The transform also provides a framework through which you can define you own transformers as `@directives` for custom workflows. @@ -205,12 +207,17 @@ amplify push ### @model Object types that are annotated with `@model` are top-level entities in the -generated API. Objects annotated with `@model` are stored in DynamoDB and are +generated API. Objects annotated with `@model` are stored in Amazon DynamoDB and are capable of being protected via `@auth`, related to other objects via `@connection`, -and streamed into Elasticsearch via `@searchable`. +and streamed into Amazon Elasticsearch via `@searchable`. You may also apply the +`@versioned` directive to instantly add versioning and conflict detection to a +model type. #### Definition +The following SDL defines the `@model` directive that allows you to easily define +top level object types in your API that are backed by Amazon DynamoDB. + ``` directive @model( queries: ModelQueryMap, @@ -251,10 +258,12 @@ no mutation fields. A single `@model` directive configures the following AWS resources: -- An Amazon DynamoDB table with 5 r/w units. Support for auto-scaling and encryption at rest coming soon. +- An Amazon DynamoDB table with 5 read/write units. - An AWS AppSync DataSource configured to access the table above. - An AWS IAM role attached to the DataSource that allows AWS AppSync to call the above table on your behalf. - Up to 8 resolvers (create, update, delete, get, list, onCreate, onUpdate, onDelete) but this is configurable via the `query`, `mutation`, and `subscription` arguments on the `@model` directive. +- Input objects for create, update, and delete mutations. +- Filter input objects that allow you to filter objects in list queries and connection fields. This input schema document @@ -399,10 +408,9 @@ type Subscription { ### @auth -Object types that are annotated with `@auth` are protected by one of the -supported authorization strategies. Types that are annotated with `@auth` -must also be annotated with `@model`. Currently, Amazon Cognito user pools -is the only supported authorization mode. +Object types that are annotated with `@auth` are protected by a set of authorization +rules. Currently, @auth only supports APIs with Amazon Cognito User Pools enabled. +Types that are annotated with `@auth` must also be annotated with `@model`. 
#### Definition @@ -449,29 +457,17 @@ type Post } ``` -Owner authorization specifies that a user (and soon to be set of users) can access an object. To +Owner authorization specifies that a user can access an object. To do so, each object has an *ownerField* (by default "owner") that stores ownership information and is verified in various ways during resolver execution. You can use the *queries* and *mutations* arguments to specify which operations are augmented as follows: -**get**: If the record's owner is not the same as the logged in user (via `$ctx.identity.username`), throw `$util.unauthorized()`. -**list**: Filter `$ctx.result.items` for owned items. -**create**: Inject the logged in user's `$ctx.identity.username` as the *ownerField* automatically. -**update**: Add conditional update that checks the stored *ownerField* is the same as `$ctx.identity.username`. -**delete**: Add conditional update that checks the stored *ownerField* is the same as `$ctx.identity.username`. - -**Multi Owner Authorization (Coming Soon)** - -In the future, we will support multiple owners: - -``` -type Post @model @auth(rules: [{allow: owner, ownerField: "owners"}]) { - id: ID! - title: String! - owners: [String] -} -``` +- **get**: If the record's owner is not the same as the logged in user (via `$ctx.identity.username`), throw `$util.unauthorized()`. +- **list**: Filter `$ctx.result.items` for owned items. +- **create**: Inject the logged in user's `$ctx.identity.username` as the *ownerField* automatically. +- **update**: Add conditional update that checks the stored *ownerField* is the same as `$ctx.identity.username`. +- **delete**: Add conditional update that checks the stored *ownerField* is the same as `$ctx.identity.username`. **Static Group Authorization** @@ -716,7 +712,7 @@ directive @versioned(versionField: String = "version", versionInput: String = "e #### Usage -Annotate a `@model` type with the `@versioned` directive to add object versioning and conflict detection to a type. +Add `@versioned` to a type that is also annotate with `@model` to enable object versioning and conflict detection for a type. ``` type Post @model @versioned { From f74ba6269f93d42253a071b7b7f22001c7880a4f Mon Sep 17 00:00:00 2001 From: Paris Date: Tue, 16 Oct 2018 17:18:16 -0700 Subject: [PATCH 07/10] Adding S3 docs for the transform --- js/graphql.md | 271 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 269 insertions(+), 2 deletions(-) diff --git a/js/graphql.md b/js/graphql.md index eab3ccbc01e..948754e36c7 100644 --- a/js/graphql.md +++ b/js/graphql.md @@ -898,9 +898,276 @@ Here is a complete list of searchable operations per GraphQL type supported as o | Float | `ne`, `gt`, `lt`, `gte`, `lte`, `eq`, `range` | | Boolean | `eq`, `ne` | -#### Generates +## S3 Objects + +The GraphQL Transform, Amplify CLI, and Amplify Library make it simple to add complex object +support with Amazon S3 to an application. + +### Basics + +At a minimum the steps to add S3 Object support are as follows: + +**Create a Amazon S3 bucket to hold files via `amplify add storage`.** + +**Create a user pool in Amazon Cognito User Pools via `amplify add auth`.** + +**Create a GraphQL API via `amplify add api` and add the following type definition:** + +``` +type S3Object { + bucket: String! + region: String! + key: String! +} +``` + +**Reference the S3Object type from some `@model` type:** + +``` +type Picture @model @auth(rules: [{allow: owner}]) { + id: ID! 
+ name: String + owner: String + + # Reference the S3Object type from a field. + file: S3Object +} +``` + +The GraphQL Transform handles creating the relevant input types and will store pointers to S3 objects in Amazon DynamoDB. The AppSync SDKs and Amplify library handle uploading the files to S3 transparently. + +**Run a mutation with s3 objects from your client app:** + +``` +mutation ($input: CreatePictureInput!) { + createPicture(input: $input) { + id + name + visibility + owner + createdAt + file { + region + bucket + key + } + } +} +``` + +### Tutorial (S3 & React) + +**First create an amplify project:** + +``` +amplify init +``` + +**Next add the `auth` category to enable Amazon Cognito User Pools:** + +``` +amplify add auth + +# You may use the default settings. +``` + +**Then add the `storage` category and configure an Amazon S3 bucket to store files.** + +``` +amplify add storage + +# Select "Content (Images, audio, video, etc.)" +# Follow the rest of the instructions and customize as necessary. +``` + +**Next add the `api` category and configure a GraphQL API with Amazon Cognito User Pools enabled.** + +``` +amplify add api + +# Select the graphql option and then Amazon Cognito User Pools option. +# When asked if you have a schema, say No. +# Select one of the default samples. You can change it later. +# Choose to edit the schema and it will open your schema.graphql in your editor. +``` + +**Once your `schema.graphql` is open in your editor of choice, enter the following:** + +``` +type Picture @model @auth(rules: [{allow: owner}]) { + id: ID! + name: String + owner: String + visibility: Visibility + file: S3Object + createdAt: String +} + +type S3Object { + bucket: String! + region: String! + key: String! +} + +enum Visibility { + public + private +} +``` + +**After defining your API's schema.graphql deploy it to AWS.** + +``` +amplify push +``` + +**In your top level `App.js` (or similar), instantiate the AppSync client and include +the necessary `` and `` components.** + +```javascript +import React, { Component } from 'react'; +import Amplify, { Auth } from 'aws-amplify'; +import { withAuthenticator } from 'aws-amplify-react'; +import AWSAppSyncClient from "aws-appsync"; +import { Rehydrated } from 'aws-appsync-react'; +import { ApolloProvider } from 'react-apollo'; +import awsconfig from './aws-exports'; + +// Amplify init +Amplify.configure(awsconfig); + +const GRAPHQL_API_REGION = awsconfig.aws_appsync_region +const GRAPHQL_API_ENDPOINT_URL = awsconfig.aws_appsync_graphqlEndpoint +const S3_BUCKET_REGION = awsconfig.aws_user_files_s3_bucket_region +const S3_BUCKET_NAME = awsconfig.aws_user_files_s3_bucket +const AUTH_TYPE = awsconfig.aws_appsync_authenticationType + +// AppSync client instantiation +const client = new AWSAppSyncClient({ + url: GRAPHQL_API_ENDPOINT_URL, + region: GRAPHQL_API_REGION, + auth: { + type: AUTH_TYPE, + // Get the currently logged in users credential from + // Amazon Cognito User Pools. + jwtToken: async () => ( + await Auth.currentSession()).getAccessToken().getJwtToken(), + }, + // Uses Amazon IAM credentials to authorize requests to S3. + complexObjectsCredentials: () => Auth.currentCredentials(), +}); + +// Define you root app component +class App extends Component { + render() { + // ... 
your code here
  }
}

const AppWithAuth = withAuthenticator(App, true);

// Wrap the authenticated app with the ApolloProvider and Rehydrated components.
export default () => (
  <ApolloProvider client={client}>
    <Rehydrated>
      <AppWithAuth />
    </Rehydrated>
  </ApolloProvider>
);
```

**Then define a component and call a mutation to create a Picture object and upload a file.**

```javascript
import React, { Component } from 'react';
import gql from 'graphql-tag';

// Define your component
class AddPhoto extends Component {
  render() {