From 179b46028b914ef743674a5c59e0f3a6edc31638 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 12 Apr 2023 10:09:51 +0200 Subject: [PATCH] Fix potential bug when an @interfaceObject has a @requires (#2524) When an `@interfaceObject` type has a field with a `@requires` and the query requests that field only for some specific implementations of the corresponding interface, then the generated query plan was sometimes invalid and could result in an invalid query to a subgraph (against a subgraph that relies on `@apollo/subgraph`, this led the subgraph to produce an error message looking like `"The _entities resolver tried to load an entity for type X, but no object or interface type of that name was found in the schema"`). The underlying reason is that the plan was mistakenly requesting the required fields for any object of the interface (corresponding to the `@interfaceObject` in question), instead of requesting them only for the implementation types it needed to. Not only is that inefficient in principle, but this led to invalid fetches because the "rewrite" logic used to fixup the `__typename` for `@interfaceObject` under the hood was only rewriting the types of the implementation types in question (only expecting those to need rewrite, which technically was correct) but was mistakenly getting values for other implementation types. 
--- .changeset/spotty-monkeys-clean.md | 13 + .../src/__tests__/executeQueryPlan.test.ts | 245 ++++++++++++++++++ internals-js/src/operations.ts | 18 +- query-planner-js/src/buildPlan.ts | 73 ++---- 4 files changed, 299 insertions(+), 50 deletions(-) create mode 100644 .changeset/spotty-monkeys-clean.md diff --git a/.changeset/spotty-monkeys-clean.md b/.changeset/spotty-monkeys-clean.md new file mode 100644 index 000000000..07a990db8 --- /dev/null +++ b/.changeset/spotty-monkeys-clean.md @@ -0,0 +1,13 @@ +--- +"@apollo/query-planner": patch +"@apollo/federation-internals": patch +"@apollo/gateway": patch +--- + +Fix potential bug when an `@interfaceObject` type has a `@requires`. When an `@interfaceObject` type has a field with a +`@requires` and the query requests that field only for some specific implementations of the corresponding interface, +then the generated query plan was sometimes invalid and could result in an invalid query to a subgraph (against a +subgraph that rely on `@apollo/subgraph`, this lead the subgraph to produce an error message looking like `"The +_entities resolver tried to load an entity for type X, but no object or interface type of that name was found in the +schema"`). + \ No newline at end of file diff --git a/gateway-js/src/__tests__/executeQueryPlan.test.ts b/gateway-js/src/__tests__/executeQueryPlan.test.ts index b35c244bd..1a63b4deb 100644 --- a/gateway-js/src/__tests__/executeQueryPlan.test.ts +++ b/gateway-js/src/__tests__/executeQueryPlan.test.ts @@ -4863,6 +4863,251 @@ describe('executeQueryPlan', () => { } `); }); + + test('handles @requires on @interfaceObject that applies to only one of the queried implementation', async () => { + // The case this test is that where the @interfaceObject in s2 has a @requires, but the query we send requests the field on which + // there is this @require only for one of the implementation type, which it request another field with no require for another implementation. 
+ // And we're making sure the requirements only get queried for T1, the first type. + const s1 = { + name: 's1', + typeDefs: gql` + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.4", import: ["@key"]) + + type Query { + is: [I!]! + } + + interface I @key(fields: "id") { + id: ID! + name: String + req: Req + } + + type T1 implements I @key(fields: "id") { + id: ID! + name: String + req: Req + } + + type T2 implements I @key(fields: "id") { + id: ID! + name: String + req: Req + } + + type Req { + id: ID! + } + `, + resolvers: { + Query: { + is: () => [ + { __typename: 'T1', id: '2', name: 'e2', req: { id: 'r1'} }, + { __typename: 'T2', id: '4', name: 'e4', req: { id: 'r2'} }, + { __typename: 'T1', id: '1', name: 'e1', req: { id: 'r3'} } + ] + }, + } + } + + const s2 = { + name: 's2', + typeDefs: gql` + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.4", import: ["@key", "@interfaceObject", "@external", "@requires"]) + + type I @key(fields: "id") @interfaceObject { + id: ID! + req: Req @external + v: String! @requires(fields: "req { id }") + } + + type Req { + id: ID! @external + } + `, + resolvers: { + I: { + __resolveReference(ref: any) { + return { + ...ref, + v: `req=${ref.req.id}` + }; + }, + } + } + } + + const { serviceMap, schema, queryPlanner} = getFederatedTestingSchema([ s1, s2 ]); + + let operation = parseOp(` + { + is { + ... on T1 { + v + } + ... on T2 { + name + } + } + } + `, schema); + + let queryPlan = buildPlan(operation, queryPlanner); + expect(queryPlan).toMatchInlineSnapshot(` + QueryPlan { + Sequence { + Fetch(service: "s1") { + { + is { + __typename + ... on T1 { + __typename + id + req { + id + } + } + ... on T2 { + name + } + } + } + }, + Flatten(path: "is.@") { + Fetch(service: "s2") { + { + ... on T1 { + __typename + id + } + ... on I { + __typename + req { + id + } + } + } => + { + ... 
on I { + v + } + } + }, + }, + }, + } + `); + + let response = await executePlan(queryPlan, operation, undefined, schema, serviceMap); + expect(response.errors).toBeUndefined(); + expect(response.data).toMatchInlineSnapshot(` + Object { + "is": Array [ + Object { + "v": "req=r1", + }, + Object { + "name": "e4", + }, + Object { + "v": "req=r3", + }, + ], + } + `); + + // Sanity checking that if we ask for `v` (the field with @requires), then everything still works. + operation = parseOp(` + { + is { + ... on T1 { + v + } + ... on T2 { + v + name + } + } + } + `, schema); + + global.console = require('console'); + queryPlan = buildPlan(operation, queryPlanner); + expect(queryPlan).toMatchInlineSnapshot(` + QueryPlan { + Sequence { + Fetch(service: "s1") { + { + is { + __typename + ... on T1 { + __typename + id + req { + id + } + } + ... on T2 { + __typename + id + req { + id + } + name + } + } + } + }, + Flatten(path: "is.@") { + Fetch(service: "s2") { + { + ... on T1 { + __typename + id + } + ... on I { + __typename + req { + id + } + } + ... on T2 { + __typename + id + } + } => + { + ... 
on I { + v + } + } + }, + }, + }, + } + `); + + response = await executePlan(queryPlan, operation, undefined, schema, serviceMap); + expect(response.errors).toBeUndefined(); + expect(response.data).toMatchInlineSnapshot(` + Object { + "is": Array [ + Object { + "v": "req=r1", + }, + Object { + "name": "e4", + "v": "req=r2", + }, + Object { + "v": "req=r3", + }, + ], + } + `); + }); }); describe('fields with conflicting types needing aliasing', () => { diff --git a/internals-js/src/operations.ts b/internals-js/src/operations.ts index 27a20ba27..acc16a1ea 100644 --- a/internals-js/src/operations.ts +++ b/internals-js/src/operations.ts @@ -49,7 +49,7 @@ import { } from "./definitions"; import { isInterfaceObjectType } from "./federation"; import { ERRORS } from "./error"; -import { sameType } from "./types"; +import { isSubtype, sameType } from "./types"; import { assert, isDefined, mapEntries, mapValues, MapWithCachedArrays, MultiMap, SetMultiMap } from "./utils"; import { argumentsEquals, argumentsFromAST, isValidValue, valueToAST, valueToString } from "./values"; import { v1 as uuidv1 } from 'uuid'; @@ -678,12 +678,12 @@ function isUselessFollowupElement(first: OperationElement, followup: OperationEl : first.typeCondition; // The followup is useless if it's a fragment (with no directives we would want to preserve) whose type - // is already that of the first element. + // is already that of the first element (or a supertype). 
return !!typeOfFirst && followup.kind === 'FragmentElement' && !!followup.typeCondition && (followup.appliedDirectives.length === 0 || isDirectiveApplicationsSubset(conditionals, followup.appliedDirectives)) - && sameType(typeOfFirst, followup.typeCondition); + && isSubtype(followup.typeCondition, typeOfFirst); } export type RootOperationPath = { @@ -1598,9 +1598,19 @@ function addOneToKeyedUpdates(keyedUpdates: MultiMap, s } } +function maybeRebaseOnSchema(toRebase: CompositeType, schema: Schema): CompositeType { + if (toRebase.schema() === schema) { + return toRebase; + } + + const rebased = schema.type(toRebase.name); + assert(rebased && isCompositeType(rebased), () => `Expected ${toRebase} to exists and be composite in the rebased schema, but got ${rebased?.kind}`); + return rebased; +} + function isUnecessaryFragment(parentType: CompositeType, fragment: FragmentSelection): boolean { return fragment.element.appliedDirectives.length === 0 - && (!fragment.element.typeCondition || sameType(parentType, fragment.element.typeCondition)); + && (!fragment.element.typeCondition || isSubtype(maybeRebaseOnSchema(fragment.element.typeCondition, parentType.schema()), parentType)); } function withUnecessaryFragmentsRemoved( diff --git a/query-planner-js/src/buildPlan.ts b/query-planner-js/src/buildPlan.ts index 28c31cf81..d5cc93546 100644 --- a/query-planner-js/src/buildPlan.ts +++ b/query-planner-js/src/buildPlan.ts @@ -732,7 +732,6 @@ class GroupInputs { } toSelectionSetNode(variablesDefinitions: VariableDefinitions, handledConditions: Conditions): SelectionSetNode { - const selectionSets = mapValues(this.perType).map((s) => removeConditionsFromSelectionSet(s.get(), handledConditions)); // Making sure we're not generating something invalid. 
selectionSets.forEach((s) => s.validate(variablesDefinitions)); @@ -969,13 +968,12 @@ class FetchGroup { */ isChildOfWithArtificialDependency(maybeParent: FetchGroup): boolean { const relation = this.parentRelation(maybeParent); - // To be a child with an artificial dependency, it needs to be a child first, - // and the "path in parent" should be know to be empty (which essentially - // means the groups are on the same entity at the same level). - if (!relation || relation.path?.length !== 0) { + // To be a child with an artificial dependency, it needs to be a child first, and the "path in parent" should be know. + if (!relation || !relation.path) { return false; } - // If we have no inputs, we don't really care about what `maybeParent` fetches. + + // Then, if we have no inputs, we know we don't depend on anything from the parent no matter what. if (!this.inputs) { return true; } @@ -1959,8 +1957,12 @@ class FetchDependencyGraph { // 2. has the same mergeAt // 3. is for the same entity type (we don't reuse groups for different entities just yet, as this can create unecessary dependencies that // gets in the way of some optimizations; the final optimizations in `reduceAndOptimize` will however later merge groups on the same subgraph - // and mergeAt when possibleA). + // and mergeAt when possible). // 4. is not part of our conditions or our conditions ancestors (meaning that we annot reuse a group if it fetches something we take as input). + // 5. is part of the same "defer" grouping + // 6. has the same path in parents (here again, we _will_ eventually merge fetches for which this is not true later in `reduceAndOptimize`, but + // for now, keeping groups separated when they have a different path in their parent allows to keep that "path in parent" more precisely, + // which is important for some case of @requires). 
for (const existing of parent.group.children()) { if (existing.subgraphName === subgraphName && existing.mergeAt @@ -1968,16 +1970,8 @@ class FetchDependencyGraph { && existing.selection.selections().every((s) => s.kind === 'FragmentSelection' && s.element.castedType() === type) && !this.isInGroupsOrTheirAncestors(existing, conditionsGroups) && existing.deferRef === deferRef + && samePathsInParents(existing.parentRelation(parent.group)?.path, parent.path) ) { - const existingPathInParent = existing.parentRelation(parent.group)?.path; - if (!samePathsInParents(existingPathInParent, parent.path)) { - // We're at the same "mergeAt", so the 'path in parent' should mostly be the same, but it's not guaranteed to - // be the exact same, in particular due to fragments with conditions (@skip/@include) that may be present in - // one case but not the other. This is fine, and we still want to reuse the group in those case, but we should - // erase the 'path in parent' in that case so we don't end up merging this group to another one "the wrong way" - // later on. - this.removePathInParent(parent.group, existing); - } return existing; } } @@ -1990,12 +1984,6 @@ class FetchDependencyGraph { return newGroup } - private removePathInParent(parent: FetchGroup, child: FetchGroup) { - // Simplest option is to remove the parent edge and re-add it without a path. 
- parent.removeChild(child); - child.addParent({ group: parent }); - } - newRootTypeFetchGroup({ subgraphName, rootKind, @@ -3599,9 +3587,15 @@ function wrapInputsSelections( ); } -function createFetchInitialPath(wrappingType: CompositeType, context: PathContext): OperationPath { +function createFetchInitialPath(supergraphSchema: Schema, wrappingType: CompositeType, context: PathContext): OperationPath { + // We make sure that all `OperationPath` are based on the supergraph as `OperationPath` is really about path on the input query/overall supergraph data + // (most other places already do this as the elements added to the operation path are from the input query, but this is an exception + // when we create an element from an type that may/usually will not be from the supergraph). Doing this make sure we can rely on things like checking + // subtyping between the types of a given path. + const rebasedType = supergraphSchema.type(wrappingType.name); + assert(rebasedType && isCompositeType(rebasedType), () => `${wrappingType} should be composite in the supergraph but got ${rebasedType?.kind}`) return wrapSelectionWithTypeAndConditions( - wrappingType, + rebasedType, [], (fragment, path) => [fragment as OperationElement].concat(path), context, @@ -3612,7 +3606,7 @@ function wrapSelectionWithTypeAndConditions( wrappingType: CompositeType, initialSelection: TSelection, wrapInFragment: (fragment: FragmentElement, current: TSelection) => TSelection, - context: PathContext + context: PathContext, ): TSelection { if (context.conditionals.length === 0) { return wrapInFragment(new FragmentElement(wrappingType, wrappingType.name), initialSelection); @@ -3639,19 +3633,6 @@ function wrapSelectionWithTypeAndConditions( return updatedSelection; } -function extractPathInParentForKeyFetch(type: CompositeType, path: GroupPath): OperationPath { - // A "key fetch" (calls to the `_entities` operation) always have to start with some type-cast into - // the entity fetched (`type` in this 
function), so we can remove a type-cast into the entity from - // the parent path if it is the last thing in the past. And doing that removal ensures the code - // later reuse fetch groups for different entities, as long as they get otherwise merged into the - // parent at the same place. - const inGroup = path.inGroup(); - const lastElement = inGroup[inGroup.length - 1]; - return (lastElement && lastElement.kind === 'FragmentElement' && lastElement.typeCondition?.name === type.name) - ? inGroup.slice(0, inGroup.length - 1) - : inGroup; -} - /** * If `maybePrefix` is a prefix of `basePath`, then return the path corresponding to the end of `basePath` after `maybePrefix` (which may * be empty if those are the same path). Otherwise, if `maybePrefix` is not a proper prefix, return `undefined`. @@ -3715,7 +3696,7 @@ function computeGroupsForTree( // on the condition ones. const sourceType = edge.head.type as CompositeType; // We shouldn't have a key on a non-composite type const destType = edge.tail.type as CompositeType; // We shouldn't have a key on a non-composite type - const pathInParent = extractPathInParentForKeyFetch(sourceType, path); + const pathInParent = path.inGroup(); const updatedDeferContext = deferContextAfterSubgraphJump(deferContext); // Note that we use the name of `destType` for the inputs parent type, which can seem strange, but the reason is that we // 2 kind of cases: @@ -3764,7 +3745,7 @@ function computeGroupsForTree( stack.push({ tree: child, group: newGroup, - path: path.forNewKeyFetch(createFetchInitialPath(edge.tail.type as CompositeType, newContext)), + path: path.forNewKeyFetch(createFetchInitialPath(dependencyGraph.supergraphSchema, edge.tail.type as CompositeType, newContext)), context: newContext, deferContext: updatedDeferContext, }); @@ -3804,7 +3785,7 @@ function computeGroupsForTree( stack.push({ tree: child, group: newGroup, - path: path.forNewKeyFetch(createFetchInitialPath(type, newContext)), + path: 
path.forNewKeyFetch(createFetchInitialPath(dependencyGraph.supergraphSchema, type, newContext)), context: newContext, deferContext: updatedDeferContext, }); @@ -4119,7 +4100,7 @@ function handleRequires( newGroup.addParent(parent); newGroup.copyInputsOf(group); const createdGroups = computeGroupsForTree(dependencyGraph, requiresConditions, newGroup, path, deferContextForConditions(deferContext)); - if (createdGroups.length == 0) { + if (createdGroups.length === 0) { // All conditions were local. Just merge the newly created group back in the current group (we didn't need it) // and continue. assert(group.canMergeSiblingIn(newGroup), () => `We should be able to merge ${newGroup} into ${group} by construction`); @@ -4145,9 +4126,9 @@ function handleRequires( if (newGroupIsUnneeded) { // Up to this point, `newGroup` had no parent, so let's first merge `newGroup` to the parent, thus "rooting" - // it's children to it. Note that we just checked that `newGroup` selection was just its inputs, so + // its children to it. Note that we just checked that `newGroup` selection was just its inputs, so // we know that merging it to the parent is mostly a no-op from that POV, except maybe for requesting - // a few addition `__typename` we didn't before (due to the exclusion of `__typename` in the `newGroupIsUnneeded` check) + // a few additional `__typename` we didn't before (due to the exclusion of `__typename` in the `newGroupIsUnneeded` check) parent.group.mergeChildIn(newGroup); // Now, all created groups are going to be descendant of `parentGroup`. 
But some of them may actually be @@ -4261,7 +4242,7 @@ function handleRequires( ); return { group: postRequireGroup, - path: path.forNewKeyFetch(createFetchInitialPath(entityType, context)), + path: path.forNewKeyFetch(createFetchInitialPath(dependencyGraph.supergraphSchema, entityType, context)), createdGroups: unmergedGroups.concat(postRequireGroup), }; } else { @@ -4295,7 +4276,7 @@ function handleRequires( ); return { group: newGroup, - path: path.forNewKeyFetch(createFetchInitialPath(entityType, context)), + path: path.forNewKeyFetch(createFetchInitialPath(dependencyGraph.supergraphSchema, entityType, context)), createdGroups: createdGroups.concat(newGroup), }; }