diff --git a/package.json b/package.json index ea6a7a851..d67b77193 100644 --- a/package.json +++ b/package.json @@ -110,7 +110,8 @@ "slonik@30.4.4": "patches/slonik@30.4.4.patch", "@oclif/core@3.26.6": "patches/@oclif__core@3.26.6.patch", "oclif@4.13.6": "patches/oclif@4.13.6.patch", - "@graphiql/react@1.0.0-alpha.3": "patches/@graphiql__react@1.0.0-alpha.3.patch" + "@graphiql/react@1.0.0-alpha.3": "patches/@graphiql__react@1.0.0-alpha.3.patch", + "@theguild/components": "patches/@theguild__components.patch" } } } diff --git a/packages/libraries/apollo/README.md b/packages/libraries/apollo/README.md index bf3a016b5..d07df48c5 100644 --- a/packages/libraries/apollo/README.md +++ b/packages/libraries/apollo/README.md @@ -1,6 +1,6 @@ # Hive Client for Apollo Server -[Documentation](https://the-guild.dev/graphql/hive/docs/integrations/apollo-server) +[Documentation](https://the-guild.dev/graphql/hive/docs/other-integrations/apollo-server) --- diff --git a/packages/libraries/cli/README.md b/packages/libraries/cli/README.md index b1fa111be..1dbb73d61 100644 --- a/packages/libraries/cli/README.md +++ b/packages/libraries/cli/README.md @@ -1,7 +1,7 @@ # Hive CLI (Command Line Interface) A CLI util to manage and control your GraphQL Hive. You can perform -[schema-registry actions](https://the-guild.dev/graphql/hive/docs/features/schema-registry#actions-on-schemas) +[schema-registry actions](https://the-guild.dev/graphql/hive/docs/schema-registry#actions-on-schemas) on your Hive targets using the Hive CLI. 
[![Version](https://img.shields.io/npm/v/@graphql-hive/cli.svg)](https://npmjs.org/package/@graphql-hive/cli) diff --git a/packages/libraries/cli/src/commands/schema/check.ts b/packages/libraries/cli/src/commands/schema/check.ts index b39ec2030..dcb1af36c 100644 --- a/packages/libraries/cli/src/commands/schema/check.ts +++ b/packages/libraries/cli/src/commands/schema/check.ts @@ -200,7 +200,7 @@ export default class SchemaCheck extends Command { if (!git.pullRequestNumber) { this.warn( "Could not resolve pull request number. Are you running this command on a 'pull_request' event?\n" + - 'See https://the-guild.dev/graphql/hive/docs/integrations/ci-cd#github-workflow-for-ci', + 'See https://the-guild.dev/graphql/hive/docs/other-integrations/ci-cd#github-workflow-for-ci', ); } diff --git a/packages/libraries/envelop/README.md b/packages/libraries/envelop/README.md index 37fcf5d5b..8595d64db 100644 --- a/packages/libraries/envelop/README.md +++ b/packages/libraries/envelop/README.md @@ -1,6 +1,6 @@ # Hive Client for Envelop -[Documentation](https://the-guild.dev/graphql/hive/docs/integrations/envelop) +[Documentation](https://the-guild.dev/graphql/hive/docs/other-integrations/envelop) --- diff --git a/packages/libraries/yoga/README.md b/packages/libraries/yoga/README.md index 31462821d..ada969543 100644 --- a/packages/libraries/yoga/README.md +++ b/packages/libraries/yoga/README.md @@ -1,6 +1,6 @@ # Hive Client for GraphQL Yoga -[Documentation](https://the-guild.dev/graphql/hive/docs/integrations/graphql-yoga) +[Documentation](https://the-guild.dev/graphql/hive/docs/other-integrations/graphql-yoga) --- diff --git a/packages/services/demo/federation/README.md b/packages/services/demo/federation/README.md new file mode 100644 index 000000000..50d5dfd56 --- /dev/null +++ b/packages/services/demo/federation/README.md @@ -0,0 +1,4 @@ +# Federation Demo Subgraphs + +Some Apollo Federation compatible subgraphs we deploy to cloudflare workers for usage within our +getting 
started guides. diff --git a/packages/services/demo/federation/package.json b/packages/services/demo/federation/package.json new file mode 100644 index 000000000..32fe1ab84 --- /dev/null +++ b/packages/services/demo/federation/package.json @@ -0,0 +1,17 @@ +{ + "name": "@hive/demo-federation", + "version": "1.0.0", + "type": "module", + "private": true, + "scripts": { + "deploy": "wrangler deploy src/main.ts" + }, + "dependencies": { + "@apollo/subgraph": "2.8.4", + "graphql": "16.9.0", + "graphql-yoga": "5.6.0" + }, + "devDependencies": { + "wrangler": "3.61.0" + } +} diff --git a/packages/services/demo/federation/src/main.ts b/packages/services/demo/federation/src/main.ts new file mode 100644 index 000000000..765a35297 --- /dev/null +++ b/packages/services/demo/federation/src/main.ts @@ -0,0 +1,16 @@ +import { yoga as yogaProducts } from './products'; +import { yoga as yogaReviews } from './reviews'; + +export default { + async fetch(request: Request) { + const url = new URL(request.url); + if (url.pathname.startsWith('/products')) { + return yogaProducts.fetch(request); + } + if (url.pathname.startsWith('/reviews')) { + return yogaReviews.fetch(request); + } + + return new Response('Not Found', { status: 404 }); + }, +}; diff --git a/packages/services/demo/federation/src/products.ts b/packages/services/demo/federation/src/products.ts new file mode 100644 index 000000000..47bfc3fd1 --- /dev/null +++ b/packages/services/demo/federation/src/products.ts @@ -0,0 +1,70 @@ +import { parse } from 'graphql'; +import { createYoga } from 'graphql-yoga'; +import { buildSubgraphSchema } from '@apollo/subgraph'; + +const products = [ + { + upc: '1', + name: 'Table', + price: 899, + weight: 100, + }, + { + upc: '2', + name: 'Couch', + price: 1299, + weight: 1000, + }, + { + upc: '3', + name: 'Chair', + price: 54, + weight: 50, + }, +]; + +type Product = (typeof products)[number]; +type Context = { + products: Product[]; +}; + +const typeDefs = parse(/* GraphQL */ ` + extend 
type Query { + topProducts(first: Int = 5): [Product] + } + + type Product @key(fields: "upc") { + upc: String! + name: String + price: Int + weight: Int + } +`); + +const resolvers = { + Product: { + __resolveReference(object: Product, context: Context) { + return { + ...object, + ...context.products.find(product => product.upc === object.upc), + }; + }, + }, + Query: { + topProducts(_: unknown, args: { first: number }, context: Context) { + return context.products.slice(0, args.first); + }, + }, +}; + +export const yoga = createYoga({ + schema: buildSubgraphSchema([{ typeDefs, resolvers }]), + context() { + return { products }; + }, + landingPage: false, + graphqlEndpoint: '/products', + graphiql: { + title: 'Products Subgraph', + }, +}); diff --git a/packages/services/demo/federation/src/reviews.ts b/packages/services/demo/federation/src/reviews.ts new file mode 100644 index 000000000..63ad94eb5 --- /dev/null +++ b/packages/services/demo/federation/src/reviews.ts @@ -0,0 +1,108 @@ +import { parse } from 'graphql'; +import { createYoga } from 'graphql-yoga'; +import { buildSubgraphSchema } from '@apollo/subgraph'; + +const usernames = [ + { id: '1', username: '@ada' }, + { id: '2', username: '@complete' }, +]; + +const reviews = [ + { + id: '1', + authorID: '1', + product: { upc: '1' }, + body: 'Love it!', + }, + { + id: '2', + authorID: '1', + product: { upc: '2' }, + body: 'Too expensive.', + }, + { + id: '3', + authorID: '2', + product: { upc: '3' }, + body: 'Could be better.', + }, + { + id: '4', + authorID: '2', + product: { upc: '1' }, + body: 'Prefer something else.', + }, +]; + +type Review = (typeof reviews)[number]; +type User = (typeof usernames)[number]; +type Context = { + reviews: Review[]; + usernames: User[]; +}; + +const typeDefs = parse(/* GraphQL */ ` + type Review @key(fields: "id") { + id: ID! + body: String + author: User @provides(fields: "username") + product: Product + } + + extend type User @key(fields: "id") { + id: ID! 
reviews(product: { upc: string }, _args: unknown, context: Context) {
+ }, + "include": ["./src/**/*.ts"] +} diff --git a/packages/services/demo/federation/wrangler.toml b/packages/services/demo/federation/wrangler.toml new file mode 100644 index 000000000..e8ad63ad0 --- /dev/null +++ b/packages/services/demo/federation/wrangler.toml @@ -0,0 +1,6 @@ +name = "federation-demo" +compatibility_date = "2024-04-03" +node_compat = true + +[limits] +cpu_ms = 100 diff --git a/packages/web/docs/next.config.mjs b/packages/web/docs/next.config.mjs index 5afb40a8f..950340e02 100644 --- a/packages/web/docs/next.config.mjs +++ b/packages/web/docs/next.config.mjs @@ -31,32 +31,42 @@ export default withGuildDocs({ }, { source: '/docs/features/publish-schema', - destination: '/docs/features/schema-registry#publish-a-schema', + destination: '/docs/schema-registry#publish-a-schema', permanent: true, }, { source: '/docs/features/checking-schema', - destination: '/docs/features/schema-registry#check-a-schema', + destination: '/docs/schema-registry#check-a-schema', permanent: true, }, { source: '/docs/features/delete-schema', - destination: '/docs/features/schema-registry#delete-a-service', + destination: '/docs/schema-registry#delete-a-service', permanent: true, }, { source: '/docs/features/registry-usage', - destination: '/docs/features/high-availability-cdn', + destination: '/docs/high-availability-cdn', + permanent: true, + }, + { + source: '/docs/features/high-availability-cdn', + destination: '/docs/high-availability-cdn', permanent: true, }, { source: '/docs/features/monitoring', - destination: '/docs/features/usage-reporting', + destination: '/docs/schema-registry/usage-reporting', + permanent: true, + }, + { + source: '/docs/features/usage-reporting', + destination: '/docs/schema-registry/usage-reporting', permanent: true, }, { source: '/docs/features/schema-history', - destination: '/docs/features/schema-registry#schema-history-and-changelog', + destination: '/docs/schema-registry#schema-history-and-changelog', permanent: true, }, { @@ -69,9 
+79,14 @@ export default withGuildDocs({ destination: '/docs/management/projects#alerts-and-notifications', permanent: true, }, + { + source: '/docs/management/external-schema-composition', + destination: '/docs/schema-registry/external-schema-composition', + permanent: true, + }, { source: '/docs/features/external-schema-composition', - destination: '/docs/management/external-schema-composition', + destination: '/docs/schema-registry/external-schema-composition', permanent: true, }, { @@ -89,12 +104,47 @@ export default withGuildDocs({ destination: '/docs/management/sso-oidc-provider', permanent: true, }, + { + source: '/docs/features/schema-registry', + destination: '/docs/schema-registry', + permanent: true, + }, + { + source: '/docs/management/external-schema-composition', + destination: '/docs/schema-registry/external-schema-composition', + permanent: true, + }, + { + source: '/docs/features/laboratory', + destination: '/docs/dashboard/laboratory', + permanent: true, + }, + { + source: '/docs/management/contracts', + destination: '/docs/schema-registry/contracts', + permanent: true, + }, + { + source: '/docs/features/schema-policy', + destination: '/docs/schema-registry/schema-policy', + permanent: true, + }, + { + source: '/docs/features/app-deployments', + destination: '/docs/schema-registry/app-deployments', + permanent: true, + }, { // SEO: Redirect to the new URL source: '/docs/self-hosting/federation-2', destination: '/docs/self-hosting/external-composition', permanent: true, }, + { + source: '/docs/integrations/:path*', + destination: '/docs/other-integrations/:path*', + permanent: false, + }, ], swcMinify: true, webpack: (config, { webpack }) => { diff --git a/packages/web/docs/public/docs/pages/get-started/apollo-federation/cdn-access-token-settings.png b/packages/web/docs/public/docs/pages/get-started/apollo-federation/cdn-access-token-settings.png new file mode 100644 index 000000000..0b3514686 Binary files /dev/null and 
b/packages/web/docs/public/docs/pages/get-started/apollo-federation/cdn-access-token-settings.png differ diff --git a/packages/web/docs/public/docs/pages/get-started/apollo-federation/create-access-token.png b/packages/web/docs/public/docs/pages/get-started/apollo-federation/create-access-token.png new file mode 100644 index 000000000..61b26cea2 Binary files /dev/null and b/packages/web/docs/public/docs/pages/get-started/apollo-federation/create-access-token.png differ diff --git a/packages/web/docs/public/docs/pages/get-started/apollo-federation/create-cdn-access-token.png b/packages/web/docs/public/docs/pages/get-started/apollo-federation/create-cdn-access-token.png new file mode 100644 index 000000000..f1bce10c3 Binary files /dev/null and b/packages/web/docs/public/docs/pages/get-started/apollo-federation/create-cdn-access-token.png differ diff --git a/packages/web/docs/public/docs/pages/get-started/apollo-federation/created-access-token.png b/packages/web/docs/public/docs/pages/get-started/apollo-federation/created-access-token.png new file mode 100644 index 000000000..f83e0c920 Binary files /dev/null and b/packages/web/docs/public/docs/pages/get-started/apollo-federation/created-access-token.png differ diff --git a/packages/web/docs/public/docs/pages/get-started/apollo-federation/created-cdn-access-token.png b/packages/web/docs/public/docs/pages/get-started/apollo-federation/created-cdn-access-token.png new file mode 100644 index 000000000..b215e1b20 Binary files /dev/null and b/packages/web/docs/public/docs/pages/get-started/apollo-federation/created-cdn-access-token.png differ diff --git a/packages/web/docs/public/docs/pages/get-started/apollo-federation/created-cdn-access-token.png.png b/packages/web/docs/public/docs/pages/get-started/apollo-federation/created-cdn-access-token.png.png new file mode 100644 index 000000000..b215e1b20 Binary files /dev/null and b/packages/web/docs/public/docs/pages/get-started/apollo-federation/created-cdn-access-token.png.png 
differ diff --git a/packages/web/docs/public/docs/pages/get-started/apollo-federation/mesh-landing-page.png b/packages/web/docs/public/docs/pages/get-started/apollo-federation/mesh-landing-page.png new file mode 100644 index 000000000..3c485df72 Binary files /dev/null and b/packages/web/docs/public/docs/pages/get-started/apollo-federation/mesh-landing-page.png differ diff --git a/packages/web/docs/public/docs/pages/get-started/apollo-federation/registry-token-settings.png b/packages/web/docs/public/docs/pages/get-started/apollo-federation/registry-token-settings.png new file mode 100644 index 000000000..dea780962 Binary files /dev/null and b/packages/web/docs/public/docs/pages/get-started/apollo-federation/registry-token-settings.png differ diff --git a/packages/web/docs/public/docs/pages/get-started/apollo-federation/target-overview.png b/packages/web/docs/public/docs/pages/get-started/apollo-federation/target-overview.png new file mode 100644 index 000000000..0cd47549b Binary files /dev/null and b/packages/web/docs/public/docs/pages/get-started/apollo-federation/target-overview.png differ diff --git a/packages/web/docs/public/install-gateway.sh b/packages/web/docs/public/install-gateway.sh new file mode 100644 index 000000000..ee1eb289d --- /dev/null +++ b/packages/web/docs/public/install-gateway.sh @@ -0,0 +1,95 @@ +#!/bin/sh + +set -u + +GITHUB_OWNER="ardatan" +GITHUB_REPO="graphql-mesh" +BINARY_NAME="hive-gateway" + +# Determine the package version +if [ "$#" -eq 1 ]; then + TARGET_VERSION=$1 +else + echo "Version not provided. Retrieving the latest version..." + TARGET_VERSION=$(npm show @graphql-hive/gateway version 2> /dev/null) + if [ -z "$TARGET_VERSION" ]; then + echo "Could not retrieve the latest version of @graphql-hive/gateway." 
+ exit 1 + fi + echo "Using version: $TARGET_VERSION" +fi + +fetch_and_prepare_binary() { + identify_architecture || return 1 + architecture="$ARCH_DETECTED" + check_non_empty "$architecture" "architecture" + + RELEASE_TAG="v$TARGET_VERSION" + + DOWNLOAD_URL="https://github.com/$GITHUB_OWNER/$GITHUB_REPO/releases/download/$RELEASE_TAG/$BINARY_NAME-${architecture}.gz" + + destination_file="./$BINARY_NAME-${architecture}.gz" + echo "Downloading $BINARY_NAME from $DOWNLOAD_URL ..." + curl -sSfL "$DOWNLOAD_URL" -o "$destination_file" + + if [ $? -ne 0 ]; then + echo "Download failed: $DOWNLOAD_URL" + exit 1 + fi + + echo "Unzipping $destination_file..." + gunzip "$destination_file" + + if [ $? -ne 0 ]; then + echo "Unzipping failed: $destination_file" + exit 1 + fi + + binary_path="./$BINARY_NAME" + + mv "$BINARY_NAME-${architecture}" "$BINARY_NAME" + chmod +x "$BINARY_NAME" + echo "Binary downloaded and ready to use at $binary_path." +} + +identify_architecture() { + os_type="$(uname -s)" + cpu_type="$(uname -m)" + + case "$os_type" in + Linux) + os_type="Linux" + ;; + Darwin) + os_type="macOS" + ;; + *) + echo "No binaries available for OS: $os_type" + return 1 + ;; + esac + + case "$cpu_type" in + x86_64 | x64 | amd64) + cpu_type="X64" + ;; + arm64 | aarch64) + cpu_type="ARM64" + ;; + *) + echo "No binaries available for CPU architecture: $cpu_type" + return 1 + ;; + esac + + ARCH_DETECTED="$os_type-$cpu_type" +} + +check_non_empty() { + if [ -z "$1" ]; then + echo "Error: $2 is empty or undefined" + exit 1 + fi +} + +fetch_and_prepare_binary "$@" || exit 1 diff --git a/packages/web/docs/src/authors.ts b/packages/web/docs/src/authors.ts index ba83cbad4..e99fd67b1 100644 --- a/packages/web/docs/src/authors.ts +++ b/packages/web/docs/src/authors.ts @@ -16,6 +16,11 @@ export const authors: Record = { link: 'https://twitter.com/n1rual', github: 'n1ru4l', }, + arda: { + name: 'Arda Tanrikulu', + link: 'https://twitter.com/ardatanrikulu', + github: 'ardatan', + }, 
aleksandra: { name: 'Aleksandra Sikora', link: 'https://twitter.com/aleksandrasays', diff --git a/packages/web/docs/src/components/company-testimonials/index.tsx b/packages/web/docs/src/components/company-testimonials/index.tsx index 04f5b83ff..d211a1b27 100644 --- a/packages/web/docs/src/components/company-testimonials/index.tsx +++ b/packages/web/docs/src/components/company-testimonials/index.tsx @@ -89,7 +89,7 @@ export function CompanyTestimonialsSection({ className }: { className?: string } )} > - Loved by developers, trusted by business + Loved by developers, trusted by businesses } - documentationLink="/docs/features/schema-registry" + documentationLink="/docs/schema-registry" description="Publish schemas, compose federated services, and detect backward-incompatible changes with ease." highlights={highlights['Schema Registry']} setActiveHighlight={setActiveHighlight} @@ -166,7 +166,7 @@ export function FeatureTabs({ className }: { className?: string }) { } - documentationLink="/docs/features/usage-reporting" + documentationLink="/docs/schema-registry/usage-reporting" description="Enhanced GraphQL Observability tools provide insights into API usage and user experience metrics." highlights={highlights['GraphQL Observability']} setActiveHighlight={setActiveHighlight} diff --git a/packages/web/docs/src/components/frequently-asked-questions/questions.mdx b/packages/web/docs/src/components/frequently-asked-questions/questions.mdx index eeb0aad9d..36233a753 100644 --- a/packages/web/docs/src/components/frequently-asked-questions/questions.mdx +++ b/packages/web/docs/src/components/frequently-asked-questions/questions.mdx @@ -8,20 +8,21 @@ - Can GraphQL Hive be self-hosted? - Yes, the on-premise version (identical to Hive Cloud) is free and open-source. You can read - “Self-Hosting Hive”(link) in our documentation. + Yes, the on-premise version (identical to Hive Cloud) is free and open-source. 
+ [Read about Self-Hosting Hive in our documentation](/docs/self-hosting/get-started). -- What counts as GraphQL operation? +- What counts as a GraphQL operation? Every GraphQL request that is processed by your GraphQL API and reported to GraphQL Hive. If your server receives 1M GraphQL requests, all of them will be reported to Hive (assuming no sampling). - Are you SOC-2 Type II complaint? - We’re currently about to finish the process of being SOC-2 Type II complaint and getting the + We arere currently about to finish the process of being SOC-2 Type II complaint and getting the certificate. -- Do you have a gateway? +- Do you have a GraphQL Gateway? - Of course, we do! It is called Hive Gateway (previously known as GraphQL Mesh), we maintain it for - years already. + Of course, we do! It is called Hive Gateway (previously known as GraphQL Mesh) that supports + Apollo Federation and has been battle-tested by our clients for a few years now. + [Read more in our documentation](/docs/gateway). diff --git a/packages/web/docs/src/components/landing-page.tsx b/packages/web/docs/src/components/landing-page.tsx index 13a387740..adf1c69e6 100644 --- a/packages/web/docs/src/components/landing-page.tsx +++ b/packages/web/docs/src/components/landing-page.tsx @@ -175,19 +175,12 @@ function EnterpriseFocusedCards({ className }: { className?: string }) { } className="flex-1 px-0 sm:px-8 sm:py-0 md:px-8 md:py-0" > - - Persisted Documents - {' '} - secure and reduce traffic by hashing operations on app deployments. + Control user access with detailed, role-based permissions for enhanced security and + flexibility. @@ -216,8 +209,8 @@ function UltimatePerformanceCards() { icon={} className="flex-1 basis-full rounded-2xl md:basis-0 md:rounded-3xl" > - Minimize unnecessary network calls that hinder your application’s speed. Hive leverages - GraphQL to enhance responsiveness and scales these benefits across your enterprise. 
enhance responsiveness and scale these benefits across your enterprise.
b/packages/web/docs/src/pages/docs/api-reference/cli.mdx @@ -2,8 +2,8 @@ import { Callout, Tabs } from '@theguild/components' # Hive CLI (Command Line Interface) -You can perform [schema-registry actions](/docs/features/schema-registry#actions-on-schemas) on your -Hive targets schemas using the Hive CLI. +You can perform [schema-registry actions](/docs/schema-registry#actions-on-schemas) on your Hive +targets schemas using the Hive CLI. ## Installation @@ -73,7 +73,7 @@ You can also download a specific version of the binary: We recommend publishing the schema from your CI/CD pipeline. You can find more information in out - [CI/CD Integration guide](../integrations/ci-cd.mdx). + [CI/CD Integration guide](/docs/other-integrations/ci-cd). Start by setting your Hive token in @@ -82,7 +82,7 @@ file, or set it as `HIVE_TOKEN` environment variable. Further reading: -- [Publishing a schema to the Schema Registry](/docs/features/schema-registry#publish-a-schema) +- [Publishing a schema to the Schema Registry](/docs/schema-registry#publish-a-schema) #### Single Schema Project @@ -111,8 +111,8 @@ hive schema:publish --service reviews --url http://my-service.com/graphql schema Further reading: - [`schema:publish` API Reference](/docs/api-reference/cli#hive-schemapublish-file) -- [Apollo Router integration](/docs/integrations/apollo-router) -- [Apollo-Server integration](/docs/integrations/apollo-server) +- [Apollo Router integration](/docs/other-integrations/apollo-router) +- [Apollo-Server integration](/docs/other-integrations/apollo-server) #### Hive Metadata @@ -135,7 +135,7 @@ hive schema:publish schema.graphql --metadata '{ "someData": true }' Further reading: -- [Fetching Hive Metadata from the CDN](/docs/features/high-availability-cdn) +- [Fetching Hive Metadata from the CDN](/docs/high-availability-cdn) ### Check a schema @@ -176,7 +176,7 @@ hive schema:check --contextId "pr-123" "src/*.graphql" Further reading: -- [Publishing a schema to the Schema 
Registry](/docs/features/schema-registry#publish-a-schema) +- [Publishing a schema to the Schema Registry](/docs/schema-registry#publish-a-schema) - [Conditional Breaking Changes](/docs/management/targets#conditional-breaking-changes) ### Delete a schema @@ -381,9 +381,8 @@ Sometimes it is useful to fetch a schema (SDL or Supergraph) from Hive, for exam local development. This can be done using the `schema:fetch` command. - Don't confuse this with the [high-availability CDN](/docs/features/high-availability-cdn.mdx). - This command is used to fetch a schema from the API where the CDN always represents the latest - valid schema. + Don't confuse this with the [high-availability CDN](/docs/high-availability-cdn). This command is + used to fetch a schema from the API where the CDN always represents the latest valid schema. You can fetch a schema by using the action id (commit sha) that was used for publishing the schema @@ -408,7 +407,7 @@ You can fetch the GraphQL schema from the CDN using the `artifact:fetch` command You can learn how to create a CDN access token in the [High-Availability CDN - documentation](/docs/features/high-availability-cdn). + documentation](/docs/high-availability-cdn). ```bash @@ -436,8 +435,8 @@ hive schema:check schema.graphql --github ``` - Check our [CI/CD Integration guide](../integrations/ci-cd.mdx) for more information and GitHub - workflow examples. + Check our [CI/CD Integration guide](/docs/other-integrations/ci-cd) for more information and + GitHub workflow examples. 
## API Reference diff --git a/packages/web/docs/src/pages/docs/api-reference/client.mdx b/packages/web/docs/src/pages/docs/api-reference/client.mdx index 238838564..8b6015f16 100644 --- a/packages/web/docs/src/pages/docs/api-reference/client.mdx +++ b/packages/web/docs/src/pages/docs/api-reference/client.mdx @@ -14,11 +14,11 @@ import { Tabs } from '@theguild/components' Here's a list of official libraries for JavaScript and NodeJS: - `@graphql-hive/yoga` ([npm](https://npmjs.com/package/@graphql-hive/yoga), - [guide](../integrations/graphql-yoga)) - Integration with GraphQL Yoga. + [guide](/docs/other-integrations/graphql-yoga)) - Integration with GraphQL Yoga. - `@graphql-hive/apollo` ([npm](https://npmjs.com/package/@graphql-hive/apollo), - [guide](../integrations/apollo-server)) - Integration with Apollo Server. + [guide](/docs/other-integrations/apollo-server)) - Integration with Apollo Server. - `@graphql-hive/envelop` ([npm](https://npmjs.com/package/@graphql-hive/envelop), - [guide](../integrations/envelop)) - Integration with Envelop. + [guide](/docs/other-integrations/envelop)) - Integration with Envelop. - `@graphql-hive/core` ([npm](https://npmjs.com/package/@graphql-hive/core)) - core library for interacting with Hive's **Schema Registry** and **Usage Reporting**. @@ -26,12 +26,16 @@ You can refer to the following guides for getting started within your project, t page for configuring the client to your needs. - - - - - - + + + + + + #### Configuration @@ -316,7 +320,7 @@ useHive({ #### Persisted Documents Hive client supports resolving persisted documents. For getting started please refer to our -[App Deployments (Persisted Documents) documentation](/docs/features/app-deployments). +[App Deployments (Persisted Documents) documentation](/docs/schema-registry/app-deployments). 
{/* IMPORTANT: please don't forget to update the following when arguments change. simply run `node --import tsx packages/hive-gateway/src/bin.ts --help` and copy over the text */}
defaults to the following files respectively in the current working directory: gateway.config.ts, + gateway.config.mts, gateway.config.cts, gateway.config.js, gateway.config.mjs, gateway.config.cjs (env: CONFIG_PATH) + -h, --host host to use for serving (default: "127.0.0.1" (default: "127.0.0.1") + -p, --port port to use for serving (default: 4000 (env: PORT) + --polling schema polling interval in human readable duration (default: "10s") (env: POLLING) + --no-masked-errors don't mask unexpected errors in responses + --masked-errors mask unexpected errors in responses (default: true) + --hive-registry-token Hive registry token for usage metrics reporting (env: HIVE_REGISTRY_TOKEN) + --apollo-graph-ref Apollo graph ref of the managed federation graph (@) (env: APOLLO_GRAPH_REF) + --apollo-key Apollo API key to use to authenticate with the managed federation up link (env: APOLLO_KEY) + --help display help for command + +Commands: + supergraph [options] [schemaPathOrUrl] serve a Federation supergraph provided by a compliant composition tool such as GraphQL Mesh or Apollo Rover + subgraph [schemaPathOrUrl] serve a Federation subgraph that can be used with any Federation compatible router like Hive Gateway or Apollo Router + proxy [options] [endpoint] serve a proxy to a GraphQL API and add additional features such as monitoring/tracing, caching, rate limiting, security, and more + help [command] display help for command +``` + +All arguments can also be configured in the config file. + +### Environment Variables + +In addition to the env vars showcased in the [CLI usage](#usage), more are available. + +These are usually used for easier usage with these two schema registry services. You don't need any +configuration file if you provide these environment variables. 
+
+#### Hive Registry
+
+- `HIVE_CDN_ENDPOINT`: The endpoint of the Hive Registry CDN
+- `HIVE_CDN_KEY`: The API key provided by Hive Registry to fetch the schema
+- `HIVE_REGISTRY_TOKEN`: The token to push the metrics to Hive Registry
+
+[Learn more about Hive Registry integration here](/docs/gateway/supergraph-proxy-source)
+
+#### Apollo GraphOS
+
+- `APOLLO_KEY`: The API key provided by Apollo GraphOS to fetch the supergraph.
+- `APOLLO_GRAPH_REF`: The graph ref of the managed federation graph to fetch the supergraph for.
+- `APOLLO_SCHEMA_CONFIG_DELIVERY_ENDPOINT`: The URL of the managed federation up link. By default,
+  it uses the first uplink in the list.
+
+[Learn more about GraphOS integration here](/docs/gateway/supergraph-proxy-source)
diff --git a/packages/web/docs/src/pages/docs/api-reference/gateway-config.mdx b/packages/web/docs/src/pages/docs/api-reference/gateway-config.mdx
new file mode 100644
index 000000000..ce959cfc2
--- /dev/null
+++ b/packages/web/docs/src/pages/docs/api-reference/gateway-config.mdx
@@ -0,0 +1,449 @@
+---
+description: Hive Gateway Configuration Reference
+---
+
+import { Callout } from '@theguild/components'
+
+# Hive Gateway Configuration Reference
+
+An overview of all the configuration options for the `gateway.config.ts` used by the `hive-gateway`
+CLI.
+
+Both TypeScript (`*.ts`) and JavaScript (`*.js`) config filetypes are supported.
+
+## Default config files
+
+The following list of files is loaded by default, sorted by priority:
+
+- `gateway.config.ts` _(recommended)_
+- `gateway.config.mts`
+- `gateway.config.cts`
+- `gateway.config.js`
+- `gateway.config.mjs`
+- `gateway.config.cjs`
+
+### Supergraph Related
+
+#### `supergraph`
+
+You can provide `GraphQLSchema`, `DocumentNode` which has the AST of the supergraph or `string`
+which is the `SDL` representation of the supergraph. 
+ +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + supergraph: './supergraph.graphql', + // or + supergraph: 'https://example.com/supergraph.graphql', + // or you can provide a function that returns a promise of the schema + supergraph: () => + fetch('https://example.com/unified.graphql', { + headers: { + Authorization: 'Bearer token' + } + }).then(res => res.text()) +}) +``` + + + +For Hive Registry and Apollo GraphOS, you probably don't need to provide the `supergraph` option. + +- If you use Hive Registry, please refer to the dedicated section for + [Fetching Supergraph from Hive Registry CDN](/docs/gateway/supergraph-proxy-source) +- If you use Apollo GraphOS, please refer to the dedicated section for + [Fetching Supergraph from Apollo GraphOS](/docs/gateway/supergraph-proxy-source). + + + +#### Polling + +Let's say you have a source that can be changed after a while, it can be a CDN, schema registry or a +local file. So by enabling this option, Hive Gateway can poll the source and update the schema +automatically. + +If a function is provided as in the example above, that function will be called every time the +polling interval is reached. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + pollingInterval: 5_000 // Polling interval in milliseconds +}) +``` + +#### `additionalResolvers` + +You can provide additional resolvers to the supergraph. This can be useful if you want to add a +custom resolver to the supergraph, or override the existing one. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + additionalResolvers: { + Query: { + hello: () => 'Hello World' + } + } +}) +``` + +#### `transports` (Advanced usage only) + + + This is an advanced feature and should be used with caution. 
Use this only if you know what you + are doing. + + +Gateway Transports are the key component of the gateway runtime's execution. It allows the gateway +to communicate with the subgraph. For example `@graphql-mesh/transport-rest` is used to communicate +with the REST subgraphs generated by +[OpenAPI](https://the-guild.dev/graphql/mesh/v1/source-handlers/openapi) and +[JSON Schema](https://the-guild.dev/graphql/mesh/v1/source-handlers/json-schema) source handlers. +And GraphQL subgraphs use GraphQL HTTP Transport(`@graphql-mesh/transport-http`). + +Gateway looks up the supergraph, and checks the kind of the subgraph, and loads it by checking the +`@graphql-mesh/transport-{kind}` package, then loads it to create an executor for the subgraph. You +can see how an example `@transport` definition looks like +[here](https://github.com/ardatan/graphql-mesh/blob/master/e2e/auto-type-merging/__snapshots__/auto-type-merging.test.ts.snap#L4). + +And see the implementation of the default `transport` loading logic +[here](https://github.com/ardatan/graphql-mesh/blob/master/packages/fusion/runtime/src/utils.ts#L32). + +You can replace this logic by providing your own `transports`. + +### Subgraphs + +If you want to serve a single subgraph, you can provide the subgraph configuration as well. You can +generate subgraphs by using [GraphQL Mesh](https://graphql-mesh.com/) or any other Federation +compliant tool listed +[here](https://www.apollographql.com/docs/federation/building-supergraphs/supported-subgraphs/). 
+ +#### `subgraph` + +You can provide `GraphQLSchema`, `DocumentNode` which has the AST of the subgraph or `string` which +is the `SDL` representation of the subgraph + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + subgraph: './subgraph.graphql', + // or + subgraph: 'https://example.com/subgraph.graphql', + // or you can provide a function that returns a promise of the schema + subgraph: () => + fetch('https://example.com/subgraph.graphql', { + headers: { + Authorization: 'Bearer token' + } + }).then(res => res.text()) +}) +``` + + + The rest of the configuration options are the same as the supergraph configuration. + + +### Configure Hive Gateway as a GraphQL Proxy + +#### `proxy` + +HTTP executor options to proxy all incoming requests to another HTTP endpoint. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + proxy: { + endpoint: 'https://example.com/graphql' + } +}) +``` + + +By default, Hive Gateway introspects the schema from the endpoint. And if it fails, it skips the +validation and schema aware features. But if Hive CDN endpoint and key have been provided in the +configuration, Hive Gateway will fetch the schema from the Hive CDN. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + proxy: { + endpoint: 'https://example.com/graphql' + }, + schema: { + type: 'hive', + endpoint: 'https://cdn.graphql-hive.com/artifacts/v1/0123-3434/sdl', + key: 'SOME_HIVE_KEY' + } +}) +``` + + + +##### `endpoint` + +The URL of the GraphQL endpoint to proxy requests to. + +##### `headers` + +Additional headers to include when querying the original schema It can be a plain object or a +function that returns a plain object. 
+ +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + proxy: { + endpoint: 'https://example.com/graphql', + headers: execReq => ({ + // This will pass the original request headers to the proxied request + authorization: execReq.context.headers.authorization + }) + } +}) +``` + +##### `useGETForQueries` + +Whether to use the GET HTTP method for queries when querying the original schema. In that case, the +query will be sent as a query string parameter named `query`. + +##### `method` + +The HTTP method to use when querying the original schema. Default is `POST`. + +##### `timeout` + +The timeout in milliseconds for the request to the original schema. There is no timeout by default. + +##### `retry` + +Retry attempts in case of a failure. Default is 0. + +##### `credentials` + +Request Credentials (default: 'same-origin') +[Learn more](https://developer.mozilla.org/en-US/docs/Web/API/Request/credentials) + +#### `skipValidation` + +By default, Hive Gateway validates the operations on the gateway against the introspected schema. +This is recommended to keep it enabled for security reasons. But it brings a performance overhead. +If you want to disable this validation and send the operations directly to the upstream service, you +can set this option to `true`. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + proxy: { + endpoint: 'https://example.com/graphql' + }, + skipValidation: true +}) +``` + +### Configure Server + +#### `sslCredentials` for HTTPS + +This is the option to provide SSL Credentials for HTTPS Server. If this is provided, Hive Gateway +will be served via HTTPS instead of HTTP. 
+ +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + sslCredentials: { + key_file_name: 'path/to/key.pem', + cert_file_name: 'path/to/cert.pem', + ca_file_name: 'path/to/ca.pem', + passphrase: 'passphrase', + dh_params_file_name: 'path/to/dhparams.pem', + ssl_ciphers: 'ECDHE-R', + // This translates to SSL_MODE_RELEASE_BUFFERS + ssl_prefer_low_memory_usage: false + } +}) +``` + +#### `browser` + +This is the option to open the browser automatically when the server is started. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + /** + * Path to the browser that will be used by `Hive Gateway` to open a playground window in development mode + * This feature can be disabled by passing `false` + */ + browser: true // or `google-chrome` or `firefox` or `safari` or `edge` or `opera` or `vivaldi` or `brave` or `chromium` or `msedge` or `false` +}) +``` + +#### `port` and `host` + +These are the options to configure the port and host of the server in the configuration file rather +than passing them as CLI arguments. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + port: 4000, + host: 'localhost' +}) +``` + +#### `maxHeaderSize` + +This is the option to configure the maximum header size of the server. By default, it is 16KB. If +longer headers are sent, the server will respond with a 431 status code. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + maxHeaderSize: 32 * 1024 // 32KB +}) +``` + +#### `plugins` + +This is the option to extend your Hive Gateway with plugins. 
Hive Gateway uses +[GraphQL Yoga](https://the-guild.dev/graphql/yoga-server/docs/features/envelop-plugins), and +[Envelop](https://the-guild.dev/graphql/envelop) plugin system which allows you to hook into the +different phases of the GraphQL execution to manipulate or track the entire workflow step-by-step. + +[See dedicated plugins feature page for more information](/docs/gateway/other-features/custom-plugins) + +#### `cors` + +[See dedicated CORS feature page for more information](/docs/gateway/other-features/security/cors) + +#### `graphiql` + +You can configure GraphiQL playground that allows you to play with your GraphQL API. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + graphiql: { + defaultQuery: 'query { hello }' + } +}) +``` + +[Learn more about available GraphiQL Options from the dedicated GraphQL Yoga page](https://the-guild.dev/graphql/yoga-server/docs/features/graphiql) + +`TODO: Move those into a dedicated GraphiQL page under Features` + +#### `landingPage` + +If you want to disable the landing page, you can set this option to `false`. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + landingPage: false +}) +``` + +#### `batching` + +[See dedicated page](/docs/gateway/other-features/performance/request-batching) + +#### `fetchAPI` (Advanced usage only) + + + This is an advanced feature and should be used with caution. Use this only if you know what you + are doing. Use it on your own risk. + + +Hive Gateway heavily relies on WHATWG Fetch API not only as a HTTP Client but also for handling HTTP +Server components. So it uses [`@whatwg-node/fetch`](https://github.com/ardatan/whatwg-node) by +default which is a platform agnostic implementation of the Fetch API. If you want to use a different +Fetch API implementation, you can provide it here. 
+ +```ts filename="gateway.config.ts" +import fetch from 'node-fetch' +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + fetchAPI: { + fetch + } +}) +``` + +#### `logger` + +By default, Hive Gateway uses a simple logger that logs to the console by using standard `console` +methods. + +Using this option, you can do; + +- Disable logging by providing `false` +- Provide your own logger instance +- Choose a log level + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' +import { createLogger } from 'some-logger-library' + +export const gatewayConfig = defineConfig({ + logger: createLogger() + // or + logger: 'info' // or 'debug' or 'warn' or 'error' + // or + logger: false +}) +``` + +[Hive Gateway uses the same logging mechanism of GraphQL Yoga](https://the-guild.dev/graphql/yoga-server/docs/features/logging-and-debugging) + +#### `graphqlEndpoint` + +This is the option to provide a custom GraphQL endpoint for the server. By default, it is +`/graphql`. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + graphqlEndpoint: '/my-graphql-endpoint' +}) +``` + +#### `maskedErrors` + +This is enabled by default for security reasons. + +[Learn more about Error Masking](/docs/gateway/other-features/security/error-masking) + +#### `cache` + +Provide a cache storage for the server. By default, Hive Gateway uses an in-memory cache. + +[Learn more about Caching](/docs/gateway/other-features/performance) + +#### `pubsub` + +Provide a PubSub instance for the server. By default, Hive Gateway uses an in-memory PubSub. In +order to have a better scalability, you can provide a custom PubSub. 
+ +[Learn more about Subscriptions and Webhooks to see if you need this option](/docs/gateway/subscriptions#subscriptions-using-http-callback) + +#### `healthCheckEndpoint` and `readinessCheckEndpoint` + +[Learn more about Health Check and Readiness Check](/docs/gateway/monitoring-tracing#healthcheck) diff --git a/packages/web/docs/src/pages/docs/dashboard/_meta.ts b/packages/web/docs/src/pages/docs/dashboard/_meta.ts new file mode 100644 index 000000000..479699cbb --- /dev/null +++ b/packages/web/docs/src/pages/docs/dashboard/_meta.ts @@ -0,0 +1,5 @@ +export default { + insights: 'Insights', + explorer: 'Explorer', + laboratory: 'Laboratory', +}; diff --git a/packages/web/docs/src/pages/docs/dashboard/explorer.mdx b/packages/web/docs/src/pages/docs/dashboard/explorer.mdx new file mode 100644 index 000000000..117a5acc4 --- /dev/null +++ b/packages/web/docs/src/pages/docs/dashboard/explorer.mdx @@ -0,0 +1,31 @@ +import NextImage from 'next/image' +import { Callout } from '@theguild/components' +import schemaExplorerImage from '../../../../public/docs/pages/features/schema-explorer.png' + +# Schema Explorer + +The Hive Schema Explorer is a useful tool that can provide you with a comprehensive understanding of +your GraphQL schema. Not only does it allow you to explore the different types and fields of your +schema, but it also enables you to gain a deeper understanding of the arguments and their respective +input types. + + + +## Schema Usage and Coverage + +With [Usage Reporting](/docs/schema-registry/usage-reporting) feature enabled, you'll be able to see +an overview of the schema usage and coverage (for types, fields and input types), based on the +GraphQL operations you report to Hive. + +This feature is useful if you wish to understand how your GraphQL schema is being used and queried, +and understand the impact of changes you make to your schema. 
+ + + The maximum duration is defined by the retention of your [Hive + plan](/docs/management/organizations#subscription-and-billing), and depends on the data you + already sent before to Hive. + diff --git a/packages/web/docs/src/pages/docs/dashboard/insights.mdx b/packages/web/docs/src/pages/docs/dashboard/insights.mdx new file mode 100644 index 000000000..ffaea4c7b --- /dev/null +++ b/packages/web/docs/src/pages/docs/dashboard/insights.mdx @@ -0,0 +1,14 @@ +import NextImage from 'next/image' +import usageOperationsImage from '../../../../public/docs/pages/features/usage-operations.png' + +# Insights + +A list of all the GraphQL operations executed by your consumers, their performance metrics and total +count. By clicking on a specific query, you'll be able to see the full list of fields and arguments +used in the operation. + + diff --git a/packages/web/docs/src/pages/docs/features/laboratory.mdx b/packages/web/docs/src/pages/docs/dashboard/laboratory.mdx similarity index 97% rename from packages/web/docs/src/pages/docs/features/laboratory.mdx rename to packages/web/docs/src/pages/docs/dashboard/laboratory.mdx index 44ff12470..ee00cdbd6 100644 --- a/packages/web/docs/src/pages/docs/features/laboratory.mdx +++ b/packages/web/docs/src/pages/docs/dashboard/laboratory.mdx @@ -64,7 +64,7 @@ curl -X POST -H "X-Hive-Key: HIVE_TOKEN_HERE" -H "Content-Type: application/json We recommend using the CDN for consuming the GraphQL schema in your project. [See GraphQL Code - Generator Integration](/docs/integrations/graphql-code-generator). + Generator Integration](/docs/other-integrations/graphql-code-generator). 
Since the Laboratory schema is a valid GraphQL schema, and supports introspection, you may use it diff --git a/packages/web/docs/src/pages/docs/gateway/_meta.ts b/packages/web/docs/src/pages/docs/gateway/_meta.ts new file mode 100644 index 000000000..0265dcc79 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/_meta.ts @@ -0,0 +1,12 @@ +export default { + index: 'Getting Started', + 'supergraph-proxy-source': 'Supergraph / Proxy Source', + 'usage-reporting': 'Usage Reporting', + 'persisted-documents': 'Persisted Documents', + 'authorization-authentication': 'Authorization / Authentication', + 'monitoring-tracing': 'Monitoring/Tracing', + 'defer-stream': 'Incremental Delivery (Defer & Stream)', + subscriptions: 'Subscriptions', + 'other-features': 'Other Features', + deployment: 'Deployment', +}; diff --git a/packages/web/docs/src/pages/docs/gateway/authorization-authentication.mdx b/packages/web/docs/src/pages/docs/gateway/authorization-authentication.mdx new file mode 100644 index 000000000..8f04016ec --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/authorization-authentication.mdx @@ -0,0 +1,733 @@ +import { Callout, Tabs } from '@theguild/components' + +# Authorization and Authentication + +Hive Gateway supports Authentication and Authorization using JSON Web Tokens (JWT). + +A [JSON Web Tokens (JWT)](https://jwt.io/) is a signed token containing arbitrary informations, +commonly used for authentication. By being signed by the issuer of the token, it can be verified +that the token is valid and has not been tampered with. + +Hive Gateway provides a plugin to easily integrate JWT into your API, allowing you to easily +validate, decode and use the token (for identity and authorization). + +Once you have the JWT token extract and validated, the JWT claims (and optionally, the full token) +are injected to the Hive Gateway execution context, and forwarded to upstream GraphQL subgraphs, +using the `extensions` field. 
+ + + +When JWT is enabled and claims are forwarded to the upstream GraphQL subgraphs, you might want to +use [HMAC Signature](/docs/gateway/other-features/security/hmac-signature) between your Hive Gateway +and the subgraphs. This will ensure that the requests to the subgraphs are trusted and signed by the +gateway, and no other entity can execute requests to the subgraph on behalf of the end-users. + + + + + You can refer to [Generic Auth plugin docs](https://www.npmjs.com/package/@envelop/generic-auth), + if you need a more customized auth setup without JWT. + + +## How to use? + +Here's a mininal example for configuring the JWT plugin with a local signing key, and looking for +the token in the `authorization` header: + +```ts filename="gateway.config.ts" +import { + createInlineSigningKeyProvider, + defineConfig, + extractFromHeader +} from '@graphql-hive/gateway' + +const signingKey = 'my-secret-key' + +export const gatewayConfig = defineConfig({ + jwt: { + // Look and extract for the token in the 'authorization' header, with the 'Bearer' prefix. + lookupLocations: [extractFromHeader({ name: 'authorization', prefix: 'Bearer' })], + // Decode and validate the token using the provided signing key. + singingKeyProviders: [createInlineSigningKeyProvider(signingKey)], + // Forward the verified token payload to the upstream GraphQL subgraphs. + forwared: { + payload: true + } + } +}) +``` + +You can also pass +[additional configuration options to the Yoga plugin](https://the-guild.dev/graphql/yoga-server/docs/features/jwt#additional-configuration): + +```ts filename="gateway.config.ts" +import { defineConfig, createInlineSigningKeyProvider, createRemoteJwksSigningKeyProvider, extractFromHeader, extractFromCookie } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + jwt: { + // Forward the extracted token and claims to the upstream GraphQL subgraphs. 
+
+    forwarded: {
+      payload: true, // optional, default is "true"
+      token: false, // optional, default is "false"
+      extensionsFieldName: "jwt", // optional, default is "jwt"
+    },
+    // Configure your signing providers: either a local signing-key or a remote JWKS are supported.
+    singingKeyProviders: [
+      createInlineSigningKeyProvider(signingKey),
+      createRemoteJwksSigningKeyProvider({ jwksUri: 'https://example.com/.well-known/jwks.json' })
+    ],
+    // Configure where to look for the JWT token: in the headers, or cookies.
+    // By default, the plugin will look for the token in the 'authorization' header only.
+    lookupLocations: [
+      extractFromHeader({ name: 'authorization', prefix: 'Bearer' }),
+      extractFromCookie({ name: 'auth' }),
+    ],
+    // Configure your token issuers/audience/algorithms verification options.
+    // By default, the plugin will only verify the HS256/RS256 algorithms.
+    // Please note that this should match the JWT signer issuer/audience/algorithms.
+    tokenVerification: {
+      issuer: 'http://my-issuer.com',
+      audience: 'my-audience',
+      algorithms: ['HS256', 'RS256'],
+    },
+    // The plugin can reject the request if the token is missing or invalid (doesn't pass JWT `verify` flow).
+    // By default, the plugin will reject the request if the token is missing or invalid.
+    reject: {
+      missingToken: true,
+      invalidToken: true,
+    }
+  }
+})
+```
+
+## Configuration Options
+
+Please refer to the
+[configuration options of the Yoga plugin](https://the-guild.dev/graphql/yoga-server/docs/features/jwt#additional-configuration)
+for complete details and examples.
+
+## Forwarding the JWT token and payload
+
+The JWT token and payload can be forwarded to the upstream GraphQL subgraphs, using the `extensions`
+field of the request body.
+
+This workflow can allow you to easily delegate the authentication process to Hive Gateway, and allow
+the subgraphs to deal only with the user identity and authorization. 
+
+```mermaid
+flowchart LR
+  1(["End-user"]) --->|"query { comments { id author { id name }}}"| 2
+
+  subgraph Hive Gateway
+    2["Engine"]
+    3["JWT Plugin"]
+    4["Query Planner"]
+    2--->|"Bearer XYZ"|3
+    3--->|"{ sub: 123 }"|2
+    2--->4
+  end
+
+  subgraph "Users"
+    5["Users Subgraph"]
+    4--->|"query { _entities(representations: $r) { ... on User { name }} }\nextensions: { jwt: { payload: { sub: 123 }}}"|5
+  end
+
+  subgraph "Comments"
+    6["Comments Subgraph"]
+
+    4--->|"query { comments { id author { id }} }\nextensions: { jwt: { payload: { sub: 123 }}}"|6
+  end
+```
+
+To pass the full token payload, you can use the `forwarded.payload` option:
+
+```ts
+{
+  forwarded: {
+    payload: true // optional, default is "true"
+  }
+}
+```
+
+The token payload will be injected into `extensions.jwt.payload` of the upstream request body:
+
+```json
+{
+  "query": "{ comments { id author { id }} }",
+  "extensions": {
+    "jwt": {
+      "payload": {
+        "sub": 123
+      }
+    }
+  }
+}
+```
+
+You can also pass the full token, using the `forwarded.token` option:
+
+```ts
+{
+  forwarded: {
+    payload: true, // optional, default is "true"
+    token: true // optional, default is "false"
+  }
+}
+```
+
+And the token and (optional) prefix will be injected into `extensions.jwt.token` of the upstream
+HTTP request:
+
+```json
+{
+  "query": "{ comments { id author { id }} }",
+  "extensions": {
+    "jwt": {
+      "payload": {
+        "sub": 123
+      },
+      "token": {
+        "value": "XYZ",
+        "prefix": "Bearer"
+      }
+    }
+  }
+}
+```
+
+Additionally, if you wish to change the name of the `jwt` field in the extensions, you can use the
+`forwarded.extensionsFieldName` option to change it:
+
+```ts
+{
+  forwarded: {
+    extensionsFieldName: 'myJwt' // optional, default is "jwt"
+  }
+}
+```
+
+## Using the JWT token
+
+### Within Gateway
+
+The JWT plugin will inject the decoded token and payload into the context of Hive Gateway. 
+ +You can use the injected payload with other plugins, to implement things like authorization or +user-identity based logic. + +For example, with a plugin like Operation Field Permissions, you can use the `jwt` property of the +context to access the decoded JWT token, and decide what permissions to allow to the user based on +identity or token claims: + +```ts filename="gateway.config.ts" +import { useOperationFieldPermissions } from '@envelop/operation-field-permissions' +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // ... + }, + plugins: () => [ + useOperationFieldPermissions({ + getPermissions: async context => { + const { jwt } = context + + // Check based on identity / user-id. + if (jwt?.payload?.sub === '123') { + return new Set(['Query.*']) + } + + // Check based on token payload + if (jwt?.payload?.role === 'admin') { + return new Set(['Query.*']) + } + + // Default permissions + return new Set(['Query.greetings']) + } + }) + ] +}) +``` + +### In upstream GraphQL subgraphs + + + +{/* GraphQL-Yoga */} + + + +The JWT token and claims are forwarded to the upstream GraphQL subgraphs, using the `extensions` +field. + +To access the JWT token and claims in your upstream service resolvers/execution, you can use the +`extensions` field of the incoming GraphQL request. 
+ +If you are using [GraphQL-Yoga](https://the-guild.dev/graphql/yoga-server) for your upstream +subgraph implementation, you can use a built-in utility for extracting it for you in an easy way: + +```ts filename="yoga-subgraph.ts" +import { useForwardedJWT } from '@graphql-hive/gateway' + +const myYogaSubgraphServer = createYoga({ + schema: mySchema, + plugins: [ + useForwardedJWT({ + // The name of the field in the extensions object, default is "jwt" + extensionsFieldName: 'jwt', + // The name of the field to inject into the local context object, default is "jwt" + extendContextFieldName: 'jwt' + }) + ] +}) +``` + +With this plugin configured, you should be able to just access `context.jwt` in your subgraphs, just +like you would in the gateway. + +This makes the process of integrating JWT easier, and streamlined across the whole flow of +execution. + + + +{/* Apollo Server */} + + + +With Apollo-Server, you can access the forwarded claims/token, using a custom Apollo-Server plugin +that extracts `extensions` and injects it into the context: + +```ts filename="apollo-subgraph.ts" +import { ApolloServer, ApolloServerPlugin } from '@apollo/server' + +const extractJwtPlugin = { + async requestDidStart({ request, contextValue }) { + contextValue.jwt = request.extensions?.jwt + } +} satisfies ApolloServerPlugin<{ jwt?: { payload: Record } }> + +const server = new ApolloServer({ + // Now, in your schema resolvers, you can access the JWT token and claims using `context.jwt`. + plugins: [extractJwtPlugin] + // ... +}) +``` + + + +{/* Other GraphQL servers */} + + + +Other implementations for GraphQL subgraph servers can also access the JWT token and claims, by +looking at the `extensions` field of the incoming request. 
+ +The `extensions` field of the incoming request will contain the JWT token and claims, injected by +Hive Gateway, following this structure: + +```json +{ + "extensions": { + "jwt": { + "payload": { + "sub": 123 + }, + // optional, depends on the gateway plugin configuration + "token": { + "value": "XYZ", + "prefix": "Bearer" + } + } + } +} +``` + + + + + +## Additional Configuration + +### Token lookup + +The plugin can be configured to look for the JWT token in different locations: + + + +{/* HTTP Header */} + + + +By default, the plugin will look for the token in the `authorization` header. You can configure the +plugin to look for the token in a different header or with a different prefix. + +The prefix is being validated along with the token (for example: `Bearer my-token`). + +```ts +import { defineConfig, extractFromHeader } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // ... + lookupLocations: [extractFromHeader({ name: 'x-auth-token', prefix: 'Bearer' })] + } +}) +``` + + + +{/* HTTP Cookie */} + + + +You can configure the plugin to look for the token in a cookie. To do so, you need to enable cookie +parsing in the gateway. + +```ts +import { extractFromCookie, defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // ... + lookupLocations: [extractFromCookie({ name: 'my-cookie' })] + } + // Make sure you enabled cookie parsing in the gateway + cookies: true, +}) +``` + + + +{/* Custom Function */} + + + +You can configure the plugin to use a custom function to look for the token: + +```ts +import { defineConfig } from '@graphql-hive/gateway' + +const getToken = ({ request, serverContext, url }) => { + return request.headers.get('x-my-token') +} + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // ... 
+ lookupLocations: [getToken] + } +}) +``` + + + +{/* Multiple locations */} + + + +You can configure the plugin to look for the token in multiple locations. The plugin will look for +the token in the order you provide. + +```ts +import { defineConfig, extractFromCookie, extractFromHeader } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // ... + lookupLocations: [ + extractFromHeader({ name: 'authorization', prefix: 'Bearer' }), + extractFromHeader({ name: 'x-legacy-auth' }), + extractFromHeader({ name: 'x-api-key', prefix: 'API-Access' }), + extractFromCookie({ name: 'browserAuth' }) + ] + } +}) +``` + + + + + +--- + +### Signing Key providers + +The plugin can be configured to use different signing key providers: + + + +{/* Inline */} + + + +You can provide the signing key directly in the configuration. + +> Do not hardcode the signing key in your code. Use environment variables, local encrypted file or a +> secret store! + +```ts +import { createInlineSigningKeyProvider, defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // ... + singingKeyProviders: [createInlineSigningKeyProvider(process.env.MY_JWT_SECRET)] + } +}) +``` + +> In case you are using an inline signing key provider, all `keyid` / `kid` will be allowed in +> tokens. + + + +{/* Remote JWKS */} + + + +You can configure the plugin to fetch the signing key from a remote JWKS endpoint. + +Provide a `jwksClientOptions` object (see +[node-jwks-rsa documentation](https://github.com/auth0/node-jwks-rsa/blob/master/EXAMPLES.md)]. + +```ts +import { createRemoteJwksSigningKeyProvider, defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // ... 
+ singingKeyProviders: [ + createRemoteJwksSigningKeyProvider({ + jwksUri: 'https://example.com/.well-known/jwks.json' + }) + ] + } +}) +``` + + + +{/* Multiple providers */} + + + +When using multiple providers, the plugin will try to use the first available signing key. + +```ts +import { + createInlineSigningKeyProvider, + createRemoteJwksSigningKeyProvider, + defineConfig +} from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // ... + singingKeyProviders: [ + // In case your remote provider is not available, the plugin will try use the inline provider. + createRemoteJwksSigningKeyProvider({ + jwksUri: 'https://example.com/.well-known/jwks.json' + }), + createInlineSigningKeyProvider(process.env.MY_JWT_SECRET) + ] + } +}) +``` + + + + + +--- + +### Token Verification + +The plugin verification process can be customized to match the JWT token `issuer`, `audience`, and +algorithms. + +> Note that the verification options should match the JWT signer's configuration. + +You can find +[here the complete list of verification options](https://github.com/DefinitelyTyped/DefinitelyTyped/blob/master/types/jsonwebtoken/index.d.ts#L58-L77) +for this plugin. + +```ts +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // ... + tokenVerification: { + issuer: ['http://yoga'], + audience: 'my-audience', + algorithms: ['HS256', 'RS256'] + } + } +}) +``` + +### Execution Rejection + +The plugin can be configured to reject the request if the token is missing or invalid. + +By default, an authentication error will be thrown if the token is missing or invalid, and the +request will be reject with status code `401`. + +```ts +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // ... 
+ reject: { + missingToken: true, + invalidToken: true + } + } +}) +``` + +> In case you want to handle the error yourself, you can set +> `reject: { missingToken: false, invalidToken: false }` and handle the error in your resolvers. The +> `context.jwt` will be `undefined` in case of missing or invalid token. + +## Granular Protection using Auth Directives (`@authenticated`, `@requiresScopes` and `@policy`) + +### Configuration + +By default, the JWT plugin protects the whole schema. If you want to use a granular protection by +using Federation directives such as `@authenticated`, `@requiresScopes` and `@policy`, you can use +the Generic Auth plugin to have a granular protection using with or without JWT. + +With the following configuration, you can use the JWT plugin to extract the token and claims, and +then use the Generic Auth plugin to protect the schema with the Federation directives: + +```ts +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // ... + jwt: { + // You have to disable the default rejection of the JWT plugin + reject: { + missingToken: false, + invalidToken: false + } + }, + genericAuth: { + // Then set generic auth plugin to use granular mode + mode: 'protect-granular', + // Set where to extract the payload + resolveUser: ctx => ctx.jwt?.payload, + // If you want to continue execution even if some fields are rejected + rejectUnauthenticated: false + } +}) +``` + +### Protect a field using a field `@authenticated` + +In your GraphQL schema SDL, you can add `@authenticated` directive to your fields. + +```graphql +# Import it from Federation spec +extend schema @link(url: "https://specs.apollo.dev/federation/v2.6", import: ["@authenticated"]) + +type Query { + me: User! @authenticated + protectedField: String @authenticated + # publicField: String +} +``` + +> You can apply that directive to any GraphQL `field` definition, not only to root fields. 
+

### Role/scope based authentication (RBAC) with `@requiresScopes` directive

You can use the `@requiresScopes` directive to protect your schema based on the user's role or scope.
Here's an example of how you can use it:

```graphql
extend schema @link(url: "https://specs.apollo.dev/federation/v2.5", import: ["@requiresScopes"])

type Query {
  me: User! @requiresScopes(scopes: [["read:user"]])
  protectedField: String @requiresScopes(scopes: [["read:admin"]])
  publicField: String
}
```

By default, the plugin will try to extract available scopes for the current payload from the `scope`
property which is expected to be a string like `read:user read:admin`. However, you can customize
this behavior by providing a custom `extractScopes` function.

```ts
{
  resolveUserFn,
  validateUser,
  mode: 'protect-granular',
  // Set where to extract the payload
  resolveUser: ctx => ctx.jwt?.payload,
  extractScopes: jwtPayload => jwtPayload?.scopes // Expected to return an array of strings
}
```

You can also apply `AND` or `OR` logic to the scopes:

```graphql
extend schema @link(url: "https://specs.apollo.dev/federation/v2.5", import: ["@requiresScopes"])

type Query {
  # This field requires the user to have `read:user` OR `read:admin` scopes
  me: User! @requiresScopes(scopes: [["read:user"], ["read:admin"]])
  # This field requires the user to have `read:user` AND `read:admin` scopes
  protectedField: String @requiresScopes(scopes: [["read:admin", "read:user"]])
  publicField: String
}
```

### `@policy` directive to fetch the roles from a policy service

You can use the `@policy` directive to fetch the roles from a policy service. Here's an example of
how you can use it:

```graphql
extend schema @link(url: "https://specs.apollo.dev/federation/v2.5", import: ["@policy"])

type Query {
  me: User!
@policy(policies: [["read:user"]]) + protectedField: String @policy(policies: [["read:admin"]]) + publicField: String +} +``` + +It has the same logic with `@requiresScopes` but it can asynchronously fetch the roles from a +source; + +```ts +{ + resolveUserFn, + validateUser, + mode: 'protect-granular', + fetchPolicies: async user => { + const res = await fetch('https://policy-service.com', { + headers: { + Authorization: `Bearer ${user.token}` + } + }) + // Expected to return an array of strings + return res.json() + } +} +``` diff --git a/packages/web/docs/src/pages/docs/gateway/defer-stream.mdx b/packages/web/docs/src/pages/docs/gateway/defer-stream.mdx new file mode 100644 index 000000000..28e3069d0 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/defer-stream.mdx @@ -0,0 +1,41 @@ +--- +description: + Stream and defer are directives that allow you to improve latency for clients by sending the most + important data as soon as it's ready. +--- + +import { Callout } from '@theguild/components' + +# Defer and Stream + +Stream and defer are directives that allow you to improve latency for clients by sending the most +important data as soon as it's ready. + +As applications grow, the GraphQL operation documents can get bigger. The server will only send the +response back once all the data requested in the query is ready. However, not all requested data is +of equal importance, and the client may not need all of the data at once. To remedy this, GraphQL +specification working group is working on +[introducing new `@defer` and `@stream` directives](https://github.com/graphql/graphql-wg/blob/main/rfcs/DeferStream.md) +which allows applications to request a subset of data which is critical and get the rest of the data +in subsequent responses from the server. 
This +[proposal](https://github.com/graphql/graphql-spec/pull/742) is in +[Stage 2](https://github.com/graphql/graphql-spec/blob/main/CONTRIBUTING.md#stage-2-draft), meaning +GraphQL libraries can start implementing this as experimental feature to provide feedback to the +working group. + + + Stream and Defer are **experimental** features and not yet stable. The implementation can and will + change. Furthermore, there is no yet a stable specification for the incremental delivery protocol. + + +## Enabling in the configuration + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + deferStream: true +}) +``` + +[See more](https://the-guild.dev/graphql/yoga-server/docs/features/defer-stream#using-defer) diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/_meta.ts b/packages/web/docs/src/pages/docs/gateway/deployment/_meta.ts new file mode 100644 index 000000000..6d3f6d514 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/_meta.ts @@ -0,0 +1,7 @@ +export default { + index: 'Overview', + docker: 'Docker', + serverless: 'Serverless / On the Edge', + 'node-frameworks': 'Node.js Frameworks', + runtimes: 'Serverside JS Runtimes', +}; diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/docker.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/docker.mdx new file mode 100644 index 000000000..3e7168d77 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/docker.mdx @@ -0,0 +1,346 @@ +import { Callout } from '@theguild/components' + +# Docker + +Docker is a tool that allows you to package an application and its dependencies into a container +that can run on any system. This makes it easy to deploy applications in a consistent and +reproducible way, regardless of the underlying infrastructure. + +To simplify running your GraphQL gateway, you can use the Docker image and the Docker Compose +template we provide. 
This setup allows you to easily configure and run the gateway without the need +to install Node.js and the required gateway npm packages. + +## Prerequisites + +Make sure you have Docker installed on your system. + +You can follow [the official Docker Engine install manual](https://docs.docker.com/engine/install/) +in case you don't have Docker installed already. + +## Configuration + +### Arguments + +Hive Gateway can be configured with [CLI arguments](/docs/api-reference/gateway-cli#arguments) even +when running the image! + +For example, changing the supergraph to use the `my-schema.graphql` schema instead looks like this: + +```sh +docker run \ + -p 4000:4000 \ + -v "$(pwd)/my-schema.graphql:/serve/my-schema.graphql" \ + ghcr.io/ardatan/hive-gateway supergraph my-schema.graphql +``` + +For a full list of CLI arguments, please refer to the +[CLI arguments](/docs/api-reference/gateway-cli#arguments). + +### Config File + +Instead of configuring Hive Gateway with CLI arguments, we support configuring with a config file. + +You're recommended to use the `gateway.config.ts` file to configure Hive Gateway. Simply mount the +config file when running the image. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + proxy: { + endpoint: 'https://example.com/graphql' + } +}) +``` + +```sh +docker run \ + -p 4000:4000 \ + -v "$(pwd)/gateway.config.ts:/serve/gateway.config.ts" \ + ghcr.io/ardatan/hive-gateway proxy +``` + +For a full list of CLI arguments, please refer to the +[Config Reference](/docs/api-reference/gateway-config). + +### Changing Port in Container + +The default port where Hive Gateway listens is `4000`; however, maybe the container is running +inside a network (like when using +[Networking in Compose](https://docs.docker.com/compose/networking/)) and you wish to change the +port of Hive Gateway in the image. 
+

You can use the `gateway.config.ts` to change the port, or simply pass in the `--port` argument when
running the image:

```sh
docker run \
  -p 8080:8080 \
  -v "$(pwd)/supergraph.graphql:/serve/supergraph.graphql" \
  ghcr.io/ardatan/hive-gateway supergraph --port=8080
```

## Running

Having a `supergraph.graphql` already composed with [GraphQL Mesh](https://graphql-mesh.com/),
running the Docker image is as easy as:

```sh
docker run \
  -p 4000:4000 \
  -v "$(pwd)/supergraph.graphql:/serve/supergraph.graphql" \
  ghcr.io/ardatan/hive-gateway supergraph
```

## Docker Compose

You may have an environment where you want to use [Docker Compose](https://docs.docker.com/compose/)
and would like to add Hive Gateway there.

Start by defining the `docker-compose.yml`:

```yaml
services:
  hive-gateway:
    image: ghcr.io/ardatan/hive-gateway
    command: supergraph
    ports:
      - '4000:4000'
    # Add Hive Registry environment variables in case you use it
    # environment:
    #   HIVE_CDN_ENDPOINT:
    #   HIVE_CDN_KEY:
    #   HIVE_REGISTRY_TOKEN:
    volumes:
      - ./gateway.config.ts:/serve/gateway.config.ts
```

And then simply start the services with:

```sh
docker compose up
```

## Extend Docker Image

### Install Plugin

You may want to add additional functionality, or plugins to the base image - you just need to create
a new Dockerfile basing the image off `ghcr.io/ardatan/hive-gateway`.

If you need only a handful of plugins (or some other dependencies), you can simply extend the image
and install the modules with `npm i`:

For example, adding
[Block Field Suggestions Plugin](/docs/gateway/other-features/security/block-field-suggestions) to
the container would look like this:

```dockerfile filename="Dockerfile"
FROM ghcr.io/ardatan/hive-gateway

RUN npm i @escape.tech/graphql-armor-block-field-suggestions
```

```sh
docker build -t hive-gateway-w-block-suggestions .
+``` + +Configure to use the block field suggestions plugin: + +```ts filename="gateway.config.ts" +import { blockFieldSuggestionsPlugin } from '@escape.tech/graphql-armor-block-field-suggestions' +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: pluginCtx => [ + blockFieldSuggestionsPlugin({ + // Toggle the plugin | Default: true + enabled: true, + // Mask applied to the error message | default: '[Suggestion hidden]' + mask: '[Suggestion hidden]' + }) + ] +}) +``` + +And then simply start the new image with the config file mounted: + +```sh +docker run \ + -p 4000:4000 \ + -v "$(pwd)/gateway.config.ts:/serve/gateway.config.ts" \ + hive-gateway-w-block-suggestions supergraph +``` + +### Develop Plugin + +However, you may be developing a plugin and have a setup with some dependencies and source code, +copying over your project's files is the way to go. + +In the following example, we're developing a `useTiming` plugin that will add a human readable +execution duration to the GraphQL result `extensions` property. 
+

```json filename="package.json"
{
  "name": "my-timing",
  "dependencies": {
    "moment": "^2"
  },
  "devDependencies": {
    "@graphql-hive/gateway": "latest"
  }
}
```

```ts filename="my-timing.ts"
import moment from 'moment'
import type { GatewayPlugin } from '@graphql-hive/gateway'

export function useTiming(): GatewayPlugin {
  return {
    onExecute() {
      const start = Date.now()
      return {
        onExecuteDone({ result, setResult }) {
          const duration = moment.duration(Date.now() - start)
          if (isAsyncIterable(result)) {
            setResult(
              mapAsyncIterator(result, result => ({
                ...result,
                extensions: {
                  ...result?.extensions,
                  duration: duration.humanize()
                }
              }))
            )
            return
          }
          setResult({
            ...result,
            extensions: {
              ...result?.extensions,
              duration: duration.humanize()
            }
          })
        }
      }
    }
  }
}
```

```ts filename="gateway.config.ts"
import { defineConfig } from '@graphql-hive/gateway'
import { useTiming } from './my-timing'

export const gatewayConfig = defineConfig({
  plugins: () => [useTiming()]
})
```

Your Dockerfile should then look something like this:

```dockerfile filename="Dockerfile"
FROM ghcr.io/ardatan/hive-gateway

# we dont install dev deps because:
# 1. we need them for type checking only
# 2. Hive Gateway is already available in the docker image
COPY package*.json .
RUN npm i --omit=dev

COPY my-timing.ts .
COPY gateway.config.ts .
```

Then build your image:

```sh
docker build -t hive-gateway-w-my-timing .
```

And finally start it (the config file is in the image and doesn't need to be mounted):

```sh
docker run -p 4000:4000 hive-gateway-w-my-timing supergraph
```


  For faster development, you can mount the source code as volumes so that you don't have to rebuild
  the image on each run.
+ +```sh +docker run -p 4000:4000 \ + -v "$(pwd)/gateway.config.ts":/serve/gateway.config.ts \ + -v "$(pwd)/my-timing.ts":/serve/my-timing.ts \ + hive-gateway-w-my-timing supergraph +``` + + + +### Additional Resolvers + +Instead maybe you need to define additional resolvers that depend on other dependencies. Similarily +to the [Develop Plugin](#develop-plugin) approach, you can just copy the project code over and build +another image. + +Say you have the following files: + +```json filename="package.json" +{ + "name": "my-time", + "dependencies": { + "moment": "^2" + }, + "devDependencies": { + "@graphql-hive/gateway": "latest" + } +} +``` + +```js filename="my-time.ts" +import moment from 'moment' + +export const additionalResolvers = { + Query: { + formattedToday() { + return moment().format('DD.MM.YYYY') + } + } +} +``` + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' +import { additionalResolvers } from './my-time' + +export const gatewayConfig = defineConfig({ additionalResolvers }) +``` + +Your Dockerfile should then look something like this: + +```dockerfile filename="Dockerfile" +FROM ghcr.io/ardatan/hive-gateway + +# we dont install dev deps because: +# 1. we need them for type checking only +# 2. Hive Gateway is already available in the docker image +COPY package*.json . +RUN npm i --omit=dev + +COPY my-time.ts . +COPY gateway.config.ts . +``` + +Then build your image: + +```sh +docker build -t hive-gateway-w-add-res . 
+``` + +And finally start it (the config file is in the image and doesn't need to be mounted): + +```sh +docker run -p 4000:4000 hive-gateway-w-add-res supergraph +``` diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/index.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/index.mdx new file mode 100644 index 000000000..433c47d7d --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/index.mdx @@ -0,0 +1,87 @@ +# Run Anywhere - Deploy your Gateway + +import { Callout } from '@theguild/components' + +Once you configured, and tested your gateway. Now, it is time to deploy it. Hive Gateway Runtime +uses Web Standards (WHATWG Fetch API) not only as an HTTP client but also for handling the +server-side. That gives us the ability to run the gateway in any environment that runs JavaScript. + +Node.js is the most common server-side environment in JavaScript ecosystem but it doesn't use Web +Standards for handling HTTP requests. So we use a library called +[`@whatwg-node/server`](https://github.com/ardatan/whatwg-node/tree/master/packages/server#whatwg-node-generic-server-adapter) +that allows us to create a wrapper between `node:http` and Fetch API. + + +Check the following sections to see how to deploy your gateway in different environments on the left menu. + +**If your environment is not listed here**, that doesn't mean you can't deploy your gateway. Thanks +to our adapter system, **you can create your own implementation for your environment**. + +Feel free to contribute the documentation for your favorite server implementation if we don't have +it in the list. + + + +## Other Environments (Custom) + +Let's say you have an environment that is not listed here, you can still deploy your gateway. In +this case, we will show here how to pass the request information from your environment to Gateway, +then get the response for your environment back. 
+ +```ts +import { createGatewayRuntime } from '@graphql-hive/gateway' +import type { + ImaginaryEnvironmentRequest, + ImaginaryEnvironmentServerContext +} from '@imaginary-environment/types' +import { getMySupergraph } from './my-supergraph.js' + +// First pass it to the runtime as a context +const gatewayRuntime = createGatewayRuntime({ + supergraph: () => getMySupergraph() +}) + +// Let's say it needs a function exported +export async function gatewayEndpoint( + envRequest: ImaginaryEnvironmentRequest, + envContext: ImaginaryEnvironmentServerContext +) { + // Serve Runtime provides a fetch function which has exactly the same signature with regular `fetch` + const res = await gatewayRuntime.fetch( + envRequest.url, + { + method: envRequest.method, + headers: envRequest.headers, + body: envRequest.body // Body can be a string or a ReadableStream or UInt8Array, see [BodyInit](https://developer.mozilla.org/en-US/docs/Web/API/BodyInit) + }, + envContext + ) + // You can create an object from [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object + const headersObj: Record = {} + res.headers.forEach((value, key) => { + headersObj[key] = value + }) + // It returns [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) object + // See the methods and properties of the Response object from the link + // You can get a string + const bodyText = await res.text() + // You can get a stream + const bodyStream = res.body + // You can get a buffer + const bodyBuffer = await res.arrayBuffer() + // You can get a JSON object + const bodyJson = await res.json() + // You can get a blob + const bodyBlob = await res.blob() + // You can get a form data + const bodyFormData = await res.formData() + + // Then you can return the response to your environment + return { + status: res.status, + statusText: res.statusText, + headers: headersObj, + bodyText + } +} +``` diff --git 
a/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/_meta.ts b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/_meta.ts new file mode 100644 index 000000000..e2ed9251a --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/_meta.ts @@ -0,0 +1,11 @@ +export default { + index: 'Introduction', + express: 'Express', + fastify: 'Fastify', + koa: 'Koa', + hapi: 'Hapi', + nestjs: 'NestJS', + uwebsockets: 'µWebSockets.js', + nextjs: 'Next.js', + sveltekit: 'SvelteKit', +}; diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/express.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/express.mdx new file mode 100644 index 000000000..01c0f5c2e --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/express.mdx @@ -0,0 +1,97 @@ +--- +description: + Express is the most popular web framework for Node.js. It is a minimalist framework that provides + a robust set of features to handle HTTP on Node.js applications. +--- + +import { Callout } from '@theguild/components' + +# Integration with Express + +[Express is the most popular web framework for Node.js.](https://expressjs.com/) It is a minimalist +framework that provides a robust set of features to handle HTTP on Node.js applications. You can +easily integrate Hive Gateway into your Express application with a few lines of code. 
+ +## Example + +```ts +import express from 'express' +import { createGatewayRuntime } from '@graphql-hive/gateway' + +const app = express() + +const serveRuntime = createGatewayRuntime(/* Your configuration */) + +// Bind Hive Gateway to the graphql endpoint to avoid rendering the playground on any path +app.use(serveRuntime.graphqlEndpoint, serveRuntime) + +app.listen(4000, () => { + console.log('Running a GraphQL API server at http://localhost:4000/graphql') +}) +``` + +## Using Helmet + +If you are using [Helmet](https://helmetjs.github.io/) to set your +[Content Security Policy](https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP), you can use the +following configuration: + +```ts +app.use( + helmet({ + contentSecurityPolicy: { + directives: { + 'style-src': ["'self'", 'unpkg.com'], + 'script-src': ["'self'", 'unpkg.com', "'unsafe-inline'"], + 'img-src': ["'self'", 'raw.githubusercontent.com'] + } + } + }) +) +``` + +### Isolate GraphiQL configuration + +To avoid applying this configuration to other endpoints you may have on your Express server, you can +use `Express.Router` to create a new router instance and apply the configuration only to the Hive +Gateway endpoint. 
+ +```ts +import express from 'express' +import helmet from 'helmet' +import { createGatewayRuntime } from '@graphql-hive/gateway' + +const app = express() + +const serveRuntime = createGatewayRuntime(/* Your configuration */) +const hiveGWRouter = express.Router() +// GraphiQL specefic CSP configuration +hiveGWRouter.use( + helmet({ + contentSecurityPolicy: { + directives: { + 'style-src': ["'self'", 'unpkg.com'], + 'script-src': ["'self'", 'unpkg.com', "'unsafe-inline'"], + 'img-src': ["'self'", 'raw.githubusercontent.com'] + } + } + }) +) +hiveGWRouter.use(serveRuntime) + +// By adding the Hive Gateway router before the global helmet middleware, +// you can be sure that the global CSP configuration will not be applied to the Hive Gateway endpoint +app.use(serveRuntime.graphqlEndpoint, hiveGWRouter) + +// Add the global CSP configuration for the rest of your server. +app.use(helmet()) + +// You can know register your other endpoints that will not be affected by the GraphiQL CSP configuration +app.get('/hello', (req, res) => { + res.send('Hello World!') +}) + +app.listen(4000, () => { + console.log('Running a GraphQL API server at http://localhost:4000/graphql') +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/fastify.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/fastify.mdx new file mode 100644 index 000000000..060767e3a --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/fastify.mdx @@ -0,0 +1,79 @@ +--- +description: + Fastify is one of the popular HTTP server frameworks for Node.js. It is a very simple, yet + powerful framework that is easy to learn and use. +--- + +import { Callout } from '@theguild/components' + +# Integration with Fastify + +[Fastify is one of the popular HTTP server frameworks for Node.js.](https://www.fastify.io/) It is a +very simple, yet powerful framework that is easy to learn and use. 
+ +You can easily integrate Hive Gateway with Fastify. + +So you can benefit from the powerful plugins of Fastify ecosystem with Hive Gateway. +[See the ecosystem](https://fastify.io/docs/latest/Guides/Ecosystem) + +## Example + +```ts +import fastify, { FastifyReply, FastifyRequest } from 'fastify' +import { createGatewayRuntime } from '@graphql-hive/gateway' + +// This is the fastify instance you have created +const app = fastify({ logger: true }) + +const serveRuntime = createGatewayRuntime<{ + req: FastifyRequest + reply: FastifyReply +}>({ + // Integrate Fastify logger + logging: { + debug: (...args) => args.forEach(arg => app.log.debug(arg)), + info: (...args) => args.forEach(arg => app.log.info(arg)), + warn: (...args) => args.forEach(arg => app.log.warn(arg)), + error: (...args) => args.forEach(arg => app.log.error(arg)) + } +}) + +/** + * We pass the incoming HTTP request to Hive Gateway + * and handle the response using Fastify's `reply` API + * Learn more about `reply` https://www.fastify.io/docs/latest/Reply/ + **/ +app.route({ + // Bind to the Hive Gateway's endpoint to avoid rendering on any path + url: serveRuntime.graphqlEndpoint, + method: ['GET', 'POST', 'OPTIONS'], + handler: async (req, reply) => { + // Second parameter adds Fastify's `req` and `reply` to the GraphQL Context + const response = await serveRuntime.handleNodeRequestAndResponse(req, reply, { + req, + reply + }) + response.headers.forEach((value, key) => { + reply.header(key, value) + }) + + reply.status(response.status) + + reply.send(response.body) + + return reply + } +}) + +app.listen(4000) +``` + +## Add dummy content type parser for File Uploads + +Fastify needs to be aware of Hive Gateway will handle `multipart/form-data` requests because +otherwise it will throw an error something like `Unsupported media type`. 
+ +```ts +// This will allow Fastify to forward multipart requests to Hive Gateway +app.addContentTypeParser('multipart/form-data', {}, (req, payload, done) => done(null)) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/hapi.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/hapi.mdx new file mode 100644 index 000000000..5a2453429 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/hapi.mdx @@ -0,0 +1,67 @@ +import { Callout } from '@theguild/components' + +# Integration with Hapi + +[Hapi](https://hapi.dev) allows you to build powerful, scalable applications, with minimal overhead +and full out-of-the-box functionality. + +Hive Gateway can be integrated easily as a route to the existing Hapi application with a few lines +of code. + +## Example + +```ts +import http from 'node:http' +import { Readable } from 'node:stream' +import { createGatewayRuntime } from '@graphql-hive/gateway' +import Hapi from '@hapi/hapi' +import { schema } from './my-graphql-schema' + +interface ServerContext { + req: Hapi.Request + h: Hapi.ResponseToolkit +} + +const hiveGateway = createGatewayRuntime(/* Your configuration */) + +const server = Hapi.server({ port: 4000 }) + +server.route({ + method: '*', + path: hiveGateway.graphqlEndpoint, + options: { + payload: { + // let hiveGateway handle the parsing + output: 'stream' + } + }, + handler: async (req, h) => { + const { status, headers, body } = await hiveGateway.handleNodeRequestAndResponse( + req.raw.req, + req.raw.res, + { + req, + h + } + ) + + const res = h.response( + Readable.from(body, { + // hapi needs the stream not to be in object mode + objectMode: false + }) + ) + + for (const [key, val] of headers) { + res.header(key, val) + } + + return res.code(status) + } +}) + +server.start() +``` + +Hive Gateway should now be available at +[http://localhost:4000/graphql](http://localhost:4000/graphql). 
diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/index.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/index.mdx new file mode 100644 index 000000000..387d44dbd --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/index.mdx @@ -0,0 +1,20 @@ +import { Callout } from '@theguild/components' + +# Node.js Frameworks + +We highly recommend to use Hive Gateway with the CLI in Node.js as described in +[Node.js guide](/docs/gateway/deployment/runtimes/nodejs). But if you want to use Hive Gateway with +a Node.js framework, you can use the `createGatewayRuntime` function from `@graphql-hive/gateway` +package. + +In this case, you have to pass your serve configuration inside `createGatewayRuntime` instead of +exporting it `gatewayConfig` from `gateway.config.ts` file. + +It handles Node.js request and response types which are +[IncomingMessage](https://nodejs.org/api/http.html#http_class_http_incomingmessage) and +[ServerResponse](https://nodejs.org/api/http.html#http_class_http_serverresponse). + +If your framework has middlewares and so on, you can handle the response by yourself as in +[Fastify example](/docs/gateway/deployment/node-frameworks/fastify). + +Choose your framework from the list on the left to see an example. diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/koa.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/koa.mdx new file mode 100644 index 000000000..8641898ea --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/koa.mdx @@ -0,0 +1,52 @@ +--- +description: + Koa is a new web framework designed by the team behind Express, which aims to be a smaller, more + expressive, and more robust foundation for web applications and APIs. 
+--- + +import { Callout } from '@theguild/components' + +# Integration with Koa + +[Koa is a new web framework designed by the team behind Express, which aims to be a smaller, more expressive, and more robust foundation for web applications and APIs.](https://koajs.com) + +Hive Gateway can be integrated easily as a route to the existing Koa application with a few lines of +code. + +[So you can benefit middlewares written for Koa with Hive Gateway.](https://github.com/koajs/koa/wiki) + +## Example + +```ts +import Koa from 'koa' +import { createGatewayRuntime } from '@graphql-hive/gateway' + +const app = new Koa() + +const gatewayRuntime = createGatewayRuntime() + +// Bind Hive Gateway to `/graphql` endpoint +app.use(async ctx => { + // Second parameter adds Koa's context into GraphQL Context + const response = await gatewayRuntime.handleNodeRequestAndResponse(ctx.req, ctx.res, ctx) + + // Set status code + ctx.status = response.status + + // Set headers + response.headers.forEach((value, key) => { + ctx.append(key, value) + }) + + if (response.body) { + // Set body + ctx.body = response.body + } +}) + +app.listen(4000, () => { + console.log( + `Running a GraphQL API server at http://localhost:4000/${gatewayRuntime.graphqlEndpoint}` + ) +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/nestjs.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/nestjs.mdx new file mode 100644 index 000000000..1abe3570f --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/nestjs.mdx @@ -0,0 +1,3 @@ +import { Callout } from '@theguild/components' + +# Deployment with NestJS diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/nextjs.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/nextjs.mdx new file mode 100644 index 000000000..516091c5f --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/nextjs.mdx 
@@ -0,0 +1,34 @@ +--- +description: + Next.js is a web framework that allows you to build websites very quickly and Hive Gateway can be + integrated with Next.js easily as an API Route. +--- + +import { Callout } from '@theguild/components' + +# Integration with Next.js + +[Next.js](https://nextjs.org) is a web framework that allows you to build websites very quickly and +Hive Gateway can be integrated with Next.js easily as +[a custom route handler](https://nextjs.org/docs/app/building-your-application/routing/router-handlers). + +## Example + +```ts +// Next.js Custom Route Handler: https://nextjs.org/docs/app/building-your-application/routing/router-handlers + +import { createGatewayRuntime } from '@graphql-hive/gateway' + +const { handleRequest } = createGatewayRuntime({ + /* Your configuration here before the following required settings */ + + // While using Next.js file convention for routing, we need to configure Hive Gateway to use the correct endpoint + graphqlEndpoint: '/api/graphql', + + // Hive Gateway needs to know how to create a valid Next response + fetchAPI: { Response } +}) + +// Export the handler to be used with the following HTTP methods +export { handleRequest as GET, handleRequest as POST, handleRequest as OPTIONS } +``` diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/sveltekit.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/sveltekit.mdx new file mode 100644 index 000000000..86ac950fa --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/sveltekit.mdx @@ -0,0 +1,39 @@ +--- +description: + SvelteKit is a framework for rapidly developing robust, performant web applications using Svelte. +--- + +# Integration with SvelteKit + +[SvelteKit](https://kit.svelte.dev/) is a framework for rapidly developing robust, performant web +applications using [Svelte](https://svelte.dev/). You can easily integrate Hive Gateway into your +SvelteKit powered application. 
+ +## Example + +SvelteKit is typically used together with [Vite](https://vitejs.dev/) with the project structure +[looking like this](https://kit.svelte.dev/docs/project-structure). We also assume that you have +composed a `supergraph.graphql` with [GraphQL Mesh](https://graphql-mesh.com/). + +In this example, we want to integrate Hive Gateway into Vite's routes, we'll therefore use the +runtime. + +```sh npm2yarn +npm i @graphql-hive/gateway +``` + +Keeping the [aforementioned project layout](https://kit.svelte.dev/docs/project-structure) in mind, +create a new server route in `my-project/src/routes/graphql/+server.ts` to expose the GraphQL server +at `/graphql` and implement using the Hive Gateway runtime like this: + +```ts filename="my-project/src/routes/graphql/+server.ts" +import { createGatewayRuntime } from '@graphql-hive/gateway' + +const serve = createGatewayRuntime({ + supergraph: 'supergraph.graphql', // working directory is root of the project + graphqlEndpoint: '/graphql', // matches the server route path + fetchAPI: { Response } // use the native `Response` +}) + +export { serve as GET, serve as POST } +``` diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/uwebsockets.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/uwebsockets.mdx new file mode 100644 index 000000000..35fc89d2c --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/node-frameworks/uwebsockets.mdx @@ -0,0 +1,104 @@ +--- +description: µWebSockets.js is an HTTP/WebSocket server for Node.js. +--- + +import { Callout } from '@theguild/components' + +# Integration with µWebSockets.js + +[µWebSockets.js](https://github.com/uNetworking/uWebSockets.js) is an alternative to Node.js's +built-in HTTP server implementation. It is much faster than Node.js's `http` module as you can see +in the benchmarks in the +[GitHub repo](https://github.com/uNetworking/uWebSockets/tree/master/benchmarks#benchmark-driven-development). 
+Despite its name, it is not a WebSocket-only server, it does HTTP as well. + +Since Hive Gateway is framework and environment agnostic, it supports µWebSockets.js out of the box +with a simple configuration. + + + If you use Gateway CLI within Node.js, it already uses µWebSockets.js. You don't need to do + anything extra within CLI. Use this guide only if you really need to use µWebSockets.js directly. + + +## Example + +```ts filename="index.ts" +import { App, HttpRequest, HttpResponse } from 'uWebSockets.js' +import { createGatewayRuntime } from '@graphql-hive/gateway' + +interface ServerContext { + req: HttpRequest + res: HttpResponse +} + +export const gatewayRuntime = createGatewayRuntime(/* Your configuration */) + +App() + .any('/*', gatewayRuntime) + .listen('localhost', 4000, () => { + console.log(`Server is running on http://localhost:4000`) + }) +``` + +## Subscriptions with WebSockets + +You can also use WebSockets instead of SSE with `graphql-ws`; + +```sh npm2yarn +npm i graphql-ws +``` + +```ts filename="index.ts" +import { execute, ExecutionArgs, subscribe } from 'graphql' +import { makeBehavior } from 'graphql-ws/lib/use/uWebSockets' +import { App, HttpRequest, HttpResponse } from 'uWebSockets.js' +import { createGatewayRuntime } from '@graphql-hive/gateway' + +interface ServerContext { + req: HttpRequest + res: HttpResponse +} + +export const serveRuntime = createGatewayRuntime(/* Your configuration */) + +// Hive Gateway's envelop may augment the `execute` and `subscribe` operations +// so we need to make sure we always use the freshest instance +type EnvelopedExecutionArgs = ExecutionArgs & { + rootValue: { + execute: typeof execute + subscribe: typeof subscribe + } +} + +const wsHandler = makeBehavior({ + execute: args => (args as EnvelopedExecutionArgs).rootValue.execute(args), + subscribe: args => (args as EnvelopedExecutionArgs).rootValue.subscribe(args), + onSubscribe: async (ctx, msg) => { + const { schema, execute, subscribe, 
contextFactory, parse, validate } =
+      serveRuntime.getEnveloped(ctx)
+
+    const args: EnvelopedExecutionArgs = {
+      schema,
+      operationName: msg.payload.operationName,
+      document: parse(msg.payload.query),
+      variableValues: msg.payload.variables,
+      contextValue: await contextFactory(),
+      rootValue: {
+        execute,
+        subscribe
+      }
+    }
+
+    const errors = validate(args.schema, args.document)
+    if (errors.length) return errors
+    return args
+  }
+})
+
+App()
+  .any('/*', serveRuntime)
+  .ws(serveRuntime.graphqlEndpoint, wsHandler)
+  .listen('localhost', 4000, () => {
+    console.log(`Server is running on http://localhost:4000`)
+  })
+```
diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/_meta.ts b/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/_meta.ts
new file mode 100644
index 000000000..c6619f9fb
--- /dev/null
+++ b/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/_meta.ts
@@ -0,0 +1,6 @@
+export default {
+  index: 'Introduction',
+  nodejs: 'Node.js',
+  bun: 'Bun',
+  deno: 'Deno',
+};
diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/bun.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/bun.mdx
new file mode 100644
index 000000000..b96673aab
--- /dev/null
+++ b/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/bun.mdx
@@ -0,0 +1,33 @@
+---
+description:
+  Hive Gateway provides you a cross-platform GraphQL Server. So you can easily integrate it into any
+  platform besides Node.js.
+---
+
+import { Callout } from '@theguild/components'
+
+# Integration with Bun
+
+Hive Gateway provides you a cross-platform GraphQL Server. So you can easily integrate it into any
+platform besides Node.js. [Bun](https://bun.sh) is a modern JavaScript runtime like Node or Deno,
+and it supports Fetch API as a first class citizen. So the configuration is really simple like any
+other JS runtime with Hive Gateway;
+
+The following code is a simple example of how to use Hive Gateway with Bun.
+
+```ts
+import { createGatewayRuntime } from '@graphql-hive/gateway'
+
+const gatewayRuntime = createGatewayRuntime(/* Your configuration */)
+
+const server = Bun.serve({
+  fetch: gatewayRuntime
+})
+
+console.info(
+  `Server is running on ${new URL(
+    gatewayRuntime.graphqlEndpoint,
+    `http://${server.hostname}:${server.port}`
+  )}`
+)
+```
diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/deno.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/deno.mdx
new file mode 100644
index 000000000..1323e9112
--- /dev/null
+++ b/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/deno.mdx
@@ -0,0 +1,51 @@
+---
+description:
+  Hive Gateway provides you a cross-platform GraphQL Server. So you can easily integrate it into any
+  platform besides Node.js.
+---
+
+import { Callout } from '@theguild/components'
+
+# Integration with Deno
+
+Hive Gateway provides you a cross-platform GraphQL Server. So you can easily integrate it into any
+platform besides Node.js.
+[Deno is a simple, modern and secure runtime for JavaScript and TypeScript that uses V8 and is built in Rust](https://deno.land/).
+We will use `@graphql-hive/gateway` which has an agnostic HTTP handler using
+[Fetch API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API/Using_Fetch)'s
+[`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) and
+[`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) objects.
+
+## Example
+
+Create a `deno.json` file.
+[Learn more about import maps](https://deno.land/manual/basics/import_maps) + +Create a `deno-hive-gateway.ts` file: + +```json filename="deno.json" {3} +{ + "imports": { + "@graphql-hive/gateway": "npm:@graphql-hive/gateway@^0.1.0" + } +} +``` + +```ts filename="deno-hive-gateway.ts" +import { serve } from 'https://deno.land/std@0.157.0/http/server.ts' +import { createGatewayRuntime } from '@graphql-hive/gateway' + +const gatewayRuntime = createGatewayRuntime(/* Your configuration */) + +serve(gatewayRuntime, { + onListen({ hostname, port }) { + console.log(`Listening on http://${hostname}:${port}/${gatewayRuntime.graphqlEndpoint}`) + } +}) +``` + +And run it: + +```bash +deno run --allow-net deno-hive-gateway.ts +``` diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/index.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/index.mdx new file mode 100644 index 000000000..bc06db938 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/index.mdx @@ -0,0 +1,13 @@ +import { Callout } from '@theguild/components' + +# Server-side JavaScript Environments + +For Node.js and other Node-compliant environments, you can use Gateway CLI as described in the +[Node.js guide](/docs/gateway/deployment/runtimes/nodejs). But if you want to use Hive Gateway with +a server-side JavaScript environment that is not compatible with Node.js API, you can use the +`createGatewayRuntime` function from `@graphql-hive/gateway` package. + +In this case, you have to pass your serve configuration inside `createGatewayRuntime` instead of +exporting it `gatewayConfig` from `gateway.config.ts` file. + +See the guides on the left for examples with different server-side JavaScript environments. 
diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/nodejs.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/nodejs.mdx
new file mode 100644
index 000000000..d83369796
--- /dev/null
+++ b/packages/web/docs/src/pages/docs/gateway/deployment/runtimes/nodejs.mdx
@@ -0,0 +1,29 @@
+import { Callout } from '@theguild/components'
+
+# Node.js
+
+Node.js is the most common runtime for JavaScript.
+
+If you have a Node.js environment, we highly recommend using Hive Gateway with the CLI as described
+in the [introduction](/docs/gateway). If you really want to use the runtime in a customized way, you
+can use the `createGatewayRuntime` function from `@graphql-hive/gateway` package.
+
+## Hive Gateway CLI
+
+You can follow the introduction page directly to use Hive Gateway CLI. [See here](/docs/gateway)
+
+## Hive Gateway Runtime (advanced-only)
+
+Use this method only if you know what you are doing. It is recommended to use Hive Gateway CLI for
+most cases.
+
+```ts
+import { createServer } from 'http'
+import { createGatewayRuntime } from '@graphql-hive/gateway'
+
+const serveRuntime = createGatewayRuntime(/* Your configuration */)
+const server = createServer(serveRuntime)
+server.listen(4000, () => {
+  console.log(`Server is running on http://localhost:4000`)
+})
+```
diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/serverless/_meta.ts b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/_meta.ts
new file mode 100644
index 000000000..052f6f3dc
--- /dev/null
+++ b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/_meta.ts
@@ -0,0 +1,7 @@
+export default {
+  index: 'Introduction',
+  'cloudflare-workers': 'Cloudflare Workers',
+  'aws-lambda': 'AWS Lambda',
+  'google-cloud-platform': 'Google Cloud Platform',
+  'azure-functions': 'Azure Functions',
+};
diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/serverless/aws-lambda.mdx
b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/aws-lambda.mdx new file mode 100644 index 000000000..3bed3020c --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/aws-lambda.mdx @@ -0,0 +1,49 @@ +import { Callout } from '@theguild/components' + +# Deploying Hive Gateway to AWS Lambda + +AWS Lambda is a serverless computing platform that makes it easy to build applications that run on +the AWS cloud. Hive Gateway is platform agnostic so they can fit together easily. + + + Before you start, make sure you read the [Serverless / On the + Edge](/docs/gateway/deployment/serverless) page. + + +```ts +import { APIGatewayEvent, APIGatewayProxyResult, Context } from 'aws-lambda' +import { createGatewayRuntime } from '@graphql-hive/gateway' + +const serveRuntime = createGatewayRuntime(/* Your configuration */) + +export async function handler( + event: APIGatewayEvent, + lambdaContext: Context +): Promise { + const response = await serveRuntime.fetch( + event.path + + '?' + + new URLSearchParams((event.queryStringParameters as Record) || {}).toString(), + { + method: event.httpMethod, + headers: event.headers as HeadersInit, + body: event.body + ? Buffer.from(event.body, event.isBase64Encoded ? 
'base64' : 'utf8') + : undefined + }, + { + event, + lambdaContext + } + ) + + const responseHeaders = Object.fromEntries(response.headers.entries()) + + return { + statusCode: response.status, + headers: responseHeaders, + body: await response.text(), + isBase64Encoded: false + } +} +``` diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/serverless/azure-functions.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/azure-functions.mdx new file mode 100644 index 000000000..96749ff50 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/azure-functions.mdx @@ -0,0 +1,23 @@ +import { Callout } from '@theguild/components' + +# Deploying Hive Gateway to Azure Functions + +Azure Functions is a serverless environment that supports JavaScript. Hive Gateway is platform +agnostic and can be deployed to Azure Functions as well. + + + Before you start, make sure you read the [Serverless / On the + Edge](/docs/gateway/deployment/serverless) page. + + +```ts +import { app } from '@azure/functions' +import { createGatewayRuntime } from '@graphql-hive/gateway' + +const handler = createGatewayRuntime(/* Your configuration */) + +app.http('graphql', { + method: ['GET', 'POST'], + handler +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/serverless/cloudflare-workers.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/cloudflare-workers.mdx new file mode 100644 index 000000000..f9fbf7235 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/cloudflare-workers.mdx @@ -0,0 +1,31 @@ +import { Callout } from '@theguild/components' + +# Deploying Hive Gateway to Cloudflare Workers + +Hive Gateway a provides you a cross-platform GraphQL Server. So you can easily integrate it into any +platform besides Node.js. 
+ +[Cloudflare Workers](https://developers.cloudflare.com/workers) provides a serverless execution +environment that allows you to create entirely new applications or augment existing ones without +configuring or maintaining infrastructure. + + + Before you start, make sure you read the [Serverless / On the + Edge](/docs/gateway/deployment/serverless) page. + + +```ts +import { createGatewayRuntime } from '@graphql-hive/gateway' + +const gatewayRuntime = createGatewayRuntime({ + // gatewayConfig +}) + +export default { fetch: gatewayRuntime } +``` + + + If you want to use [Cloudflare KV + Cache](https://developers.cloudflare.com/workers/runtime-apis/kv) as a distributed cache, [see + here for Hive Gateway integration](/docs/gateway/other-features/performance#cloudflare-workers-kv) + diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/serverless/google-cloud-platform.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/google-cloud-platform.mdx new file mode 100644 index 000000000..d6ae2fcab --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/google-cloud-platform.mdx @@ -0,0 +1,133 @@ +--- +description: + Google Cloud Platform (GCP) is a suite of cloud computing services powered by Google. It is easy + to use Hive Gateway with GCP. +--- + +import { Callout } from '@theguild/components' + +# Deploying Hive Gateway to Google Cloud Platform + +Google Cloud Platform (GCP) is a suite of cloud computing services powered by Google. It is easy to +use Hive Gateway with GCP. + +## Prerequisites + +You will first need to install the GCP command-line tool: `gcloud`. +[You can find instructions here](https://cloud.google.com/sdk/docs/install). + +If you already have `gcloud` installed, make sure it is up to date with `gcloud components update`. 
+ +[Create a new project](https://cloud.google.com/resource-manager/docs/creating-managing-projects) +and make sure +[billing is enabled](https://cloud.google.com/billing/docs/how-to/verify-billing-enabled). + + + Running these examples requires you to have billing enabled on your GCP account. It should not + cost more than a few cents, but don't forget to clean up your project after you are done to avoid + unexpected charges. + + +## Cloud Functions + +Cloud Functions is a serverless execution environment for building and connecting cloud services. +With Cloud Functions, you write simple, single-purpose functions that are attached to events, such +as an HTTP request. + +It is probably the most straight forward way to deploy a Hive Gateway to GCP. + + + Before you start, make sure you read the [Serverless / On the + Edge](/docs/gateway/deployment/serverless) page. + + +### Installation + +```sh npm2yarn +npm i @google-cloud/functions-framework @graphql-hive/gateway graphql +``` + + + Don't forget to add the `main` field to your `package.json`. Google Cloud Functions rely on it to + know which file to run. + + + + This example uses ESM syntax, so you should set `"type": "module"` in your `package.json`. + + +### Usage + +```js filename=index.js +import { createGatewayRuntime } from '@graphql-hive/gateway' + +export const graphql = createGatewayRuntime(/* Configuration */) +``` + +You can now deploy your function with `gcloud` CLI: + +```bash +$ gcloud functions deploy graphql --runtime nodejs18 --trigger-http --allow-unauthenticated +``` + +You can now test your function by using the URL found in the `httpsTrigger.url` property returned by +the previous command or by using the `gcloud` CLI: + +```bash +gcloud functions describe graphql +``` + +## Cloud Run + +Cloud Run is the Platform as a Service by Google. It is straightforward to use Hive Gateway with it. + +### Installation + +Create a new Node project and add Hive Gateway to its dependencies. 
+ +```sh npm2yarn +npm i @graphql-hive/gateway graphql +``` + + + This example uses ESM syntax, so you should set `"type": "module"` in your `package.json`. + + +Add a `start` script to your `package.json`. Cloud Run needs to know how to start your application. + +You can use Gateway CLI as usual with `gateway.config.ts` + +```json +{ + "name": "hive-gateway-cloud-run-guide", + "version": "1.0.0", + "type": "module", + "scripts": { + "start": "hive-gateway supergraph" + }, + "dependencies": { + "graphql": "latest", + "@graphql-hive/gateway": "latest" + } +} +``` + +You can now deploy to Cloud Run. You can use all default values, except the last one, which allows +unauthenticated access to your service. + +```bash +$ gcloud run deploy --source . +``` + + + If this is your first time using Cloud Run, enabling the service can take up to a few minutes to + be fully effective. If you encounter any `403 Forbidden` errors, please wait for 2 minutes and try + again. + + +You can now access your API using the URL provided by `gcloud`. The default GraphQL endpoint is +`/graphql`. + +If you need to use TypeScript or any other tool that requires a build phase, such as code +generation, add a Dockerfile to the root of your project so that Cloud Run can build a custom image +for you. diff --git a/packages/web/docs/src/pages/docs/gateway/deployment/serverless/index.mdx b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/index.mdx new file mode 100644 index 000000000..93733e372 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/deployment/serverless/index.mdx @@ -0,0 +1,64 @@ +import { Callout } from '@theguild/components' + +# Serverless / On the Edge + +Hive Gateway can be deployed on the edge. This means that you can deploy your Hive Gateway to a +serverless environment like AWS Lambda, Cloudflare Workers, or Azure Functions. 
+
+For Serverless environments, you cannot use Gateway CLI `hive-gateway` but you can use the
+`createGatewayRuntime` function from `@graphql-hive/gateway` package.
+
+The gateway configuration goes into `createGatewayRuntime` function instead of `gatewayConfig`
+export in `gateway.config.ts` file.
+
+## Distributed Caching
+
+But you need to be aware of the limitations of these environments. For example, in-memory caching is
+not possible in these environments. So you have to set up a distributed cache like Redis or
+Memcached.
+
+[See here to configure cache storage](/docs/gateway/other-features/performance).
+
+## Bundling problem
+
+Hive Gateway cannot import the required dependencies manually, and load the supergraph from the file
+system. So if you are not using a schema registry such as Hive Registry or Apollo GraphOS, we need
+to save the supergraph as a code file (`supergraph.js` or `supergraph.ts`) and import it.
+
+### Loading the supergraph from a file
+
+For example, in GraphQL Mesh you need to save the supergraph as a TypeScript file:
+
+```ts filename="gateway.config.ts"
+import { defineConfig } from '@graphql-mesh/compose-cli'
+
+export const composeConfig = defineConfig({
+  output: 'supergraph.ts',
+  subgraph: [
+    //...
+  ]
+})
+```
+
+In `supergraph.ts` file, you need to export the supergraph:
+
+```ts
+export default /* GraphQL */ `
+  #...
+` +``` + +Then you need to import the supergraph in your serverless function: + +```ts +import { createGatewayRuntime, WSTransport } from '@graphql-hive/gateway' +// Let's say you are using WS transport +import supergraph from './supergraph.js' + +const serveRuntime = createGatewayRuntime({ + supergraph, + transports: { + ws: WSTransport + } +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/index.mdx b/packages/web/docs/src/pages/docs/gateway/index.mdx new file mode 100644 index 000000000..9dacb505d --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/index.mdx @@ -0,0 +1,345 @@ +--- +description: + Hive Gateway is the Apollo Federation Gateway and/or Proxy Gateway for your GraphQL services. +--- + +import { Callout, Cards, Tabs } from '@theguild/components' + +# Hive Gateway + +Hive Gateway is a fully open-source MIT-licensed GraphQL gateway that can act as a Apollo Federation +Gateway or a Proxy Gateway for any GraphQL services. + +The Hive Gateway can be run as a standalone binary, a Docker Image, or as a JavaScript package (e.g. 
+within Node.js, Bun, Deno, Google Cloud Functions, Azure Functions or Cloudflare Workers) + +Hive Gateway provides the following features on top of your Federated GraphQL schema or proxied +GraphQL schema: + +- [GraphQL Subscriptions](/docs/gateway/subscriptions) with WebSockets, HTTP Callbacks or SSE +- Automatic query and request batching to reduce the number of HTTP requests to your subgraph +- [JSON Web Tokens (JWT)](/docs/gateway/authorization-authentication) authentication between client + and gateway also between gateway and subgraph +- [Authorization](/docs/gateway/authorization-authentication) on specific fields and types using + Federation Auth directives like `@authenticated` and `@skipAuth` +- [Role-based Access Control (RBAC)](/docs/gateway/authorization-authentication) either + programmatically or with directives `@requiresScope` and `@policy` +- [Response Caching](/docs/gateway/other-features/performance/response-caching) based on either in + memory or Redis for distributed caching optionally with `@cacheControl` directive +- [Security](/docs/gateway/other-features/security) features such as safelisting, depth limit etc +- [Rate Limiting](/docs/gateway/other-features/security/rate-limiting) on specific fields and types + either programmatically or declaratively with `@rateLimit` directive +- [Prometheus and OpenTelemetry integration](/docs/gateway/monitoring-tracing) with fully + customizable spans and attributes +- [Persisted Documents](/docs/gateway/persisted-documents) backed by either Hive Registry or a + selfhosting storage +- [E2E HTTP Compression](/docs/gateway/other-features/performance/compression) from the client to + the subgraph for better performance and resource management +- [And more](/docs/gateway/other-features) + +## Installation + +Hive Gateway can be installed in different ways depending on your preference. + + + +{/* Binary */} + + + +This command will download the appropriate binary for your operating system. 
+ +```sh +curl -sSL https://graphql-hive.com/install-gateway.sh | sh +``` + + + +{/* Docker */} + + + +You can use the official Docker image to run Hive Gateway. + +```sh +docker pull ghcr.io/ardatan/hive-gateway +``` + + + +{/* JavaScript Package */} + + + +To use the NPM package, you need to have [Node.js](https://nodejs.org) installed in your +environment. Then, you can install Hive Gateway CLI with your preferred package manager. + +```sh npm2yarn +npm i @graphql-hive/gateway +``` + + + + + +## Starting the Gateway + +Hive Gateway supports two different modes: + +- **Apollo Federation.** Serve a supergraph provided by a schema registry like + [Hive Registry](https://the-guild.dev/graphql/hive/docs/schema-registry), a composition tool like + [Apollo Rover](https://www.apollographql.com/docs/rover/), + [GraphQL Mesh](https://graphql-mesh.com/) or any other Federation compliant composition tool such + as [Apollo Rover](https://www.apollographql.com/docs/rover/) or schema registry + ([Hive Registry](https://the-guild.dev/graphql/hive), + [Apollo GraphOS](https://www.apollographql.com/docs/graphos/)) +- **Proxy a GraphQL API.** Hive Gateway can also act as a proxy to an existing GraphQL API. + + + +{/* Apollo Federation */} + + + +To serve a Apollo Federation Gateway, we need to point the Gateway to either a local supergraph file +or a supergraph served by our schema registry. For this example, we will serve a supergraph from the +Hive schema registry. 
+ + + +{/* Binary */} + + + +```sh filename="Run Apollo Federation Gateway with the Hive Gateway Binary" +hive-gateway supergraph \ + http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/supergraph \ + --hive-cdn-key "YOUR HIVE CDN KEY" +``` + + + +{/* Docker */} + + + +```sh filename="Run Apollo Federation Gateway with the Hive Gateway Docker Image" +docker run --rm --name hive-gateway -p 4000:4000 \ + ghcr.io/ardatan/hive-gateway supergraph \ + http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/supergraph \ + --hive-cdn-key "YOUR HIVE CDN KEY" +``` + + + +{/* JavaScript Package */} + + + +If you installed the JavaScript package, you can use `npx` for running the CLI. + +```sh filename="Run Apollo Federation Gateway with npx" +npx hive-gateway supergraph \ + http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/supergraph \ + --hive-cdn-key "YOUR HIVE CDN KEY" +``` + + + + + + + +{/* Proxy */} + + + +In order to proxy a GraphQL API, we need to provide the URL of the API when starting our Gateway. +Optionally, we can also provide a schema file from either a local file or a schema registry, which +will be used instead of instrospecting the proxied API. 
+ + + +{/* Binary */} + + + +```sh filename="Run Proxy Gateway with the Hive Gateway Binary" +hive-gateway proxy https://localhost:3000/graphql \ + --hive-cdn-endpoint http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/sdl \ + --hive-cdn-key "YOUR HIVE CDN KEY" +``` + + + +{/* Docker */} + + + +```sh filename="Run Proxy Gateway with the Hive Gateway Binary" +docker run --rm --name hive-gateway -p 4000:4000 \ + ghcr.io/ardatan/hive-gateway proxy https://localhost:3000/graphql \ + --hive-cdn-endpoint http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/sdl \ + --hive-cdn-key "YOUR HIVE CDN KEY" +``` + + + +{/* JavaScript Package */} + + + +```sh filename="Run Proxy Gateway with the Hive Gateway Binary" +npx hive-gateway proxy https://localhost:3000/graphql \ + --hive-cdn-endpoint http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/sdl \ + --hive-cdn-key "YOUR HIVE CDN KEY" +``` + + + + + + + + + +By default, Hive Gateway will start a server on port 4000. You can customize that behavior. For that +please refer to our [CLI Reference](/docs/api-reference/gateway/cli). + +## Configuration File + +The Hive Gateway config file `gateway.config.ts` is used for enabling additional features such as +authorization, authentication caching, rate limiting, and more. The recommended language for the +configuration file is TypeScript. + +We can provide the CLI configuration parameters, also via the configuration file. + + + +{/* Apollo Federation */} + + + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + supergraph: { + type: 'hive', + endpoint: + 'http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/supergraph', + key: 'YOUR HIVE CDN KEY' + } +}) +``` + +Hive Gateway will automatically load the default config file and apply the settings. 
+ + + +{/* Binary */} + + + +```sh filename="Run Proxy Gateway with the Hive Gateway Binary using configuration file" +hive-gateway supergraph +``` + + + +{/* Docker */} + + + +For docker, we need to mount the configuration file into the container. + +```sh filename="Run Proxy Gateway with the Hive Gateway Binary" {2} +docker run --rm --name hive-gateway -p 4000:4000 \ + -v $(pwd)/gateway.config.ts:/serve/gateway.config.ts \ + ghcr.io/ardatan/hive-gateway supergraph +``` + + + +{/* JavaScript Package */} + + + +```sh filename="Run Proxy Gateway with the Hive Gateway Binary" +npx hive-gateway supergraph +``` + + + + + + + +{/* Proxy */} + + + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + proxy: { + endpoint: 'http://localhost:3000/graphql' + } +}) +``` + +Hive Gateway will automatically load the default config file and apply the settings. + + + +{/* Binary */} + + + +```sh filename="Run Proxy Gateway with the Hive Gateway Binary using configuration file" +hive-gateway proxy +``` + + + +{/* Docker */} + + + +For docker, we need to mount the configuration file into the container. + +```sh filename="Run Proxy Gateway with the Hive Gateway Binary" {2} +docker run --rm --name hive-gateway -p 4000:4000 \ + -v $(pwd)/gateway.config.ts:/serve/gateway.config.ts \ + ghcr.io/ardatan/hive-gateway proxy https://localhost:3000/graphql \ + --hive-cdn-endpoint http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/sdl \ + --hive-cdn-key "YOUR HIVE CDN KEY" +``` + + + +{/* JavaScript Package */} + + + +```sh filename="Run Proxy Gateway with the Hive Gateway Binary" +npx hive-gateway proxy +``` + + + + + + + + + +## Next steps + +After learning the first steps of Hive Gateway, you can explore the following topics. 
+ + + + + diff --git a/packages/web/docs/src/pages/docs/gateway/monitoring-tracing.mdx b/packages/web/docs/src/pages/docs/gateway/monitoring-tracing.mdx new file mode 100644 index 000000000..c546510b4 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/monitoring-tracing.mdx @@ -0,0 +1,1397 @@ +--- +description: + Monitoring and tracing are essential for debugging and understanding the performance and overall + behavior of your Hive Gateway. +--- + +import Image from 'next/image' +import { Table, Td, Th, Tr } from 'nextra/components' +import { Callout, Cards, Tabs } from '@theguild/components' + +# Monitoring and Tracing + +If something is not working as it should within your GraphQL gateway, you would not want it to go +unnoticed. + +Monitoring and tracing are essential for debugging and understanding the performance of your +gateway. + +You can use Gateway plugins to trace and monitor your gateway's execution flow together with all +outgoing HTTP calls and internal query planning. + +## Healthcheck + +Hive Gateway is aware of the usefulness of a health check and gives the user maximum possibilities +to use the built-in check. + +There are two types of health checks: **liveliness** and **readiness**, they both _are_ a health +check but convey a different meaning: + +- **Liveliness** checks whether the service is alive and running +- **Readiness** checks whether the upstream services are ready to perform work and execute GraphQL + operations + +The difference is that a service can be _live_ but not _ready_ - for example, server has started and +is accepting requests (alive), but the read replica it uses is still unavailable (not ready). + +Both endpoints are enabled by default. + +### Liveliness + +By default, you can check whether the gateway is alive by issuing a request to the `/healthcheck` +endpoint and expecting the response `200 OK`. A successful response is just `200 OK` without a body. 
+
+You can change this endpoint through the `healthCheckEndpoint` option:
+
+```ts filename="gateway.config.ts"
+import { defineConfig } from '@graphql-hive/gateway'
+
+export const gatewayConfig = defineConfig({
+  healthCheckEndpoint: '/healthcheck'
+})
+```
+
+### Readiness
+
+For the readiness check, Hive Gateway offers another endpoint (`/readiness`) which checks whether
+the services powering your gateway are ready to perform work. It returns `200 OK` if all the
+services are ready to execute GraphQL operations.
+
+You can customize the readiness check endpoint through the `readinessCheckEndpoint` option:
+
+```ts filename="gateway.config.ts"
+import { defineConfig } from '@graphql-hive/gateway'
+
+export const gatewayConfig = defineConfig({
+  readinessCheckEndpoint: '/readiness'
+})
+```
+
+## OpenTelemetry Traces
+
+Hive Gateway supports OpenTelemetry for tracing and monitoring your gateway.
+
+[OpenTelemetry](https://opentelemetry.io/) is a set of APIs, libraries, agents, and instrumentation
+to provide observability to your applications.
+
+The following are available to use with this plugin:
+
+- HTTP request: tracks the incoming HTTP request and the outgoing HTTP response
+- GraphQL Lifecycle tracing: tracks the GraphQL execution lifecycle (parse, validate and execution).
+- Upstream HTTP calls: tracks the outgoing HTTP requests made by the GraphQL execution.
+- Context propagation: propagates the trace context between the incoming HTTP request and the
+  outgoing HTTP requests.
+
+![image](https://github.com/user-attachments/assets/74918ade-8d7c-44ee-89b2-e10a13ffc4ad)
+
+### Usage Example
+
+```ts filename="gateway.config.ts"
+import { createStdoutExporter, defineConfig } from '@graphql-hive/gateway'
+
+export const gatewayConfig = defineConfig({
+  openTelemetry: {
+    exporters: [
+      // A simple output to the console.
+ // You can add more exporters here, please see documentation below for more examples. + createStdoutExporter() + ], + serviceName: 'my-custom-service-name', // Optional, the name of your service + tracer: myCustomTracer, // Optional, a custom tracer to use + inheritContext: true, // Optional, whether to inherit the context from the incoming request + propagateContext: true, // Optional, whether to propagate the context to the outgoing requests + // Optional config to customize the spans. By default all spans are enabled. + spans: { + http: true, // Whether to track the HTTP request/response + graphqlParse: true, // Whether to track the GraphQL parse phase + graphqlValidate: true, // Whether to track the GraphQL validate phase + graphqlExecute: true, // Whether to track the GraphQL execute phase + subgraphExecute: true, // Whether to track the subgraph execution phase + upstreamFetch: true // Whether to track the upstream HTTP requests + } + } +}) +``` + +### Exporters + +You may use one of the following exporters to send the traces to a backend, or create an configure +custom exporters and processors. + +To use a custom exporter that is not listen below, please refer to +[Customer Exporters in OpenTelemetry documentation](https://opentelemetry.io/docs/languages/js/exporters/#custom-exporters). + +In addition, you can fully customize the plugin's Tracer with any kind of OpenTelemetry +[tracer](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#tracer), +and integrate it to any tracing/metric platform that supports this standard. + + + +{/* Stdout */} + + + +A simple exporter that writes the spans to the `stdout` of the process. 
+ +```ts filename="gateway.config.ts" +import { createStdoutExporter, defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [createStdoutExporter()] + } +}) +``` + + + +{/* OTLP (HTTP) */} + + + +An exporter that writes the spans to an OTLP-supported backend using HTTP. + +```ts filename="gateway.config.ts" +import { createOtlpHttpExporter, defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + createOtlpHttpExporter({ + url: 'http://my-otlp-backend:4318' + // ... + // additional options to pass to @opentelemetry/exporter-trace-otlp-http + // https://www.npmjs.com/package/@opentelemetry/exporter-trace-otlp-http + }) + ] + } +}) +``` + + + +{/* OTLP (gRPC) */} + + + +An exporter that writes the spans to an OTLP-supported backend using gRPC. + +```ts filename="gateway.config.ts" +import { createOtlpGrpcExporter, defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + createOtlpGrpcExporter({ + url: 'http://my-otlp-backend:4317' + // ... 
+ // additional options to pass to @opentelemetry/exporter-trace-otlp-grpc + // https://www.npmjs.com/package/@opentelemetry/exporter-trace-otlp-grpc + }) + ] + } +}) +``` + + + +{/* Jaeger */} + + + +[Jaeger](https://www.jaegertracing.io/) supports [OTLP over HTTP/gRPC](#otlp-over-http), so you can +use it by pointing the `createOtlpHttpExporter`/`createOtlpGrpcExporter` to the Jaeger endpoint: + +```ts filename="gateway.config.ts" +import { createOtlpHttpExporter, defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + createOtlpHttpExporter({ + url: 'http://my-jaeger-backend:4318' + }) + ] + } +}) +``` + +> Your Jaeger instance needs to have OTLP ingestion enabeld, so verify that you have the +> `COLLECTOR_OTLP_ENABLED=true` environment variable set, and that ports `4317` and `4318` are +> acessible. + +To test this integration, you can run a local Jaeger instance using Docker: + +``` +docker run -d --name jaeger \ + -e COLLECTOR_OTLP_ENABLED=true \ + -p 5778:5778 \ + -p 16686:16686 \ + -p 4317:4317 \ + -p 4318:4318 \ + jaegertracing/all-in-one:latest +``` + + + +{/* NewRelic */} + + + +[NewRelic](https://newrelic.com/) supports [OTLP over HTTP/gRPC](#otlp-over-http), so you can use it +by configuring the `createOtlpHttpExporter`/`createOtlpGrpcExporter` to the NewRelic endpoint: + +```ts filename="gateway.config.ts" +import { createOtlpHttpExporter, defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + createOtlpHttpExporter({ + url: 'http://:4318' + }) + ] + } +}) +``` + +> For additional information and NewRelic ingestion endpoints, see +> [**New Relic OTLP endpoint**](https://docs.newrelic.com/docs/opentelemetry/best-practices/opentelemetry-otlp/). 
+ + + +{/* Datadog */} + + + +[DataDog Agent](https://docs.datadoghq.com/agent/) supports [OTLP over HTTP/gRPC](#otlp-over-http), +so you can use it by pointing the `createOtlpHttpExporter` to the DataDog Agent endpoint: + +```ts filename="gateway.config.ts" +import { createOtlpHttpExporter, defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + createOtlpHttpExporter({ + url: 'http://:4318' + }) + ] + } +}) +``` + +> For additional information, see +> [**OpenTelemetry in Datadog**](https://docs.datadoghq.com/opentelemetry/interoperability/otlp_ingest_in_the_agent/?tab=host#enabling-otlp-ingestion-on-the-datadog-agent). + + + +{/* Zipkin */} + + + +[Zipkin](https://zipkin.io/) is using a custom protocol to send the spans, so you can use the Zipkin +exporter to send the spans to a Zipkin backend: + +```ts filename="gateway.config.ts" +import { createZipkinExporter, defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + createZipkinExporter({ + url: 'http://:9411/api/v2/spans' + // ... + // additional options to pass to @opentelemetry/exporter-zipkin + // https://www.npmjs.com/package/@opentelemetry/exporter-zipkin + }) + ] + } +}) +``` + + + + + +### Batching + +All built-in processors allow you to configure batching options by an additional argument to the +factory function. + +The following configuration are allowed: + +- `true` (default): enables batching and use + [`BatchSpanProcessor`](https://opentelemetry.io/docs/specs/otel/trace/sdk/#batching-processor) + default config. +- `object`: enables batching and use + [`BatchSpanProcessor`](https://opentelemetry.io/docs/specs/otel/trace/sdk/#batching-processor) + with the provided configuration. 
+- `false` - disables batching and use + [`SimpleSpanProcessor`](https://opentelemetry.io/docs/specs/otel/trace/sdk/#simple-processor) + +> By default, the batch processor will send the spans every 5 seconds or when the buffer is full. +> +> `{ scheduledDelayMillis: 5000, maxQueueSize: 2048, exportTimeoutMillis: 30000, maxExportBatchSize: 512 }` + +> You can learn more about the batching options in the +> [Picking the right span processor](https://opentelemetry.io/docs/languages/js/instrumentation/#picking-the-right-span-processor) +> page. + +### Reported Spans + +The plugin exports OpenTelemetry spans for the following operations: + +
+ +HTTP Server + + + This span is created for each incoming HTTP request, and acts as a root span for the entire + request. Disabling this span will also disable the other hooks and spans. + + +By default, the plugin will a root span for the HTTP layer as a span (`METHOD /path`) with the +following attributes for the HTTP request: + +- `http.method`: The HTTP method +- `http.url`: The HTTP URL +- `http.route`: The HTTP status code +- `http.scheme`: The HTTP scheme +- `http.host`: The HTTP host +- `net.host.name`: The hostname +- `http.user_agent`: The HTTP user agent (based on the `User-Agent` header) +- `http.client_ip`: The HTTP connecting IP (based on the `X-Forwarded-For` header) + +And the following attributes for the HTTP response: + +- `http.status_code`: The HTTP status code + +> An error in the this phase will be reported as an +> [error span](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/) with the +> HTTP status text and as an OpenTelemetry +> [`Exception`](https://opentelemetry.io/docs/specs/otel/trace/exceptions/). + +You may disable this by setting `spans.http` to `false`: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + http: false + } + } +}) +``` + +Or, you may filter the spans by setting the `spans` configuration to a function: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + http: payload => { + // Filter the spans based on the payload + return true + } + } + } +}) +``` + +> The `payload` object is the same as the one passed to the +> [`onRequest` hook](https://github.com/ardatan/whatwg-node/blob/master/packages/server/src/plugins/types.ts#L16-L25). + +
+ +
+ +GraphQL Parse + +By default, the plugin will report the validation phase as a span (`graphql.validate`) with the +following attributes: + +- `graphql.document`: The GraphQL query string +- `graphql.operation.name`: The operation name + +> An error in the parse phase will be reported as an +> [error span](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/), including +> the error message and as an OpenTelemetry +> [`Exception`](https://opentelemetry.io/docs/specs/otel/trace/exceptions/). + +You may disable this by setting `spans.graphqlParse` to `false`: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + graphqlParse: false + } + } +}) +``` + +Or, you may filter the spans by setting the `spans` configuration to a function: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + graphqlParse: payload => { + // Filter the spans based on the payload + return true + } + } + } +}) +``` + +> The `payload` object is the same as the one passed to the +> [`onParse` hook](https://the-guild.dev/graphql/envelop/v4/plugins/lifecycle#before). + +
+ +
+ +GraphQL Validate + +By default, the plugin will report the validation phase as a span (`graphql.validate`) with the +following attributes: + +- `graphql.document`: The GraphQL query string +- `graphql.operation.name`: The operation name + +> An error in the validate phase will be reported as an +> [error span](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/), including +> the error message and as an OpenTelemetry +> [`Exception`](https://opentelemetry.io/docs/specs/otel/trace/exceptions/). + +You may disable this by setting `spans.graphqlValidate` to `false`: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + graphqlValidate: false + } + } +}) +``` + +Or, you may filter the spans by setting the `spans` configuration to a function: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + graphqlValidate: payload => { + // Filter the spans based on the payload + return true + } + } + } +}) +``` + +> The `payload` object is the same as the one passed to the +> [`onValidate` hook](https://the-guild.dev/graphql/envelop/v4/plugins/lifecycle#before-1). + +
+ +
+ +GraphQL Execute + +By default, the plugin will report the execution phase as a span (`graphql.execute`) with the +following attributes: + +- `graphql.document`: The GraphQL query string +- `graphql.operation.name`: The operation name +- `graphql.operation.type`: The operation type (`query`/`mutation`/`subscription`) + +> An error in the execute phase will be reported as an +> [error span](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/), including +> the error message and as an OpenTelemetry +> [`Exception`](https://opentelemetry.io/docs/specs/otel/trace/exceptions/). + +You may disable this by setting `spans.graphqlExecute` to `false`: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + graphqlExecute: false + } + } +}) +``` + +Or, you may filter the spans by setting the `spans` configuration to a function: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + graphqlExecute: payload => { + // Filter the spans based on the payload + return true + } + } + } +}) +``` + +> The `payload` object is the same as the one passed to the +> [`onExecute` hook](https://the-guild.dev/graphql/envelop/v4/plugins/lifecycle#before-3). + +
+ +
+ +Subgraph Execute + +By default, the plugin will report the subgraph execution phase as a span (`subgraph.execute`) with +the following attributes: + +- `graphql.document`: The GraphQL query string executed to the upstream +- `graphql.operation.name`: The operation name +- `graphql.operation.type`: The operation type (`query`/`mutation`/`subscription`) +- `gateway.upstream.subgraph.name`: The name of the upstream subgraph + +In addition, the span will include the following attributes for the HTTP requests; + +- `http.method`: The HTTP method +- `http.url`: The HTTP URL +- `http.route`: The HTTP status code +- `http.scheme`: The HTTP scheme +- `net.host.name`: The hostname +- `http.host`: The HTTP host + +And the following attributes for the HTTP response: + +- `http.status_code`: The HTTP status code + +You may disable this by setting `spans.subgraphExecute` to `false`: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + subgraphExecute: false + } + } +}) +``` + +Or, you may filter the spans by setting the `spans.subgraphExecute` configuration to a function: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + subgraphExecute: payload => { + // Filter the spans based on the payload + return true + } + } + } +}) +``` + +> The `payload` object is the same as the one passed to the +> [`onSubgraphHook` hook](/docs/gateway/other-features/custom-plugins#onsubgraphexecute). + +
+ +
+ +Upstream Fetch + +By default, the plugin will report the upstream fetch phase as a span (`http.fetch`) with the +information about outgoing HTTP calls. + +The following attributes are included in the span: + +- `http.method`: The HTTP method +- `http.url`: The HTTP URL +- `http.route`: The HTTP status code +- `http.scheme`: The HTTP scheme +- `net.host.name`: The hostname +- `http.host`: The HTTP host + +And the following attributes for the HTTP response: + +- `http.status_code`: The HTTP status code + +You may disable this by setting `spans.upstreamFetch` to `false`: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + upstreamFetch: false + } + } +}) +``` + +Or, you may filter the spans by setting the `spans.upstreamFetch` configuration to a function: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + openTelemetry: { + exporters: [ + /* ... */ + ], + spans: { + /* ... */ + upstreamFetch: payload => { + // Filter the spans based on the payload + return true + } + } + } +}) +``` + +> The `payload` object is the same as the one passed to the +> [`onFetch` hook](/docs/gateway/other-features/custom-plugins#onfetch). + +
+
+### Context Propagation
+
+By default, the plugin will
+[propagate the trace context](https://opentelemetry.io/docs/concepts/context-propagation/) between
+the incoming HTTP request and the outgoing HTTP requests.
+
+You may disable this by setting `inheritContext` or `propagateContext` to `false`:
+
+```ts filename="gateway.config.ts"
+import { defineConfig } from '@graphql-hive/gateway'
+
+export const gatewayConfig = defineConfig({
+  openTelemetry: {
+    exporters: [
+      /* ... */
+    ],
+    // Controls the propagation of the trace context between the incoming HTTP request and Hive Gateway
+    inheritContext: false,
+    // Controls the propagation of the trace context between Hive Gateway and the upstream HTTP requests
+    propagateContext: false
+  }
+})
+```
+
+### Troubleshooting
+
+The default behavior of the plugin is to log errors and warnings to the console.
+
+You can customize this behavior by changing the value of the
+[`OTEL_LOG_LEVEL`](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/)
+environment variable on your gateway process/runtime.
+
+In addition, you can use the `Stdout` exporter to log the traces to the console:
+
+```ts filename="gateway.config.ts"
+import { createStdoutExporter, defineConfig } from '@graphql-hive/gateway'
+
+export const gatewayConfig = defineConfig({
+  openTelemetry: {
+    exporters: [createStdoutExporter()]
+  }
+})
+```
+
+This will log the traces to the console, which can be useful for debugging and troubleshooting.
+
+## Prometheus Metrics
+
+[Prometheus](https://www.prometheus.io/) is a utility for producing, scraping, and storing metrics
+from services and utilities.
+
+You can use this feature of the gateway to expose and collect metrics from all phases of your
+GraphQL execution including internal query planning and outgoing HTTP requests.
+
+The metrics gathered are then exposed in a format that Prometheus can scrape on a regular basis on
+an HTTP endpoint (`/metrics` by default).
+
+### Usage Example
+
+Add its configuration to your `gateway.config.ts` file.
+
+```ts filename="gateway.config.ts"
+import { defineConfig } from '@graphql-hive/gateway'
+
+export const gatewayConfig = defineConfig({
+  prometheus: {
+    // Enable the metrics you want to expose
+    // The following represent the default config of the plugin.
+    metrics: {
+      graphql_gateway_fetch_duration: true,
+      graphql_gateway_subgraph_execute_duration: true,
+      graphql_gateway_subgraph_execute_errors: true,
+      graphql_envelop_deprecated_field: true,
+      graphql_envelop_request: true,
+      graphql_envelop_request_duration: true,
+      graphql_envelop_request_time_summary: true,
+      graphql_envelop_phase_parse: true,
+      graphql_envelop_phase_validate: true,
+      graphql_envelop_phase_context: true,
+      graphql_envelop_error_result: true,
+      graphql_envelop_phase_execute: true,
+      graphql_envelop_phase_subscribe: true,
+      graphql_envelop_schema_change: true,
+      graphql_yoga_http_duration: true
+    }
+  }
+})
+```
+
+You can now start your Hive Gateway and make some requests to it. The plugin will start collecting
+metrics, and you can access them by visiting the `/metrics` endpoint.
+
+In most cases, you'll need to set up a Prometheus server to scrape the metrics from your gateway;
+we recommend using the official
+[Prometheus Server](https://prometheus.io/docs/prometheus/latest/getting_started/) or tools like
+[Vector](https://vector.dev/docs/setup/installation/).
+
+### Grafana Dashboard
+
+If you are using Grafana to visualize your metrics, you can
+[import the published Grafana dashboard from Grafana's marketplace](https://grafana.com/grafana/dashboards/21777),
+or
+[you can use/import this dashboard JSON file directly](https://github.com/ardatan/graphql-mesh/blob/master/packages/plugins/prometheus/grafana.json)
+to easily visualize the metrics for your gateway.
+ +![image](https://github.com/user-attachments/assets/d72ef00b-90c2-4fb2-9bc4-851cec0eaf0b) + +For additional instructions, please refer to +[Import dashboards insturction in Grafana documentation](https://grafana.com/docs/grafana/latest/dashboards/build-dashboards/import-dashboards/). + +### Reported Metrics + +You will find the timing of each phase of the GraphQL execution. If you are not familiar with the +lifecycle of a GraphQL operation in the gateway, please refer to the +[Plugin Lifecycle page](/docs/gateway/other-features/custom-plugins#plugin-lifecycle). Each plugin +hook has a corresponding metric which tracks timings as +[histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) or +[summary](https://prometheus.io/docs/concepts/metric_types/#summary). You will also find some +[counters](https://prometheus.io/docs/concepts/metric_types/#counter) to track the number of +requests, errors, and other useful information. + +To enable a metric, set the corresponding option to `true` in the `metrics` option's object. You can +also provide a string to customize the metric name, or an object to provide more options (see +[`siimon/prom-client` documentation](https://github.com/siimon/prom-client#custom-metrics)). +Histogram metrics can be passed an array of numbers to configure buckets. + +
+ `graphql_yoga_http_duration` (default: **enabled**, type: **Histogram**) + +This metric tracks the duration of incoming (downstream) HTTP requests. It reports the time spent to +process each incoming request as a +[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). + +It is useful to track the responsiveness of your gateway. A spike in this metric could indicate a +performance issue and that further investigation is needed. + +Please note that this metric is not specific to GraphQL, it tracks all incoming HTTP requests. + +You can use labels to have a better understanding of the requests and group them together. A common +filter is to include only `statusCode` with `200` value and `method` with `POST` (the default method +for GraphQL requests, but it can also be `GET` depending on your client setup) value to get +execution time of successful GraphQL requests only. + +This metric includes some useful labels to help you identify requests and group them together. + +| Label | Description | +| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `method` | The HTTP method used to request the gateway endpoint.

Since GraphQL usually only uses `POST` requests, this can be used to filter out GraphiQL-related requests.

It can be any HTTP verb, including disallowed ones. Which means this metric can also be used to track malformed or malicious requests. | +| `statusCode` | The HTTP status code returned by the gateway.

You probably want to filter out non-`200` responses to have a view of the successful requests.

This can help you identify which requests are failing and why. Since GraphQL errors are returned as `200 OK` responses, this can be useful to track errors that are not related to the GraphQL, like malformed requests. | +| `operationName` | If available, the name of the GraphQL operation requested, otherwise `Anonymous`.

This can help you identify which operations are slow or failing.

We recommend you always provide an operation name to your queries and mutations to help performance analysis and bug tracking. | +| `operationType` | The type of the GraphQL operation requested. It can be one of `query`, `mutation`, or `subscription`.

This can help you differentiate read and write performance of the system. It can for example help understand cache impact. | +| `url` | The URL of the request. Useful to filter graphql endpoint metrics (`/graphql` by default). | + +
+ +
+ `graphql_gateway_fetch_duration` (default: **enabled**, type: **Histogram**) + +This metric tracks the duration of outgoing HTTP requests. It reports the time spent on each request +made using the `fetch` function provided by the gateway. It is reported as a +[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). + +This metric can provide insights into the network usage of your gateway. It does not only include +requests made to resolve GraphQL operation responses, but also include any other outgoing HTTP +requests made by the gateway or one of its plugins. It will for example include requests made to +fetch the supergraph schema from the configured Schema Registry. + +These metrics include some useful labels to help you identify requests and group them together. + +Since they can be heavy, `requestHeaders` and `responseHeaders` are disabled by default. You can +either set those options to `true` in the `label` configuration object to include all headers in the +label, but you can also provide a list of header names to include. + +| Label | Description | +| ----------------- | ------------------------------------------------------------------------------------------- | +| `url` | The URL of the upstream request. | +| `method` | The HTTP method of the upstream request. | +| `statusCode` | The status code of the upstream response. | +| `statusText` | The status text of the upstream response. | +| `requestHeaders` | Disabled by default. A JSON encoded object containing the headers of the upstream request. | +| `responseHeaders` | Disabled by default. A JSON encoded object containing the headers of the upstream response. | + +
+ +
+ `graphql_gateway_subgraph_execute_duration` (default: **enabled**, type: **Histogram**) + +This metric tracks the duration of subgraph execution. It reports the time spent on each subgraph +queries made to resolve incoming operations as a +[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). + +This metric can provide insights into how the time is spent to resolve queries. It can help you +identify bottlenecks in your subgraphs. + +| Label | Description | +| --------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `subgraphName` | The name of the targeted subgraph. | +| `operationType` | The type of the GraphQL operation executed by the subgraph. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation executed by the subgraph. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_gateway_subgraph_execute_errors` (default: **enabled**, type: **Counter**) + +This metric tracks the number of errors that occurred during the subgraph execution. It counts all +errors found in the response returned by the subgraph execution. It is exposed as a +[counter](https://prometheus.io/docs/concepts/metric_types/#counter). + +This metric can help you identify subgraphs that are failing to execute operations. It can help +identify issues with the subgraph itself or the communication between the gateway and the subgraph. + +| Label | Description | +| --------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `subgraphName` | The name of the targeted subgraph. | +| `operationType` | The type of the GraphQL operation executed by the subgraph. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation executed by the subgraph. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_phase_parse` (default: **enabled**, type: **Histogram**) + +This metric tracks the duration of the `parse` phase of the GraphQL execution. It reports the time +spent parsing the incoming GraphQL operation. It is reported as a +[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). + +Since you don't have control over the parsing phase, this metric is mostly useful to track potential +attacks. A spike in this metric could indicate someone is trying to send malicious operations to +your gateway. + +| Label | Description | +| --------------- | ------------------------------------------------------------------------------------------------------- | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_phase_validate` (default: **enabled**, type: **Histogram**) + +This metric tracks the duration of the `validate` phase of the GraphQL execution. It reports the +time spent validating the incoming GraphQL operation. It is reported as a +[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). + +| Label | Description | +| --------------- | ------------------------------------------------------------------------------------------------------- | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_phase_context` (default: **enabled**, type: **Histogram**) + +This metric tracks the duration of the `context` phase of the GraphQL execution. It reports the time +spent building the context object that will be passed to the executors. It is reported as a +[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). + +| Label | Description | +| --------------- | ------------------------------------------------------------------------------------------------------- | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_phase_execute` (default: **enabled**, type: **Histogram**) + +This metric tracks the duration of the `execute` phase of the GraphQL execution. It reports the time +spent actually resolving the response of the incoming operation. This includes the gathering of all +the data from all sources required to construct the final response. It is reported as a +[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). + +It is the metric that will give you the most insights into the performance of your gateway, since +this is where most of the work is done. + +| Label | Description | +| --------------- | ------------------------------------------------------------------------------------------------------- | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_phase_subscribe` (default: **enabled**, type: **Histogram**) + +This metric tracks the duration of the `subscribe` phase of the GraphQL execution. It reports the +time spent initiating a subscription (which doesn't include actually sending the first response). It +is reported as a [histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). + +It will notably include the time spent to setup upstream subscriptions with appropriate transport +for each source. + +| Label | Description | +| --------------- | ------------------------------------------------------------------------------------------------------- | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_request_duration` (default: **enabled**, type: **Histogram**) + +This metric tracks the duration of the complete GraphQL operation execution. It reports the time +spent in the GraphQL specific processing, excluding the HTTP-level processing. It is reported as a +[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). + +| Label | Description | +| --------------- | ------------------------------------------------------------------------------------------------------- | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_request_time_summary` (default: **enabled**, type: **Summary**) + +This metric provides a summary of the time spent on the GraphQL operation execution. It reports the +same timing than [`graphql_envelop_request_duration`](#graphql_envelop_request_duration) but as a +[summary](https://prometheus.io/docs/concepts/metric_types/#summary). + +| Label | Description | +| --------------- | ------------------------------------------------------------------------------------------------------- | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_error_result` (default: **enabled**, type: **Counter**) + +This metric tracks the number of errors that was returned by the GraphQL execution. + +Similarly to [`graphql_gateway_subgraph_execute_errors`](#graphql_gateway_subgraph_execute_errors), +it counts all errors found in the final response constructed by the gateway after it gathered all +subgraph responses, but it also includes errors from other GraphQL processing phases (parsing, +validation and context building). It is exposed as a +[counter](https://prometheus.io/docs/concepts/metric_types/#counter). + +Depending on the phase when the error occurred, some labels may be missing. For example, if the +error occurred during the context phase, only the `phase` label will be present. + +| Label | Description | +| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `path` | The path of the field that caused the error. It can be `undefined` if the error is not related to a given field. | +| `phase` | The phase of the GraphQL execution where the error occurred. It can be `parse`, `validate`, `context`, `execute` (for every operation types including subscriptions). | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_request` (default: **enabled**, type: **Counter**) + +This metric tracks the number of GraphQL operations executed. It counts all operations, either +failed or successful, including subscriptions. It is exposed as a +[counter](https://prometheus.io/docs/concepts/metric_types/#counter). + +It can differ from the number reported by +[`graphql_yoga_http_duration_sum`](#graphql_yoga_http_duration) because a single HTTP request can +contain multiple GraphQL operations if batching has been enabled. + +| Label | Description | +| --------------- | ------------------------------------------------------------------------------------------------------- | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_deprecated_field` (default: **enabled**, type: **Counter**) + +This metric tracks the number of deprecated fields used in the GraphQL operation. + +| Label | Description | +| --------------- | ------------------------------------------------------------------------------------------------------- | +| `fieldName` | The name of the deprecated field that has been used. | +| `typeName` | The name of the parent type of the deprecated field that has been used. | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | + +
+ +
+ `graphql_envelop_schema_change` (default: **enabled**, type: **Counter**) + +This metric tracks the number of schema changes that have occurred since the gateway started. When +polling is enabled, this will include the schema reloads. + +If you are using a plugin that modifies the schema on the fly, be aware that this metric will also +include updates made by those plugins. Which means that one schema update can actually trigger +multiple schema changes. + +
+ +
+ `graphql_envelop_execute_resolver` (default: **disabled**, type: **Histogram**) + + + Enabling resolvers level metrics will introduce significant overhead. It is recommended to enable + this metric only for debugging purposes. + + +This metric tracks the duration of each resolver execution. It reports the time spent only on +additional resolvers, not on fields that are resolved by a subgraph. It is up to the subgraph server +to implement resolver level metrics, the gateway can't remotely track their execution time. + +| Label | Description | +| --------------- | ------------------------------------------------------------------------------------------------------- | +| `operationType` | The type of the GraphQL operation requested. This can be one of `query`, `mutation`, or `subscription`. | +| `operationName` | The name of the GraphQL operation requested. It will be `Anonymous` if no `operationName` is found. | +| `fieldName` | The name of the field being resolved. | +| `typeName` | The name of the parent type of the field being resolved. | +| `returnType` | The name of the return type of the field being resolved. | + +**Filter resolvers to instrument** + +To mitigate the cost of instrumenting all resolvers, you can explicitly list the fields that should +be instrumented by providing a list of field names to the `instrumentResolvers` option. + +It is a list of strings in the form of `TypeName.fieldName`. For example, to instrument the `hello` +root query, you would use `Query.hello`. + +You can also use wildcards to instrument all the fields for a type. For example, to instrument all +root queries, you would use `Query.*`. + +
+ +### Troubleshooting + +You can observe and troubleshoot the metrics by visiting the `/metrics` endpoint of your gateway. +Run your gateway and execute a few GraphQL operations to produce some metrics. + +Then, use the following `curl` command will fetch the metrics from your gateway: + +```sh +curl -v http://localhost:4000/metrics +``` + +> Change `http://localhost:4000` to the actual URL of your running gateway. + +### Customizations + + + + + +By default, all operations are instrumented, including introspection queries. It is possible to +ignore introspection queries for all metrics prefixed by `graphql_envelop_` by setting the +`skipIntrospection` option to `true`. + + + + + +By providing a string, you can change the name of the metric. For example, to change the name of the +name of the `graphql_yoga_http_duration` metric to `http_request_duration`, you would use: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + prometheus: { + graphql_yoga_http_duration: 'http_request_duration' + } +}) +``` + + + + + +By providing an object, you can customize the metric configuration. These configuration objects +should be created using the provided factories for each metric type (`createCounter`, +`createHistogram`, `createSummary`). + + + By providing a custom configuration, the default configuration is completely overridden. This means + you need to provide all options, including the name and the labels. + +You can look at the source code of the plugin to see the default configuration for each metric to +use it as a base. + + + +Available options depend on the metric type, and full details about them can be found in the +[`siimon/prom-client` documentation](https://github.com/siimon/prom-client#custom-metric). 
+ +For example, you can customize the buckets of the `graphql_yoga_http_duration` histogram metric: + +```ts filename="gateway.config.ts" +import { defineConfig, createHistogram } from '@graphql-hive/gateway' +import { register as registry } from 'prom-client' + +export const gatewayConfig = defineConfig({ + prometheus: { + graphql_yoga_http_duration: createHistogram({ + registry, + histogram: { + name: 'graphql_yoga_http_duration', + help: 'Time spent on HTTP connection', + labels: ['method', 'statusCode', 'operationName', 'operationType'], + buckets: [0.1, 5, 15, 50, 100, 500], + } + fillLabelsFn(params, { request, response }) { + return { + method: request.method, + statusCode: response.status, + operationType: params.operationType, + operationName: params.operationName || 'Anonymous', + }; + } + }) + } +}) +``` + + + + + +You can customize the client’s registry by passing a custom registry to the `registry` option. + +```ts filename="gateway.config.ts" +import { Registry } from 'prom-client' +import { defineConfig } from '@graphql-hive/gateway' + +const myRegistry = new Registry() + +export const gatewayConfig = defineConfig({ + prometheus: { + registry: myRegistry + } +}) +``` + + + + + +## StatsD + +You can use `@graphql-mesh/plugin-statsd` plugin to collect and send metrics to Datadog's DogStatsD +and InfluxDB's Telegraf StatsD services. 
+
+```sh npm2yarn
+npm i @graphql-mesh/plugin-statsd hot-shots
+```
+
+Compatible with:
+
+- Datadog's DogStatsD server
+- InfluxDB's Telegraf StatsD server
+- Etsy's StatsD server
+
+Available metrics:
+
+- `graphql.operations.count` - the number of performed operations (including failures)
+- `graphql.operations.error.count` - the number of failed operations
+- `graphql.operations.latency` - a histogram of response times (in milliseconds)
+- `graphql.delegations.count` - the number of delegated operations to the sources
+- `graphql.delegations.error.count` - the number of failed delegated operations
+- `graphql.delegations.latency` - a histogram of delegated response times (in milliseconds)
+- `graphql.fetch.count` - the number of outgoing HTTP requests
+- `graphql.fetch.error.count` - the number of failed outgoing HTTP requests
+- `graphql.fetch.latency` - a histogram of outgoing HTTP response times (in milliseconds)
+
+> You can also customize the `graphql` prefix and add custom tags to the metrics.
+
+### Usage Example
+
+```ts filename="gateway.config.ts"
+import { StatsD } from 'hot-shots'
+import { defineConfig } from '@graphql-hive/gateway'
+import useStatsD from '@graphql-mesh/plugin-statsd'
+
+export const gatewayConfig = defineConfig({
+  plugins: pluginCtx => [
+    useStatsD({
+      ...pluginCtx,
+      // Configure `hot-shots` only if you need to. You don't need to pass this if you don't need to configure it.
+      client: new StatsD({
+        port: 8020
+      }),
+      // results in `my-graphql-gateway.operations.count` instead of `graphql.operations.count`
+      prefix: 'my-graphql-gateway',
+      // If you wish to disable introspection logging
+      skipIntrospection: true
+    })
+  ]
+})
+```
+
+## Sentry
+
+This plugin collects errors and performance tracing for your execution flow, and reports it to
+[Sentry](https://sentry.io).
+
+This is how it looks in Sentry for error tracking:
+
+![Example](https://raw.githubusercontent.com/n1ru4l/envelop/main/packages/plugins/sentry/error1.png)
+![Example](https://raw.githubusercontent.com/n1ru4l/envelop/main/packages/plugins/sentry/error2.png)
+
+> The operation name, document, variables are collected on errors, and the breadcrumbs that led to
+> the error. You can also add any custom values that you need.
+
+To get started with Sentry, you need to create a new project in Sentry and get the DSN:
+
+1. Start by creating an account and a project in https://sentry.io
+2. Follow the instructions to set up your Sentry instance in your application.
+3. Set up the Sentry global instance configuration.
+4. Set up the Envelop plugin.
+
+Then, install the following plugin in your project:
+
+```sh
+yarn add @sentry/node @sentry/tracing @envelop/sentry
+```
+
+### Usage Example
+
+```ts filename="gateway.config.ts"
+import { useSentry } from '@envelop/sentry'
+import { defineConfig } from '@graphql-hive/gateway'
+// do this only once in your entry file.
+import '@sentry/tracing'
+
+export const gatewayConfig = defineConfig({
+  plugins: () => [
+    useSentry({
+      includeRawResult: false, // set to `true` in order to include the execution result in the metadata collected
+      includeResolverArgs: false, // set to `true` in order to include the args passed to resolvers
+      includeExecuteVariables: false, // set to `true` in order to include the operation variables values
+      appendTags: args => {}, // if you wish to add custom "tags" to the Sentry transaction created per operation
+      configureScope: (args, scope) => {}, // if you wish to modify the Sentry scope
+      skip: executionArgs => {} // if you wish to skip specific operations
+    })
+  ]
+})
+```
+
+### Configuration
+
+- `startTransaction` (default: `true`) - Starts a new transaction for every GraphQL Operation. When
+  disabled, an already existing Transaction will be used.
+- `renameTransaction` (default: `false`) - Renames Transaction. +- `includeRawResult` (default: `false`) - Adds result of each resolver and operation to Span's data + (available under "result") +- `includeExecuteVariables` (default: `false`) - Adds operation's variables to a Scope (only in case + of errors) +- `appendTags` - See example above. Allow you to manipulate the tags reports on the Sentry + transaction. +- `configureScope` - See example above. Allow you to manipulate the tags reports on the Sentry + transaction. +- `transactionName` (default: operation name) - Produces a name of Transaction (only when + "renameTransaction" or "startTransaction" are enabled) and description of created Span. +- `traceparentData` (default: `{}`) - Adds tracing data to be sent to Sentry - this includes + traceId, parentId and more. +- `operationName` - Produces a "op" (operation) of created Span. +- `skip` (default: none) - Produces a "op" (operation) of created Span. +- `skipError` (default: ignored `GraphQLError`) - Indicates whether or not to skip Sentry exception + reporting for a given error. By default, this plugin skips all `GraphQLError` errors and does not + report it to Sentry. +- `eventIdKey` (default: `'sentryEventId'`) - The key in the error's extensions field used to expose + the generated Sentry event id. Set to `null` to disable. 
diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/_meta.ts b/packages/web/docs/src/pages/docs/gateway/other-features/_meta.ts new file mode 100644 index 000000000..b5bc9bb1b --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/_meta.ts @@ -0,0 +1,7 @@ +export default { + index: 'Overview', + performance: 'Performance/Cache', + security: 'Security', + testing: 'Testing & Debugging', + 'custom-plugins': 'Custom Plugins', +}; diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/custom-plugins.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/custom-plugins.mdx new file mode 100644 index 000000000..f9339b31b --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/custom-plugins.mdx @@ -0,0 +1,246 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Custom Plugins + +Hive Gateway uses +[GraphQL Yoga](https://the-guild.dev/graphql/yoga-server/docs/features/envelop-plugins), and it uses +[Envelop](https://the-guild.dev/graphql/envelop) plugin system which allows you to hook into the +different phases of the GraphQL execution to manipulate or track the entire workflow step-by-step. + + +You can both use Yoga or Envelop or Gateway plugins with your GraphQL Gateway. +But you should always opt-in for the Hive Gateway variant of the plugin, then Yoga then Envelop because each of them have more control over the execution. +For example, Yoga variant of the plugin leverage HTTP hooks, and Hive Gateway one can leverage more hooks and more control over the context. + +We'd recommend to check the features of the gateway first, and if you can't find what you are +looking for, then you can use this option on your own to add plugins from either GraphQL Yoga or +[Envelop's Plugin Hub](https://the-guild.dev/graphql/envelop/plugins). 
+ + + +You can provide those plugins as an array of objects, + +```ts filename="gateway.config.ts" {7} +import { useGraphQLJit } from '@envelop/graphql-jit' +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: () => [useGraphQLJit()] +}) +``` + +## Writing Plugins + +Sometimes you might want to build your own plugins. You can write your own gateway plugin and even +share it with other people by publishing it to `npm`. + + + A good entry-point for discovering how to write Gateway plugins is to look at the source code of + the existing plugins maintained by us. + + +The most hooks for Hive Gateway origin from the Envelop and Yoga plugin systems. +[Please refer to the Envelop Plugin Lifecycle documentation for more information.](https://the-guild.dev/graphql/envelop/docs/plugins/lifecycle) +and +[Yoga Plugin Lifecycle documentation](https://the-guild.dev/graphql/yoga-server/docs/features/envelop-plugins). +In addition, Yoga adds more HTTP specific hooks while Hive Gateway adds more related to the subgraph +execution. Gateway plugins also uses +[Explicit Resource Management](https://www.typescriptlang.org/docs/handbook/release-notes/typescript-5-2.html), +so all the resources are cleaned up gracefully when Hive Gateway is shut down. You can see +`Symbol.asyncDispose` below. + +### Plugin Lifecycle + +The following diagram shows the plugin lifecycle of Hive Gateway. For a detailed description of each +hook, please refer to the detail sections of each hook. Please check Yoga and Envelop documentation +for more information about the hooks except `onSubgraphExecute`. 
+
+```mermaid
+stateDiagram-v2
+  [*] --> onRequest
+
+  state onRequest_if_state <<choice>>
+
+  onRequest --> onRequest_if_state
+
+  onRequest_if_state --> onResponse: Is not a GraphQL Request
+  onRequest_if_state --> GraphQLRequest: Is GraphQL Request
+
+  GraphQLRequest: GraphQL Request
+
+  state GraphQLRequest {
+    [*] --> onRequestParse
+    onRequestParse --> onParams
+    onParams --> onParse
+    onParse --> onValidate
+    onValidate --> onContextBuilding
+    onContextBuilding --> onExecute
+    onContextBuilding --> onSubscribe
+
+    onExecute --> onSubgraphExecute
+    onSubscribe --> onSubgraphExecute
+
+    onSubgraphExecute --> onFetch
+    onFetch --> onSubgraphExecuteDone
+
+    onSubgraphExecuteDone --> onExecuteDone
+    onSubgraphExecuteDone --> onSubscribeDone
+
+    onExecuteDone --> onResultProcess
+    onSubscribeDone --> onResultProcess
+
+    onResultProcess --> [*]
+  }
+
+  GraphQLRequest --> onResponse
+  onResponse --> [*]
+
+```
+
+#### `onSubgraphExecute`
+
+This hook is invoked for ANY request that is sent to the subgraph.
+
+**Example actions in this hook:**
+
+- Manipulate the request
+- Add a custom auth header
+- Monitor the subgraph request
+
+You can see [Prometheus plugin](/docs/gateway/authorization-authentication) for an example of how to
+use this hook.
+
+#### `onFetch`
+
+This hook is invoked every time the gateway sends an outgoing HTTP request to an upstream service.
+
+**Example actions in this hook:**
+
+- Manipulate HTTP [`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) object
+- Manipulate HTTP [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) object
+- Change [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API) implementation
+- Add custom headers
+- Monitor the HTTP request
+
+**Example plugins:**
+
+- [Prometheus plugin](/docs/gateway/authorization-authentication)
+
+##### `API`
+
+- `supergraph`: The unified graph
+- `subgraph`: The subgraph
+- `subgraphName`: The name of the subgraph
+- `transportEntry`: The transport entry for the subgraph including the configuration for the
+  upstream communication, and details.
+- `executionRequest`: The execution request object that is sent to the subgraph, that includes
+  `document`, `variables`, `contextValue`, `operationName`, etc.
+- `setExecutionRequest`: A function to replace the execution request object that will be sent to the
+  subgraph.
+- `executor`: The executor function that will be used to execute the request to the subgraph, and it
+  takes the execution request object.
+- `setExecutor`: A function to replace the executor function.
+- `logger`: The logger instance for the specific request that includes the details of the request
+  and the response.
+
+#### `Symbol.asyncDispose` or `Symbol.dispose`
+
+In order to clean up resources when Hive Gateway is shut down, you can use `Symbol.asyncDispose` or
+`Symbol.dispose`.
+
+```ts
+export const useMyPlugin = () => {
+  return {
+    async [Symbol.asyncDispose]() {
+      // Clean up resources
+      stopConnection()
+    }
+  }
+}
+```
+
+You can learn more about
+[Explicit Resource Management](https://www.typescriptlang.org/docs/handbook/release-notes/typescript-5-2.html#using-declarations-and-explicit-resource-management)
+here.
+ +### Plugin Context + +Hive Gateway comes with ready-to-use `logger`, `fetch`, cache storage and etc that are shared across +different components. We'd highly recommend you to use those available context values instead of +creating your own for a specific plugin. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins({ + fetch, // WHATWG compatible Fetch implementation. + logger, // Logger instance used by Hive Gateway + cwd, // Current working directory + pubsub, // PubSub instance used by Hive Gateway + cache // Cache storage used by Hive Gateway + }) { + return [ + useMyPlugin({ logger, fetch }) // So the plugin can use the shared logger and fetch + ] + } +}) +``` + +## Example Additional Plugin (SOFA) + +GraphQL SOFA allows you to generate a fully documented REST API from your GraphQL schema. This is +useful when you need to support REST clients or when you want to expose a REST API to the public. + +- **Don’t choose between REST and GraphQL** +- Get most of the **benefits of GraphQL** on the backend and frontend, while using and **exposing + REST** +- **Support all your existing clients** with REST while improving your backend stack with GraphQL +- Create custom, perfectly client-aligned REST endpoints for your frontend simply by naming a route + and attaching a query +- In the other way around (REST to GraphQL) you won’t get the best of both worlds. Instead just less + powerful, harder-to-maintain server implementations with a some of the benefits of GraphQL. It can + be a good and fast start for a migration though. 
+- Fully **generated documentation** that is always up-to-date +- **GraphQL Subscriptions as Webhooks** + +## Installation + +```sh npm2yarn +npm i @graphql-yoga/plugin-sofa +``` + +## Quick Start + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' +import { useSOFA } from '@graphql-yoga/plugin-sofa' + +export const gatewayConfig = defineConfig({ + plugins: pluginCtx => [ + useSOFA({ + // The path where the REST API will be served + basePath: '/rest', + // The path where the Swagger UI will be served + swaggerUIEndpoint: '/rest/docs', + // OpenAPI Document details + info: { + title: 'My API', + description: 'My API Description', + version: '1.0.0' + } + }) + ] +}) +``` + +[Learn more about SOFA](https://the-guild.dev/graphql/sofa-api/docs) + + + You can consume the API using [feTS Client](https://the-guild.dev/openapi/fets/client/quick-start) + without any code generation! + diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/index.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/index.mdx new file mode 100644 index 000000000..7e634aa12 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/index.mdx @@ -0,0 +1,21 @@ +--- +searchable: false +description: Extend the capabilities of your GraphQL API such as rate limiting, caching, and more. +--- + +import { Callout } from '@theguild/components' + +# Extend Your Gateway + +Hive Gateway Runtime can be extended with feature flags and additional plugins. This allows you to +add more features to your Hive Gateway, such as rate limiting, caching, and more. This documentation +section covers most of the possible features that you can add with simple additions. + +Hive Gateway also allows you to hook into the different phases of the lifecycle from the client to +the upstream services by using custom plugins that you can +[learn more here](/docs/gateway/other-features/custom-plugins). 
+ + + We'd recommend to check the available features and plugins on the left sidebar, because we believe + some of them can be useful for your use case. + diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/_meta.ts b/packages/web/docs/src/pages/docs/gateway/other-features/performance/_meta.ts new file mode 100644 index 000000000..b8ada730d --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/_meta.ts @@ -0,0 +1,12 @@ +export default { + index: 'Overview', + 'response-caching': 'Response Caching', + compression: 'Compression in HTTP', + 'parsing-and-validation-caching': 'Parsing and Validation Caching', + 'execution-cancellation': 'Execution Cancellation', + 'upstream-cancellation': 'Upstream Cancellation', + 'http-caching': 'Upstream HTTP Caching', + 'deduplicate-request': 'Deduplicate HTTP Requests', + 'automatic-persisted-queries': 'Automatic Persisted Queries', + 'request-batching': 'Request Batching', +}; diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/automatic-persisted-queries.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/performance/automatic-persisted-queries.mdx new file mode 100644 index 000000000..dc571775c --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/automatic-persisted-queries.mdx @@ -0,0 +1,152 @@ +--- +description: + Automatic Persisted Queries is a protocol for reducing the overhead of sending the same GraphQL + documents to the server over and over again. +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Automatic Persisted Queries + +Automatic Persisted Queries is a protocol for reducing the overhead of sending the same GraphQL +documents to the server over and over again. Thus reducing client to server upstream traffic. 
+ +Since the upload speed can be the bottleneck from client to server, reducing the payload size can +improve the performance especially for huge GraphQL documents. + +The Automatic Persisted Queries plugin follows +[the APQ Specification of Apollo](https://github.com/apollographql/apollo-link-persisted-queries#apollo-engine). + + + Automatic Persisted Queries do not provide any security features, the benefit + of using them is to reduce network overhead. If you want to avoid executing + arbitrary GraphQL operations please use [Persisted + Operations](/docs/gateway/persisted-documents). + +Furthermore, an potential DDOS attacker could spam your GraphQL API with persisted operation +registrations, thus completly disable the advantages you would get from APQ and, furthermore, even +decrease the performance of your GraphQL API. + + + +## Installation + +## Quick Start + +Using Automatic Persisted Queries requires installing a separate package.e. + +```sh npm2yarn +npm i @graphql-yoga/plugin-apq +``` + +```ts filename="Automatic Persisted Queries Gateway setup" {3, 13} +import { defineConfig } from '@graphql-hive/gateway' +import { useAPQ } from '@graphql-yoga/plugin-apq' + +export const gatewayConfig = defineConfig({ + plugins: pluginCtx => [useAPQ()] +}) +``` + +Start your Hive Gatewayr and send a request for priming the cache (register the operation). + +```bash filename="Execute GraphQL Operation to prime the cache" +curl -X POST -H 'Content-Type: application/json' http://localhost:4000/graphql \ + -d '{"query":"{__typename}","extensions":{"persistedQuery":{"version":1,"sha256Hash":"ecf4edb46db40b5132295c0291d62fb65d6759a9eedfa4d5d612dd5ec54a6b38"}}}' +``` + +Then afterwards we can send the same payload again, but this time omit the `query` field. 
+
+```bash filename="Execute GraphQL Operation without query payload"
+curl -X POST -H 'Content-Type: application/json' http://localhost:4000/graphql \
+  -d '{"extensions":{"persistedQuery":{"version":1,"sha256Hash":"ecf4edb46db40b5132295c0291d62fb65d6759a9eedfa4d5d612dd5ec54a6b38"}}}'
+```
+
+Especially for big GraphQL document strings, the subsequent payload can be much smaller.
+
+## Client Usage
+
+GraphQL clients such as `Apollo Client` and `Urql` support Automatic Persisted Queries out of the
+box. Check the corresponding documentation for more information.
+
+- [Apollo Client](https://www.apollographql.com/docs/apollo-server/performance/apq/#step-2-enable-automatic-persisted-queries)
+- [Urql](https://formidable.com/open-source/urql/docs/advanced/persistence-and-uploads/)
+
+## Custom Store
+
+By default all the document strings are stored in memory with an LRU cache that holds up to 1000
+unique entries.
+
+A custom store implementation can be provided via the `store` option.
+
+```ts filename="Automatic Persisted Operations with a custom store" {16}
+import { useAPQ, type APQStore } from '@graphql-yoga/plugin-apq'
+
+// Note: this store grows infinitely, so it is not a good idea to use it in production.
+const store: APQStore = new Map()
+
+useAPQ({ store })
+```
+
+For external stores the `set` and `get` properties on the store can also return a `Promise`.
+
+
+ In production, it's recommended to capture the errors from any store that could stop functioning.
+ Instead of raising an error, returning undefined or null will allow the server to continue to
+ respond to requests if the store goes down.
+ +```ts filename="Automatic Persisted Operations with a redis store" {16} +import Keyv from 'keyv' + +const store = new Keyv('redis://user:pass@localhost:6379') + +useAPQ({ + store: { + async get(key) { + try { + return await store.get(key) + } catch (e) { + console.error(`Error while fetching the operation: ${key}`, e) + } + }, + async set(key, value) { + try { + return await store.set(key, value) + } catch (e) { + console.error(`Error while saving the operation: ${key}`, e) + } + } + } +}) +``` + + + +## Configure Error responses + +By default, responses for missing or mismatching query will include `extensions` property with HTTP +status code. + +For example: + +```ts {4} +{ + extensions: { + http: { + status: 404 + }, + code: 'PERSISTED_QUERY_NOT_FOUND' + } +} +``` + +You can force the error responses to use 200 OK status code: + +```ts filename="Automatic Persisted Operations with a custom store" {18-20} +useAPQ({ + responseConfig: { + forceStatusCodeOk: true + } +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/compression.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/performance/compression.mdx new file mode 100644 index 000000000..97fa6bfdd --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/compression.mdx @@ -0,0 +1,242 @@ +--- +searchable: false +--- + +# Compression in HTTP + +import { Callout } from '@theguild/components' + +Compression is a technique used to reduce the size of the data that is being transferred between the +server and the client. This is done by compressing the data before sending it and then decompressing +it on the client side. This can help reduce the amount of data that needs to be transferred, which +can improve the performance of your website. + +`Content-Encoding` and `Accept-Encoding` HTTP headers are used for this behavior. 
The +`Content-Encoding` header is used to specify the compression algorithm that was used to compress the +data, while the `Accept-Encoding` header is used to specify the compression algorithms that the +client supports. + +[Learn more about compression in HTTP](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding) + +Hive Gateway is capable of handling compressions in the following directions. We can selectively +enable or disable compression in each direction. + +```mermaid +graph TD + client --> gateway + gateway --> subgraphA + subgraphA --> gateway + gateway --> subgraphB + subgraphB --> gateway + gateway --> client +``` + + + Caution! +
+ Please take a look at the each direction, because even if they look similar, they have different + configurations and behaviors. While configuring the compression, make sure each side supports the + compression algorithm that the other side supports. Otherwise, it will end up with unexpected + errors. +
+ +## From the gateway to the client + +When the client sends a request to the gateway, it can specify the compression algorithm that it +supports using the `Accept-Encoding` header. Then the gateway can compress the response using the +specified algorithm before sending it back to the client with the `Content-Encoding` header, so that +the client can decompress it. + +```mermaid +graph TD + Client --> AddAcceptEncodingHeader + AddAcceptEncodingHeader --> Gateway + Gateway --> GWCompressor + GWCompressor --> AddContentEncodingHeader + AddContentEncodingHeader --> ClientDecompressor + ClientDecompressor --> Client +``` + +In the following example, we say that the client supports the `gzip` algorithm for compression. Then +the gateway compresses the response using the `gzip` algorithm before sending it back to the client. +So the client can decompress the response using the `gzip` algorithm. + +```ts +const res = await fetch('http://localhost:4000/graphql', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Accept-Encoding': 'gzip' + }, + body: JSON.stringify({ + query: ` + query { + hello + } + ` + }) +}) +console.assert(res.headers.get('Content-Encoding') === 'gzip', 'Response is compressed') +``` + +You need to configure the gateway for this feature. [See here](#configuration-on-gateway) + +## From the client to the gateway + +When the client sends a request to the gateway, it can compress the request using the specified +algorithm before sending it to the gateway. Then the gateway can decompress the request before +processing it. + +```mermaid +graph TD + Client --> ClientCompressor + ClientCompressor --> AddContentEncodingHeader + AddContentEncodingHeader --> GatewayDecompressor + GatewayDecompressor --> Gateway + Gateway --> Client +``` + +In the following example, we compress the request using the `gzip` algorithm before sending it to +the gateway. Then the gateway decompresses the request before processing it. 
+ +```ts +const res = await fetch('http://localhost:4000/graphql', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Content-Encoding': 'gzip' + }, + // Compress the request body + body: gzip( + JSON.stringify({ + query: ` + query { + hello + } + ` + }) + ) +}) +``` + +Here we are using the `gzip` function to compress the request body before sending it to the gateway. +We assume that the `gzip` function is a function that compresses the data using the gzip algorithm. + + + Caution! +
+ When this feature is not enabled as described below, the gateway won't be able to process the + client request body. Then it will fail with a `400 Bad Request` response. Because there is no way + to check if the server supports compression from the consumer side. Before configuring this + feature on the client side, make sure that the gateway supports the compression algorithm that the + client supports. +
+ +### Configuration on Gateway + +In your gateway configuration, you need to enable the compression for the gateway. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + contentEncoding: true +}) +``` + +Now gateway will respect the `Accept-Encoding` header from the client and compress the response +accordingly. + +## From the subgraph to the gateway + +When the subgraph sends a response to the gateway, it can compress the response using the specified +algorithm before sending it to the gateway. Then the gateway can decompress the response before +sending it to the client. + +It has the same principle as the previous example, but here the gateway is acting like a client +against a subgraph. + +```mermaid +graph TD + Gateway --> AddAcceptEncodingHeader + AddAcceptEncodingHeader --> Subgraph + Subgraph --> SubgraphCompressor + SubgraphCompressor --> AddContentEncodingHeader + AddContentEncodingHeader --> GatewayDecompressor + GatewayDecompressor --> Gateway +``` + + + You don't need to configure anything on the gateway side for this feature. Because the HTTP Client + implementation is based on [`@whatwg-node/fetch`](https://github.com/ardatan/whatwg-node) which + automatically sends the `Accept-Encoding` headers to the upstream APIs, and decompresses the + response based on the sent `Content-Encoding` headers. + + +### Configuration on Subgraph + +You should configure your subgraph to respect the `Accept-Encoding` header and compress the response +accordingly. 
For example if you have a GraphQL subgraph using +[GraphQL Yoga](https://the-guild.dev/graphql/yoga-server) server you can use `useContentEncoding` +plugin to enable this; + +```npm2yarn +npm i @whatwg-node/server +``` + +```ts +import { createYoga } from 'graphql-yoga' +import { useContentEncoding } from '@whatwg-node/server' + +const server = createYoga({ + schema, + plugins: [useContentEncoding()] +}) +``` + + + If you use [`feTS`](https://the-guild.dev/openapi/fets) or any other + [`@whatwg-node/server`](https://github.com/ardatan/whatwg-node) based server implementation in + your non GraphQL subgraph, you can still use the same plugin. + + +## From the gateway to the subgraph + +When the gateway sends a request to the subgraph, it can compress the request using the specified +algorithm before sending it to the subgraph. Then the subgraph can decompress the request before +processing it. + +```mermaid +graph TD + Gateway --> GatewayCompressor + GatewayCompressor --> SubgraphDecompressor + SubgraphDecompressor --> Subgraph + Subgraph --> Gateway +``` + +In this case, gateway will always send a compressed request to the defined subgraphs with +`Content-Encoding` header. + + + Caution! +
+ If the subgraph does not support compression, the gateway will receive an unexpected error. So + make sure that the subgraph supports the compression algorithm that the gateway supports. Because + there is no way to check the subgraph's support for compression since the gateway is acting like a + client here. +
+ +### Configuration on Gateway + +In your gateway configuration, you need to enable the compression. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + contentEncoding: { + subgraphs: ['*'] // Enable compression for all subgraphs + // subgraphs: ['subgraph1', 'subgraph2'] // Enable compression for specific subgraphs + } +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/deduplicate-request.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/performance/deduplicate-request.mdx new file mode 100644 index 000000000..a0adf5cf7 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/deduplicate-request.mdx @@ -0,0 +1,29 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Deduplicate HTTP Requests + +Most of the time, your Hive Gateway will receive multiple requests for the same data. This can +happen when multiple clients request the same data, or when a single client sends multiple requests +for the same data. + +To reduce the load on your downstream services, you can deduplicate the requests. This means that if +multiple requests for the same data are received at the same time, only one request will be sent to +the downstream service, and the responses will be shared among the clients. + +You can enable request deduplication by using the `useRequestDeduplication` plugin. 
+ +```ts filename="gateway.config.ts" +import { defineConfig, useRequestDeduplication } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: pluginCtx => [ + useRequestDeduplication({ + ...pluginCtx + }) + ] +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/execution-cancellation.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/performance/execution-cancellation.mdx new file mode 100644 index 000000000..26387b13d --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/execution-cancellation.mdx @@ -0,0 +1,44 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Execution Cancellation + +In the real world, a lot of HTTP requests are dropped or canceled. This can happen due to a flakey +internet connection, navigation to a new view or page within a web or native app or the user simply +closing the app. In this case, the server can stop processing the request and save resources. + +That is why Hive Gateway comes with the support for canceling the GraphQL execution upon request +cancellation. + +So any extra calls to the services can be stopped and the resources can be saved. + +## Enable Execution Cancellation + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + executionCancellation: true +}) +``` + +That is all you need to do to enable execution cancellation in your Hive Gateway. Theoretically, you +can enable this and immediately benefit from it without making any other adjustments within your +GraphQL schema implementation. + +If you want to understand how it works and how you can adjust your resolvers to properly cancel +pending promises (e.g. database reads or HTTP requests), you can continue with the next section. + + + You can also use the same plugin with your subgraph configuration if you use GraphQL Yoga. 
[See + more](https://the-guild.dev/graphql/yoga-server/docs/features/execution-cancellation) + + + + Also this can be combined with [Upstream + Cancellation](/docs/gateway/other-features/performance/upstream-cancellation) that does this + cancellation not only in the execution level but also on the upstream HTTP level. + diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/http-caching.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/performance/http-caching.mdx new file mode 100644 index 000000000..2a2df7fdd --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/http-caching.mdx @@ -0,0 +1,31 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# HTTP caching + +Most of JavaScript runtimes except browsers don't respect HTTP caching headers by default. But you +can enable HTTP caching in your Hive Gateway by using the HTTP caching plugin. This allows you to +cache the responses when possible, and reduce the server load. It uses +[`http-cache-semantics`](https://www.npmjs.com/package/http-cache-semantics) under the hood. + + + You need to set your cache storage in your gateway configuration to enable response caching. See + [Cache Storage](/docs/gateway/other-features/performance#providing-cache-storage) for more + information. 
+ + +```ts filename="gateway.config.ts" +import { defineConfig, useHttpCache } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + cache // Your cache storage here, + plugins: pluginCtx => [ + useHttpCache({ + ...pluginCtx + }) + ] +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/index.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/performance/index.mdx new file mode 100644 index 000000000..ea67cf28b --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/index.mdx @@ -0,0 +1,141 @@ +--- +description: + Performance is a critical aspect of any application. Hive Gateway Runtime provides a set of + features to help you optimize the performance of your gateway. +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Performance & Caching + +Hive Gateway provides a set of features to help you optimize the performance of your GraphQL +gateway. Hive Gateway provides a shared caching storage that can be used across plugins, transforms +and subgraph execution. + +## Providing Cache Storage + +In order to enable features that need a storage to keep the data, you need to define a cache storage +implementation, and pass it to the `gatewayConfig`. + +You can choose the best-fit cache storage for your use case. + +### LocalForage + +LocalForage is a library that improves the existing storage mechanism in the browser by using +`IndexedDB`, `WebSQL` and `localStorage`, [see more](https://github.com/localForage/localForage). + +Even if it is known as a browser storage, Hive Gateway provides you as a platform-agnostic cache +storage to leverage the well-known storage APIs that are available in most JavaScript environments. 
+ +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + cache: { + type: 'localforage', + // All of the following options are listed with default values, you don't need to provide them + driver: ['WEBSQL', 'INDEXEDDB', 'LOCALSTORAGE'] // The order of the drivers to use + name: 'HiveGateway', // The name of the database + version: 1.0, // The version of the database + size: 4980736, // The size of the database + storeName: 'keyvaluepairs', // The name of the store + description: 'Cache storage for Hive Gateway', // The description of the database + } + responseCaching: { + session: () => null, + } +}) +``` + +### Redis + +Redis is an in-memory data structure store, used as a database, cache, and message broker. You can +use Redis as a cache storage for your Hive Gateway. + +The Redis cache currently only works in Node.js environments. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + cache: { + type: 'redis', + host: 'localhost', // The host of the Redis server + port: 6379, // The port of the Redis server + password: undefined, // The password of the Redis server + lazyConnect: true, // If true, the connection will be established when the first operation is executed + // or + url: 'redis://localhost:6379' // The URL of the Redis server + }, + responseCaching: { + session: () => null + } +}) +``` + +### Cloudflare Workers KV + +Cloudflare Workers KV is a distributed, eventually consistent key-value store available in the +Cloudflare Workers runtime. You can use Cloudflare Workers KV as a cache storage for your Hive +Gateway. [Learn more about KV](https://developers.cloudflare.com/workers/runtime-apis/kv/) + + + This is only available for Cloudflare Workers runtime. 
If you want to learn how to deploy your + Hive Gateway to Cloudflare Workers, you can check the [deployment + documentation](/docs/gateway/deployment/serverless/cloudflare-workers). + + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + cache: { + type: 'cfw-kv', + namespace: 'HiveGateway' // The namespace of the KV + } + responseCaching: { + session: () => null + } +}) +``` + +## Custom Cache Storage + +You can also implement your own cache storage by extending the `CacheStorage` class. It needs to +match `KeyValueCache` interface from `@graphql-hive/gateway`. + +```ts filename="my-cache-storage.ts" +import { LRUCache } from 'lru-cache' +import { KeyValueCache } from '@graphql-hive/gateway' + +export class MyKeyValueCache implements KeyValueCache { + // Your cache implementation here + private cache = new LRUCache() + + // Get the value of the key + async get(key: string) { + return this.cache.get(key) + } + + // Set the key with the value and optional options + async set(key: string, value: V, options?: { ttl?: number }) { + this.cache.set(key, value, options?.ttl) + } + + // Delete the key from the cache + async delete(key: string) { + this.cache.del(key) + } + + // Get all keys that match the given prefix + async getKeysByPrefix(prefix: string) { + return Array.from(this.cache.keys()).filter(key => key.startsWith(prefix)) + } + + // This should be implemented if you want to clear the cache on shutdown + [Symbol.asyncDispose]() { + this.cache.reset() + } +} +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/parsing-and-validation-caching.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/performance/parsing-and-validation-caching.mdx new file mode 100644 index 000000000..2d1feee16 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/parsing-and-validation-caching.mdx @@ -0,0 +1,59 @@ +--- 
+searchable: false +--- + +import { Callout } from '@theguild/components' + +# Parsing & Validation Caching + +By default, Hive Gateway maintains a parsing and validation cache. If requests contain documents +that have been executed before, they will not be parsed and validated again. + +Using the parser cache can improve performance up to ~60%, and using the validation cache up to ~50% +(based on benchmarks). + +This behavior is built-in and can be optionally disabled using the `parserAndValidationCache` +options: + +```ts filename="gateway.config.ts" {4,5} +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + // disable parse and validate caching + parserAndValidationCache: false +}) +``` + + + Due to the restrictions of the GraphQL execution flow, we cannot use an async cache storage as we + use in other caching plugins. So the cache storage for the parser and validation cache should be + synchronous, and it is an in-memory store by default. + + +Furthermore, you can provide your own cache store to both of these plugins by implementing the +following interface: + +```ts +interface CacheStore { + get(key: string): T | undefined + set(key: string, value: T): void +} +``` + +You can then pass your cache store to the `parserAndValidationCache` options: + +```ts filename="gateway.config.ts" {9-13} +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + parserAndValidationCache: { + documentCache: documentCacheStore as CacheStore, + errorCache: errorCacheStore as CacheStore, + validationCache: validationCacheStore as CacheStore + } +}) +``` + + + We'd recommend to keep the default behavior as-is since it's already optimized for performance. 
+ diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/request-batching.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/performance/request-batching.mdx new file mode 100644 index 000000000..766b55cf2 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/request-batching.mdx @@ -0,0 +1,64 @@ +--- +description: + Request Batching is the process of taking a group of requests, combining them into one, and making + a single request with the same data that all of the other queries would have made. +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Request Batching + +Batching is the process of taking a group of requests, combining them into one, and making a single +request with the same data that all of the other queries would have made. This is a way to reduce +the number of requests that your application makes to the server. + +The Batching functionality is described via the +[`Batching RFC`](https://github.com/graphql/graphql-over-http/blob/main/rfcs/Batching.md). + +## Enable Batching + +Batching is disabled by default, but you can enable it by setting the `batching` option to `true`: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + batching: true +}) +``` + +```bash filename="Execute batched operation" +curl -X POST -H 'Content-Type: application/json' http://localhost:4000/graphql \ + -d '[{"query": "{ hee: __typename }"}, {"query": "{ ho: __typename }"}]' +``` + +## Limit the amount of Batched Requests + +By default up to 10 GraphQL requests are allowed within a single HTTP request. If this amount is +exceeded an error will be raised. 
You can customize this option by passing an object to the +`batching` configuration option: + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + batching: { + limit: 2 + } +}) +``` + +```bash filename="Execute batched operation (exceed limit)" +curl -X POST -H 'Content-Type: application/json' -i http://localhost:4000/graphql \ + -d '[{"query": "{ hee: __typename }"}, {"query": "{ ho: __typename }"}, {"query": "{ holla: __typename }"}]' +``` + +When exceeding the batching limit the HTTP status code will be +[`413` (Payload Too Large)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/413). + +```json filename="GraphQL Error response" +{ + "errors": [{ "message": "Batching is limited to 2 operations per request." }] +} +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/response-caching.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/performance/response-caching.mdx new file mode 100644 index 000000000..0d23ef695 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/response-caching.mdx @@ -0,0 +1,415 @@ +--- +searchable: false +--- + +import { Code } from 'nextra/components' +import { Callout, Tabs } from '@theguild/components' + +# Response Caching + +**_GraphQL Response Caching_** is a feature that allows you to cache the response of a GraphQL +query. This is useful when you want to reduce the number of requests to your sources. For example, +if you have a GraphQL query that fetches a list of products, you can cache the response of this +query so that the next time the same query is made, the response is fetched from the cache instead +of making a request to the underlying sources. + + + You need to set your cache storage in your gateway configuration to enable response caching. See + [Cache Storage](/docs/gateway/other-features/performance#providing-cache-storage) for more + information. 
+
+
+## How to use?
+
+```ts filename="gateway.config.ts"
+import { defineConfig } from '@graphql-hive/gateway'
+
+export const gatewayConfig = defineConfig({
+  responseCaching: {
+    // global cache
+    session: () => null
+  }
+})
+```
+
+After starting the server we can execute a GraphQL Query operation that selects the `Query.slow`
+field.
+
+```sh filename="Execute slow GraphQL Query Operation with cUrl"
+curl -X POST http://localhost:4000/graphql \
+  -H 'Content-Type: application/json' \
+  -d '{ "query" : "{ slow }" }' \
+  -w '\nTotal time : %{time_total}'
+```
+
+The output will look similar to the following:
+
+```sh filename="Initial Request time"
+{"data":{"slow":"I am slow."}}
+Total time:5.026632
+```
+
+After executing the same curl statement a second time, the duration is significantly lower.
+
+```sh filename="Cached Request time"
+{"data":{"slow":"I am slow."}}
+Total time:0.007571%
+```
+
+## Configuration
+
+The behavior of this plugin can be configured by passing an object at the gateway level or by using
+`@cacheControl` directive at schema definition level.
+
+The `@cacheControl` directive can be used to give subgraphs control over the cache behavior
+for the fields and types they are defining. You can add this directive during composition.
+
+- [See here for Federation to learn more about the `@cacheControl` directive](https://www.apollographql.com/docs/federation/performance/caching/#using-cache-hints-with-subgraphs)
+- [See here for GraphQL Mesh to learn more about the `@cacheControl` in subgraph definitions](/docs/gateway/other-features/performance/response-caching)
+
+## Session based caching
+
+If your GraphQL API returns specific data depending on the viewer's session, you can use the session
+option to cache the response per session. Usually, the session is determined by an HTTP header, e.g.
+a user id within the encoded access token.
+
+
+ Don't forget to validate the authentication token before using it as a session key.
+ Allowing cached responses to be returned with unverified tokens can lead to data leaks.
+
+Please see the [Authorization/Authentication](/docs/gateway/authorization-authentication) section
+for more information.
+
+
+
+```ts filename="gateway.config.ts"
+import { defineConfig } from '@graphql-hive/gateway'
+
+export const gatewayConfig = defineConfig({
+  responseCaching: {
+    // cache based on the authentication header
+    session: request => request.headers.get('authentication')
+  }
+})
+```
+
+### Enforce session based caching
+
+In some cases, a type or a field should only be cached if there is a session. For this, you can use
+the `scope` to indicate that the cache should only be used if a session is present.
+
+This can be useful to prevent exposure of sensitive data to unauthorized users.
+
+Declaratively using @cacheControl
+]}>
+
+ ```ts filename="Response Cache configuration with scope"
+ defineConfig({
+  responseCaching: {
+    // cache based on the authentication header
+    session: request => request.headers.get('authentication')
+
+    // You can use configuration object to define the scope
+    scopePerSchemaCoordinate: {
+      'Query.me': 'PRIVATE', // on a field
+      User: 'PRIVATE', // or a type
+    }}
+  })
+ ```
+
+
+
+ ```graphql filename="GraphQL schema with @cacheControl directive"
+ type Query {
+   me: User @cacheControl(scope: PRIVATE) # on a field
+ }
+
+ type User @cacheControl(scope: PRIVATE) { # on a type
+   id: ID!
+   name: String!
+ }
+ ```
+
+
+
+
+### Group based caching
+
+The `session` option can also be used to cache responses for a group of users. This can be
+useful if data exposed by your API is the same for a group of users sharing the same characteristic.
+
+For example, if data returned by an API is always the same for every user with the same role, you
+can use the role as a session key.
+
+```ts filename="Role based caching"
+defineConfig({
+  responseCaching: {
+    session: request => request.headers.get('x-user-role')
+  }
+})
+```
+
+## Time to Live (TTL)
+
+By default, all cached operations are stored indefinitely. This can lead to stale data being
+returned.
+
+It is possible to give cached operations a time to live. Either globally, based on
+[schema coordinates](https://github.com/graphql/graphql-wg/blob/main/rfcs/SchemaCoordinates.md) or
+object types.
+
+If a query operation result contains multiple objects of the same or different types, the lowest TTL
+is picked.
+
+Declaratively using @cacheControl
+]}>
+
+ ```ts filename="Response Cache configuration with TTL"
+ defineConfig({
+  responseCaching: {
+    session: () => null,
+    // by default cache all operations for 2 seconds
+    ttl: 2_000,
+    ttlPerSchemaCoordinate: {
+      // only cache query operations containing User for 500ms
+      User: 500,
+      // cache operations selecting Query.lazy for 10 seconds
+      'Query.lazy': 10_000
+    }
+  }
+ })
+ ```
+
+
+ ```graphql filename="GraphQL schema with @cacheControl directive"
+ type Query {
+   me: User @cacheControl(maxAge: 500) # only cache query operations containing User for 500ms
+ }
+
+ type User @cacheControl(maxAge: 10000) { # cache operations selecting Query.lazy for 10 seconds
+   id: ID!
+   name: String!
+ }
+ ```
+
+
+
+
+## Control which responses are cached
+
+By default, all successful operations influence the cache.
+
+You can globally disable caching using the `enabled` option. This can be useful for local
+development.
+
+```ts filename="Disabling caching"
+defineConfig({
+  responseCaching: {
+    session: request => null,
+    enabled: () => process.env.NODE_ENV !== 'development'
+  }
+})
+```
+
+### Ignore a specific request
+
+You can entirely disable caching (both caching and invalidation) for a specific request by using the
+`enabled` option.
+ +Be aware that this means that if the response contains entities that are part of other cached +responses, those responses will not be invalidated. + +```ts filename="Disabling caching for a specific request" +defineConfig({ + responseCaching: { + session: request => null, + enabled: request => request.headers.get('x-no-cache') !== 'true' + } +}) +``` + +### Disable caching of specific types and fields + +Some types or fields contain data that should never be cached. For example, a field that returns +the current time. + +You can disable caching for specific types or fields by setting its TTL to `0`. This will prevent +the response from being cached, but will not prevent cache invalidation for other entities contained +in the response. + +Declaratively using @cacheControl +]}> + + ```ts filename="Disabling caching for a specific type" +defineConfig({ + responseCaching: { + session: request => null, + ttlPerSchemaCoordinate: { + // for an entire type + Date: 0, + // for a specific field + 'Query.time': 0 + } + } +}) + ``` + + + ```graphql filename="GraphQL schema with @cacheControl directive" + type Query { + time: String @cacheControl(maxAge: 0) # on a field + } + + type Date @cacheControl(maxAge: 0) { # on a type + day: Int! + month: Int! + year: Int! + } + ``` + + + + +## Invalidations via Mutation + +When executing a mutation operation the cached query results that contain type entities within the +Mutation result will automatically be invalidated. + +```graphql filename="GraphQL mutation operation" +mutation UpdateUser { + updateUser(id: 1, newName: "John") { + __typename + id + name + } +} +``` + +```json filename="GraphQL operation execution result" +{ + "data": { + "updateUser": { + "__typename": "User", + "id": "1", + "name": "John" + } + } +} +``` + +For the given GraphQL operation and execution result, all cached query results that contain the type +`User` with the id `1` will be invalidated. 
+ +This behavior can be disabled by setting the `invalidateViaMutation` option to `false`. + +```ts filename="Disabling mutation invalidation" +defineConfig({ + responseCaching: { + session: request => null, + invalidateViaMutation: false + } +}) +``` + +### Entity identity + +Automatic cache invalidation works by inspecting the result of each query and mutation operation, +and keeping track of the entities that are part of it. + +By default, the identity of entities is based on the `id` field. + +You can customize the identity field by setting the `idFields` option. + +```ts filename="Customizing entity identity" +defineConfig({ + responseCaching: { + session: request => null, + idFields: ['id', 'email'] + } +}) +``` + +```graphql +type User { + email: String! + username: String! + profile: Profile! +} + +type Profile { + id: ID! + bio: String + picture: String +} +``` + +In this example, `User`'s identity will be based on the `email` field, and `Profile`'s identity will be +based on the `id` field. + +## HTTP Caching + +Response Caching plugin sends `ETag` headers to the client, and respects `If-None-Match` headers in +the HTTP request. + +If the client sends an `If-None-Match` header with the same value as the `ETag` header, the server +will respond with a `304 Not Modified` status code without any content, which allows you to reduce +the server load. + +Most of the browsers and some HTTP clients support this behavior, so you can use it to improve the +performance of your frontend application. + +[Learn more about `ETag` and `If-None-Match` headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag). 
+ +### Example with `curl` + +First we send a request to the GraphQL server, and we can see that the response contains the headers + +```bash filename="Get ETag and Last-Modified headers" +curl -H 'Content-Type: application/json' \ + "http://localhost:4000/graphql?query={me{id name}}" -v +``` + +Then the server will respond a data something the following with the `ETag` and `Last-Modified` +headers: + +- `ETag` is the key that is used to identify the cached response. +- `Last-Modified` is used to determine if the cached response is still valid. + +```bash filename="Response with ETag and Last-Modified headers" +> GET /graphql?query={me{id,name}} HTTP/1.1 +> Host: localhost:4000 +> User-Agent: curl/7.68.0 +> Accept: application/json +> +* Mark bundle as not supporting multiuse +< HTTP/1.1 200 OK +< access-control-allow-origin: * +< content-length: 130 +< content-type: application/json; charset=utf-8 +< etag: 2c0ebfe7b2b0273029f2fa23a99d213b56f4838756b3ef7b323c04de1e836be3 +< last-modified: Wed Feb 15 2023 15:23:55 GMT+0300 (GMT+03:00) +< Date: Wed, 15 Feb 2023 12:23:55 GMT +< Connection: keep-alive +< Keep-Alive: timeout=5 +< + +{"data":{"me":{"id":"1","name":"Bob"}}} +``` + +In the next calls, we can use the `ETag` header as the `If-None-Match` header together with +`Last-Modified` header as `If-Modified-Since` to check if the cached response is still valid. + +```bash filename="Use the headers to check if the cached response is still valid" +curl -H "Accept: application/json" \ + -H "If-None-Match: 2c0ebfe7b2b0273029f2fa23a99d213b56f4838756b3ef7b323c04de1e836be3" \ + -H "If-Modified-Since: Wed Feb 15 2023 15:23:55 GMT" \ + "http://localhost:4000/graphql?query=\{me\{id,name\}\}" -v +``` + +Then the server will return `304: Not Modified` status code with no content. 
diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/performance/upstream-cancellation.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/performance/upstream-cancellation.mdx new file mode 100644 index 000000000..9d73dbc3a --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/performance/upstream-cancellation.mdx @@ -0,0 +1,25 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Upstream Cancellation + +This feature enables you to cancel the upstream HTTP requests when the client cancels the downstream +GraphQL request. This can be useful when you want to save resources on your server and the services. + +There is also +[Execution Cancellation](/docs/gateway/other-features/performance/execution-cancellation) that stops +the execution, but it doesn't stop ongoing HTTP requests. This separately allows you to stop the +HTTP requests by hooking into [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API). 
+ +## Enable Upstream Cancellation + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + upstreamCancellation: true +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/_meta.ts b/packages/web/docs/src/pages/docs/gateway/other-features/security/_meta.ts new file mode 100644 index 000000000..843830168 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/_meta.ts @@ -0,0 +1,18 @@ +export default { + index: 'Overview', + 'error-masking': 'Error Masking', + cors: 'CORS', + 'csrf-prevention': 'CSRF Prevention', + 'rate-limiting': 'Rate Limiting', + 'disable-introspection': 'Introspection', + https: 'HTTPS', + 'hmac-signature': 'HMAC Signature', + 'audit-documents': 'Audit Documents', + 'block-field-suggestions': 'Block Field Suggestions', + 'character-limit': 'Character Limit', + 'cost-limit': 'Cost Limit', + 'max-aliases': 'Max Aliases', + 'max-depth': 'Max Depth', + 'max-directives': 'Max Directives', + 'max-tokens': 'Max Tokens', +}; diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/audit-documents.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/audit-documents.mdx new file mode 100644 index 000000000..b27854c9a --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/audit-documents.mdx @@ -0,0 +1,30 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Audit Documents + +Audit your documents for useful metrics such as query depth, directive count and alias count. This +is useful if you want to introduce security rules on your GraphQL server (e.g. via graphql-armor) +and need to figure out the values for doing so. + +## How to use? 
+ +You need to install the `@graphql-inspector/cli` package: + +```sh npm2yarn +npm i @graphql-inspector/cli +``` + +Then you can point documents to the `audit` command: + +```sh +graphql-inspector audit DOCUMENTS +``` + + + Learn more about this command, and [GraphQL Inspector + here](https://the-guild.dev/graphql/inspector/docs/commands/audit#audit---usage) + diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/block-field-suggestions.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/block-field-suggestions.mdx new file mode 100644 index 000000000..6868c059d --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/block-field-suggestions.mdx @@ -0,0 +1,39 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Block Field Suggestions + +This is a feature that allows you to prevent **returning field suggestions** and **leaking your +schema** to unauthorized actors provided by +[GraphQL Armor](https://escape.tech/graphql-armor/docs/plugins/block-field-suggestions/) + +In production, this can lead to Schema leak even if the introspection is disabled. + +## How to use? 
+ +Install the plugin: + +```sh npm2yarn +npm install @escape.tech/graphql-armor-block-field-suggestions +``` + +Then, add it to your plugins: + +```ts filename="gateway.config.ts" +import { blockFieldSuggestionsPlugin } from '@escape.tech/graphql-armor-block-field-suggestions' +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: () => [ + blockFieldSuggestionsPlugin({ + // Toggle the plugin | Default: true + enabled: true, + // Mask applied to the error message | default: '[Suggestion hidden]' + mask: '[Suggestion hidden]' + }) + ] +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/character-limit.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/character-limit.mdx new file mode 100644 index 000000000..59188a07d --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/character-limit.mdx @@ -0,0 +1,42 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Character Limit + +**Limit** number of **characters** in a GraphQL query document. + +This plugin helps you to prevent **DoS attacks** by hard-limiting the size of the query document. + +[Provided by GraphQL Armor](https://escape.tech/graphql-armor/docs/plugins/character-limit) + +## How to use? + +Install the plugin: + +```sh npm2yarn +npm install @escape.tech/graphql-armor-character-limit +``` + +Then, add it to your plugins: + +```ts filename="gateway.config.ts" +import { characterLimitPlugin } from '@escape.tech/graphql-armor-character-limit' +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: () => [ + characterLimitPlugin({ + maxLength: 15000 // Number of characters allowed | Default: 15000 + }) + ] +}) +``` + + + - We recommend using the [Max Tokens Plugin](/docs/gateway/other-features/security/max-tokens) + instead of this one. 
- This plugin does not limit the size of input variables. - This plugin + relies on a parser plugin to work and accesses the query document through the context. + diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/cors.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/cors.mdx new file mode 100644 index 000000000..f98dc131e --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/cors.mdx @@ -0,0 +1,114 @@ +--- +description: + CORS stands for Cross Origin Resource Sharing. In a nutshell, as a security measure, browsers + aren't allowed to access resources outside their own domain. +searchable: false +--- + +import { Callout } from '@theguild/components' + +# CORS + +CORS stands for Cross Origin Resource Sharing. In a nutshell, as a security measure, browsers aren't +allowed to access resources outside their own domain. + +If your API and web apps are deployed to different domains (or subdomains), you'll have to worry +about CORS. For example, if your web client is deployed to example.com but your Hive Gateway is +api.example.com. For security reasons your browser will not allow XHR requests (like the kind that +the GraphQL client makes) to a domain other than the one currently in the browser's address bar. + +To deal with this you have two options: + +_1. Avoid CORS by proxying your requests_ e.g. If you set up a proxy or redirect to forward requests +from `example.com/api/*` to `api.example.com`, you avoid CORS issues altogether. + +_2. Configure the gateway to send back CORS headers_ Hive Gateway comes with CORS support out of the +box - CORS can be configured when creating the server either by passing a CORSOptions object, or a +builder function that returns the CORSOptions object. 
+ +```ts +export type CORSOptions = + | { + origin?: string[] | string + methods?: string[] + allowedHeaders?: string[] + exposedHeaders?: string[] + credentials?: boolean + maxAge?: number + } + | false +``` + +### Example configuration using CORSOptions + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + cors: { + origin: 'http://localhost:4000', + credentials: true, + allowedHeaders: ['X-Custom-Header'], + methods: ['POST'] + } +}) +``` + +This will return the following headers: + +``` +Access-Control-Allow-Origin: 'http://localhost:4000' +Access-Control-Allow-Credentials: true +Access-Control-Allow-Methods: POST +Access-Control-Allow-Headers: X-Custom-Header +``` + +### Example configuration using builder function + +You can also pass a function to the cors property, that takes your request and constructs the +options + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + cors: request => { + return { + origin: 'http://localhost:4000', + credentials: true, + allowedHeaders: ['X-Custom-Header'], + methods: ['POST'] + } + } +}) +``` + +This will return the same headers as the previous example, but take the `origin` of the request, and +return it in the `Access-Control-Allow-Origin` header. + +## Default CORS setting + +By default, Hive Gateway will return `Access-Control-Allow-Origin: *` when preflight requests are +made. + +This means cross origin requests from browsers work out of the box - however it may be appropriate +to lock to a specific domain before deploying to production. 
+ +## Disabling CORS + +You can disable CORS on your gateway by simply passing `false` as the cors property + +**For example:** + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + cors: false +}) +``` + + + If you disable CORS, you may run into issues with your web client not being able to access the + Hive Gateway. This is because of the browser's security policy. + diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/cost-limit.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/cost-limit.mdx new file mode 100644 index 000000000..436a7691b --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/cost-limit.mdx @@ -0,0 +1,52 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Cost Limit + +**Limit** the **complexity** of a GraphQL document by using +[GraphQL Armor](https://escape.tech/graphql-armor/docs/plugins/cost-limit) + +## How to use? + +```sh npm2yarn +npm install @escape.tech/graphql-armor-cost-limit +``` + +Then, add it to your plugins: + +```ts filename="gateway.config.ts" +import {defineConfig} from '@graphql-hive/gateway'; +import { costLimitPlugin } from '@escape.tech/graphql-armor-cost-limit'; + +export const gatewayConfig = defineConfig({ + plugins: () => [ + costLimitPlugin({ + // Toogle the plugin | default: true + enabled: true, + // Cost allowed for the query | default: 5000 + maxCost: 5000, + // Static cost of an object | default: 2 + objectCost: 2, + // Static cost of a field | default: 1 + scalarCost: 1, + // Factorial applied to nested operator | default: 1.5 + depthCostFactor: 1.5, + // Ignore the cost of introspection queries | default: true + ignoreIntrospection: true, + // Do you want to propagate the rejection to the client? 
| default: true + propagateOnRejection: true, + + /* Advanced options (use here on your own risk) */ + + // Callbacks that are ran whenever a Query is accepted + onAccept: [] + + // Callbacks that are ran whenever a Query is rejected + onReject: [] + }), + ] +}); +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/csrf-prevention.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/csrf-prevention.mdx new file mode 100644 index 000000000..8258cb4bc --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/csrf-prevention.mdx @@ -0,0 +1,32 @@ +--- +description: + If you have CORS enabled, almost all requests coming from the browser will have a preflight + request - however, some requests are deemed "simple" and don't make a preflight. +searchable: false +--- + +import { Callout } from '@theguild/components' + +# CSRF Prevention + +If you have CORS enabled, almost all requests coming from the browser will have a preflight +request - however, some requests are deemed "simple" and don't make a preflight. One example of such +a request is a good ol' GET request without any headers, this request can be marked as "simple" and +have preflight CORS checks skipped therefore skipping the CORS check. + +This attack can be mitigated by saying: "all GET requests must have a custom header set". This would +force all clients to manipulate the headers of GET requests, marking them as "\_not-\_simple" and +therefore always executing a preflight request. Apollo does this when using the +[`csrfPrevention = true` option](https://www.apollographql.com/docs/apollo-server/api/apollo-server/#csrfprevention). 
+ +## Quick Start + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + csrfPrevention: { + requestHeaders: ['x-gateway-csrf'] + } +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/disable-introspection.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/disable-introspection.mdx new file mode 100644 index 000000000..20150eb4b --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/disable-introspection.mdx @@ -0,0 +1,87 @@ +--- +description: + Learn how to disable GraphQL schema introspection and the "did you mean x" suggestion feature. +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Introspection + +A powerful feature of GraphQL is schema introspection. This feature is used by GraphiQL for +exploring the schema and also by tooling such as +[GraphQL Code Generator](https://the-guild.dev/graphql/codegen) for generating type-safe +client/frontend code. + +GraphQL schema introspection is also a feature that allows clients to ask a GraphQL server what +GraphQL features it supports (e.g. defer/stream or subscriptions). + +## Disabling Introspection + + + If your goal is to prevent unknown actors from reverse-engineering your GraphQL + schema and executing arbitrary operations, it is highly recommended to use + persisted operations. + +[Learn more about persisted operations.](/docs/gateway/persisted-documents) + + + +## Disable Introspection based on the GraphQL Request + +Sometimes you want to allow introspection for certain users. You can access the `Request` object +and determine based on that whether introspection should be enabled or not. E.g. you can check the +headers. 
+ +```ts filename="gateway.config.ts" {7} +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + disableIntrospection: { + isDisabled: request => request.headers.get('x-allow-introspection') !== 'secret-access-key' + } +}) +``` + +## Disabling Field Suggestions + + + The [`graphql-armor`](https://github.com/Escape-Technologies/graphql-armor) plugin is a security layer that help you protect your GraphQL server from malicious queries. + It allows you to configure various security features such as character limit or blocking field suggestions. + For more information about `graphql-armor` features, you can refer to the [documentation for the plugin](/docs/gateway/other-features/security/block-field-suggestions). + +Here is an example of how to use `graphql-armor` to disable introspection and block field +suggestions. + + + +When executing invalid GraphQL operation the GraphQL engine will try to construct smart suggestions +that hint typos in the executed GraphQL document. This can be considered a security issue, as it can +leak information about the GraphQL schema, even if introspection is disabled. + + + If your goal is to avoid unknown actors from reverse-engineering your GraphQL + schema and executing arbitrary operations, it is highly recommended to use + persisted operations. + +[Learn more about persisted operations.](/docs/gateway/persisted-documents) + + + +Disabling the "did you mean x" suggestion feature can be achieved via the +`blockFieldSuggestionsPlugin` from +[`graphql-armor`](https://github.com/Escape-Technologies/graphql-armor). 
+ +```sh npm2yarn +npm i @escape.tech/graphql-armor-block-field-suggestions +``` + +```ts filename="Disabling the 'did you mean x' suggestion feature with a plugin" {2, 7} +import { blockFieldSuggestionsPlugin } from '@escape.tech/graphql-armor-block-field-suggestions' +import { defineConfig, useDisableIntrospection } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + disableIntrospection: true, + plugins: pluginCtx => [blockFieldSuggestionsPlugin()] +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/error-masking.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/error-masking.mdx new file mode 100644 index 000000000..4b02a8780 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/error-masking.mdx @@ -0,0 +1,18 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Error Masking + +Hive Gateway automatically masks unexpected errors and prevents leaking sensitive information to +clients. + +Unexpected errors can be caused by failed connections to remote services such as databases or HTTP +APIs. Nobody external needs to know that your database server is not reachable. Exposing such +information to the outside world can make you vulnerable for targeted attacks. + +In order to build secure applications, it is crucial to understand this concept. 
+ +{/* `TODO: Consider how to explain subgraph errors etc` */} diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/hmac-signature.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/hmac-signature.mdx new file mode 100644 index 000000000..79fd68e54 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/hmac-signature.mdx @@ -0,0 +1,226 @@ +--- +searchable: false +--- + +import { Callout, Steps } from '@theguild/components' + +# HMAC Signature + +HMAC (Hash-based Message Authentication Code) is a mechanism for calculating a message +authentication code involving a hash function in combination with a secret key. It can be used to +verify the integrity and authenticity of a message. + +This Gateway plugin implements HMAC signing for requests between Hive Gateway and the upstream +GraphQL subgraph. It also provides HMAC verification plugin for the incoming requests in the +subgraph services. + +By activating this plugin, you can ensure that the requests send to GraphQL subgraphs is trusted and +signed by the Hive Gateway. In case of any missing signature, tampering or unauthorized access, the +subgraph services will reject the request. + +```mermaid +flowchart LR + 1(["End-user"]) --->|"query { comments { id author { id name }}}"| 2 + + subgraph Hive Gateway + 2["Engine"] + 3["useHmacUpstreamSignature"] + 4["Query Planner"] + 2--->3 + 2--->4 + end + + subgraph "Users Subgraph" + 5["useHmacSignatureValidation"] + 4--->|"query { _entities(representations: $r) { ... on User { name }} }\nextensions: { hmac-signature: AbC123 }"|5 + end + + subgraph "Comments Subgraph" + 6["useHmacSignatureValidation"] + + 4--->|"query { comments { id author { id }} }\nextensions: { hmac-signature: AbC123 }"|6 + end +``` + +## How to use? + + + +### Step 1: Gather your secret key + +Before you start, you need to have a secret key that will be used for HMAC signing and verification. 
+ +The secret key should be a random, opaque string, that will be shared between the Hive Gateway and +the subgraphs validating the HMAC signature. + +### Step 2: HMAC Signing in Hive Gateway + +```ts +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + hmacUpstreamSignature: { + secret: myHMACSecret // see step 1 for the secret key + } + // ... +}) +``` + +Now, every GraphQL request sent to the upstream GraphQL subgraphs will be signed with the HMAC and +the `extensions` of the upstream request will contain the HMAC signature. + +To configure the subgraph verification of the HMAC signature, please follow the next step. + +### Step 3: HMAC Verification in Subgraph services + +The next step is to perform a verification over the sent HMAC signature in the subgraph services: + +#### With GraphQL Yoga + +If you are using Yoga, you can use the gateway package: + +```sh npm2yarn +npm i @graphql-hive/gateway +``` + +```ts +import { createYoga } from 'graphql-yoga' +import { useHmacSignatureValidation } from '@graphql-hive/gateway' + +const myYogaSubgraphServer = createYoga({ + // ... + plugins: [ + useHmacSignatureValidation({ + secret: myHMACSecret // see step 1 for the secret key + }) + // other Yoga plugins + // ... + ] +}) +``` + + + Make sure to add `useHmacSignatureValidation` first in the plugins list in your Yoga + configuration. This will ensure the request is verified before processing the other plugins. + + +#### With Apollo Server + +If you are using Apollo-Server for your subgraph services, you can implement a custom plugin to +verify the HMAC signature. You can still use the utilities from the `@graphql-hive/gateway` library +to serialize the request parameters and verify the HMAC signature in a stable way. 
+ +Start by installing the `@graphql-hive/gateway` package: + +```sh npm2yarn +npm i @graphql-hive/gateway +``` + +Now, configure your Apollo Server with the HMAC verification plugin: + +```ts filename="apollo-subgraph.ts" +import { createHmac } from 'crypto' +import { ApolloServer, ApolloServerPlugin } from '@apollo/server' +import { defaultParamsSerializer } from '@graphql-hive/gateway' + +const verifyHmacPlugin = { + async requestDidStart({ request, contextValue }) { + const signature = request.extensions?.['hmac-signature'] + + if (!signature) { + throw new Error('HMAC signature is missing') + } + + const serializedParams = defaultParamsSerializer({ + query: request.query, + variables: request.variables + }) + + const incomingReqSignature = createHmac('sha256', HMAC_SIGNING_SECRET) + .update(serializedParams) + .digest('base64') + + if (incomingReqSignature !== signature) { + throw new Error('HMAC signature is invalid') + } + } +} satisfies ApolloServerPlugin<{}> + +const server = new ApolloServer({ + plugins: [ + verifyHmacPlugin + // ... other Apollo plugins + ] +}) +``` + +#### Other GraphQL servers + +To implement HMAC verification in other GraphQL servers, you should implement a HMAC verification +using the following specification: + +- The incoming request to your server will contain an `extensions` field with a `hmac-signature` + key. +- The `hmac-signature` value is a `base64` encoded HMAC signature of the request parameters, using + the SHA-256 algorithm. +- The request parameters should be serialized in a stable way, so the signature can be verified + correctly. I should consist of the GraphQL `query` and `variables`: + + ```json + { + "query": "query { comments { id author { id name } } ", + "variables": {} + } + ``` + +- The HMAC signature should be calculated using the secret key shared between the Hive Gateway and + the subgraph services. 
+ +Here's an example of an incoming subgraph request with the HMAC signature: + +```json +{ + "query": "query { comments { id author { id name } } ", + "variables": {}, + "extensions": { + "hmac-signature": "AbC123" + } +} +``` + +> The signature is produced by the Hive Gateway using the shared secret key, and the serialized +> request (query and variables). + + + +## Configuration + +### `hmacUpstreamSignature` + +The `hmacUpstreamSignature` flag allows you to customize the HMAC signing behavior in the Hive +Gateway: + +- `secret`: The secret key used for HMAC signing and verification. It should be a random, opaque + string shared between the Hive Gateway and the subgraph services. +- `extensionName` (optional, default: `hmac-signature`): The key name used in the `extensions` field + of the outgoing requests to store the HMAC signature. +- `serializeExecutionRequest` - A function to customize the way the incoming request is serialized + before calculating the HMAC signature. By default, it uses + [stable JSON hash](https://www.npmjs.com/package/json-stable-stringify) of the GraphQL `query` and + `variables`. +- `shouldSign`: A function to determine if the request should be signed or not. By default, it signs + all requests. + +### `useHmacSignatureValidation` + +The `useHmacSignatureValidation` plugin allow you to customize the HMAC verification behavior in the +subgraph. + +- `secret`: The secret key used for HMAC signing and verification. It should be a random, opaque + string shared between the Hive Gateway and the subgraph services. +- `extensionName` (optional, default: `hmac-signature`): The key name used in the `extensions` field + of the outgoing requests to store the HMAC signature. +- `serializeParams` - A function to customize the way the incoming request is serialized before + calculating the HMAC signature. By default, it uses + [stable JSON hash](https://www.npmjs.com/package/json-stable-stringify) of the GraphQL `query` and + `variables`. 
diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/https.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/https.mdx new file mode 100644 index 000000000..6a87f7d70 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/https.mdx @@ -0,0 +1,82 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Secure HTTP Connection (HTTPS) + +HTTPS (HyperText Transfer Protocol Secure) is an encrypted version of the HTTP protocol. It uses TLS +to encrypt all communication between a client and a server. + +There are different ways to secure the connection. It is either between the client and Hive Gateway +or between Hive Gateway and the subgraphs. + +This is only available on Node environment. + +## Subgraph - Gateway Connection + +Hive Gateway acts as a client to the subgraphs, so if you want to have a secure connection in +between Hive Gateway and the subgraphs, you can use HTTPs. + +### Using Self-Signed Certificates + +But if you use self-signed certificates, Hive Gateway may not verify the certificate by default, +then you need to provide those certificates to Hive Gateway. + +#### Environment Variables + +Hive Gateway's default HTTP Client implementation respects Node's environment variables related to +this; + +- `NODE_TLS_REJECT_UNAUTHORIZED` - If set to `0`, it disables the rejection of self-signed + certificates. +- `NODE_EXTRA_CA_CERTS` - If set, it provides a path to a CA certificate file. 
+ +Below is an example of how to use self-signed certificates with Hive Gateway; + +```sh +NODE_EXTRA_CA_CERTS=/path/to/ca.crt hive-gateway supergraph +``` + +#### Configuration File + +The only way to configure HTTPS programmatically is to use a custom agent like below; + +```ts +import { readFileSync } from 'fs' +import { Agent } from 'https' +import { defineConfig } from '@graphql-hive/gateway' + +const agent = new Agent({ + ca: readFileSync('/path/to/ca.crt') + // or + // rejectUnauthorized: false +}) + +export const gatewayConfig = defineConfig({ + // This function will be called for each URL to determine if the custom agent should be used + customAgent: ({ url }) => + url === 'https://example.com' + ? agent + : undefined +}) +``` + +## Client - Gateway Connection + +You can also configure Hive Gateway to listen on HTTPS. You can provide the path to the key and +certificate files in the configuration file; + +```ts +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + sslCredentials: { + key_file_name: 'path/to/key.pem', + cert_file_name: 'path/to/cert.pem', + passphrase: 'passphrase' + }, + port: 443 +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/index.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/index.mdx new file mode 100644 index 000000000..1dceca366 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/index.mdx @@ -0,0 +1,204 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Secure your Gateway + +Building a secure GraphQL API is hard by design because of the “Graph” nature of GraphQL. Libraries +for making different aspects of a GraphQL server secure have existed since the early days of +GraphQL. However, combining those tools is often cumbersome and results in messy code. With Hive Gateway, +securing your server is now as easy as pie! 
Hive Gateway has a built-in security layer that helps +you to secure your Gateway. But in most cases, this security layer is not enough or needs to be +customized for your use case. + +## Protection against Malicious GraphQL Operations + +One of the main benefits of GraphQL is that data can be requested individually. However, this also +introduces the possibility for attackers to send operations with deeply nested selection sets that +could block other requests being processed. Fortunately, infinite loops are not possible by design +as a fragment cannot self-reference itself. Unfortunately, that still does not prevent possible +attackers from sending selection sets that are hundreds of levels deep. + +The following schema: + +```graphql +type Query { + author(id: ID!): Author! +} +type Author { + id: ID! + posts: [Post!]! +} +type Post { + id: ID! + author: Author! +} +``` + +Would allow sending and executing queries such as: + +```graphql +query { + author(id: 42) { + posts { + author { + posts { + author { + posts { + author { + posts { + author { + posts { + author { + posts { + author { + id + } + } + } + } + } + } + } + } + } + } + } + } + } +} +``` + +There are a few measures you can use for preventing the execution of such operations. + +A handy tool for analyzing your existing GraphQL operations and finding the best defaults for your +use case is [`graphql-inspector`](https://www.the-guild.dev/graphql/inspector). + +Learn more about `graphql-inspector audit` +[here](https://the-guild.dev/graphql/inspector/docs/essentials/audit). + +### Persisted Operations + +Instead of allowing any arbitrary GraphQL operation in production usage, we could use an allow-list +of operations that the server is allowed to execute. We can collect such a list by scanning the +code-base and extracting the list of operations. 
+ +[Learn more about how to configure persisted operations](/docs/gateway/persisted-documents) + +### Reject Malicious Operation Documents + +Parsing a GraphQL operation document is a very expensive and compute-intensive operation that +blocks the JavaScript event loop. If an attacker sends a very complex operation document with slight +variations over and over again he can easily degrade the performance of the GraphQL server. Because +of the variations simply having an LRU cache for parsed operation documents is not enough. + +A potential solution is to limit the maximal allowed count of tokens within a GraphQL document. + +In computer science, lexical analysis, lexing or tokenization is the process of converting a +sequence of characters into a sequence of lexical tokens. + +E.g. given the following GraphQL operation. + +```graphql +query { + me { + id + user + } +} +``` + +The tokens are `query`, `{`, `me`, `{`, `id`, `user`, `}` and `}`. Having a total count of 8 tokens. + +The optimal maximum token count for your application depends on the complexity of the GraphQL +operations and documents. Usually 800-2000 tokens seems like a sane default. + +You can limit the amount of allowed tokens per operation and automatically abort any further +processing of a GraphQL operation document that exceeds the limit with the +[Max Tokens Plugin](/docs/gateway/other-features/security/max-tokens). + +Also this can be combined with +[Character Limit](/docs/gateway/other-features/security/character-limit) that limits the number of +characters in the query and mutation documents. + +### Gateway -> Subgraph HMAC Signing + +When you have multiple subgraphs and a gateway, you might want to ensure that the requests to the +subgraphs are trusted and signed by the gateway. This is handy in case you want to ensure that no +other entity can execute +requests to the subgraph. 
+ +In case of any missing signature, tampering or unauthorized access, the subgraph services will +reject the request. + +We recommend using HMAC signing for requests between the Hive Gateway and the upstream in cases +where authentication plugins are involved, in order to ensure the gateway is the only entity that +can execute requests to the subgraph on behalf of the end-users. + +You can use the [HMAC Signature plugin](/docs/gateway/other-features/security/hmac-signature) to +perform requesting signing and verification. + +### Query Depth Limiting + +Sometimes persisted operations cannot be used. E.g. if you are building an API that is used by third +party users. However, we can still apply some protection. + +[Learn more about Max Depth plugin here](/docs/gateway/other-features/security/max-depth) + +This can prevent malicious API users executing GraphQL operations with deeply nested selection sets. +You need to tweak the maximum depth an operation selection set is allowed to have based on your +schema and needs, as it could vary between users. + +### Rate Limiting + +Rate-limiting is a common practice with APIs, and with GraphQL it gets more complicated because of +the flexibility of the graph and the ability to choose what fields to query. + +The [Rate Limit Plugin](/docs/gateway/other-features/security/rate-limiting) can be used to limit +access to resources by field level. + +## Prevent unwanted HTTP requests + +### CORS (Cross-Origin Resource Sharing) (enabled by default) + +Cross-Origin Resource Sharing (CORS) is a mechanism that uses additional HTTP headers to tell +browsers to give a web application running at one origin, access to selected resources from a +different origin. A web application makes a cross-origin HTTP request when it requests a resource +that has a different origin (domain, protocol, or port) from its own. 
+ +[Learn more about CORS plugin here](/docs/gateway/other-features/security/cors) + +### CSRF Prevention + +Cross-Site Request Forgery (CSRF) is an attack that forces an end user to execute unwanted actions +on a web application in which they're currently authenticated. + +[Learn more about CSRF Prevention plugin here](/docs/gateway/other-features/security/csrf-prevention) + +## Prevent Leaking Sensitive Information + +### Disable Schema Introspection + +If your schema includes sensitive information that you want to hide from the outside world, +disabling the schema introspection is a possible solution. The +[Disable Introspection Plugin](/docs/gateway/other-features/security/disable-introspection) plugin +solves that in a single line of code! + +### Block Field Suggestions + +Field suggestions are a feature of GraphQL that allows the client to request the server to suggest +fields that can be queried. This is a very useful feature for developers using GraphQL, but it can +also be used by attackers to discover the schema of the server. + +You can block field suggestions with the +[Block Field Suggestions Plugin](/docs/gateway/other-features/security/block-field-suggestions). + +### Error Masking (enabled by default) + +In most GraphQL servers any thrown error or rejected promise will result in the original error +leaking to the outside world. Some frameworks have custom logic for catching unexpected errors and +mapping them to an unexpected error instead. In Hive Gateway, this is enabled by default. 
+ +[Learn more about Error Masking](/docs/gateway/other-features/security/error-masking) diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/max-aliases.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/max-aliases.mdx new file mode 100644 index 000000000..e22b56803 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/max-aliases.mdx @@ -0,0 +1,51 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Max Aliases + +**Limit** the number of **aliases** in a GraphQL document. + +It is used to prevent **DOS attack** or **heap overflow**. + +[Provided by GraphQL Armor](https://escape.tech/graphql-armor/docs/plugins/max-aliases) + +## How to use? + +Install the plugin: + +```sh npm2yarn +npm install @escape.tech/graphql-armor-max-aliases +``` + +Then, add it to your plugins: + +```ts filename="gateway.config.ts" +import { maxAliasesPlugin } from '@escape.tech/graphql-armor-max-aliases' +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: () => [ + maxAliasesPlugin({ + // Toggle the plugin | Default: true + enabled: true, + // Number of aliases allowed | Default: 5 + maxAliases: 5, + // Do you want to propagate the rejection to the client? 
| default: true + propagateOnRejection: true, + // List of queries that are allowed to bypass the plugin + allowList: [], + + /* Advanced options (use here on your own risk) */ + + // Callbacks that are ran whenever a Query is accepted + onAccept: [], + + // Callbacks that are ran whenever a Query is rejected + onReject: [] + }) + ] +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/max-depth.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/max-depth.mdx new file mode 100644 index 000000000..be98850f3 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/max-depth.mdx @@ -0,0 +1,55 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Max Depth + +**Limit** the **depth** of a GraphQL document. + +It is used to prevent too large queries that could lead to overfetching or **DOS attack**. + +[Provided by GraphQL Armor](https://escape.tech/graphql-armor/docs/plugins/max-depth) + +## How to use? + +Install the plugin: + +```sh npm2yarn +npm install @escape.tech/graphql-armor-max-depth +``` + +Then, add it to your plugins: + +```ts filename="gateway.config.ts" +import { maxDepthPlugin } from '@escape.tech/graphql-armor-max-depth' +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: () => [ + maxDepthPlugin({ + // Toggle the plugin | Default: true + enabled: true, + // Depth threshold | default: 6 + n: 6, + // Do you want to propagate the rejection to the client? 
| default: true + propagateOnRejection: true, + // List of queries that are allowed to bypass the plugin + allowList: [], + + /* Advanced options (use here on your own risk) */ + + // Callbacks that are ran whenever a Query is accepted + onAccept: [], + + // Callbacks that are ran whenever a Query is rejected + onReject: [] + }) + ] +}) +``` + +## References + +- https://github.com/advisories/GHSA-mh3m-8c74-74xh diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/max-directives.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/max-directives.mdx new file mode 100644 index 000000000..b54cd76a8 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/max-directives.mdx @@ -0,0 +1,56 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Max Directives + +**Limit** the number of **directives** in a GraphQL document. + +It is used to prevent **DOS attack**, **heap overflow** or **server overloading**. + +[Provided by GraphQL Armor](https://escape.tech/graphql-armor/docs/plugins/max-directives) + +## How to use? + +Install the plugin: + +```sh npm2yarn +npm install @escape.tech/graphql-armor-max-directives +``` + +Then, add it to your plugins: + +```ts filename="gateway.config.ts" +import { maxDirectivesPlugin } from '@escape.tech/graphql-armor-max-directives' +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: () => [ + maxDirectivesPlugin({ + // Toggle the plugin | Default: true + enabled: true, + // Number of directives allowed | Default: 10 + n: 10, + // Do you want to propagate the rejection to the client? 
| default: true + propagateOnRejection: true, + // List of queries that are allowed to bypass the plugin + allowList: [], + + /* Advanced options (use here on your own risk) */ + + // Callbacks that are ran whenever a Query is accepted + onAccept: [], + + // Callbacks that are ran whenever a Query is rejected + onReject: [] + }) + ] +}) +``` + +## References + +- https://github.com/graphql-java/graphql-java/issues/2888 +- https://github.com/graphql-java/graphql-java/pull/2892 diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/max-tokens.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/max-tokens.mdx new file mode 100644 index 000000000..b1ef48384 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/max-tokens.mdx @@ -0,0 +1,59 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Max Tokens + +**Limit** the number of **tokens** in a GraphQL document. + +It is used to prevent **DOS attack**, **heap overflow** or **server overloading**. + +The token limit is often limited by the graphql parser, but this is not always the case and would +lead to a fatal heap overflow. + +[Provided by GraphQL Armor](https://escape.tech/graphql-armor/docs/plugins/max-tokens) + +## How to use? + +Install the plugin: + +```sh npm2yarn +npm install @escape.tech/graphql-armor-max-tokens +``` + +Then, add it to your plugins: + +```ts filename="gateway.config.ts" +import { maxTokensPlugin } from '@escape.tech/graphql-armor-max-tokens' +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: () => [ + maxTokensPlugin({ + // Toggle the plugin | Default: true + enabled: true, + // Number of tokens allowed | Default: 5000 + n: 5000, + // Do you want to propagate the rejection to the client? 
| default: true + propagateOnRejection: true, + // List of queries that are allowed to bypass the plugin + allowList: [], + + /* Advanced options (use here on your own risk) */ + + // Callbacks that are ran whenever a Query is accepted + onAccept: [], + + // Callbacks that are ran whenever a Query is rejected + onReject: [] + }) + ] +}) +``` + +## References + +- https://github.com/graphql/graphql-js/pull/3684 +- https://github.com/advisories/GHSA-p4qx-6w5p-4rj2 diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/security/rate-limiting.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/security/rate-limiting.mdx new file mode 100644 index 000000000..ecc1d67b2 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/security/rate-limiting.mdx @@ -0,0 +1,93 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Rate Limiting + +Rate limiting is a technique for reducing server load by limiting the number of requests that can be +made to a subgraph. + +You can use rate limiting feature in order to limit the rate of calling queries and mutations. + +## Programmatic Configuration + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + rateLimiting: { + rules: [ + { + type: 'Query', + field: 'foo', + max: 5, // requests limit for a time period + ttl: 5000, // time period + // You can use any value from the context + identifier: '{context.headers.authorization}' + } + ] + } +}) +``` + +# Rate Limiting through `@rateLimit` directive + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + rateLimiting: true +}) +``` + +This approach follows the pattern of +[`graphql-rate-limit`](https://github.com/teamplanes/graphql-rate-limit/blob/master/README.md#field-config). 
+ +To set rate limit hints in your subgraph schema, the `@rateLimit` directive definition should be +included in the subgraph schema: + +```graphql +# Import the directive for Federation +extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/federation/v2.3", import: ["@composeDirective"]) + @link(url: "https://the-guild.dev/graphql/mesh/spec/v1.0", import: ["@rateLimit"]) + @composeDirective(name: "@rateLimit") + +directive @rateLimit( + max: Int + window: String + message: String + identityArgs: [String] + arrayLengthField: String +) on FIELD_DEFINITION +``` + +Then in the subgraph schema, you can use the `@rateLimit` directive to set rate limit hints on +fields: + +```graphql +type Query { + getItems: [Item] @rateLimit(window: "1s", max: 5, message: "You are doing that too often.") +} +``` + +## Field Configuration + +- `window`: Specify a time interval window that the max number of requests can access the field. We + use Zeit's ms to parse the window arg, docs here. + +- `max`: Define the max number of calls to the given field per window. + +- `identityArgs`: If you wanted to limit the requests to a field per id, per user, use identityArgs + to define how the request should be identified. For example you'd provide just ["id"] if you + wanted to rate limit the access to a field by id. We use Lodash's get to access nested identity + args, docs here. + +- `message`: A custom message per field. Note you can also use formatError to customise the default + error message if you don't want to define a single message per rate limited field. + +- `arrayLengthField`: Limit calls to the field, using the length of the array as the number of calls + to the field. 
diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/testing/_meta.ts b/packages/web/docs/src/pages/docs/gateway/other-features/testing/_meta.ts new file mode 100644 index 000000000..4caf9fbe1 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/testing/_meta.ts @@ -0,0 +1,6 @@ +export default { + index: 'Overview', + mocking: 'Mocking', + debugging: 'Debugging', + snapshot: 'Upstream HTTP Snapshot', +}; diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/testing/debugging.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/testing/debugging.mdx new file mode 100644 index 000000000..b149886e1 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/testing/debugging.mdx @@ -0,0 +1,67 @@ +--- +searchable: false +--- + +# Debugging + +With `DEBUG=1` flag, you can see a verbose output in your logs to debug all the HTTP requests and +responses from the client to the subgraphs. + +``` +[2024-08-28T12:06:25.861Z] DEBUG upstream - 887a7efc-7324-4612-b1a7-0b76ada9fd7a - fetch request { + url: 'http://localhost:44725/graphql', + method: 'POST', + body: '{\n "query": "query MyTest{foo}"\n}', + headers: { + accept: 'application/graphql-response+json, application/json, multipart/mixed', + 'content-type': 'application/json', + 'x-request-id': '887a7efc-7324-4612-b1a7-0b76ada9fd7a' + }, +} +[2024-08-28T12:06:25.866Z] DEBUG upstream - 887a7efc-7324-4612-b1a7-0b76ada9fd7a - fetch response { + url: 'http://localhost:44725/graphql', + status: 200, + headers: { + 'content-type': 'application/graphql-response+json; charset=utf-8', + 'content-length': '22', + date: 'Wed, 28 Aug 2024 12:06:25 GMT', + connection: 'keep-alive', + 'keep-alive': 'timeout=5' + } +} +``` + +Also in this mode, the gateway will add details about the underlying HTTP requests and received +responses in case of an error; + +```json +{ + "errors": [ + { + "message": "Response not successful: Received status code 500", + 
"extensions": { + "request": { + "url": "https://api.example.com/graphql", + "method": "POST", + "headers": { + "content-type": "application/json" + }, + "body": { + "query": "query { hello }" + } + }, + "response": { + "status": 500, + "statusText": "Internal Server Error", + "headers": { + "content-type": "application/json" + }, + "body": { + "message": "Internal Server Error" + } + } + } + } + ] +} +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/testing/index.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/testing/index.mdx new file mode 100644 index 000000000..b5d6787fd --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/testing/index.mdx @@ -0,0 +1,17 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Testing & Debugging + +Testing and debugging are essential parts of the development process. This section will help you +understand how to test and debug your application. + +- Testing Utility +- [Mocking](/docs/gateway/other-features/testing/mocking): Mock your GraphQL schema for testing. +- [HTTP Details in Extensions](/docs/gateway/other-features/testing/debugging): Debugging HTTP + details in the GraphQL response +- [Upstream HTTP Snapshot](/docs/gateway/other-features/testing/snapshot): Debugging the upstream + HTTP response by avoiding the network call diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/testing/mocking.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/testing/mocking.mdx new file mode 100644 index 000000000..c05502947 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/testing/mocking.mdx @@ -0,0 +1,256 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Mocking your GraphQL API + +Mocking your GraphQL API is a common practice when developing and testing your application. 
It +allows you to simulate the behavior of your API without making real network requests. + +## How to use? + +Add it to your plugins: + +```ts filename="gateway.config.ts" +import { defineConfig, useMock } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: [ + useMock({ + mocks: [ + { + apply: 'User.firstName', + faker: '{{name.firstName}}' + } + ] + }) + ] +}) +``` + +The example above will replace the resolver of `User.firstName` with a mock that uses +[faker.js](https://fakerjs.dev) to generate a random name. + +## Custom mock functions for fields + +You can also provide a custom function to generate the mock value for a field: + +```ts filename="gateway.config.ts" +import { defineConfig, useMock } from '@graphql-hive/gateway' +import { fullName } from './user-mocks.js' + +export const gatewayConfig = defineConfig({ + plugins: pluginCtx => [ + useMock({ + mocks: [ + { + apply: 'User.fullName', + custom: fullName + } + ] + }) + ] +}) +``` + +## Custom mock functions for types + +You can mock types with custom mock functions like below; + +```ts filename="gateway.config.ts" +import { defineConfig, useMock } from '@graphql-hive/gateway' +import { user } from './user-mocks.js' + +export const gatewayConfig = defineConfig({ + plugins: pluginCtx => [ + useMock({ + mocks: [ + { + apply: 'User', + custom: user + } + ] + }) + ] +}) +``` + +```ts filename="user-mocks.ts" +export const mockFullName = () => { + return `John Doe` +} +``` + +When defined manually, properties can return values either directly or through a method. This is +useful when defining static mocks because a mock property will be called as many times as there are +items in an array. 
Here’s an example on how this could be achieved: + +```ts filename="user-mocks.ts" +function* generateNames() { + while (true) { + yield 'John Doe' + yield 'John Snow' + } +} + +const fullNames = generateNames() + +export const fullName = () => fullNames.next().value +``` + +## Mocking the lists + +Hive Gateway generates two mocked items by default if the return type is a list. But this can be +configured, as shown below: + +```graphql +type Query { + users: [User] +} +type User { + id: ID + fullName: String +} +``` + +```ts filename="gateway.config.ts" +import { defineConfig, useMock } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: pluginCtx => [ + useMock({ + mocks: [ + { + apply: 'User.fullName', + faker: '{{name.fullName}}' + }, + { + apply: 'Query.users', + length: 3 + } + ] + }) + ] +}) +``` + +Now `query { users { id fullName } }{:graphql}` query will return 3 of `User` item; + +```json +{ + "users": [ + { "id": "SOME_RANDOM_ID", "fullName": "John Doe" }, + { "id": "SOME_RANDOM_ID", "fullName": "Jane Doe" }, + { "id": "SOME_RANDOM_ID", "fullName": "The Other Doe" } + ] +} +``` + +## Stateful mocking + +Hive Gateway supports GraphQL Tools' Stateful Mocking feature. 
So you can have stateful mocking by +using the store provided in the context `context.mockStore`; + +[Learn more about GraphQL Tools Mocking](https://graphql-tools.com/docs/mocking) + +### Initialize store + +When having a schema that returns a list, in this case, a list of users: + +```ts filename="init-store.ts" +import { MockStore } from '@graphql-hive/gateway' + +export const store = new MockStore() +const users = [{ id: 'uuid', name: 'John Snow' }] +// Set individual users' data in the store so that they can be queried as individuals later on +users.forEach(user => { + store.set('User', user.id, user) +}) + +// Populate the `users` query on the root with data +store.set('Query', 'ROOT', 'users', users) +``` + +### Get from the store + +You can implement the mock query field `*ById` declaratively like below: + +```graphql +type Query { + user(id: ID): User +} +``` + +```ts filename="gateway.config.ts" +import { defineConfig, useMock } from '@graphql-hive/gateway' +import { store } from './init-store.js' + +export const gatewayConfig = defineConfig({ + plugins: pluginCtx => [ + useMock({ + store, + mocks: [ + { + apply: 'Query.user', + custom: (_, args) => store.get('User', args.id) + } + ] + }) + ] +}) +``` + +### Mutate data in the store + +```graphql +type User { + id: ID + name: String +} +type Query { + me: User +} +type Mutation { + changeMyName(newName: String): User + updateUser(id: ID, name: String): User +} +``` + +```ts filename="gateway.config.ts" +import { defineConfig, useMock } from '@graphql-hive/gateway' +import { store } from './init-store.js' + +export const gatewayConfig = defineConfig({ + plugins: pluginCtx => [ + useMock({ + store, + mocks: [ + { + apply: 'Query.me', + custom: (_, args, context) => store.get('User', 'uuid') + }, + { + apply: 'Mutation.changeMyName', + custom: (_, args, context) => { + const user = store.get('User', 'uuid') + user.name = args.newName + store.set('User', 'uuid', user) + return user + } + }, + { + apply: 
'Mutation.updateUser', + custom: (_, args, context) => { + const user = store.get('User', args.id) + user.name = args.name + store.set('User', args.id, user) + return user + } + } + ] + }) + ] +}) +``` diff --git a/packages/web/docs/src/pages/docs/gateway/other-features/testing/snapshot.mdx b/packages/web/docs/src/pages/docs/gateway/other-features/testing/snapshot.mdx new file mode 100644 index 000000000..2ce28d2d6 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/other-features/testing/snapshot.mdx @@ -0,0 +1,38 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Snapshot Plugin + +The `snapshot` plugin allows applying snapshot for development usage. + +The snapshot plugin writes the responses of your remote data source to your file system and then +uses it instead of re-fetching it every time. It’s also helpful because you can easily manipulate +your data manually and see how your gateway responds. + +Then, add it to your plugins: + +```ts filename="gateway.config.ts" +import { defineConfig, useSnapshot } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + plugins: pluginCtx => [ + useSnapshot({ + ...pluginCtx, + // You can provide a custom condition to enable/disable the plugin + if: () => process.env.NODE_ENV === 'development', + // The directory where the snapshots will be stored + outputDir: '__snapshots__', + // The origins to apply the snapshot + apply: ['https://my-remote-api.com/*'] + }) + ] +}) +``` + +The following snapshot will work if you are in a development environment (see the `if` command). + +To modify your snapshots and change the responses, go to `__snapshots__` and modify the responses +under those files. 
diff --git a/packages/web/docs/src/pages/docs/gateway/persisted-documents.mdx b/packages/web/docs/src/pages/docs/gateway/persisted-documents.mdx new file mode 100644 index 000000000..473ef6d92 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/persisted-documents.mdx @@ -0,0 +1,486 @@ +import { Callout, Cards, Tabs } from '@theguild/components' + +# Persisted Documents + +Persisted documents can be used on your GraphQL server or Gateway to reduce the payload size of your +GraphQL requests and secure your GraphQL API by only allowing operations that are known and trusted +by your Gateway. + +Hive Gateway can use the Hive Schema Registry as a source for persisted documents. + +Learn more about setting up app deployments and persisted documents on the Hive Dashboard +[here](/docs/schema-registry/app-deployments). + +### Configuration + + + +{/* Hive Registry */} + + + +After getting `endpoint` and `token` from Hive Registry, you can enable persisted documents in Hive +Gateway. + + + +{/* Binary */} + + + +```sh filename="Run the Hive Gateway CLI" {3,4} +hive-gateway supergraph "" \ + --hive-persisted-documents-endpoint "" \ + --hive-persisted-documents-token "" +``` + + + +{/* Docker */} + + + +```sh filename="Run the Hive Gateway CLI" {3,4} +docker run --rm --name hive-gateway -p 4000:4000 \ + ghcr.io/ardatan/hive-gateway supergraph "" \ + --hive-persisted-documents-endpoint "" \ + --hive-persisted-documents-token "" +``` + + + +{/* JavaScript Package */} + + + +```sh filename="Run the Hive Gateway CLI" {3,4} +npx hive-gateway supergraph "" \ + --hive-persisted-documents-endpoint "" \ + --hive-persisted-documents-token "" +``` + + + + + +Instead of using the CLI you can also provide the same configuration via the `gateway.config.ts` +file. 
+ +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + persistedDocuments: { + type: 'hive', + endpoint: '', + token: '' + } +}) +``` + +#### Enabling Arbitrary Documents + +After enabling persisted documents on your Hive Gateway, any arbitary GraphQL documents that don't +contain a `documentId` will be rejected. If you still want to allow executing arbitrary documents, +you can set `allowArbitraryDocuments` to `true` in the configuration. + +```ts filename="gateway.config.ts" {8} +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + persistedDocuments: { + type: 'hive', + endpoint: '', + token: '', + allowArbitraryDocuments: true + } +}) +``` + + + +{/* Custom Store */} + + + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +const store = { + ecf4edb46db40b5132295c0291d62fb65d6759a9eedfa4d5d612dd5ec54a6b38: '{__typename}' +} + +export const gatewayConfig = defineConfig({ + persistedDocuments: { + getPersistedOperation(sha256Hash: string) { + return store[sha256Hash] + } + } +}) +``` + +## How to use ? + +When using persisted operations, the client sends a hash of the operation instead of the operation +itself. + +By default, the persisted operations plugin follows the +[the APQ Specification](https://github.com/apollographql/apollo-link-persisted-queries#apollo-engine) +for **SENDING** hashes to the server. + +However, you can customize the protocol to comply to other implementations e.g. used by +[Relay persisted queries](https://relay.dev/docs/guides/persisted-queries/). + +Change this behavior by overriding the `getPersistedOperationKey` option to support Relay's +specification for example. 
+ +```bash filename="Execute persisted GraphQL operation" +curl -X POST -H 'Content-Type: application/json' http://localhost:4000/graphql \ + -d '{"extensions":{"persistedQuery":{"version":1,"sha256Hash":"ecf4edb46db40b5132295c0291d62fb65d6759a9eedfa4d5d612dd5ec54a6b38"}}}' + +{"data":{"__typename":"Query"}} +``` + +As you can see, the persisted operations plugin is able to execute the operation without the need to +send the full operation document. + +If you now sent a normal GraphQL operation that is not within the store, it will be rejected. + +```bash filename="Arbitary GraphQL operation" +curl -X POST -H 'Content-Type: application/json' http://localhost:4000/graphql \ + -d '{"query": "{__typename}"}' + +{"errors":[{"message":"PersistedQueryOnly"}]} +``` + +## Extracting client operations + +The recommended way of extracting the persisted operations from your client is to use +[GraphQL Code Generator](https://www.graphql-code-generator.com/). + + + You can learn more about persisted operations with [the `client` preset on the + GraphQL Code Generator + documentation](https://the-guild.dev/graphql/codegen/plugins/presets/preset-client#persisted-documents). + +There is also +[a full code example using GraphQL Yoga available on GitHub](https://github.com/dotansimha/graphql-code-generator/tree/master/examples/persisted-documents). + + + +For people not using the client-preset the is also the standalone +[`graphql-codegen-persisted-query-ids`](https://github.com/valu-digital/graphql-codegen-persisted-query-ids) +plugin for extracting a map of persisted query ids and their corresponding GraphQL documents from +your application/client-code in a JSON file. 
+ +```json filename="Example map extracted by GraphQL Code Generator" +{ + "ecf4edb46db40b5132295c0291d62fb65d6759a9eedfa4d5d612dd5ec54a6b38": "{__typename}", + "c7a30a69b731d1af42a4ba02f2fa7a5771b6c44dcafb7c3e5fa4232c012bf5e7": "mutation {__typename}" +} +``` + +This map can then be used to persist the GraphQL documents in the server. + +```ts filename="gateway.config.ts" +import { readFileSync } from 'node:fs' +import { defineConfig } from '@graphql-hive/gateway' + +const persistedOperations = JSON.parse(readFileSync('./persistedOperations.json', 'utf-8')) + +export const gatewayConfig = defineConfig({ + persistedDocuments: { + getPersistedOperation(sha256Hash: string) { + return persistedOperations[sha256Hash] + } + } +}) +``` + +## Sending the hash from the client + +The persisted operations plugin follows the +[the APQ Specification of Apollo](https://github.com/apollographql/apollo-link-persisted-queries#apollo-engine) +for SENDING hashes to the server. + +GraphQL clients such `Apollo Client` and `Urql` support that out of the box. + +### Urql and GraphQL Code Generator + +When using the GraphQL Code Generator `client` preset together with urql, sending the hashes is +straight-forward using the `@urql/exchange-persisted` package. + + + When you are using the urql graph cache you need to ensure the `__typename` selections are added to your GraphQL documents selection set. 
+ +[Please refer to the GraphQL Code Generator `client` preset documentation for normalized caches for more information.](https://the-guild.dev/graphql/codegen/plugins/presets/preset-client#normalized-caches-urql-and-apollo-client) + + + +```ts filename="Urql Client Configuration" {2,8-13} +import { cacheExchange, createClient } from '@urql/core' +import { persistedExchange } from '@urql/exchange-persisted' + +const client = new createClient({ + url: 'YOUR_GRAPHQL_ENDPOINT', + exchanges: [ + cacheExchange, + persistedExchange({ + enforcePersistedQueries: true, + enableForMutation: true, + generateHash: (_, document) => Promise.resolve(document['__meta__']['hash']) + }) + ] +}) +``` + +[More information on `@urql/exchange-persisted` on the the urql documentation](https://formidable.com/open-source/urql/docs/advanced/persistence-and-uploads/)) + +### Apollo Client and GraphQL Code Generator + +When using the GraphQL Code Generator `client` preset together with Apollo Client, sending the +hashes is straight-forward. + + + When you are using the urql graph cache you need to ensure the `__typename` selections are added to your GraphQL documents selection set. 
+ +[Please refer to the GraphQL Code Generator `client` preset documentation for normalized caches for more information.](https://the-guild.dev/graphql/codegen/plugins/presets/preset-client#normalized-caches-urql-and-apollo-client) + + + +```ts filename="Apollo Client Configuration" {2,4-6} +import { ApolloClient, HttpLink, InMemoryCache } from '@apollo/client' +import { createPersistedQueryLink } from '@apollo/client/link/persisted-queries' + +const link = createPersistedQueryLink({ + generateHash: document => document['__meta__']['hash'] +}) + +const client = new ApolloClient({ + cache: new InMemoryCache(), + link: link.concat(new HttpLink({ uri: '/graphql' })) +}) +``` + +[More information on the Apollo Client documentation](https://www.apollographql.com/docs/apollo-server/performance/apq/#step-2-enable-automatic-persisted-queries) + +## Using parsed GraphQL documents as AST + +You can reduce the amount of work the server has to do by using the parsed GraphQL documents as AST. + +```ts filename="Use parsed GraphQL documents as AST" +import { parse } from 'graphql' + +const persistedOperations = { + 'my-key': parse(/* GraphQL */ ` + query { + __typename + } + `) +} + +{ + getPersistedOperation(key: string) { + return persistedOperations[key] + } +} +``` + +## Skipping validation of persisted operations + +If you validate your persisted operations while building your store, we recommend to skip the +validation on the server. So this will reduce the work done by the server and the latency of the +requests. + +```ts filename="Validate persisted operations" +{ + //... + skipDocumentValidation: true +} +``` + +> Using AST and skipping validations will reduce the amount of work the server has to do, so the +> requests will have less latency. + +## Allowing arbitrary GraphQL operations + +Sometimes it is handy to allow non-persisted operations aside from the persisted ones. E.g. 
you want
+to allow developers to execute arbitrary GraphQL operations on your production server.
+
+This can be achieved using the `allowArbitraryOperations` option.
+
+```ts filename="Allow arbitrary GraphQL operations"
+{
+  allowArbitraryOperations: request =>
+    request.headers.get('x-allow-arbitrary-operations') === 'true'
+}
+```
+
+Use this option with caution!
+
+## Using Relay's Persisted Queries Specification
+
+If you are using
+[Relay's Persisted Queries specification](https://relay.dev/docs/guides/persisted-queries/#example-implemetation-of-relaylocalpersistingjs),
+you can configure the plugin like below:
+
+```ts filename="Relay Persisted Queries example"
+{
+  extractPersistedOperationId(params: GraphqlParams & { doc_id?: unknown }) {
+    return typeof params.doc_id === 'string' ? params.doc_id : null
+  },
+  getPersistedOperation(key: string) {
+    return store[key]
+  },
+},
+```
+
+## Advanced persisted operation id Extraction from HTTP Request
+
+You can extract the persisted operation id from the request using the `extractPersistedOperationId`
+option.
+
+### Query Parameters Recipe
+
+```ts filename="Extract persisted operation id from query parameters" {22-25}
+{
+  getPersistedOperation(sha256Hash: string) {
+    return store[sha256Hash]
+  },
+  extractPersistedOperationId(_params, request) {
+    const url = new URL(request.url)
+    return url.searchParams.get('id')
+  }
+}
+```
+
+### Header Recipe
+
+You can also use the request headers to extract the persisted operation id.
+
+```ts filename="Extract persisted operation id from headers" {22-24}
+{
+  getPersistedOperation(sha256Hash: string) {
+    return store[sha256Hash]
+  },
+  extractPersistedOperationId(_params, request) {
+    return request.headers.get('x-document-id')
+  }
+}
+```
+
+### Path Recipe
+
+You can also use the request path to extract the persisted operation id. This requires you to also
+customize the GraphQL endpoint. 
The underlying implementation for the URL matching is powered by the +[URL Pattern API](https://developer.mozilla.org/en-US/docs/Web/API/URL_Pattern_API). + +This combination is powerful as it allows you to use the persisted operation id as it can easily be +combined with any type of HTTP proxy cache. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +const store = { + ecf4edb46db40b5132295c0291d62fb65d6759a9eedfa4d5d612dd5ec54a6b38: '{__typename}' +} + +export const gatewayConfig = defineConfig({ + graphqlEndpoint: '/graphql/:document_id?', + persistedDocuments: { + getPersistedOperation(sha256Hash: string) { + return store[sha256Hash] + }, + extractPersistedOperationId(_params, request) { + return request.url.split('/graphql/').pop() ?? null + } + } +}) +``` + +## Using an external Persisted Operation Store + +As a project grows the amount of GraphQL Clients and GraphQL Operations can grow a lot. At some +point it might become impractible to store all persisted operations in memory. + +In such a scenario you can use an external persisted operation store. + +You can return a `Promise` from the `getPersistedOperation` function and call any database or +external service to retrieve the persisted operation. + + + For the best performance a mixture of an LRU in-memory store and external persisted operation + store is recommended. + + +```ts filename="Use external persisted operation store" +{ + getPersistedOperation(key: string) { + return fetch(`https://localhost:9999/document/${key}`).then(res => res.json()) + } +} +``` + +## Using multiple Persisted Operation Stores + +You can vary the persisted operations store you read from by switching based on the request. + +An example of this may be to use request headers. 
+ +```ts filename="Use parsed GraphQL documents as AST" +import { parse } from 'graphql' + +const persistedOperationsStores = { + ClientOne: { + 'my-key': parse(/* GraphQL */ ` + query { + __typename + } + `) + } +} + +{ + getPersistedOperation(key: string, request: Request) { + const store = persistedOperationsStores[request.headers.get('client-name')] + return (store && store[key]) || null + } +} +``` + +## Customize errors + +This plugin can throw three different types of errors:: + +- `PersistedOperationNotFound`: The persisted operation cannot be found. +- `PersistedOperationKeyNotFound`: The persistence key cannot be extracted from the request. +- `PersistedOperationOnly`: An arbitrary operation is rejected because only persisted operations are + allowed. + +Each error can be customized to change the HTTP status or add a translation message ID, for example. + +```ts filename="Customize errors" +import { CustomErrorClass } from './custom-error-class' + +{ + customErrors: { + // You can change the error message + notFound: 'Not Found', + // Or customize the error with a GraphqlError options object, allowing you to add extensions + keyNotFound: { + message: 'Key Not Found', + extensions: { + http: { + status: 404 + } + } + }, + // Or customize with a factory function allowing you to use your own error class or format + persistedQueryOnly: () => { + return new CustomErrorClass('Only Persisted Operations are allowed') + } + } +} +``` + + + diff --git a/packages/web/docs/src/pages/docs/gateway/subscriptions.mdx b/packages/web/docs/src/pages/docs/gateway/subscriptions.mdx new file mode 100644 index 000000000..217dc2af2 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/subscriptions.mdx @@ -0,0 +1,278 @@ +--- +searchable: false +--- + +import { Callout } from '@theguild/components' + +# Subscriptions + +Hive Gateway fully supports federated subscriptions and behaves just like +[Federation GraphQL subscriptions in Apollo 
Router](https://www.apollographql.com/docs/router/executing-operations/subscription-support).
+
+Subgraphs providing subscriptions can communicate with Hive Gateway through one of the following
+protocols:
+
+- [GraphQL over SSE](https://github.com/graphql/graphql-over-http/blob/main/rfcs/GraphQLOverSSE.md)
+- [GraphQL over WebSocket](https://github.com/graphql/graphql-over-http/blob/main/rfcs/GraphQLOverWebSocket.md)
+- [HTTP Callback](https://www.apollographql.com/docs/router/executing-operations/subscription-callback-protocol/)
+
+## Example
+
+We'll implement two
+[GraphQL Yoga federation services](https://the-guild.dev/graphql/yoga-server/docs/features/apollo-federation#federation-service)
+behaving as subgraphs. The "products" service exposes a subscription operation type for subscribing
+to product changes, while the "reviews" service simply exposes review stats about products.
+
+The example is somewhat similar to
+[Apollo's documentation](https://www.apollographql.com/docs/router/executing-operations/subscription-support/#example-execution),
+except that we use GraphQL Yoga here and significantly reduce the setup requirements.
+
+### Install dependencies
+
+```sh npm2yarn
+npm i graphql-yoga @apollo/subgraph graphql
+```
+
+### Products service
+
+```ts filename="products.ts"
+import { createServer } from 'http'
+import { parse } from 'graphql'
+import { createYoga } from 'graphql-yoga'
+import { buildSubgraphSchema } from '@apollo/subgraph'
+import { resolvers } from './my-resolvers'
+
+const typeDefs = parse(/* GraphQL */ `
+  type Product @key(fields: "id") {
+    id: ID!
+    name: String!
+    price: Int!
+  }
+
+  type Subscription {
+    productPriceChanged: Product!
+  }
+`)
+
+const yoga = createYoga({ schema: buildSubgraphSchema([{ typeDefs, resolvers }]) })
+
+const server = createServer(yoga)
+
+server.listen(40001, () => {
+  console.log('Products subgraph ready at http://localhost:40001')
+})
+```
+
+### Reviews service
+
+```ts filename="reviews.ts"
+import { createServer } from 'http'
+import { parse } from 'graphql'
+import { createYoga } from 'graphql-yoga'
+import { buildSubgraphSchema } from '@apollo/subgraph'
+import { resolvers } from './my-resolvers'
+
+const typeDefs = parse(/* GraphQL */ `
+  extend type Product @key(fields: "id") {
+    id: ID! @external
+    reviews: [Review!]!
+  }
+
+  type Review {
+    score: Int!
+  }
+`)
+
+const yoga = createYoga({ schema: buildSubgraphSchema([{ typeDefs, resolvers }]) })
+
+const server = createServer(yoga)
+
+server.listen(40002, () => {
+  console.log('Reviews subgraph ready at http://localhost:40002')
+})
+```
+
+### Start Gateway
+
+After having generated a supergraph file `supergraph.graphql` for the two subgraphs, either using
+[GraphQL Mesh](https://graphql-mesh.com/) or
+[Apollo Rover](https://www.apollographql.com/docs/rover/), simply run Hive Gateway without any
+additional configuration!
+
+```sh
+hive-gateway supergraph supergraph.graphql
+```
+
+### Subscribe
+
+Let's now subscribe to the product price changes by executing the following query:
+
+```graphql
+subscription {
+  productPriceChanged {
+    # Defined in Products subgraph
+    name
+    price
+    reviews {
+      # Defined in Reviews subgraph
+      score
+    }
+  }
+}
+```
+
+Hive Gateway will intelligently resolve all fields on subscription events and deliver you the
+complete result.
+
+You can subscribe to the gateway through Server-Sent Events (SSE) (in JavaScript, using
+[EventSource](https://developer.mozilla.org/en-US/docs/Web/API/EventSource) or
+[graphql-sse](https://the-guild.dev/graphql/sse)). 
For the sake of brevity, we'll subscribe using
+`curl`:
+
+```sh
+curl 'http://localhost:4000/graphql' \
+  -H 'accept: text/event-stream' \
+  -H 'content-type: application/json' \
+  --data-raw '{"query":"subscription OnProductPriceChanged { productPriceChanged { name price reviews { score } } }","operationName":"OnProductPriceChanged"}'
+```
+
+## Subgraphs using WebSockets
+
+If your subgraph uses WebSockets for subscriptions support
+([like with Apollo Server](https://www.apollographql.com/docs/apollo-server/data/subscriptions/)),
+Hive Gateway will need additional configuration pointing to the WebSocket server path on the
+subgraph.
+
+And configure Hive Gateway to use the `/subscriptions` path on the "products" subgraph for WebSocket
+connections:
+
+```ts filename="gateway.config.ts"
+import { defineConfig, type WSTransportOptions } from '@graphql-hive/gateway'
+
+export const gatewayConfig = defineConfig({
+  supergraph: 'supergraph.graphql',
+  transportEntries: {
+    // use "*.http" to apply options to all subgraphs with HTTP
+    '*.http': {
+      options: {
+        subscriptions: {
+          kind: 'ws',
+          location: '/subscriptions'
+        } satisfies WSTransportOptions
+      }
+    }
+  }
+})
+```
+
+Now simply start Hive Gateway with:
+
+```sh
+hive-gateway supergraph
+```
+
+Downstream clients are still subscribing to Hive Gateway through any supported subscriptions
+protocol, but upstream Hive Gateway will use long-living WebSocket connections to the "products"
+service.
+
+### `Authorization` header
+
+Hive Gateway can propagate the downstream client's `Authorization` header contents to the upstream
+WebSocket connections through the
+[`ConnectionInit` message payload](https://github.com/graphql/graphql-over-http/blob/main/rfcs/GraphQLOverWebSocket.md#connectioninit).
+ +```ts filename="gateway.config.ts" +import { defineConfig, type WSTransportOptions } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + supergraph: 'supergraph.graphql', + transportEntries: { + // use "*.http" to apply options to all subgraphs with HTTP + '*.http': { + options: { + subscriptions: { + kind: 'ws', + location: '/subscriptions', + options: { + connectionParams: { + token: '{context.headers.authorization}' + } + } satisfies WSTransportOptions + } + } + } + } +}) +``` + +The contents of the payload will be available in `graphql-ws` connectionParams: + +```json +{ + "connectionParams": { + "token": "" + } +} +``` + + + This is also what Apollo Router when [propagating auth on + WebSockets](https://www.apollographql.com/docs/router/executing-operations/subscription-support/#websocket-auth-support). + + +## Subscriptions using HTTP Callback + +If your subgraph uses +[HTTP Callback protocol for subscriptions](https://www.apollographql.com/docs/router/executing-operations/subscription-callback-protocol/), +Hive Gateway will need additional configuration. + +```ts filename="gateway.config.ts" +import { defineConfig, type HTTPCallbackTransportOptions } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + supergraph: 'supergraph.graphql', + // Setup Hive Gateway to listen for webhook callbacks, and emit the payloads through PubSub engine + webhooks: true, + transportEntries: { + // use "*.http" to apply options to all subgraphs with HTTP + '*.http': { + options: { + subscriptions: { + kind: 'http-callback', + options: { + // The gateway's public URL, which your subgraphs access, must include the path configured on the gateway. 
+ public_url: 'http://localhost:4000/callback', + // The path of the router's callback endpoint + path: '/callback', + // Heartbeat interval to make sure the subgraph is still alive, and avoid hanging requests + heartbeat_interval: 5000 + } satisfies HTTPCallbackTransportOptions + } + } + } + } +}) +``` + +## Closing active subscriptions on schema change + +When the schema changes in Hive Gateway, all active subscriptions will be completed after emitting +the following execution error: + +```json +{ + "errors": [ + { + "message": "subscription has been closed due to a schema reload", + "extensions": { + "code": "SUBSCRIPTION_SCHEMA_RELOAD" + } + } + ] +} +``` + + + This is also what Apollo Router when [terminating subscriptions on schema + update](https://www.apollographql.com/docs/router/executing-operations/subscription-support/#termination-on-schema-update). + diff --git a/packages/web/docs/src/pages/docs/gateway/supergraph-proxy-source.mdx b/packages/web/docs/src/pages/docs/gateway/supergraph-proxy-source.mdx new file mode 100644 index 000000000..736cc1621 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/supergraph-proxy-source.mdx @@ -0,0 +1,252 @@ +import { Tabs } from '@theguild/components' + +# Supergraph / Proxy Source + +Hive Gateway can retrieve a supergraph from a wide range of sources. + +This includes: + +- Hive Schema Schema Registry +- Apollo GraphOS / Studio +- Custom Sources + +In addition you can also proxy any GraphQL API, by either introspection or providing a schema file. + +## Supergraph + + + +{/* Hive Schema Registry */} + + + +Hive Gateway has built in support for fetching supergraphs from the Hive Schema Registry. You can +either choose to provide the configuration via CLI parameters, environment variables or a +configuration file. 
+ + + +{/* CLI */} + + + +```sh +hive-gateway supergraph --hive-cdn-key +``` + + + +{/* Configuration File */} + + + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + supergraph: { + // The CDN type. + type: 'hive', + // The endpoint of CDN + endpoint: '', + // The API key provided by Hive Registry + key: '' + } +}) +``` + + + + + + + +{/* Apollo GraphOS */} + + + +Hive Gateway has built in support for fetching supergraphs from the Apollo GraphOS Registry. You can +either choose to provide the configuration via CLI parameters, environment variables or a +configuration file. + + + +{/* CLI */} + + + +```sh +hive-gateway supergraph [@] --apollo-key +``` + + + +{/* Configuration File */} + + + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + supergraph: { + type: 'graphos', + /** + * The graph ref of the managed federation graph. + * It is composed of the graph ID and the variant (`@`). + * + * If not provided, `APOLLO_GRAPH_REF` environment variable is used. + * + * You can find a a graph's ref at the top of its Schema Reference page in Apollo Studio. + */ + graphRef: '[@]', + /** + * The API key to use to authenticate with the managed federation up link. + * It needs at least the `service:read` permission. + * + * If not provided, `APOLLO_KEY` environment variable will be used instead. + * + * [Learn how to create an API key](https://www.apollographql.com/docs/federation/v1/managed-federation/setup#4-connect-the-gateway-to-studio) + */ + apiKey: '', + /** + * The URL of the managed federation up link. When retrying after a failure, you should cycle through the default up links using this option. + * + * Uplinks are available in `DEFAULT_UPLINKS` constant. + * + * This options can also be defined using the `APOLLO_SCHEMA_CONFIG_DELIVERY_ENDPOINT` environment variable. 
+ * It should be a comma separated list of up links, but only the first one will be used. + * + * Default: 'https://uplink.api.apollographql.com/' (Apollo's managed federation up link on GCP) + * + * Alternative: 'https://aws.uplink.api.apollographql.com/' (Apollo's managed federation up link on AWS) + */ + upLink?: string; + } +}) +``` + + + + + + + +{/* Custom HTTP Source */} + + + +You can provide a custom supergraph source, along with other options to customize the polling/retry +behavior. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + supergraph: () => + // Fetch the supergraph from the schema registry + fetch('https://my-registry.com/supergraph.graphql', { + headers: { + Authorization: 'Bearer MY_TOKEN' + } + }).then(res => res.text()), + + plugins: ctx => [ + // You can also write your custom plugins to interact with the schema registry + useMyCustomPlugin(ctx) + ] +}) +``` + + + +{/* Local File */} + + + +You can point to `supergraph.graphql` located in your file system. + + + +{/* CLI */} + + + +```sh +hive-gateway supergraph ./supergraph.graphql +``` + + + +{/* Configuration file */} + + + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + supergraph: './supergraph.graphql' +}) +``` + + + + + + + + + +### Polling + +You can configure the polling interval for the supergraph source. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + supergraph: { + /* Supergraph Configuration */ + }, + // Poll the schema registry every 10 seconds + pollingInterval: 10_000 +}) +``` + +## Proxy + +Instead of serving a supergraph, you can also use Hive Gateway to proxy any existing GraphQL API. 
+This allows you to add features such as [usage reporting](/docs/gateway/usage-reporting) or +[persisted documents](/docs/gateway/persisted-documents) without modifying your existing GraphQL +API. + + + +{/* CLI */} + + + +```sh filename="Proxy GraphQL API" +hive-gateway proxy https://example.com/graphql +``` + + + +{/* Configuration File */} + + + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + proxy: { + endpoint: 'https://example.com/graphql' + } +}) +``` + + + + diff --git a/packages/web/docs/src/pages/docs/gateway/usage-reporting.mdx b/packages/web/docs/src/pages/docs/gateway/usage-reporting.mdx new file mode 100644 index 000000000..83d57d5d3 --- /dev/null +++ b/packages/web/docs/src/pages/docs/gateway/usage-reporting.mdx @@ -0,0 +1,128 @@ +import { Callout, Cards, Tabs } from '@theguild/components' + +# Usage Reporting + +Hive Gateway can send usage reports to a schema registry such as the Hive schema registry, but also +other providers such Apollo GraphOS. + + + +{/* Hive Schema Registry */} + + + +The Hive Gateway can report usage metrics to the Hive schema registry, giving you +[insights for executed GraphQL operations](/docs/dashboard/insights), and +[field level usage information](/docs/dashboard/explorer), but also enabling +[conditional breaking changes](http://localhost:3000/docs/management/targets#conditional-breaking-changes). +Usage reporting works for both Apollo Federation and Proxy gateways. + +Before proceeding, make sure you have +[created a registry token with write permissions on the Hive dashboard](/docs/management/targets#registry-access-tokens). + + + +{/* Binary */} + + + +```sh filename="Run Hive Gateway with Usage Reporting enabled." 
{4} +hive-gateway supergraph \ + http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/supergraph \ + --hive-cdn-key "YOUR HIVE CDN KEY" \ + --hive-registry-token "YOUR HIVE REGISTRY TOKEN" +``` + + + +{/* Docker */} + + + +```sh filename="Run Hive Gateway with Usage Reporting enabled." {5} +docker run --rm --name hive-gateway -p 4000:4000 \ + ghcr.io/ardatan/hive-gateway supergraph \ + http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/supergraph \ + --hive-cdn-key "YOUR HIVE CDN KEY" \ + --hive-registry-token "YOUR HIVE REGISTRY TOKEN" +``` + + + +{/* JavaScript Package */} + + + +```sh filename="Run Hive Gateway with Usage Reporting enabled." {4} +npx hive-gateway supergraph \ + http://cdn.graphql-hive.com/artifacts/v1/12713322-4f6a-459b-9d7c-8aa3cf039c2e/supergraph \ + --hive-cdn-key "YOUR HIVE CDN KEY" \ + --hive-registry-token "YOUR HIVE REGISTRY TOKEN" +``` + + + + + +Alternatively, you can also provide the usage reporting configuration via the `gateway.config.ts` +file. + +```ts filename="gateway.config.ts" {8} +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + reporting: { + // The reporting service type + type: 'hive', + // The registry token provided by Hive Registry + token: 'YOUR HIVE REGISTRY TOKEN' + } +}) +``` + + + +{/* Apollo GraphOS */} + + + +If you want to report usage metrics to a Apollo GraphOS, configure your `gateway.config.ts` file as +following. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + reporting: { + type: 'graphos', + /** + * The graph ref of the managed federation graph. + * It is composed of the graph ID and the variant (`@`). + * + * If not provided, `APOLLO_GRAPH_REF` environment variable is used. + * + * You can find a a graph's ref at the top of its Schema Reference page in Apollo Studio. 
+ */ + graphRef: '[@]', + /** + * The API key to use to authenticate with the managed federation up link. + * It needs at least the `service:read` permission. + * + * If not provided, `APOLLO_KEY` environment variable will be used instead. + * + * [Learn how to create an API key](https://www.apollographql.com/docs/federation/v1/managed-federation/setup#4-connect-the-gateway-to-studio) + */ + apiKey: '', + /** + * Usage report endpoint + * + * Defaults to GraphOS endpoint (https://usage-reporting.api.apollographql.com/api/ingress/traces) + */ + endpoint?: string; + } +}) +``` + + + + diff --git a/packages/web/docs/src/pages/docs/get-started/_meta.ts b/packages/web/docs/src/pages/docs/get-started/_meta.ts index 8de59e7c2..e80091b96 100644 --- a/packages/web/docs/src/pages/docs/get-started/_meta.ts +++ b/packages/web/docs/src/pages/docs/get-started/_meta.ts @@ -1,6 +1,6 @@ export default { 'first-steps': 'First Steps', - 'single-project': 'Guide: Single Schema', - 'schema-stitching': 'Guide: Schema-Stitching', - 'apollo-federation': 'Guide: Apollo Federation', + 'apollo-federation': 'Apollo Federation', + 'single-project': 'Single Schema', + 'schema-stitching': 'Schema-Stitching', }; diff --git a/packages/web/docs/src/pages/docs/get-started/apollo-federation.mdx b/packages/web/docs/src/pages/docs/get-started/apollo-federation.mdx index 4750b152c..46fcd19e8 100644 --- a/packages/web/docs/src/pages/docs/get-started/apollo-federation.mdx +++ b/packages/web/docs/src/pages/docs/get-started/apollo-federation.mdx @@ -1,23 +1,35 @@ import NextImage from 'next/image' import { Callout, Cards, FileTree, Steps, Tabs } from '@theguild/components' +import cdnAccessTokenSettings from '../../../../public/docs/pages/get-started/apollo-federation/cdn-access-token-settings.png' +import createAccessToken from '../../../../public/docs/pages/get-started/apollo-federation/create-access-token.png' +import cdnAccessTokenCreate from 
'../../../../public/docs/pages/get-started/apollo-federation/create-cdn-access-token.png' +import createdAccessToken from '../../../../public/docs/pages/get-started/apollo-federation/created-access-token.png' +import cdnAccessTokenCreated from '../../../../public/docs/pages/get-started/apollo-federation/created-cdn-access-token.png' +import registryTokenSettings from '../../../../public/docs/pages/get-started/apollo-federation/registry-token-settings.png' +import targetOverview from '../../../../public/docs/pages/get-started/apollo-federation/target-overview.png' import cdnTokenImage from '../../../../public/docs/pages/guides/federation-cdn-token.png' import federationExplorerImage from '../../../../public/docs/pages/guides/federation-explorer.png' import federationHistoryImage from '../../../../public/docs/pages/guides/federation-history.png' import federationSchemaImage from '../../../../public/docs/pages/guides/federation-schema.png' + import historyImage from '../../../../public/docs/pages/guides/history.png' import tokenImage from '../../../../public/docs/pages/guides/token.png' -# Guide: Apollo Federation Project +# Get started with Apollo Federation -Once you've created a Hive project of type **Apollo Federation**, you can simply push your GraphQL -schema to the registry. This guide will guide you through the basics of schema pushing, checking and -fetching of the Supergraph SDL to your Apollo Gateway / Router. +Once you've created a Hive project of type **Apollo Federation**, you can start pushing your Apollo +Federation subgraph schemas to the Hive registry. + +This guide will walk you through the basics of schema pushing, checking, and spin up the Hive +Gateway serving the federated GraphQL schema. -### Subgraphs +### Prerequisites -For this guide, we are going to use the following Subgraphs: +For this guide, we are going to use the following Subgraphs that we are going to publish to Hive. 
+ +> **Note**: If you want you can also use your own subgraphs instead of the ones we provide. @@ -26,60 +38,28 @@ For this guide, we are going to use the following Subgraphs: -Here's the GraphQL schema (SDL) for every subgraph we are going to publish to Hive: +We provide the actual URLs for these running subgraphs, so we can later on send some real GraphQL +requests with our federation gateway. + +- **Products**: https://federation-demo.theguild.workers.dev/products +- **Reviews**: https://federation-demo.theguild.workers.dev/reviews + +Here's the GraphQL schema (SDL) for every subgraph we are going to publish to Hive. Save these to +files on your machine. ```graphql filename="subgraphs/products.graphql" -enum CURRENCY_CODE { - USD -} - -type Department { - category: ProductCategory - url: String -} - -type Money { - amount: Float - currencyCode: CURRENCY_CODE -} - -type Price { - cost: Money - deal: Float - dealSavings: Money -} - -type Product @key(fields: "id") { - id: ID! - title: String - url: String - description: String - price: Price - salesRank(category: ProductCategory = ALL): Int - salesRankOverall: Int - salesRankInCategory: Int - category: ProductCategory - images(size: Int = 1000): [String] - primaryImage(size: Int = 1000): String -} - -enum ProductCategory { - ALL - GIFT_CARDS - ELECTRONICS - CAMERA_N_PHOTO - VIDEO_GAMES - BOOKS - CLOTHING -} - extend type Query { - bestSellers(category: ProductCategory = ALL): [Product] - categories: [Department] - product(id: ID!): Product + topProducts(first: Int = 5): [Product] +} + +type Product @key(fields: "upc") { + upc: String! + name: String + price: Int + weight: Int } ``` @@ -87,80 +67,146 @@ extend type Query { ```graphql filename="subgraphs/reviews.graphql" -extend type Product @key(fields: "id") { - id: ID! @external - reviews: [Review] - reviewSummary: ReviewSummary -} - type Review @key(fields: "id") { id: ID! 
- rating: Float - content: String + body: String + author: User @provides(fields: "username") + product: Product } -type ReviewSummary { - totalReviews: Int - averageRating: Float +extend type User @key(fields: "id") { + id: ID! @external + username: String @external + reviews: [Review] +} + +extend type Product @key(fields: "upc") { + upc: String! @external + reviews: [Review] } ``` -### Hive CLI Access Token +### Hive CLI setup As you may have noticed, Hive has created three targets under your project: `development`, -`staging`, and `production`. This guide will use the `development` target to explore the features -Hive offers. +`staging`, and `production`. Each of these targets represent a different environment. You can remove +or create new targets as needed, for modelling the different environments of your project. -To begin, select the `development` target under your Hive project and then choose the **Settings** -tab. On the Settings screen, you can manage your target's settings and access tokens. + -Under the **Registry Access Tokens** section, click the **Generate new token** button. Give your -access token a name and select **Schema Check & Push** from the list of token presets: +For this guide we will use the `development` target. + +In order to publish our subgraph schemas to the schema registry, we first need to create an registry +access token with the necessary permissions for the Hive CLI. + +Select the `development` target under your Hive project and then choose the **Settings** tab. On the +Settings screen, you can manage your target's settings and access tokens. + + + +Under the **Registry Access Tokens** section, click the **Create new registry token** button. Give +your access token a name and select **Read & Write** for the permissions: Click on **Generate Token** button and you should get your CLI token created, with permissions to publish GraphQL schemas. + + Make sure to copy your token and keep it safe. 
**You won't be able to see it again.** - - **Note:** You can create multiple tokens with different permissions for different use cases. - - -### Publish your schemas +### Publish subgraphs Now that you have your access token, and you have the base schema defined, you can publish your schema to the registry. We'll start with the **Products** subgraph. +If you did not yet copy the contents of the `subgraphs/products.graphql` to a local file, you can do +so now. + Run the following command in your terminal, to publish your `subgraphs/products.graphql` to the -registry (replace `YOUR_TOKEN_HERE` with the token you have just created): +registry. Replace `` with the access token we just created. + + + +{/* Binary */} + + ```bash hive schema:publish \ - --registry.accessToken YOUR_TOKEN_HERE \ + --registry.accessToken "" \ --service="products" \ - --url="http://fake.com/products/graphql" \ - --author "Me" \ - --commit "First" \ + --url="https://federation-demo.theguild.workers.dev/products" \ + --author "John Doe" \ + --commit "My first commit" \ subgraphs/products.graphql ``` - - If you are running under a NodeJS project, make sure to include the `npx`, `yarn` or `pnpm` prefix - to the command. - + -If everything goes well, you should see the following output: +{/* NodeJS */} + + + +```bash +npx hive schema:publish \ + --registry.accessToken "" \ + --service="products" \ + --url="https://federation-demo.theguild.workers.dev/products" \ + --author "John Doe" \ + --commit "My first commit" \ + subgraphs/products.graphql +``` + + + +{/* Docker */} + + + +For Docker, we need to mount the subgraph schema file into the container. 
+ +```bash +docker run --name graphql-hive-cli --rm \ + -v $(pwd)/subgraphs/products.graphql/:/usr/src/app/subgraphs/products.graphql \ + ghcr.io/kamilkisiela/graphql-hive/cli \ + schema:publish \ + --registry.accessToken "" \ + --service="products" \ + --url="https://federation-demo.theguild.workers.dev/products" \ + --author "John Doe" \ + --commit "My first commit" \ + ./subgraphs/products.graphql +``` + + + + + +If everything goes well, you should see the following output, containing a link pointing you to the +schema version on the Hive dashboard. ```bash copy=false ✔ Published initial schema. @@ -180,18 +226,68 @@ new schema you just published 🎉 explore the API types, fields, and arguments. -Now, let's publish the **Reviews** subgraph schema to Hive: +Next, we will publish the **Reviews** subgraph schema to Hive. Again, let's start by copying the +schema to a local file. + +Then, we run the following command in your terminal. + + + +{/* Binary */} + + ```bash hive schema:publish \ --registry.accessToken YOUR_TOKEN_HERE \ --service="reviews" \ - --url="http://fake.com/reviews/graphql" \ - --author "Me" \ - --commit "Second" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ + --author "John Doe" \ + --commit "My second commit" \ subgraphs/reviews.graphql ``` + + +{/* NodeJS */} + + + +```bash +npx hive schema:publish \ + --registry.accessToken YOUR_TOKEN_HERE \ + --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ + --author "John Doe" \ + --commit "My second commit" \ + subgraphs/reviews.graphql +``` + + + +{/* Docker */} + + + +For Docker, we need to mount the subgraph schema file into the container. 
+ +```bash +docker run --name graphql-hive-cli --rm \ + -v $(pwd)/subgraphs/reviews.graphql/:/usr/src/app/subgraphs/reviews.graphql \ + ghcr.io/kamilkisiela/graphql-hive/cli \ + schema:publish \ + --registry.accessToken "" \ + --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ + --author "John Doe" \ + --commit "My second commit" \ + ./subgraphs/reviews.graphql +``` + + + + + If everything goes well, you should see the following output: ```bash copy=false @@ -206,45 +302,90 @@ On your target's **Explorer** page now, you'll be able to see the schema of both className="mt-10 max-w-2xl rounded-lg drop-shadow-md" /> -### Schema Checks +### Schema checks Hive can perform several checks on your schema before publishing it to the registry. You can use -Hive CLI to run these check and find potential breaking changes, and potential composition issues -when a Apollo Federation project is used. +Hive CLI to run these check and find potential breaking changes or composition issues. -To see how schema checks works, let's make a small change to our schema. First, we'll start with a -non-breaking change - we'll add a new field to the `Review` type under the **Reviews** subgraph: - -```graphql {11} filename="subgraphs/reviews.graphql" -extend type Product @key(fields: "id") { - id: ID! @external - reviews: [Review] - reviewSummary: ReviewSummary -} +To see how schema checks works, let's make a small change to our local schema file. First, we'll +start with a non-breaking change - we'll add a new field to the `Review` type under the **Reviews** +subgraph: +```graphql {6} filename="subgraphs/reviews.graphql" type Review @key(fields: "id") { id: ID! - rating: Float - content: String + body: String + author: User @provides(fields: "username") + product: Product verified: Boolean # new field added } -type ReviewSummary { - totalReviews: Int - averageRating: Float +extend type User @key(fields: "id") { + id: ID! 
@external + username: String @external + reviews: [Review] +} + +extend type Product @key(fields: "upc") { + upc: String! @external + reviews: [Review] } ``` Now, run the Hive CLI with the `schema:check` command and your modified `subgraphs/reviews.graphql` file: + + +{/* Binary */} + + + ```bash hive schema:check \ --registry.accessToken YOUR_TOKEN_HERE \ --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ subgraphs/reviews.graphql ``` + + +{/* NodeJS */} + + + +```bash +npx hive schema:check \ + --registry.accessToken YOUR_TOKEN_HERE \ + --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ + subgraphs/reviews.graphql +``` + + + +{/* Docker */} + + + +For Docker, we need to mount the subgraph schema file into the container. + +```bash +docker run --name graphql-hive-cli --rm \ + -v $(pwd)/subgraphs/reviews.graphql/:/usr/src/app/subgraphs/reviews.graphql \ + ghcr.io/kamilkisiela/graphql-hive/cli \ + schema:check \ + --registry.accessToken "" \ + --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ + ./subgraphs/reviews.graphql +``` + + + + + You should see that Hive successfully detect the change you made, and exists with a `0` exit code, meaning that the schema is compatible, valid and has no breaking changes: @@ -257,32 +398,77 @@ meaning that the schema is compatible, valid and has no breaking changes: Now, are going to try introduce a breaking change. To do that, we'll rename an existing field in the GraphQL schema of the **Reviews** subgraph: -```graphql {9} filename="subgraphs/reviews.graphql" -extend type Product @key(fields: "id") { - id: ID! @external - reviews: [Review] - reviewSummary: ReviewSummary -} - +```graphql {3} filename="subgraphs/reviews.graphql" type Review @key(fields: "id") { id: ID! 
- averageRating: Float # renamed from "rating" - content: String + bodyContents: String # renamed from body + author: User @provides(fields: "username") + product: Product } -type ReviewSummary { - totalReviews: Int - averageRating: Float +extend type User @key(fields: "id") { + id: ID! @external + username: String @external + reviews: [Review] +} + +extend type Product @key(fields: "upc") { + upc: String! @external + reviews: [Review] } ``` + + +{/* Binary */} + + + ```bash hive schema:check \ --registry.accessToken YOUR_TOKEN_HERE \ --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ subgraphs/reviews.graphql ``` + + +{/* NodeJS */} + + + +```bash +npx hive schema:check \ + --registry.accessToken YOUR_TOKEN_HERE \ + --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ + subgraphs/reviews.graphql +``` + + + +{/* Docker */} + + + +For Docker, we need to mount the subgraph schema file into the container. + +```bash +docker run --name graphql-hive-cli --rm \ + -v $(pwd)/subgraphs/reviews.graphql/:/usr/src/app/subgraphs/reviews.graphql \ + ghcr.io/kamilkisiela/graphql-hive/cli \ + schema:check \ + --registry.accessToken "" \ + --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ + ./subgraphs/reviews.graphql +``` + + + + + In that case, you'll notice that Hive CLI exists with a `1` exit code, meaning that the schema has breaking changes, and it's not compatible with the current schema in the registry: @@ -304,36 +490,81 @@ schema. We are going to add a conflict to the **Reviews** service. We are going to add a new field (`price`) to the `Product` type, that conflicts with the `Products` type in the **Products** service. -```graphql {5} filename="subgraphs/reviews.graphql" -extend type Product @key(fields: "id") { - id: ID! 
@external - reviews: [Review] - reviewSummary: ReviewSummary - price: Int # new field added -} - +```graphql {17} filename="subgraphs/reviews.graphql" type Review @key(fields: "id") { id: ID! - rating: Float - content: String + body: String + author: User @provides(fields: "username") + product: Product } -type ReviewSummary { - totalReviews: Int - averageRating: Float +extend type User @key(fields: "id") { + id: ID! @external + username: String @external + reviews: [Review] +} + +extend type Product @key(fields: "upc") { + upc: String! @external + reviews: [Review] + price: String # type conflicts with the products subgraph } ``` Run the Hive CLI with the `schema:check` command again and the modified `subgraphs/reviews.graphql` file: + + +{/* Binary */} + + + ```bash hive schema:check \ --registry.accessToken YOUR_TOKEN_HERE \ --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ subgraphs/reviews.graphql ``` + + +{/* NodeJS */} + + + +```bash +npx hive schema:check \ + --registry.accessToken YOUR_TOKEN_HERE \ + --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ + subgraphs/reviews.graphql +``` + + + +{/* Docker */} + + + +For Docker, we need to mount the subgraph schema file into the container. 
+ +```bash +docker run --name graphql-hive-cli --rm \ + -v $(pwd)/subgraphs/reviews.graphql/:/usr/src/app/subgraphs/reviews.graphql \ + ghcr.io/kamilkisiela/graphql-hive/cli \ + schema:check \ + --registry.accessToken "" \ + --service="reviews" \ + --url="https://federation-demo.theguild.workers.dev/reviews" \ + ./subgraphs/reviews.graphql +``` + + + + + And now you can see that the schema check process has failed, due to conflicts and inconsistencies between the schemas: @@ -348,118 +579,32 @@ between the schemas: - Field Product.price changed type from Price to Int ``` -### Evolve your schema +As you can see schema checks can help you to catch potential issues before you publish your schema +to the registry. -Now that you have your schema published, you can evolve it over time. You can add new types, fields, -and implement new capabilities for your consumers. +Usually, you would run these checks in your subgraphs CI pipeline, to ensure that your subgraph +schema integrates flawlessly with the other subgraphs in the federation project, where schema +publishes are made within the Continious Deployment (CD) pipeline to actually publish the latest +subgraph version to the schema registry. -Let's make a valid change in our schema and push it again to the registry: +### Hive Gateway -```graphql {11} filename="subgraphs/reviews.graphql" -extend type Product @key(fields: "id") { - id: ID! @external - reviews: [Review] - reviewSummary: ReviewSummary -} +The next step is to spin up our GraphQL gateway that will serve the federated GraphQL schema +composed out of the subgraphs we published to the schema registry. -type Review @key(fields: "id") { - id: ID! - rating: Float - content: String - verified: Boolean # new field added -} +The Gateway will delegate the requests from the clients to the appropriate subgraph services, and +then merge the results into a single response. 
-type ReviewSummary { - totalReviews: Int - averageRating: Float -} -``` +The Hive schema registry publishes the supergraph (artifact of the composed schemas that contains +all the information about the subgraphs and fields available) to the high-availability CDN on +Cloudflare. -And publish it to Hive: - -```bash -hive schema:publish \ - --registry.accessToken YOUR_TOKEN_HERE \ - --service="reviews" \ - --url="http://fake.com/reviews/graphql" \ - --author "Me" \ - --commit "Third" \ - subgraphs/reviews.graphql -``` - -You should see now that Hive accepted your published schema and updated the registry: - -```bash copy=false -✔ Schema published -``` - -It's a good timing to check the **History** tab of your Hive target. You should see that a new -schema is published now, and you can see the changes you made: - - - -### Fetch your Supergraph - -Now that your GraphQL schema is stored in the Hive registry, you can access and fetch it through -Hive's CDN (Content Delivery Network). - -The Hive Cloud service leverages the -[CloudFlare Global Network](https://www.cloudflare.com/network/) to deliver your GraphQL schema and -schema metadata. This means that your schema will be available from the nearest location to your -GraphQL gateway, with 100% uptime, regardless of Hive's status. This ensures that everything -required for your GraphQL API is always available, and reduces the risk of depending on Hive as a -single point of failure. -[You can read more about Hive's CDN here](/docs/features/high-availability-cdn). - -To get started with Hive's CDN access, you'll need to create a CDN token from your target's -**Settings** page. You'll see a separate section for managing and creating CDN tokens, called **CDN -Access Token**. - -Click on **Create new CDN Token** to create a new CDN token. Describe your token with an alias, and -click **Create**. Please store this access token securely. 
**You will not be able to see it again.** - - -**Why are Registry and CDN tokens different?** - -We use a separate, externally managed storage to manage CDN tokens to ensure high availability of -your schemas. This approach ensures that your GraphQL schemas are fully secured and highly -available. CDN tokens are read-only and can only fetch schemas from the CDN. They do not have -permissions to perform any other action on your Hive organization. - - - -To use your access token, go to your target's page on Hive's dashboard and click on the **Connect to -CDN** button. You will see a screen with instructions on how to obtain different types of artifacts -from the CDN. For this guide, you can pick the **Apollo Federation Supergraph** artifact. - - - -Copy the URL and let's try to fetch your schema using `curl` (replace `YOUR_HIVE_CDN_TOKEN` with -your CDN token, and `CDN_ENDPOINT_HERE` with the endpoint you copied from Hive's dashboard): - -```bash -curl -L -H "X-Hive-CDN-Key: YOUR_HIVE_CDN_TOKEN" CDN_ENDPOINT_HERE -``` - -You should see that Hive CDN returns the complete Supergraph as an output for that command. - -### GraphQL Gateway - -The next step is to use real subgraphs and a GraphQL gateway that routes GraphQL requests to your -subgraphs. +The gateway can poll the supergraph from the CDN and serve the composed GraphQL schema. ```mermaid flowchart LR - A["Federation Gateway"] -. Poll supergraph .-> C + A["Hive Gateway"] -. Poll supergraph .-> C subgraph hive [Hive] direction TB B[Registry] -. Publish supergraph .-> C["HA CDN @@ -477,25 +622,225 @@ flowchart LR ``` -For an Apollo Federation gateway you may use **Apollo Gateway** (JS) or **Apollo Router** (Rust) -depending on your needs. You can use the following guides to deploy a gateway based on your -technical preference: +#### CDN Access - - - - +For accessing the supergraph, we need to create the CDN access token from the Hive dashboard. 
+ +Navigate to your target's settings page and select the **CDN Tokens** tab. + + + +Click the **Create new CDN token** button, give your token a name and click **Create**. + + + +There will be a confirmation screen showing the CDN token you just created. Make sure to copy it and +keep it safe. + + + +#### Run the Gateway + +Hive Gateway is the Federation Gateway that seamlessly integrates with the Hive registry. You can +run Hive Gateay either as a Docker image, binary or NPM package. + + + {/* Binary */} + + The single executable binary for the Hive Gateway is available without any dependencies. You can + download and setup the binary on your machine. + +```sh +curl -sSL https://graphql-hive.com/install-gateway.sh | sh +``` + +Then you can run the Hive Gateway with the following command. + +```sh +hive-gateway supergraph \ + "" \ + --hive-cdn-key "" +``` + +| Parameter | Description | +| --------------------- | -------------------------------- | +| `hive_cdn_endpoint` | The endpoint of your Hive target | +| `hive_cdn_access_key` | The Hive CDN access key | + + + {/* Docker */} + + +The Docker image for Hive Gateway is published to the GitHub docker registry. You can simply run the +following command to start a Hive Gateway instance from your existing Hive target. + +```sh +docker run --name hive-gateway -rm \ + ghcr.io/ardatan/hive-gateway supergraph \ + "" \ + --hive-cdn-key "" +``` + +| Parameter | Description | +| ----------------------- | -------------------------------- | +| `` | The endpoint of your Hive target | +| `` | The Hive CDN access key | + + + +{/* Node.js Package */} + + + +The Gateway CLI is available as a Node.js package. You can install it using your favorite package +manager. + +```sh npm2yarn +npm install @graphql-hive/gateway +``` + +You can then run the Gateway CLI from your existing Hive target using the following command. 
+ +```sh filename="Run Hive Gateway" +npx hive-gateway supergraph \ + "" \ + --hive-cdn-key "" +``` + +| Parameter | Description | +| --------------------- | -------------------------------- | +| `hive_cdn_endpoint` | The endpoint of your Hive target | +| `hive_cdn_access_key` | The Hive CDN access key | + + + + +If you now navigate to `http://localhost:4000`, you should see the Hive Gateway page with +information about the supergraph and subgraphs. + +import MeshLandingPageImage from '../../../../public/docs/pages/get-started/apollo-federation/mesh-landing-page.png' + + + +Now, if you navigate to `http://localhost:4000/graphql`, you should see the GraphiQL interface where +you can write and execute queries against the supergraph. + +#### Usage reporting + +Up next let's report the usage from our gateway to the registry, so we can see on the dashboard how +the API is being used. + +For this, we simply provide a usage reporting token in addition to our CDN access token. For this +guide, we can simply re-use the registry token we already use for the CLI. + + + {/* Binary */} + +```sh filename="Run Mesh Gateway with registry token" +hive-gateway supergraph \ + "" \ + --hive-cdn-key "" \ + --hive-registry-token "" +``` + +| Parameter | Description | +| ----------------------- | ------------------------------------------- | +| `` | The endpoint of your Hive target | +| `` | The Hive CDN access key | +| `` | The Hive registry token for usage reporting | + + + {/* Docker */} + + +The Docker image for Hive Gateway is published to the GitHub docker registry. You can simply run the +following command to start a Hive Gateway instance from your existing Hive target. 
+ +```sh +docker run --name hive-gateway -rm \ + ghcr.io/ardatan/hive-gateway supergraph \ + "" \ + --hive-cdn-key "" \ + --hive-registry-token "" +``` + +| Parameter | Description | +| ----------------------- | ------------------------------------------- | +| `` | The endpoint of your Hive target | +| `` | The Hive CDN access key | +| `` | The Hive registry token for usage reporting | + + + +{/* Node.js Package */} + + + +```sh filename="Run Mesh Gateway with registry token" +npx hive-gateway supergraph \ + "" \ + --hive-cdn-key "" \ + --hive-registry-token "" +``` + +| Parameter | Description | +| ----------------------- | ------------------------------------------- | +| `` | The endpoint of your Hive target | +| `` | The Hive CDN access key | +| `` | The Hive registry token for usage reporting | + + + + +After starting the gateway with the usage reporting token, we can no execute some queries using the +gateways built-in GraphiQL interface. + +```graphql +{ + topProducts { + upc + name + price + } +} +``` + +[Execute Query on Hive Gateway GraphiQL](http://localhost:4000/graphql?query=%7B%0A++topProducts+%7B%0A++++upc%0A++++name%0A++++price%0A++%7D%0A%7D) + +After executing the query, a few times, let's switch back to the Hive dashboard and navigate to the +insights page. + +Here we can now see the GraphQL operations from our Gateway being reported to the registry. ### Next Steps -Now that you use the basic functionality of Hive as a schema registry, we recommend following other -powerful features of Hive: +Congratulations on publishing your first subgraph schemas to the Hive schema registry, composing +your supergraph, spinning up your own Federation Gateway serving the unified GraphQL schema and +reporting the usage data! 
-- [External Schema Compostion for Apollo Federation v2](/docs/management/external-schema-composition) -- [Usage Reporting and Monintoring](/docs/features/usage-reporting) -- [Conditional Breaking Changes](/docs/management/targets#conditional-breaking-changes) -- [Alerts and Notifications](/docs/management/projects#alerts-and-notifications) -- [CI/CD Integration](/docs/integrations/ci-cd) -- [Schema Policies](/docs/features/schema-policy) +From here you can continue to explore the Hive dashboard, or dive deeper into the documentation to +learn about all the features of the Hive platform. + + + + + + diff --git a/packages/web/docs/src/pages/docs/get-started/first-steps.mdx b/packages/web/docs/src/pages/docs/get-started/first-steps.mdx index e46da4923..cabe4a4ae 100644 --- a/packages/web/docs/src/pages/docs/get-started/first-steps.mdx +++ b/packages/web/docs/src/pages/docs/get-started/first-steps.mdx @@ -1,5 +1,5 @@ import NextImage from 'next/image' -import { Callout, Cards, Steps } from '@theguild/components' +import { Callout, Cards, Steps, Tabs } from '@theguild/components' import orgImage from '../../../../public/docs/pages/first-steps/org.png' import projectImage from '../../../../public/docs/pages/first-steps/project.png' import signupImage from '../../../../public/docs/pages/first-steps/signup.png' @@ -50,9 +50,36 @@ You can create as many organizations as you want, and you can be a member of mul ### Install Hive CLI -Hive CLI is a command-line tool that allows you to manage your GraphQL schemas terminal. +Hive CLI is a command-line tool for managing your Hive projects and schemas from the terminal. It +will be used for publishing schemas and apps to the Hive registry. 
-#### NodeJS + + +{/* Binary */} + + + +If you are running a non-JavaScript project, you can download the prebuilt binary of Hive CLI using +the following command: + +```bash +curl -sSL https://graphql-hive.com/install.sh | sh +``` + +To download a specific version, follow instructions from the +["Hive CLI installation"](/docs/api-reference/cli#specific-version) chapter. + +Run the following command to verify the installation: + +```bash +hive --version +``` + + + +{/* NodeJS */} + + If you are running a JavaScript/NodeJS project, you can install Hive CLI from the `npm` registry: @@ -65,35 +92,48 @@ npm i -D @graphql-hive/cli using a global installation. -#### Binary - -If you are running a non-JavaScript project, you can download the prebuilt binary of Hive CLI using -the following command: +Verify the downloaded image by running the following command: ```bash -curl -sSL https://graphql-hive.com/install.sh | sh +npx hive --version ``` -To download a specific version, follow instructions from the -["Hive CLI installation"](/docs/api-reference/cli#specific-version) chapter. + + +{/* Docker */} + + + +For a docker-based project, you can use the official Hive CLI Docker image. + +```bash +docker pull ghcr.io/kamilkisiela/graphql-hive/cli +``` + +Verify the downloaded image by running the following command: + +```bash +docker run --name graphql-hive-cli --rm ghcr.io/kamilkisiela/graphql-hive/cli --version +``` + + + + ### Create Hive Project A Hive **project** represents a GraphQL API project running a GraphQL schema, created under an organization. -Within a Hive **project**, you can create **targets** (which are equivalent to runtime environments) -and manage different schemas across different contextual runtimes. - Hive supports the following project types: -- **Single Schema**: a GraphQL project that has a single GraphQL schema developed as a standalone. 
+- **Apollo Federation**: composition of multiple GraphQL services following the + [Apollo Federation specification](https://www.apollographql.com/docs/federation/subgraph-spec/) + into a single unified graph. +- **Single Schema**: a simple monolithic GraphQL schema - **Schema Stitching**: a form of remote schema merging allowing developers to merge any GraphQL - schema(s), under one or many gateways. You can use either direct - [Schema Stitching](https://the-guild.dev/graphql/stitching) or - [GraphQL Mesh](https://the-guild.dev/graphql/mesh) for this project type. -- **Apollo Federation**: a form of remote schema merging developed according to the - [Federation specification](https://www.apollographql.com/docs/federation/subgraph-spec/). + schema(s), under one or many gateways using + [Schema Stitching](https://the-guild.dev/graphql/stitching) Please note that a **project** type cannot be changed once it is created. However, you can always @@ -109,17 +149,12 @@ organization's main page: className="mt-10 max-w-lg rounded-lg drop-shadow-md" /> -Once you have your project created, you should notice that 3 **targets** are automatically created -for you: `development`, `staging` and `production`. These are just defaults, and you can feel free -to change/delete them as you see fit. - -The following guides will help you to get started with your Hive **project**, depending on the type -of project you selected: +Continue with one of the following guidebased on the type of project you selected. 
+ - diff --git a/packages/web/docs/src/pages/docs/get-started/schema-stitching.mdx b/packages/web/docs/src/pages/docs/get-started/schema-stitching.mdx index 32f4a8f41..15d563323 100644 --- a/packages/web/docs/src/pages/docs/get-started/schema-stitching.mdx +++ b/packages/web/docs/src/pages/docs/get-started/schema-stitching.mdx @@ -7,7 +7,7 @@ import stitchingHistoryImage from '../../../../public/docs/pages/guides/stitchin import stitchingUserSchemaImage from '../../../../public/docs/pages/guides/stitching-users-schema.png' import tokenImage from '../../../../public/docs/pages/guides/token.png' -# Guide: Schema-Stitching Project +# Schema-Stitching Project Once you've created a Hive project of type **Schema-Stitching**, you can simply push your GraphQL schema to the registry. This guide will guide you through the basics of schema pushing, checking and @@ -424,8 +424,7 @@ The Hive Cloud service leverages the schema metadata. This means that your schema will be available from the nearest location to your GraphQL gateway, with 100% uptime, regardless of Hive's status. This ensures that everything required for your GraphQL API is always available, and reduces the risk of depending on Hive as a -single point of failure. -[You can read more about Hive's CDN here](/docs/features/high-availability-cdn). +single point of failure. [You can read more about Hive's CDN here](/docs/high-availability-cdn). To get started with Hive's CDN access, you'll need to create a CDN token from your target's **Settings** page. 
You'll see a separate section for managing and creating CDN tokens, called **CDN @@ -535,12 +534,12 @@ Make sure to add environment variables: Now that you use the basic functionality of Hive as a schema registry, we recommend following other powerful features of Hive: -- [Schema Stitching Integration with Hive](/docs/integrations/schema-stitching) +- [Schema Stitching Integration with Hive](/docs/other-integrations/schema-stitching) - [How to implement and run subschemas and gateway with Schema-Stitching](https://the-guild.dev/graphql/stitching/docs/approaches/stitching-directives#schema-setup) -- [Usage Reporting and Monintoring](/docs/features/usage-reporting) +- [Usage Reporting and Monitoring](/docs/schema-registry/usage-reporting) - [Conditional Breaking Changes](/docs/management/targets#conditional-breaking-changes) - [Alerts and Notifications](/docs/management/projects#alerts-and-notifications) -- [CI/CD Integration](/docs/integrations/ci-cd) -- [Schema Policies](/docs/features/schema-policy) +- [CI/CD Integration](/docs/other-integrations/ci-cd) +- [Schema Policies](/docs/schema-registry/schema-policy) diff --git a/packages/web/docs/src/pages/docs/get-started/single-project.mdx b/packages/web/docs/src/pages/docs/get-started/single-project.mdx index 711dd4474..5e1a7d5be 100644 --- a/packages/web/docs/src/pages/docs/get-started/single-project.mdx +++ b/packages/web/docs/src/pages/docs/get-started/single-project.mdx @@ -4,7 +4,7 @@ import cdnTokenImage from '../../../../public/docs/pages/guides/cdn_token.png' import historyImage from '../../../../public/docs/pages/guides/history.png' import tokenImage from '../../../../public/docs/pages/guides/token.png' -# Guide: Single Schema Project +# Single Schema Project Once you've created a Hive project of type **Single Schema**, you can simply push your GraphQL schema to the registry. 
This guide will guide you through the basics of schema pushing, checking and @@ -20,7 +20,7 @@ read GraphQL schemas from `.graphql` files in your local filesystem. **Using code-first approach to write your GraphQL schema?** - Please refer to the [Code-First Guide](/docs/integrations/code-first) to learn how to use the Hive with + Please refer to the [Code-First Guide](/docs/other-integrations/code-first) to learn how to use the Hive with code-first approach. @@ -265,8 +265,7 @@ The Hive Cloud service leverages the schema metadata. This means that your schema will be available from the nearest location to your GraphQL gateway, with 100% uptime, regardless of Hive's status. This ensures that everything required for your GraphQL API is always available, and reduces the risk of depending on Hive as a -single point of failure. -[You can read more about Hive's CDN here](/docs/features/high-availability-cdn). +single point of failure. [You can read more about Hive's CDN here](/docs/high-availability-cdn). To get started with Hive's CDN access, you'll need to create a CDN token from your target's **Settings** page. 
You'll see a separate section for managing and creating CDN tokens, called **CDN @@ -309,10 +308,10 @@ You should see that Hive CDN returns your full GraphQL schema as an output for t Now that you use the basic functionality of Hive as a schema registry, we recommend following other powerful features of Hive: -- [CI/CD Integration](/docs/integrations/ci-cd) -- [Usage Reporting and Monitoring](/docs/features/usage-reporting) +- [CI/CD Integration](/docs/other-integrations/ci-cd) +- [Usage Reporting and Monitoring](/docs/schema-registry/usage-reporting) - [Conditional Breaking Changes](/docs/management/targets#conditional-breaking-changes) - [Alerts and Notifications](/docs/management/projects#alerts-and-notifications) -- [Schema Policies](/docs/features/schema-policy) +- [Schema Policies](/docs/schema-registry/schema-policy) diff --git a/packages/web/docs/src/pages/docs/features/high-availability-cdn.mdx b/packages/web/docs/src/pages/docs/high-availability-cdn.mdx similarity index 95% rename from packages/web/docs/src/pages/docs/features/high-availability-cdn.mdx rename to packages/web/docs/src/pages/docs/high-availability-cdn.mdx index b90dd6a5f..8ffeb8439 100644 --- a/packages/web/docs/src/pages/docs/features/high-availability-cdn.mdx +++ b/packages/web/docs/src/pages/docs/high-availability-cdn.mdx @@ -1,6 +1,6 @@ import NextImage from 'next/image' import { Callout } from '@theguild/components' -import cdnTokenImage from '../../../../public/docs/pages/guides/cdn_token.png' +import cdnTokenImage from '../../../public/docs/pages/guides/cdn_token.png' # High-Availability CDN @@ -80,8 +80,8 @@ curl -v -H 'X-Hive-CDN-Key: CDN_ACCESS_TOKEN' \ Further reading: -- [Integrating Hive CDN with Apollo Gateway](/docs/integrations/apollo-gateway) -- [Integrating Hive CDN with Apollo Router](/docs/integrations/apollo-router) +- [Integrating Hive CDN with Apollo Gateway](/docs/other-integrations/apollo-gateway) +- [Integrating Hive CDN with Apollo 
Router](/docs/other-integrations/apollo-router) - [Get started with Hive and Apollo Fededation](/docs/get-started/apollo-federation) ### Hive Metadata diff --git a/packages/web/docs/src/pages/docs/index.mdx b/packages/web/docs/src/pages/docs/index.mdx index 4934df2f6..f015217a6 100644 --- a/packages/web/docs/src/pages/docs/index.mdx +++ b/packages/web/docs/src/pages/docs/index.mdx @@ -2,35 +2,26 @@ import { Callout, Cards } from '@theguild/components' # Introduction to Hive -**Hive** is a schema registry for GraphQL. With Hive you manage and collaborate on all your GraphQL -schemas and GraphQL workflows, regardless of the underlying strategy, engine or framework you're -using: this includes [Schema Stitching](https://the-guild.dev/graphql/stitching), Apollo Federation, -or just a traditional monolith approach. +**Hive** is a GraphQL schema registry for managing and collaborating on all your **Apollo +Federation**, **Monolithic** or **[Schema Stitching](https://the-guild.dev/graphql/stitching)** +GraphQL workflows. -Hive as a schema registry serves the main purpose of preventing breaking changes. This ensures that -your GraphQL API stays up-to-date and running smoothly. This allows you to plan ahead and make the -necessary modifications to your schema in a timely manner. +The Hive platform gives you the tools you need to gain insights, make decisions, and evolve your +GraphQL API with confidence. -However, it is important to note that determining what constitutes a breaking change can be a -complex task. It requires a thorough understanding of your GraphQL API, consumers and real-world -traffic patterns. Hive provides a set of tools to help you with this task. 
- -In addition to a basic schema registry functionality, Hive offers a set of tools to assist you in -managing and collaborating on your GraphQL projects: - -- Observability for operation performance, end-user consumption, and operation success rate -- Conditional Breaking Changes based on actual usage collected from your GraphQL gateway -- Alerts and notifications -- Schema diffing and validations -- Schema history and versioning -- Schema policies and best-practices validation +- Track the evolution of your GraphQL schema +- Analytics and Observability on how your GraphQL API is being used +- Prevent schema changes that break clients based on real-world traffic +- Notify teams with alerts and notifications +- Enforce schema design best practices +- Secure your GraphQL API with the Hive Gateway Hive and all of its components are developed and managed as an [MIT open-source project](https://github.com/kamilkisiela/graphql-hive). -You can use it in our Hive Cloud service or run it as a self-hosted solution: +To get started, you can choose between our Hive Cloud service or run it as a self-hosted solution. 
diff --git a/packages/web/docs/src/pages/docs/integrations/apollo-server.mdx b/packages/web/docs/src/pages/docs/other-integrations/apollo-server.mdx similarity index 100% rename from packages/web/docs/src/pages/docs/integrations/apollo-server.mdx rename to packages/web/docs/src/pages/docs/other-integrations/apollo-server.mdx diff --git a/packages/web/docs/src/pages/docs/integrations/ci-cd.mdx b/packages/web/docs/src/pages/docs/other-integrations/ci-cd.mdx similarity index 100% rename from packages/web/docs/src/pages/docs/integrations/ci-cd.mdx rename to packages/web/docs/src/pages/docs/other-integrations/ci-cd.mdx diff --git a/packages/web/docs/src/pages/docs/integrations/code-first.mdx b/packages/web/docs/src/pages/docs/other-integrations/code-first.mdx similarity index 98% rename from packages/web/docs/src/pages/docs/integrations/code-first.mdx rename to packages/web/docs/src/pages/docs/other-integrations/code-first.mdx index ae2d6a2eb..ed874022b 100644 --- a/packages/web/docs/src/pages/docs/integrations/code-first.mdx +++ b/packages/web/docs/src/pages/docs/other-integrations/code-first.mdx @@ -81,7 +81,7 @@ printer.print_schema ``` You can also refer to the -[runtime integration with GraphQL-Ruby and Hive](/docs/integrations/graphql-ruby). +[runtime integration with GraphQL-Ruby and Hive](/docs/other-integrations/graphql-ruby). 
## GraphQL-Crystal diff --git a/packages/web/docs/src/pages/docs/integrations/envelop.mdx b/packages/web/docs/src/pages/docs/other-integrations/envelop.mdx similarity index 97% rename from packages/web/docs/src/pages/docs/integrations/envelop.mdx rename to packages/web/docs/src/pages/docs/other-integrations/envelop.mdx index f97d920c9..fe7e7564e 100644 --- a/packages/web/docs/src/pages/docs/integrations/envelop.mdx +++ b/packages/web/docs/src/pages/docs/other-integrations/envelop.mdx @@ -4,7 +4,7 @@ import { Callout } from '@theguild/components' If you are using GraphQL Yoga, please use the dedicated [GraphQL Yoga - plugin](/docs/integrations/graphql-yoga) for Hive. + plugin](/docs/other-integrations/graphql-yoga) for Hive. ## Installation diff --git a/packages/web/docs/src/pages/docs/integrations/graphql-code-generator.mdx b/packages/web/docs/src/pages/docs/other-integrations/graphql-code-generator.mdx similarity index 80% rename from packages/web/docs/src/pages/docs/integrations/graphql-code-generator.mdx rename to packages/web/docs/src/pages/docs/other-integrations/graphql-code-generator.mdx index 70edd5d82..afccac3bb 100644 --- a/packages/web/docs/src/pages/docs/integrations/graphql-code-generator.mdx +++ b/packages/web/docs/src/pages/docs/other-integrations/graphql-code-generator.mdx @@ -1,8 +1,8 @@ # GraphQL Code Generator [GraphQL Code Generator](https://the-guild.dev/graphql/codegen) is a tool that generates types from -your GraphQL schema and operations. You can use -[High-Availability CDN](/docs/features/high-availability-cdn) to provide the schema. +your GraphQL schema and operations. You can use [High-Availability CDN](/docs/high-availability-cdn) +to provide the schema. 
## Setting up the config diff --git a/packages/web/docs/src/pages/docs/integrations/graphql-ruby.mdx b/packages/web/docs/src/pages/docs/other-integrations/graphql-ruby.mdx similarity index 100% rename from packages/web/docs/src/pages/docs/integrations/graphql-ruby.mdx rename to packages/web/docs/src/pages/docs/other-integrations/graphql-ruby.mdx diff --git a/packages/web/docs/src/pages/docs/integrations/graphql-yoga.mdx b/packages/web/docs/src/pages/docs/other-integrations/graphql-yoga.mdx similarity index 100% rename from packages/web/docs/src/pages/docs/integrations/graphql-yoga.mdx rename to packages/web/docs/src/pages/docs/other-integrations/graphql-yoga.mdx diff --git a/packages/web/docs/src/pages/docs/integrations/lighthouse.mdx b/packages/web/docs/src/pages/docs/other-integrations/lighthouse.mdx similarity index 100% rename from packages/web/docs/src/pages/docs/integrations/lighthouse.mdx rename to packages/web/docs/src/pages/docs/other-integrations/lighthouse.mdx diff --git a/packages/web/docs/src/pages/docs/integrations/schema-stitching.mdx b/packages/web/docs/src/pages/docs/other-integrations/schema-stitching.mdx similarity index 98% rename from packages/web/docs/src/pages/docs/integrations/schema-stitching.mdx rename to packages/web/docs/src/pages/docs/other-integrations/schema-stitching.mdx index ce6f7e7d9..57ef1fcac 100644 --- a/packages/web/docs/src/pages/docs/integrations/schema-stitching.mdx +++ b/packages/web/docs/src/pages/docs/other-integrations/schema-stitching.mdx @@ -26,7 +26,7 @@ the all services schemas published to Hive. ### Fetching Services Info from CDN Once you have all services schemas pushed to Hive, and available in the CDN, you can -[create a CDN Access Token and gain access to the CDN endpoint](/docs/features/high-availability-cdn#cdn-access-tokens). +[create a CDN Access Token and gain access to the CDN endpoint](/docs/high-availability-cdn#cdn-access-tokens). 
> In this example, we are using GraphQL-Yoga to create the Gateway server. diff --git a/packages/web/docs/src/pages/docs/features/_meta.ts b/packages/web/docs/src/pages/docs/schema-registry/_meta.ts similarity index 58% rename from packages/web/docs/src/pages/docs/features/_meta.ts rename to packages/web/docs/src/pages/docs/schema-registry/_meta.ts index c913fdde7..f6e75ae94 100644 --- a/packages/web/docs/src/pages/docs/features/_meta.ts +++ b/packages/web/docs/src/pages/docs/schema-registry/_meta.ts @@ -1,8 +1,8 @@ export default { - 'schema-registry': 'Schema Registry', + index: 'Introduction', 'usage-reporting': 'Usage Reporting and Monitoring', - 'high-availability-cdn': 'High-Availability CDN', 'schema-policy': 'Schema Policies', - laboratory: 'Laboratory', + contracts: 'Schema Contracts', 'app-deployments': 'App Deployments (Persisted Documents)', + 'external-schema-composition': 'External Schema Composition', }; diff --git a/packages/web/docs/src/pages/docs/features/app-deployments.mdx b/packages/web/docs/src/pages/docs/schema-registry/app-deployments.mdx similarity index 95% rename from packages/web/docs/src/pages/docs/features/app-deployments.mdx rename to packages/web/docs/src/pages/docs/schema-registry/app-deployments.mdx index f33d54369..bcb4ef92e 100644 --- a/packages/web/docs/src/pages/docs/features/app-deployments.mdx +++ b/packages/web/docs/src/pages/docs/schema-registry/app-deployments.mdx @@ -47,7 +47,7 @@ persisted queries) on your GraphQL Gateway or server, which provides the followi ```mermaid flowchart LR C["GraphQL API or Gateway -(Yoga, Apollo Server, Mesh, Apollo Router)"] +(Hive Gateway, GraphQL Yoga, Apollo Server, Apollo Router)"] B["Hive CDN"] D["App"] @@ -227,7 +227,31 @@ by your Gateway. Hive serves as the source of truth for the allowed persisted documents and provides a CDN for fetching these documents as they are requested. 
- + + +{/* Hive Gateway */} + + + +For Hive Gateway you can use the Hive configuration for resolving persisted documents. Adjust your +`gateway.config.ts` file as follows. + +```ts filename="gateway.config.ts" +import { defineConfig } from '@graphql-hive/gateway' + +export const gatewayConfig = defineConfig({ + persistedDocuments: { + type: 'hive', + endpoint: '', + token: '' + } +}) +``` + +For further information, please refer to the +[Hive Gateway documentation for persisted documents](/docs/gateway/persisted-documents). + + {/* GraphQL Yoga */} @@ -302,33 +326,13 @@ For further configuration options, please refer to the -{/* GraphQL Mesh */} +{/* Apollo Router */} -For GraphQL Mesh you can use the Hive configuration for resolving persisted documents. Adjust your -`mesh.config.ts` file as follows. - -```typescript filename="mesh.config.ts" {7-13} -import { defineConfig } from '@graphql-mesh/serve-cli' - -export default defineConfig({ - hive: { - // The endpoint of CDN - endpoint: 'https://cdn.graphql-hive.com//supergraph', - experimental__persistedDocuments: { - cdn: { - // replace and with your values - endpoint: 'https://cdn.graphql-hive.com/', - accessToken: '' - } - } - } -}) -``` - -For further configuration options, please refer to the -[Hive Client API reference](/docs/api-reference/client). +Using the Hive Schema Registry for persisted documents with Apollo Router is currently not +supported. Progress of the support is tracked in +[this GitHub issue](https://github.com/kamilkisiela/graphql-hive/issues/5498). 
diff --git a/packages/web/docs/src/pages/docs/management/contracts.mdx b/packages/web/docs/src/pages/docs/schema-registry/contracts.mdx similarity index 100% rename from packages/web/docs/src/pages/docs/management/contracts.mdx rename to packages/web/docs/src/pages/docs/schema-registry/contracts.mdx diff --git a/packages/web/docs/src/pages/docs/management/external-schema-composition.mdx b/packages/web/docs/src/pages/docs/schema-registry/external-schema-composition.mdx similarity index 98% rename from packages/web/docs/src/pages/docs/management/external-schema-composition.mdx rename to packages/web/docs/src/pages/docs/schema-registry/external-schema-composition.mdx index 366d12bb3..d57b186da 100644 --- a/packages/web/docs/src/pages/docs/management/external-schema-composition.mdx +++ b/packages/web/docs/src/pages/docs/schema-registry/external-schema-composition.mdx @@ -251,8 +251,8 @@ In case of a failure, you'll see a red cross with the reason of the failure: /> Now you should be able to use the **External Composition** feature in your project. -[Publish a GraphQL schema](/docs/features/schema-registry#publish-a-schema) or perform a -[GraphQL schema check](/docs/features/schema-registry#check-a-schema) to validate your setup. +[Publish a GraphQL schema](/docs/schema-registry#publish-a-schema) or perform a +[GraphQL schema check](/docs/schema-registry#check-a-schema) to validate your setup. 
diff --git a/packages/web/docs/src/pages/docs/features/schema-registry.mdx b/packages/web/docs/src/pages/docs/schema-registry/index.mdx similarity index 87% rename from packages/web/docs/src/pages/docs/features/schema-registry.mdx rename to packages/web/docs/src/pages/docs/schema-registry/index.mdx index b0aacf139..a2422617a 100644 --- a/packages/web/docs/src/pages/docs/features/schema-registry.mdx +++ b/packages/web/docs/src/pages/docs/schema-registry/index.mdx @@ -1,7 +1,6 @@ import NextImage from 'next/image' import { Callout, Cards } from '@theguild/components' import schemaHistoryDiffImage from '../../../../public/docs/pages/features/history-diff.png' -import schemaExplorerImage from '../../../../public/docs/pages/features/schema-explorer.png' import schemaHistoryImage from '../../../../public/docs/pages/guides/history.png' # Schema Registry @@ -82,8 +81,8 @@ Hive supports the following project types: This setup works for most frameworks and tools in the GraphQL ecosystem. - **Schema Stitching**: a form of remote schema merging allowing developers to merge any GraphQL schema(s), under one or many gateways. You can use either direct - [Schema Stitching](https://the-guild.dev/graphql/stitching) or - [GraphQL Mesh](https://the-guild.dev/graphql/mesh) for this project type. + [Schema Stitching](https://the-guild.dev/graphql/stitching) or [Hive gateway](/docs/gateway) for + this project type. - **Apollo Federation**: a form of remote schema merging developed according to the [Federation specification](https://www.apollographql.com/docs/federation/subgraph-spec/). @@ -193,9 +192,8 @@ Sometimes it is useful to fetch a schema (SDL or Supergraph) from Hive, for exam local development. This can be done using the `schema:fetch` command. - Don't confuse this with the [high-availability CDN](/docs/features/high-availability-cdn.mdx). - This command is used to fetch a schema from the API where the CDN always represents the latest - valid schema. 
+ Don't confuse this with the [high-availability CDN](/docs/high-availability-cdn.mdx). This command + is used to fetch a schema from the API where the CDN always represents the latest valid schema. - [Fetch a schema using Hive CLI](/docs/api-reference/cli#fetch-a-schema-from-the-registry) @@ -247,31 +245,3 @@ If you wish to have a more technical view of the changes, you can use the `diff` src={schemaHistoryDiffImage} className="mt-6 max-w-2xl rounded-lg drop-shadow-md" /> - -## Schema Explorer - -The Hive Schema Explorer is a useful tool that can provide you with a comprehensive understanding of -your GraphQL schema. Not only does it allow you to explore the different types and fields of your -schema, but it also enables you to gain a deeper understanding of the arguments and their respective -input types. - - - -### Schema Usage and Coverage - -With [Usage Reporting](/docs/features/usage-reporting) feature enabled, you'll be able to see an -overview of the schema usage and coverage (for types, fields and input types), based on the GraphQL -operations you report to Hive. - -This feature is useful if you wish to understand how your GraphQL schema is being used and queried, -and understand the impact of changes you make to your schema. - - - The maximum duration is defined by the retention of your [Hive - plan](/docs/management/organizations#subscription-and-billing), and depends on the data you - already sent before to Hive. 
- diff --git a/packages/web/docs/src/pages/docs/features/schema-policy.mdx b/packages/web/docs/src/pages/docs/schema-registry/schema-policy.mdx similarity index 100% rename from packages/web/docs/src/pages/docs/features/schema-policy.mdx rename to packages/web/docs/src/pages/docs/schema-registry/schema-policy.mdx diff --git a/packages/web/docs/src/pages/docs/features/usage-reporting.mdx b/packages/web/docs/src/pages/docs/schema-registry/usage-reporting.mdx similarity index 89% rename from packages/web/docs/src/pages/docs/features/usage-reporting.mdx rename to packages/web/docs/src/pages/docs/schema-registry/usage-reporting.mdx index 109952043..6ea699c4a 100644 --- a/packages/web/docs/src/pages/docs/features/usage-reporting.mdx +++ b/packages/web/docs/src/pages/docs/schema-registry/usage-reporting.mdx @@ -4,7 +4,6 @@ import monitoringViewImage from '../../../../public/docs/pages/features/monitori import usageClientsImage from '../../../../public/docs/pages/features/usage-clients.png' import usageLatencyImage from '../../../../public/docs/pages/features/usage-latency-over-time.png' import usageOperationsOverTimeImage from '../../../../public/docs/pages/features/usage-operations-over-time.png' -import usageOperationsImage from '../../../../public/docs/pages/features/usage-operations.png' import usageRpmImage from '../../../../public/docs/pages/features/usage-rpm-over-time.png' import usageStatsImage from '../../../../public/docs/pages/features/usage-stats.png' @@ -17,7 +16,7 @@ following purposes: performance, error-rate, and other metrics. 2. **Schema Usage and Coverage**: understand how your consumers are using your GraphQL schema, and what parts of the schema are not being used at all (see - [Schema Usage and Coverage](/docs/features/schema-registry#schema-explorer)). + [Schema Usage and Coverage](/docs/schema-registry#schema-explorer)). 3. 
**Schema Evolution**: with the knowledge of what GraphQL fields are being used, you can confidently evolve your schema without breaking your consumers (see [Conditional Breaking Changes](/docs/management/targets#conditional-breaking-changes)). @@ -118,15 +117,3 @@ performance: src={usageLatencyImage} className="mt-10 max-w-2xl rounded-lg drop-shadow-md" /> - -### Insights - -A list of all the GraphQL operations executed by your consumers, their performance metrics and total -count. By clicking on a specific query, you'll be able to see the full list of fields and arguments -used in the operation. - - diff --git a/packages/web/docs/src/pages/docs/self-hosting/external-composition.mdx b/packages/web/docs/src/pages/docs/self-hosting/external-composition.mdx index 466a44fb5..2a3d7836f 100644 --- a/packages/web/docs/src/pages/docs/self-hosting/external-composition.mdx +++ b/packages/web/docs/src/pages/docs/self-hosting/external-composition.mdx @@ -6,8 +6,8 @@ import { Callout } from '@theguild/components' The process of running the self-host version with External Composition is similar to the process of running it with Hive Cloud version - you'll need to run the -[external composition service](/docs/management/external-schema-composition). The difference is that -you'll be able to run it locally, instead of running it on a publicly available service. +[external composition service](/docs/schema-registry/external-schema-composition). The difference is +that you'll be able to run it locally, instead of running it on a publicly available service. We provide a [Docker image](https://github.com/kamilkisiela/graphql-hive/pkgs/container/graphql-hive%2Fcomposition-federation-2) @@ -53,4 +53,4 @@ hostname `http://composition-federation-2:3069/compose`. 
Also, make sure to use ![External Composition Federation 2](/docs/pages/self-hosting/federation-2/external-composition-config.png) -[You can read more about the process of configuring the external composition service in the your Hive instance here](/docs/management/external-schema-composition#connect-to-hive) +[You can read more about the process of configuring the external composition service in your Hive instance here](/docs/schema-registry/external-schema-composition#connect-to-hive) diff --git a/packages/web/docs/src/pages/docs/use-cases/apollo-studio.mdx b/packages/web/docs/src/pages/docs/use-cases/apollo-studio.mdx index b93bc9be1..eed121008 100644 --- a/packages/web/docs/src/pages/docs/use-cases/apollo-studio.mdx +++ b/packages/web/docs/src/pages/docs/use-cases/apollo-studio.mdx @@ -14,10 +14,10 @@ individual developers and teams seeking a powerful GraphQL development environme ## Seamless Integration with Apollo Ecosystem One of GraphQL Hive's standout features is its **seamless integration with the Apollo ecosystem**. -It harmoniously works alongside the [Apollo Router](/docs/integrations/apollo-router) and -[Apollo Gateway](/docs/integrations/apollo-gateway), ensuring a consistent and smooth experience for -users. This integration extends to supporting -[Apollo Federation v1 and v2](/docs/get-started/apollo-federarion), allowing you to leverage the +It harmoniously works alongside the [Apollo Router](/docs/other-integrations/apollo-router) and +[Apollo Gateway](/docs/other-integrations/apollo-gateway), ensuring a consistent and smooth +experience for users. This integration extends to supporting +[Apollo Federation v1 and v2](/docs/get-started/apollo-federation), allowing you to leverage the advantages of federated architectures while benefiting from GraphQL Hive's capabilities. 
```mermaid diff --git a/packages/web/docs/src/pages/product-updates/2023-11-16-schema-check-breaking-change-approval-context.mdx b/packages/web/docs/src/pages/product-updates/2023-11-16-schema-check-breaking-change-approval-context.mdx index edcbeb206..c7009fad9 100644 --- a/packages/web/docs/src/pages/product-updates/2023-11-16-schema-check-breaking-change-approval-context.mdx +++ b/packages/web/docs/src/pages/product-updates/2023-11-16-schema-check-breaking-change-approval-context.mdx @@ -33,5 +33,5 @@ hive schema:check --contextId "pull-request-21" ./my-schema.graphql More Information: -- [Approve breaking schema change](/docs/features/schema-registry#approve-breaking-schema-changes) +- [Approve breaking schema change](/docs/schema-registry#approve-breaking-schema-changes) - [Checking a schema using Hive CLI](/docs/api-reference/cli#check-a-schema) diff --git a/packages/web/docs/src/pages/product-updates/2024-03-26-subscription-defer-stream-usage-reporting.mdx b/packages/web/docs/src/pages/product-updates/2024-03-26-subscription-defer-stream-usage-reporting.mdx index 5eb10ab0a..35b51ea31 100644 --- a/packages/web/docs/src/pages/product-updates/2024-03-26-subscription-defer-stream-usage-reporting.mdx +++ b/packages/web/docs/src/pages/product-updates/2024-03-26-subscription-defer-stream-usage-reporting.mdx @@ -20,8 +20,8 @@ conditional breaking changes in schema checks and schema publishes. We also updated the usage reporting recipes for GraphQL Yoga and Apollo Server for the recommended GraphQL over HTTP, GraphQL over SSE, and GraphQL over WebSocket (via `graphql-ws`) setup. 
-- [GraphQL Yoga Usage Reporting](/docs/integrations/graphql-yoga#usage-reporting) -- [Apollo Server Usage Reporting](/docs/integrations/apollo-server#usage-reporting) +- [GraphQL Yoga Usage Reporting](/docs/other-integrations/graphql-yoga#usage-reporting) +- [Apollo Server Usage Reporting](/docs/other-integrations/apollo-server#usage-reporting) **For Apollo Router users:** If you need Subscription Usage Reporting via Apollo Router, please reach out to us and we can figure out what might be possible. As an alternative, we are currently diff --git a/packages/web/docs/src/pages/product-updates/2024-04-04-native-federation-v2-support.mdx b/packages/web/docs/src/pages/product-updates/2024-04-04-native-federation-v2-support.mdx index 40b1c17b5..fba3f5016 100644 --- a/packages/web/docs/src/pages/product-updates/2024-04-04-native-federation-v2-support.mdx +++ b/packages/web/docs/src/pages/product-updates/2024-04-04-native-federation-v2-support.mdx @@ -9,8 +9,8 @@ We're excited to announce that **Apollo Federation v2 is now supported by defaul created projects. This eliminates the need to set up -[External Composition](../docs/management/external-schema-composition) or manually opt-in for Apollo -Federation v2, as it's now the default behavior. +[External Composition](/docs/schema-registry/external-schema-composition) or manually opt-in for +Apollo Federation v2, as it's now the default behavior. In October 2023 we announced an [early access to Native Apollo Federation v2](./2023-10-10-native-federation-2.mdx). 
Since then, we diff --git a/packages/web/docs/src/pages/product-updates/2024-07-30-persisted-documents-app-deployments-preview.mdx b/packages/web/docs/src/pages/product-updates/2024-07-30-persisted-documents-app-deployments-preview.mdx index bf0f4aa49..213e9aed9 100644 --- a/packages/web/docs/src/pages/product-updates/2024-07-30-persisted-documents-app-deployments-preview.mdx +++ b/packages/web/docs/src/pages/product-updates/2024-07-30-persisted-documents-app-deployments-preview.mdx @@ -11,7 +11,7 @@ import NextImage from 'next/image' **TL;DR** Persisted documents through app deployments are now available in preview on Hive and allow you to secure your GraphQL API. To get started, please refer to the -[app deployments documentation](/docs/features/app-deployments). +[app deployments documentation](/docs/schema-registry/app-deployments). ## What are Persisted Documents? @@ -71,7 +71,7 @@ documents, by accessing the Hive CDN. ```mermaid flowchart LR C["GraphQL API or Gateway -(Yoga, Apollo Server, Mesh, Apollo Router)"] +(Hive Gateway, GraphQL Yoga, Apollo Server, Apollo Router)"] B["Hive CDN"] D["App"] @@ -96,4 +96,4 @@ import pendingAppImage from '../../../public/changelog/2024-07-30-persisted-docu /> App deployments are now available in preview on Hive. To get started, please refer to the -[app deployments documentation](/docs/features/app-deployments). +[app deployments documentation](/docs/schema-registry/app-deployments). diff --git a/packages/web/docs/src/pages/product-updates/2024-09-09-hive-gateway.mdx b/packages/web/docs/src/pages/product-updates/2024-09-09-hive-gateway.mdx new file mode 100644 index 000000000..ecb9769ca --- /dev/null +++ b/packages/web/docs/src/pages/product-updates/2024-09-09-hive-gateway.mdx @@ -0,0 +1,37 @@ +--- +title: Introducing Hive Gateway +description: + Announcing the new GraphQL Federation Gateway that seamlessly integrates with the Hive Schema + Registry. 
+date: 2024-09-10 +authors: [laurin, kamil, arda] +--- + +**TL;DR** We release Hive Gateway v1.0.0, a new GraphQL Federation Gateway that seamlessly +integrates with the Hive Schema Registry. [Check out our new Gateway documentation](/docs/gateway). + +Hive Gateway is our fully open source and MIT-licensed GraphQL Gateway with native support for +GraphQL Federation. + +Hive Gateway is built on top of our existing and widely used open-source packages such as GraphQL +Yoga and GraphQL Tools. + +With Hive Gateway you get access to the following features, that are currently behind a paywall in +other gateway/router solutions. + +- GraphQL Subscriptions +- Persisted Documents +- GraphQL API Usage and Analytics Reporting +- Authentication and Authorization +- Role-based Access Control +- Observability with Open Telemetry and Prometheus + +We don't believe in gatekeeping essential features, and want to make them accessible to everyone. + +To get started check out our new updated +[getting started guide for Apollo Federation](/docs/get-started/apollo-federation) or immediately +dive into [our new gateway documentation](/docs/gateway). + +You can learn more about our decision to split the GraphQL Mesh and Hive Gateway projects in our +blog post +[Introducing GraphQL Mesh v1 and Hive Gateway v1](https://the-guild.dev/blog/graphql-mesh-v1-hive-gateway-v1). 
diff --git a/patches/@theguild__components.patch b/patches/@theguild__components.patch new file mode 100644 index 000000000..1ff62a803 --- /dev/null +++ b/patches/@theguild__components.patch @@ -0,0 +1,13 @@ +diff --git a/dist/index.js b/dist/index.js +index 4d1647d8b9d037c9e091e8378d042da4cc569d84..4e45feaeff93ca320e388254012427d69f42968e 100644 +--- a/dist/index.js ++++ b/dist/index.js +@@ -2708,7 +2708,7 @@ function defineConfig({ + // other pages + asPath + )}`, +- image = `https://og-image.the-guild.dev/?product=${websiteName}&title=${encodeURI( ++ image = `https://og-image.the-guild.dev/?product=${websiteName.toUpperCase()}&title=${encodeURI( + pageTitle + )}` + } = frontMatter; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f7867a4cc..59d1f7440 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -20,6 +20,9 @@ patchedDependencies: '@theguild/buddy@0.1.0': hash: ryylgra5xglhidfoiaxehn22hq path: patches/@theguild__buddy@0.1.0.patch + '@theguild/components': + hash: oskjo2y24bb7ptwj3nsbyin4w4 + path: patches/@theguild__components.patch '@theguild/editor@1.2.5': hash: pz2nlsfawfhpo6sw62ecmp2yum path: patches/@theguild__editor@1.2.5.patch @@ -884,6 +887,22 @@ importers: specifier: 3.23.8 version: 3.23.8 + packages/services/demo/federation: + dependencies: + '@apollo/subgraph': + specifier: 2.8.4 + version: 2.8.4(graphql@16.9.0) + graphql: + specifier: 16.9.0 + version: 16.9.0 + graphql-yoga: + specifier: 5.6.0 + version: 5.6.0(graphql@16.9.0) + devDependencies: + wrangler: + specifier: 3.61.0 + version: 3.61.0(@cloudflare/workers-types@4.20240821.1) + packages/services/emails: devDependencies: '@hive/service-common': @@ -2002,7 +2021,7 @@ importers: version: 1.1.2(@types/react-dom@18.3.0)(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@theguild/components': specifier: 7.0.0-alpha-20240906131026-66c41355f58d6d009d54ea08f21d933f97fea6f2 - version: 
7.0.0-alpha-20240906131026-66c41355f58d6d009d54ea08f21d933f97fea6f2(@types/react-dom@18.3.0)(@types/react@18.3.3)(next@14.2.6(@babel/core@7.22.9)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(ts-node@10.9.2(@swc/core@1.7.18(@swc/helpers@0.5.11))(@types/node@20.16.1)(typescript@5.5.4))(typescript@5.5.4)(webpack@5.92.1(@swc/core@1.7.18(@swc/helpers@0.5.11))(esbuild@0.23.0)) + version: 7.0.0-alpha-20240906131026-66c41355f58d6d009d54ea08f21d933f97fea6f2(patch_hash=oskjo2y24bb7ptwj3nsbyin4w4)(@types/react-dom@18.3.0)(@types/react@18.3.3)(next@14.2.6(@babel/core@7.22.9)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(ts-node@10.9.2(@swc/core@1.7.18(@swc/helpers@0.5.11))(@types/node@20.16.1)(typescript@5.5.4))(typescript@5.5.4)(webpack@5.92.1(@swc/core@1.7.18(@swc/helpers@0.5.11))(esbuild@0.23.0)) clsx: specifier: 2.1.1 version: 2.1.1 @@ -3466,6 +3485,40 @@ packages: '@changesets/write@0.3.1': resolution: {integrity: sha512-SyGtMXzH3qFqlHKcvFY2eX+6b0NGiFcNav8AFsYwy5l8hejOeoeTDemu5Yjmke2V5jpzY+pBvM0vCCQ3gdZpfw==} + '@cloudflare/kv-asset-handler@0.3.3': + resolution: {integrity: sha512-wpE+WiWW2kUNwNE0xyl4CtTAs+STjGtouHGiZPGRaisGB7eXXdbvfZdOrQJQVKgTxZiNAgVgmc7fj0sUmd8zyA==} + engines: {node: '>=16.13'} + + '@cloudflare/workerd-darwin-64@1.20240610.1': + resolution: {integrity: sha512-YanZ1iXgMGaUWlleB5cswSE6qbzyjQ8O7ENWZcPAcZZ6BfuL7q3CWi0t9iM1cv2qx92rRztsRTyjcfq099++XQ==} + engines: {node: '>=16'} + cpu: [x64] + os: [darwin] + + '@cloudflare/workerd-darwin-arm64@1.20240610.1': + resolution: {integrity: sha512-bRe/y/LKjIgp3L2EHjc+CvoCzfHhf4aFTtOBkv2zW+VToNJ4KlXridndf7LvR9urfsFRRo9r4TXCssuKaU+ypQ==} + engines: {node: '>=16'} + cpu: [arm64] + os: [darwin] + + '@cloudflare/workerd-linux-64@1.20240610.1': + resolution: {integrity: sha512-2zDcadR7+Gs9SjcMXmwsMji2Xs+yASGNA2cEHDuFc4NMUup+eL1mkzxc/QzvFjyBck98e92rBjMZt2dVscpGKg==} + engines: 
{node: '>=16'} + cpu: [x64] + os: [linux] + + '@cloudflare/workerd-linux-arm64@1.20240610.1': + resolution: {integrity: sha512-7y41rPi5xmIYJN8CY+t3RHnjLL0xx/WYmaTd/j552k1qSr02eTE2o/TGyWZmGUC+lWnwdPQJla0mXbvdqgRdQg==} + engines: {node: '>=16'} + cpu: [arm64] + os: [linux] + + '@cloudflare/workerd-windows-64@1.20240610.1': + resolution: {integrity: sha512-B0LyT3DB6rXHWNptnntYHPaoJIy0rXnGfeDBM3nEVV8JIsQrx8MEFn2F2jYioH1FkUVavsaqKO/zUosY3tZXVA==} + engines: {node: '>=16'} + cpu: [x64] + os: [win32] + '@cloudflare/workers-types@4.20240821.1': resolution: {integrity: sha512-icAkbnAqgVl6ef9lgLTom8na+kj2RBw2ViPAQ586hbdj0xZcnrjK7P46Eu08OU9D/lNDgN2sKU/sxhe2iK/gIg==} @@ -3647,6 +3700,16 @@ packages: resolution: {integrity: sha512-IPjmgSc4KpQRlO4qbEDnBEixvtb06WDmjKfi/7fkZaryh5HuOmTtixe1EupQI5XfXO8joc3d27uUZ0QdC++euA==} engines: {node: '>=18.0.0'} + '@esbuild-plugins/node-globals-polyfill@0.2.3': + resolution: {integrity: sha512-r3MIryXDeXDOZh7ih1l/yE9ZLORCd5e8vWg02azWRGj5SPTuoh69A2AIyn0Z31V/kHBfZ4HgWJ+OK3GTTwLmnw==} + peerDependencies: + esbuild: '*' + + '@esbuild-plugins/node-modules-polyfill@0.2.2': + resolution: {integrity: sha512-LXV7QsWJxRuMYvKbiznh+U1ilIop3g2TeKRzUxOG5X3YITc8JyyTa90BmLwqqv0YnX4v32CSlG+vsziZp9dMvA==} + peerDependencies: + esbuild: '*' + '@esbuild/aix-ppc64@0.21.5': resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==} engines: {node: '>=12'} @@ -3665,6 +3728,12 @@ packages: cpu: [ppc64] os: [aix] + '@esbuild/android-arm64@0.17.19': + resolution: {integrity: sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + '@esbuild/android-arm64@0.21.5': resolution: {integrity: sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==} engines: {node: '>=12'} @@ -3683,6 +3752,12 @@ packages: cpu: [arm64] os: [android] + '@esbuild/android-arm@0.17.19': + resolution: 
{integrity: sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + '@esbuild/android-arm@0.21.5': resolution: {integrity: sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==} engines: {node: '>=12'} @@ -3701,6 +3776,12 @@ packages: cpu: [arm] os: [android] + '@esbuild/android-x64@0.17.19': + resolution: {integrity: sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + '@esbuild/android-x64@0.21.5': resolution: {integrity: sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==} engines: {node: '>=12'} @@ -3719,6 +3800,12 @@ packages: cpu: [x64] os: [android] + '@esbuild/darwin-arm64@0.17.19': + resolution: {integrity: sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + '@esbuild/darwin-arm64@0.21.5': resolution: {integrity: sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==} engines: {node: '>=12'} @@ -3737,6 +3824,12 @@ packages: cpu: [arm64] os: [darwin] + '@esbuild/darwin-x64@0.17.19': + resolution: {integrity: sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + '@esbuild/darwin-x64@0.21.5': resolution: {integrity: sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==} engines: {node: '>=12'} @@ -3755,6 +3848,12 @@ packages: cpu: [x64] os: [darwin] + '@esbuild/freebsd-arm64@0.17.19': + resolution: {integrity: sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + '@esbuild/freebsd-arm64@0.21.5': 
resolution: {integrity: sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==} engines: {node: '>=12'} @@ -3773,6 +3872,12 @@ packages: cpu: [arm64] os: [freebsd] + '@esbuild/freebsd-x64@0.17.19': + resolution: {integrity: sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + '@esbuild/freebsd-x64@0.21.5': resolution: {integrity: sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==} engines: {node: '>=12'} @@ -3791,6 +3896,12 @@ packages: cpu: [x64] os: [freebsd] + '@esbuild/linux-arm64@0.17.19': + resolution: {integrity: sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + '@esbuild/linux-arm64@0.21.5': resolution: {integrity: sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==} engines: {node: '>=12'} @@ -3809,6 +3920,12 @@ packages: cpu: [arm64] os: [linux] + '@esbuild/linux-arm@0.17.19': + resolution: {integrity: sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + '@esbuild/linux-arm@0.21.5': resolution: {integrity: sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==} engines: {node: '>=12'} @@ -3827,6 +3944,12 @@ packages: cpu: [arm] os: [linux] + '@esbuild/linux-ia32@0.17.19': + resolution: {integrity: sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + '@esbuild/linux-ia32@0.21.5': resolution: {integrity: sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==} engines: {node: '>=12'} @@ -3845,6 +3968,12 @@ packages: cpu: [ia32] os: [linux] + 
'@esbuild/linux-loong64@0.17.19': + resolution: {integrity: sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + '@esbuild/linux-loong64@0.21.5': resolution: {integrity: sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==} engines: {node: '>=12'} @@ -3863,6 +3992,12 @@ packages: cpu: [loong64] os: [linux] + '@esbuild/linux-mips64el@0.17.19': + resolution: {integrity: sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + '@esbuild/linux-mips64el@0.21.5': resolution: {integrity: sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==} engines: {node: '>=12'} @@ -3881,6 +4016,12 @@ packages: cpu: [mips64el] os: [linux] + '@esbuild/linux-ppc64@0.17.19': + resolution: {integrity: sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + '@esbuild/linux-ppc64@0.21.5': resolution: {integrity: sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==} engines: {node: '>=12'} @@ -3899,6 +4040,12 @@ packages: cpu: [ppc64] os: [linux] + '@esbuild/linux-riscv64@0.17.19': + resolution: {integrity: sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + '@esbuild/linux-riscv64@0.21.5': resolution: {integrity: sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==} engines: {node: '>=12'} @@ -3917,6 +4064,12 @@ packages: cpu: [riscv64] os: [linux] + '@esbuild/linux-s390x@0.17.19': + resolution: {integrity: sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==} + engines: {node: 
'>=12'} + cpu: [s390x] + os: [linux] + '@esbuild/linux-s390x@0.21.5': resolution: {integrity: sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==} engines: {node: '>=12'} @@ -3935,6 +4088,12 @@ packages: cpu: [s390x] os: [linux] + '@esbuild/linux-x64@0.17.19': + resolution: {integrity: sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + '@esbuild/linux-x64@0.21.5': resolution: {integrity: sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==} engines: {node: '>=12'} @@ -3953,6 +4112,12 @@ packages: cpu: [x64] os: [linux] + '@esbuild/netbsd-x64@0.17.19': + resolution: {integrity: sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + '@esbuild/netbsd-x64@0.21.5': resolution: {integrity: sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==} engines: {node: '>=12'} @@ -3983,6 +4148,12 @@ packages: cpu: [arm64] os: [openbsd] + '@esbuild/openbsd-x64@0.17.19': + resolution: {integrity: sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + '@esbuild/openbsd-x64@0.21.5': resolution: {integrity: sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==} engines: {node: '>=12'} @@ -4001,6 +4172,12 @@ packages: cpu: [x64] os: [openbsd] + '@esbuild/sunos-x64@0.17.19': + resolution: {integrity: sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + '@esbuild/sunos-x64@0.21.5': resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==} engines: {node: '>=12'} @@ 
-4019,6 +4196,12 @@ packages: cpu: [x64] os: [sunos] + '@esbuild/win32-arm64@0.17.19': + resolution: {integrity: sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + '@esbuild/win32-arm64@0.21.5': resolution: {integrity: sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==} engines: {node: '>=12'} @@ -4037,6 +4220,12 @@ packages: cpu: [arm64] os: [win32] + '@esbuild/win32-ia32@0.17.19': + resolution: {integrity: sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + '@esbuild/win32-ia32@0.21.5': resolution: {integrity: sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==} engines: {node: '>=12'} @@ -4055,6 +4244,12 @@ packages: cpu: [ia32] os: [win32] + '@esbuild/win32-x64@0.17.19': + resolution: {integrity: sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + '@esbuild/win32-x64@0.21.5': resolution: {integrity: sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==} engines: {node: '>=12'} @@ -4153,6 +4348,10 @@ packages: '@fastify/ajv-compiler@3.5.0': resolution: {integrity: sha512-ebbEtlI7dxXF5ziNdr05mOY8NnDiPB1XvAlLHctRt/Rc+C3LCOVW5imUVX+mhvUhnNzmPBHewUkOFgGlCxgdAA==} + '@fastify/busboy@2.1.1': + resolution: {integrity: sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==} + engines: {node: '>=14'} + '@fastify/cors@9.0.1': resolution: {integrity: sha512-YY9Ho3ovI+QHIL2hW+9X4XqQjXLjJqsU+sMV/xFsxZkE8p3GNnYVFpoOxF7SsP5ZL76gwvbo3V9L+FIekBGU4Q==} @@ -8060,6 +8259,9 @@ packages: '@types/node-fetch@2.6.4': resolution: {integrity: 
sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg==} + '@types/node-forge@1.3.11': + resolution: {integrity: sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==} + '@types/node@10.17.60': resolution: {integrity: sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==} @@ -8656,6 +8858,9 @@ packages: resolution: {integrity: sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==} engines: {node: '>=0.10.0'} + as-table@1.0.55: + resolution: {integrity: sha512-xvsWESUJn0JN421Xb9MQw6AsMHRCUknCe0Wjlxvjud80mU4E6hQf1A6NzQKcYNmYw62MfzEtXc+badstZP3JpQ==} + asap@2.0.6: resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==} @@ -8839,6 +9044,9 @@ packages: bl@4.1.0: resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==} + blake3-wasm@2.1.5: + resolution: {integrity: sha512-F1+K8EbfOZE49dtoPtmxUQrpXaBIl3ICvasLh+nJta0xkz+9kF/7uet9fLnwKqhDrmj6g+6K3Tw9yQPUg2ka5g==} + blob-util@2.0.2: resolution: {integrity: sha512-T7JQa+zsXXEa6/8ZhHcQEW1UFfVM49Ts65uBkFL6fz2QmrElqmbajIDJvuA0tEhRe5eIjpV9ZF+0RfZR9voJFQ==} @@ -8875,10 +9083,6 @@ packages: brace-expansion@2.0.1: resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} - braces@3.0.2: - resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} - engines: {node: '>=8'} - braces@3.0.3: resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} engines: {node: '>=8'} @@ -9026,6 +9230,9 @@ packages: capital-case@1.0.4: resolution: {integrity: sha512-ds37W8CytHgwnhGGTi88pcPyR15qoNkOpYwmMMfnWqqWgESapLqvDx6huFjQ5vqWSn2Z06173XNA7LtMOeUh1A==} + capnp-ts@0.7.0: + 
resolution: {integrity: sha512-XKxXAC3HVPv7r674zP0VC3RTXz+/JKhfyw94ljvF80yynK6VkTnqE3jMuN8b3dUVmmc43TjyxjW4KTsmB3c86g==} + cardinal@2.1.1: resolution: {integrity: sha512-JSr5eOgoEymtYHBjNWyjrMqet9Am2miJhlfKNdqLp6zoeAh0KN5dRAcxlecj5mAJrmQomgiOBj35xHLrFjqBpw==} hasBin: true @@ -9713,6 +9920,9 @@ packages: resolution: {integrity: sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==} engines: {node: '>=0.10'} + data-uri-to-buffer@2.0.2: + resolution: {integrity: sha512-ND9qDTLc6diwj+Xe5cdAgVTbLVdXbtxTJRXRhli8Mowuaan+0EJOtdqJ0QCHNSSPyoXGx9HX2/VMnKeC34AChA==} + dataloader@1.4.0: resolution: {integrity: sha512-68s5jYdlvasItOJnCuI2Q9s4q98g0pCyL3HrcKJu8KNugUl8ahgmZYg38ysLTgQjjXX3H8CJLkAvWrclWfcalw==} @@ -9845,8 +10055,8 @@ packages: resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} engines: {node: '>= 0.4'} - defu@6.1.2: - resolution: {integrity: sha512-+uO4+qr7msjNNWKYPHqN/3+Dx3NFkmIzayk2L1MyZQlvgZb/J1A0fo410dpKrN2SnqFjt8n4JL8fDJE0wIgjFQ==} + defu@6.1.4: + resolution: {integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==} delaunator@5.0.1: resolution: {integrity: sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==} @@ -10150,6 +10360,11 @@ packages: peerDependencies: esbuild: '>=0.12 <1' + esbuild@0.17.19: + resolution: {integrity: sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==} + engines: {node: '>=12'} + hasBin: true + esbuild@0.21.5: resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==} engines: {node: '>=12'} @@ -10404,6 +10619,9 @@ packages: estree-util-visit@2.0.0: resolution: {integrity: sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==} + estree-walker@0.6.1: + resolution: {integrity: 
sha512-SqmZANLWS0mnatqbSfRP5g8OXZC12Fgg1IwNtLsyHDzJizORW4khDfjPqJZsemPWBB2uqykUah5YpQ6epsqC/w==} + estree-walker@2.0.2: resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} @@ -10459,6 +10677,10 @@ packages: resolution: {integrity: sha512-8iA79xD3uAch729dUG8xaaBBFGaEa0wdD2VkYLFHwlqosEj/jT66AzcreRDSgV7ehnNLBW2WR5jIXwGKjVdTLg==} engines: {node: '>=4'} + exit-hook@2.2.1: + resolution: {integrity: sha512-eNTPlAD67BmP31LDINZ3U7HSF8l57TxOY2PmBJ1shpCvpnxBF93mWCE8YHBnXs8qiUZJc9WDcWIeC3a2HIAMfw==} + engines: {node: '>=6'} + exponential-backoff@3.1.1: resolution: {integrity: sha512-dX7e/LHVJ6W3DE1MHWi9S1EYzDESENfLrYohG2G++ovZrYOkm4Knwa0mc1cn84xJOR4KEU0WSchhLbd0UklbHw==} @@ -10638,10 +10860,6 @@ packages: resolution: {integrity: sha512-mjFIpOHC4jbfcTfoh4rkWpI31mF7viw9ikj/JyLoKzqlwG/YsefKfvYlYhdYdg/9mtK2z1AzgN/0LvVQ3zdlSQ==} engines: {node: '>= 0.4.0'} - fill-range@7.0.1: - resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} - engines: {node: '>=8'} - fill-range@7.1.1: resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} engines: {node: '>=8'} @@ -10898,6 +11116,9 @@ packages: resolution: {integrity: sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==} engines: {node: '>=8.0.0'} + get-source@2.0.12: + resolution: {integrity: sha512-X5+4+iD+HoSeEED+uwrQ07BOQr0kEDFMVqqpBuI+RaZBpBpHCuXxo70bjar6f0b0u/DQJsJ7ssurpP0V60Az+w==} + get-stack-trace@2.1.1: resolution: {integrity: sha512-dhqSDD9lHU/6FvIZ9KbXGmVK6IKr9ZskZtNOUvhlCiONlnqatu4FmAeRbxCfJJVuQ0NWfz6dAbibKQg19B7AmQ==} engines: {node: '>=8.0'} @@ -11204,12 +11425,22 @@ packages: peerDependencies: graphql: ^15.2.0 || ^16.0.0 + graphql-yoga@5.6.0: + resolution: {integrity: sha512-MqzHRPmiMSilYLDbJtAnXN7oyggd446a4F9dyj/H4gCmM/3YllCYw3vtKcmsykorsfiSKCYpCf5CimNXIVaHHg==} + engines: {node: 
'>=18.0.0'} + peerDependencies: + graphql: ^15.2.0 || ^16.0.0 + graphql-yoga@5.7.0: resolution: {integrity: sha512-QyGVvFAvGhMrzjJvhjsxsyoE+e4lNrj5f5qOsRYJuWIjyw7tHfbBvybZIwzNOGY0aB5sgA8BlVvu5hxjdKJ5tQ==} engines: {node: '>=18.0.0'} peerDependencies: graphql: ^15.2.0 || ^16.0.0 + graphql@16.8.1: + resolution: {integrity: sha512-59LZHPdGZVh695Ud9lRzPBVTtlX9ZCV150Er2W43ro37wVof0ctenSaskPPjN7lVTIN8mSZt8PHUNKZuNQUuxw==} + engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} + graphql@16.9.0: resolution: {integrity: sha512-GGTKBX4SD7Wdb8mqeDLni2oaRGYQWjWHGKPQ24ZMnUtKfcsVoiv4uX8+LJr1K6U5VW2Lu1BwJnj7uiori0YtRw==} engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} @@ -12419,6 +12650,9 @@ packages: resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==} hasBin: true + magic-string@0.25.9: + resolution: {integrity: sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==} + magic-string@0.27.0: resolution: {integrity: sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==} engines: {node: '>=12'} @@ -12846,6 +13080,11 @@ packages: resolution: {integrity: sha512-r9deDe9p5FJUPZAk3A59wGH7Ii9YrjjWw0jmw/liSbHl2CHiyXj6FcDXDu2K3TjVAXqiJdaw3xxwlZZr9E6nHg==} hasBin: true + miniflare@3.20240610.1: + resolution: {integrity: sha512-ZkfSpBmX3nJW00yYhvF2kGvjb6f77TOimRR6+2GQvsArbwo6e0iYqLGM9aB/cnJzgFjLMvOv1qj4756iynSxJQ==} + engines: {node: '>=16.13'} + hasBin: true + minimatch@10.0.1: resolution: {integrity: sha512-ethXTt3SGGR+95gudmqJ1eNhRO7eGEGIgYA9vnPatK4/etz2MEVDno5GMCibdMTuBMyElzIlgxMna3K94XDIDQ==} engines: {node: 20 || >=22} @@ -13080,6 +13319,10 @@ packages: multi-fork@0.0.2: resolution: {integrity: sha512-SHWGuze0cZNiH+JGJQFlB1k7kZLGFCvW1Xo5Fcpe86KICkC3aVTJWpjUcmyYcLCB0I6gdzKLCia/bTIw2ggl8A==} + mustache@4.2.0: + resolution: {integrity: 
sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} + hasBin: true + mute-stream@0.0.8: resolution: {integrity: sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==} @@ -13204,8 +13447,8 @@ packages: resolution: {integrity: sha512-tmPX422rYgofd4epzrNoOXiE8XFZYOcCq1vD7MAXCDO+O+zndlA2ztdKKMa+EeuBG5tHETpr4ml4RGgpqDCCAg==} engines: {node: '>= 0.10.5'} - node-fetch-native@1.0.2: - resolution: {integrity: sha512-KIkvH1jl6b3O7es/0ShyCgWLcfXxlBrLBbP3rOr23WArC66IMcU4DeZEeYEOwnopYhawLTn7/y+YtmASe8DFVQ==} + node-fetch-native@1.6.4: + resolution: {integrity: sha512-IhOigYzAKHd244OC0JIMIUrjzctirCmPkaIfhDeGcEETWof5zKYUW7e7MYvChGWh/4CJeXEgsRyGzuF334rOOQ==} node-fetch@2.6.12: resolution: {integrity: sha512-C/fGU2E8ToujUivIO0H+tpQ6HWo4eEmchoPIoXtxCrVghxdKq+QOHqEZW7tuP3KlV3bC8FRMO5nMCC7Zm1VP6g==} @@ -13216,6 +13459,10 @@ packages: encoding: optional: true + node-forge@1.3.1: + resolution: {integrity: sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==} + engines: {node: '>= 6.13.0'} + node-gyp-build-optional-packages@5.0.7: resolution: {integrity: sha512-YlCCc6Wffkx0kHkmam79GKvDQ6x+QZkMjFGrIMxgFNILFvGSbCp2fCBC55pGTT9gVaz8Na5CLmxt/urtzRv36w==} hasBin: true @@ -14239,6 +14486,9 @@ packages: peerDependencies: prettier: ^3.0.0 + printable-characters@1.0.42: + resolution: {integrity: sha512-dKp+C4iXWK4vVYZmYSd0KBH5F/h1HoZRsbJ82AVKRO3PEo8L4lBS/vLwhVtpwwuYcoIsVY+1JYKR268yn480uQ==} + proc-log@3.0.0: resolution: {integrity: sha512-++Vn7NS4Xf9NacaU9Xq3URUuqZETPsf8L4j5/ckhaRYsfPeRyzGw+iDjFhV/Jr3uNmTvvddEJFWh5R1gRgUH8A==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -14816,6 +15066,10 @@ packages: resolution: {integrity: sha512-6K/gDlqgQscOlg9fSRpWstA8sYe8rbELsSTNpx+3kTrsVCzvSl0zIvRErM7fdl9ERWDsKnrLnwB+Ne89918XOg==} engines: {node: '>=10'} + resolve.exports@2.0.2: + resolution: {integrity: 
sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==} + engines: {node: '>=10'} + resolve@1.22.8: resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} hasBin: true @@ -14898,6 +15152,16 @@ packages: robust-predicates@3.0.2: resolution: {integrity: sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==} + rollup-plugin-inject@3.0.2: + resolution: {integrity: sha512-ptg9PQwzs3orn4jkgXJ74bfs5vYz1NCZlSQMBUA0wKcGp5i5pA1AO3fOUEte8enhGUC+iapTCzEWw2jEFFUO/w==} + deprecated: This package has been deprecated and is no longer maintained. Please use @rollup/plugin-inject. + + rollup-plugin-node-polyfills@0.2.1: + resolution: {integrity: sha512-4kCrKPTJ6sK4/gLL/U5QzVT8cxJcofO0OU74tnB19F40cmuAKSzH5/siithxlofFEjwvw1YAhPmbvGNA6jEroA==} + + rollup-pluginutils@2.8.2: + resolution: {integrity: sha512-EEp9NhnUkwY8aif6bxgovPHMoMoNr2FulJziTndpt5H9RdwC47GSGuII9XxpSdzVGM0GWrNPHV6ie1LTNJPaLQ==} + rollup@4.18.0: resolution: {integrity: sha512-QmJz14PX3rzbJCN1SG4Xe/bAAX2a6NpCP8ab2vfu2GiUr8AQcr2nCV/oEO3yneFarB67zk8ShlIyWb2LGTb3Sg==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} @@ -14985,6 +15249,10 @@ packages: secure-json-parse@2.7.0: resolution: {integrity: sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==} + selfsigned@2.4.1: + resolution: {integrity: sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==} + engines: {node: '>=10'} + semver-compare@1.0.0: resolution: {integrity: sha512-YM3/ITh2MJ5MtzaM429anh+x2jiLVjqILF4m4oyQB18W7Ggea7BfqdH/wGMK7dDiMghv/6WG7znWMwUDzJiXow==} @@ -15234,6 +15502,10 @@ packages: resolution: {integrity: sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA==} engines: {node: '>= 8'} + sourcemap-codec@1.4.8: + resolution: {integrity: 
sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==} + deprecated: Please use @jridgewell/sourcemap-codec instead + space-separated-tokens@2.0.1: resolution: {integrity: sha512-ekwEbFp5aqSPKaqeY1PGrlGQxPNaq+Cnx4+bE2D8sciBQrHpbwoBbawqTN2+6jPs9IdWxxiUcN0K2pkczD3zmw==} @@ -15296,6 +15568,9 @@ packages: stackback@0.0.2: resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + stacktracey@2.1.8: + resolution: {integrity: sha512-Kpij9riA+UNg7TnphqjH7/CzctQ/owJGNbFkfEeve4Z4uxT5+JapVLFXcsurIfN34gnTWZNJ/f7NMG0E8JDzTw==} + standard-as-callback@2.1.0: resolution: {integrity: sha512-qoRRSyROncaz1z0mvYqIE4lCd9p2R90i6GxW3uZv5ucSu8tU7B5HXUP1gG8pVZsYNVaXjk8ClXHPttLyxAL48A==} @@ -15312,6 +15587,10 @@ packages: std-env@3.7.0: resolution: {integrity: sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==} + stoppable@1.1.0: + resolution: {integrity: sha512-KXDYZ9dszj6bzvnEMRYvxgeTHU74QBFL54XKtP3nyMuJ81CFYtABZ3bAzL2EdFUaEwJOBOgENyFj3R7oTzDyyw==} + engines: {node: '>=4', npm: '>=6'} + storybook@8.2.9: resolution: {integrity: sha512-S7Q/Yt4A+nu1O23rg39lQvBqL2Vg+PKXbserDWUR4LFJtfmoZ2xGO8oFIhJmvvhjUBvolw1q7QDeswPq2i0sGw==} hasBin: true @@ -15981,6 +16260,9 @@ packages: uc.micro@2.1.0: resolution: {integrity: sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==} + ufo@1.5.4: + resolution: {integrity: sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==} + uglify-js@3.17.4: resolution: {integrity: sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==} engines: {node: '>=0.8.0'} @@ -15999,10 +16281,17 @@ packages: undici-types@6.19.6: resolution: {integrity: sha512-e/vggGopEfTKSvj4ihnOLTsqhrKRN3LeO6qSN/GxohhuRv8qH9bNQ4B8W7e/vFL+0XTnmHPB4/kegunZGA4Org==} + undici@5.28.4: + resolution: {integrity: 
sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==} + engines: {node: '>=14.0'} + undici@6.19.8: resolution: {integrity: sha512-U8uCCl2x9TK3WANvmBavymRzxbfFYG+tAu+fgx3zxQy3qdagQqBLwJVrdyO1TBfUXvfKveMKJZhpvUYoOjM+4g==} engines: {node: '>=18.17'} + unenv-nightly@1.10.0-1717606461.a117952: + resolution: {integrity: sha512-u3TfBX02WzbHTpaEfWEKwDijDSFAHcgXkayUZ+MVDrjhLFvgAJzFGTSTmwlEhwWi2exyRQey23ah9wELMM6etg==} + unicode-canonical-property-names-ecmascript@2.0.0: resolution: {integrity: sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==} engines: {node: '>=4'} @@ -16532,9 +16821,24 @@ packages: wordwrap@1.0.0: resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==} + workerd@1.20240610.1: + resolution: {integrity: sha512-Rtut5GrsODQMh6YU43b9WZ980Wd05Ov1/ds88pT/SoetmXFBvkBzdRfiHiATv+azmGX8KveE0i/Eqzk/yI01ug==} + engines: {node: '>=16'} + hasBin: true + workers-loki-logger@0.1.15: resolution: {integrity: sha512-NYtNZXeevm4HHlFZkQTQMuQru1LoVVpEfc7LPZGJk03y2wru2/eXkp/6a9WVejilTWWa/x9nLM8TLY1UT18rig==} + wrangler@3.61.0: + resolution: {integrity: sha512-feVAp0986x9xL3Dc1zin0ZVXKaqzp7eZur7iPLnpEwjG1Xy4dkVEZ5a1LET94Iyejt1P+EX5lgGcz63H7EfzUw==} + engines: {node: '>=16.17.0'} + hasBin: true + peerDependencies: + '@cloudflare/workers-types': ^4.20240605.0 + peerDependenciesMeta: + '@cloudflare/workers-types': + optional: true + wrap-ansi@6.2.0: resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==} engines: {node: '>=8'} @@ -16608,6 +16912,9 @@ packages: resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==} engines: {node: '>=0.4'} + xxhash-wasm@1.0.2: + resolution: {integrity: sha512-ibF0Or+FivM9lNrg+HGJfVX8WJqgo+kCLDc4vx6xMeTce7Aj+DLttKbxxRR/gNLSAelRc1omAPlJ77N/Jem07A==} + y18n@4.0.3: 
resolution: {integrity: sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==} @@ -16688,6 +16995,9 @@ packages: resolution: {integrity: sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==} engines: {node: '>=12.20'} + youch@3.3.3: + resolution: {integrity: sha512-qSFXUk3UZBLfggAW3dJKg0BMblG5biqSF8M34E06o5CSsZtH92u9Hqmj2RzGiHDi64fhe83+4tENFP2DB6t6ZA==} + yup@0.29.3: resolution: {integrity: sha512-RNUGiZ/sQ37CkhzKFoedkeMfJM0vNQyaz+wRZJzxdKE7VfDeVKH8bb4rr7XhRLbHJz5hSjoDNwMEIaKhuMZ8gQ==} engines: {node: '>=10'} @@ -19179,6 +19489,25 @@ snapshots: human-id: 1.0.2 prettier: 2.8.8 + '@cloudflare/kv-asset-handler@0.3.3': + dependencies: + mime: 3.0.0 + + '@cloudflare/workerd-darwin-64@1.20240610.1': + optional: true + + '@cloudflare/workerd-darwin-arm64@1.20240610.1': + optional: true + + '@cloudflare/workerd-linux-64@1.20240610.1': + optional: true + + '@cloudflare/workerd-linux-arm64@1.20240610.1': + optional: true + + '@cloudflare/workerd-windows-64@1.20240610.1': + optional: true + '@cloudflare/workers-types@4.20240821.1': {} '@codemirror/language@6.10.2': @@ -19430,6 +19759,16 @@ snapshots: dependencies: tslib: 2.7.0 + '@esbuild-plugins/node-globals-polyfill@0.2.3(esbuild@0.17.19)': + dependencies: + esbuild: 0.17.19 + + '@esbuild-plugins/node-modules-polyfill@0.2.2(esbuild@0.17.19)': + dependencies: + esbuild: 0.17.19 + escape-string-regexp: 4.0.0 + rollup-plugin-node-polyfills: 0.2.1 + '@esbuild/aix-ppc64@0.21.5': optional: true @@ -19439,6 +19778,9 @@ snapshots: '@esbuild/aix-ppc64@0.23.1': optional: true + '@esbuild/android-arm64@0.17.19': + optional: true + '@esbuild/android-arm64@0.21.5': optional: true @@ -19448,6 +19790,9 @@ snapshots: '@esbuild/android-arm64@0.23.1': optional: true + '@esbuild/android-arm@0.17.19': + optional: true + '@esbuild/android-arm@0.21.5': optional: true @@ -19457,6 +19802,9 @@ snapshots: '@esbuild/android-arm@0.23.1': optional: true + 
'@esbuild/android-x64@0.17.19': + optional: true + '@esbuild/android-x64@0.21.5': optional: true @@ -19466,6 +19814,9 @@ snapshots: '@esbuild/android-x64@0.23.1': optional: true + '@esbuild/darwin-arm64@0.17.19': + optional: true + '@esbuild/darwin-arm64@0.21.5': optional: true @@ -19475,6 +19826,9 @@ snapshots: '@esbuild/darwin-arm64@0.23.1': optional: true + '@esbuild/darwin-x64@0.17.19': + optional: true + '@esbuild/darwin-x64@0.21.5': optional: true @@ -19484,6 +19838,9 @@ snapshots: '@esbuild/darwin-x64@0.23.1': optional: true + '@esbuild/freebsd-arm64@0.17.19': + optional: true + '@esbuild/freebsd-arm64@0.21.5': optional: true @@ -19493,6 +19850,9 @@ snapshots: '@esbuild/freebsd-arm64@0.23.1': optional: true + '@esbuild/freebsd-x64@0.17.19': + optional: true + '@esbuild/freebsd-x64@0.21.5': optional: true @@ -19502,6 +19862,9 @@ snapshots: '@esbuild/freebsd-x64@0.23.1': optional: true + '@esbuild/linux-arm64@0.17.19': + optional: true + '@esbuild/linux-arm64@0.21.5': optional: true @@ -19511,6 +19874,9 @@ snapshots: '@esbuild/linux-arm64@0.23.1': optional: true + '@esbuild/linux-arm@0.17.19': + optional: true + '@esbuild/linux-arm@0.21.5': optional: true @@ -19520,6 +19886,9 @@ snapshots: '@esbuild/linux-arm@0.23.1': optional: true + '@esbuild/linux-ia32@0.17.19': + optional: true + '@esbuild/linux-ia32@0.21.5': optional: true @@ -19529,6 +19898,9 @@ snapshots: '@esbuild/linux-ia32@0.23.1': optional: true + '@esbuild/linux-loong64@0.17.19': + optional: true + '@esbuild/linux-loong64@0.21.5': optional: true @@ -19538,6 +19910,9 @@ snapshots: '@esbuild/linux-loong64@0.23.1': optional: true + '@esbuild/linux-mips64el@0.17.19': + optional: true + '@esbuild/linux-mips64el@0.21.5': optional: true @@ -19547,6 +19922,9 @@ snapshots: '@esbuild/linux-mips64el@0.23.1': optional: true + '@esbuild/linux-ppc64@0.17.19': + optional: true + '@esbuild/linux-ppc64@0.21.5': optional: true @@ -19556,6 +19934,9 @@ snapshots: '@esbuild/linux-ppc64@0.23.1': optional: true + 
'@esbuild/linux-riscv64@0.17.19': + optional: true + '@esbuild/linux-riscv64@0.21.5': optional: true @@ -19565,6 +19946,9 @@ snapshots: '@esbuild/linux-riscv64@0.23.1': optional: true + '@esbuild/linux-s390x@0.17.19': + optional: true + '@esbuild/linux-s390x@0.21.5': optional: true @@ -19574,6 +19958,9 @@ snapshots: '@esbuild/linux-s390x@0.23.1': optional: true + '@esbuild/linux-x64@0.17.19': + optional: true + '@esbuild/linux-x64@0.21.5': optional: true @@ -19583,6 +19970,9 @@ snapshots: '@esbuild/linux-x64@0.23.1': optional: true + '@esbuild/netbsd-x64@0.17.19': + optional: true + '@esbuild/netbsd-x64@0.21.5': optional: true @@ -19598,6 +19988,9 @@ snapshots: '@esbuild/openbsd-arm64@0.23.1': optional: true + '@esbuild/openbsd-x64@0.17.19': + optional: true + '@esbuild/openbsd-x64@0.21.5': optional: true @@ -19607,6 +20000,9 @@ snapshots: '@esbuild/openbsd-x64@0.23.1': optional: true + '@esbuild/sunos-x64@0.17.19': + optional: true + '@esbuild/sunos-x64@0.21.5': optional: true @@ -19616,6 +20012,9 @@ snapshots: '@esbuild/sunos-x64@0.23.1': optional: true + '@esbuild/win32-arm64@0.17.19': + optional: true + '@esbuild/win32-arm64@0.21.5': optional: true @@ -19625,6 +20024,9 @@ snapshots: '@esbuild/win32-arm64@0.23.1': optional: true + '@esbuild/win32-ia32@0.17.19': + optional: true + '@esbuild/win32-ia32@0.21.5': optional: true @@ -19634,6 +20036,9 @@ snapshots: '@esbuild/win32-ia32@0.23.1': optional: true + '@esbuild/win32-x64@0.17.19': + optional: true + '@esbuild/win32-x64@0.21.5': optional: true @@ -19673,7 +20078,7 @@ snapshots: '@escape.tech/graphql-armor-types@0.6.0': dependencies: - graphql: 16.9.0 + graphql: 16.8.1 optional: true '@eslint-community/eslint-utils@4.4.0(eslint@8.57.0(patch_hash=fjbpfrtrjd6idngyeqxnwopfva))': @@ -19739,6 +20144,8 @@ snapshots: ajv-formats: 2.1.1(ajv@8.17.1) fast-uri: 2.3.0 + '@fastify/busboy@2.1.1': {} + '@fastify/cors@9.0.1': dependencies: fastify-plugin: 4.5.1 @@ -24686,7 +25093,7 @@ snapshots: 
'@theguild/buddy@0.1.0(patch_hash=ryylgra5xglhidfoiaxehn22hq)': {} - '@theguild/components@7.0.0-alpha-20240906131026-66c41355f58d6d009d54ea08f21d933f97fea6f2(@types/react-dom@18.3.0)(@types/react@18.3.3)(next@14.2.6(@babel/core@7.22.9)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(ts-node@10.9.2(@swc/core@1.7.18(@swc/helpers@0.5.11))(@types/node@20.16.1)(typescript@5.5.4))(typescript@5.5.4)(webpack@5.92.1(@swc/core@1.7.18(@swc/helpers@0.5.11))(esbuild@0.23.0))': + '@theguild/components@7.0.0-alpha-20240906131026-66c41355f58d6d009d54ea08f21d933f97fea6f2(patch_hash=oskjo2y24bb7ptwj3nsbyin4w4)(@types/react-dom@18.3.0)(@types/react@18.3.3)(next@14.2.6(@babel/core@7.22.9)(@opentelemetry/api@1.9.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(ts-node@10.9.2(@swc/core@1.7.18(@swc/helpers@0.5.11))(@types/node@20.16.1)(typescript@5.5.4))(typescript@5.5.4)(webpack@5.92.1(@swc/core@1.7.18(@swc/helpers@0.5.11))(esbuild@0.23.0))': dependencies: '@giscus/react': 3.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@next/bundle-analyzer': 14.2.7 @@ -25098,6 +25505,10 @@ snapshots: '@types/node': 20.16.1 form-data: 3.0.1 + '@types/node-forge@1.3.11': + dependencies: + '@types/node': 20.16.1 + '@types/node@10.17.60': {} '@types/node@12.20.55': {} @@ -25802,6 +26213,10 @@ snapshots: arrify@1.0.1: {} + as-table@1.0.55: + dependencies: + printable-characters: 1.0.42 + asap@2.0.6: {} asn1@0.2.6: @@ -26012,6 +26427,8 @@ snapshots: inherits: 2.0.4 readable-stream: 3.6.0 + blake3-wasm@2.1.5: {} + blob-util@2.0.2: {} bluebird@3.7.2: {} @@ -26067,10 +26484,6 @@ snapshots: dependencies: balanced-match: 1.0.2 - braces@3.0.2: - dependencies: - fill-range: 7.0.1 - braces@3.0.3: dependencies: fill-range: 7.1.1 @@ -26272,6 +26685,13 @@ snapshots: tslib: 2.7.0 upper-case-first: 2.0.2 + capnp-ts@0.7.0: + dependencies: + debug: 4.3.6(supports-color@8.1.1) + tslib: 2.6.3 + 
transitivePeerDependencies: + - supports-color + cardinal@2.1.1: dependencies: ansicolors: 0.3.2 @@ -27092,6 +27512,8 @@ snapshots: dependencies: assert-plus: 1.0.0 + data-uri-to-buffer@2.0.2: {} + dataloader@1.4.0: {} dataloader@2.2.2: {} @@ -27210,7 +27632,7 @@ snapshots: has-property-descriptors: 1.0.1 object-keys: 1.1.1 - defu@6.1.2: {} + defu@6.1.4: {} delaunator@5.0.1: dependencies: @@ -27553,6 +27975,31 @@ snapshots: transitivePeerDependencies: - supports-color + esbuild@0.17.19: + optionalDependencies: + '@esbuild/android-arm': 0.17.19 + '@esbuild/android-arm64': 0.17.19 + '@esbuild/android-x64': 0.17.19 + '@esbuild/darwin-arm64': 0.17.19 + '@esbuild/darwin-x64': 0.17.19 + '@esbuild/freebsd-arm64': 0.17.19 + '@esbuild/freebsd-x64': 0.17.19 + '@esbuild/linux-arm': 0.17.19 + '@esbuild/linux-arm64': 0.17.19 + '@esbuild/linux-ia32': 0.17.19 + '@esbuild/linux-loong64': 0.17.19 + '@esbuild/linux-mips64el': 0.17.19 + '@esbuild/linux-ppc64': 0.17.19 + '@esbuild/linux-riscv64': 0.17.19 + '@esbuild/linux-s390x': 0.17.19 + '@esbuild/linux-x64': 0.17.19 + '@esbuild/netbsd-x64': 0.17.19 + '@esbuild/openbsd-x64': 0.17.19 + '@esbuild/sunos-x64': 0.17.19 + '@esbuild/win32-arm64': 0.17.19 + '@esbuild/win32-ia32': 0.17.19 + '@esbuild/win32-x64': 0.17.19 + esbuild@0.21.5: optionalDependencies: '@esbuild/aix-ppc64': 0.21.5 @@ -27995,6 +28442,8 @@ snapshots: '@types/estree-jsx': 1.0.0 '@types/unist': 3.0.0 + estree-walker@0.6.1: {} + estree-walker@2.0.2: {} estree-walker@3.0.3: @@ -28077,6 +28526,8 @@ snapshots: dependencies: pify: 2.3.0 + exit-hook@2.2.1: {} + exponential-backoff@3.1.1: {} express@4.19.2: @@ -28319,10 +28770,6 @@ snapshots: filesize@6.4.0: {} - fill-range@7.0.1: - dependencies: - to-regex-range: 5.0.1 - fill-range@7.1.1: dependencies: to-regex-range: 5.0.1 @@ -28591,6 +29038,11 @@ snapshots: get-package-type@0.1.0: {} + get-source@2.0.12: + dependencies: + data-uri-to-buffer: 2.0.2 + source-map: 0.6.1 + get-stack-trace@2.1.1: dependencies: bluebird: 3.7.2 @@ 
-28639,10 +29091,10 @@ snapshots: giget@1.1.2: dependencies: colorette: 2.0.20 - defu: 6.1.2 + defu: 6.1.4 https-proxy-agent: 5.0.1 mri: 1.2.0 - node-fetch-native: 1.0.2 + node-fetch-native: 1.6.4 pathe: 1.1.2 tar: 6.1.13 transitivePeerDependencies: @@ -29028,6 +29480,21 @@ snapshots: lru-cache: 10.2.0 tslib: 2.7.0 + graphql-yoga@5.6.0(graphql@16.9.0): + dependencies: + '@envelop/core': 5.0.2 + '@graphql-tools/executor': 1.3.1(graphql@16.9.0) + '@graphql-tools/schema': 10.0.6(graphql@16.9.0) + '@graphql-tools/utils': 10.5.4(graphql@16.9.0) + '@graphql-yoga/logger': 2.0.0 + '@graphql-yoga/subscription': 5.0.1 + '@whatwg-node/fetch': 0.9.21 + '@whatwg-node/server': 0.9.49 + dset: 3.1.2 + graphql: 16.9.0 + lru-cache: 10.2.0 + tslib: 2.7.0 + graphql-yoga@5.7.0(graphql@16.9.0): dependencies: '@envelop/core': 5.0.1 @@ -29043,6 +29510,9 @@ snapshots: lru-cache: 10.2.0 tslib: 2.7.0 + graphql@16.8.1: + optional: true + graphql@16.9.0: {} graphql@17.0.0-alpha.7: {} @@ -30318,6 +30788,10 @@ snapshots: lz-string@1.5.0: {} + magic-string@0.25.9: + dependencies: + sourcemap-codec: 1.4.8 + magic-string@0.27.0: dependencies: '@jridgewell/sourcemap-codec': 1.4.15 @@ -31114,7 +31588,7 @@ snapshots: micromatch@4.0.5: dependencies: - braces: 3.0.2 + braces: 3.0.3 picomatch: 2.3.1 micromatch@4.0.7: @@ -31154,6 +31628,25 @@ snapshots: mini-svg-data-uri@1.4.4: {} + miniflare@3.20240610.1: + dependencies: + '@cspotcode/source-map-support': 0.8.1 + acorn: 8.12.0 + acorn-walk: 8.3.2 + capnp-ts: 0.7.0 + exit-hook: 2.2.1 + glob-to-regexp: 0.4.1 + stoppable: 1.1.0 + undici: 5.28.4 + workerd: 1.20240610.1 + ws: 8.18.0 + youch: 3.3.3 + zod: 3.23.8 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + minimatch@10.0.1: dependencies: brace-expansion: 2.0.1 @@ -31578,6 +32071,8 @@ snapshots: multi-fork@0.0.2: {} + mustache@4.2.0: {} + mute-stream@0.0.8: {} mute-stream@1.0.0: {} @@ -31753,7 +32248,7 @@ snapshots: dependencies: minimatch: 3.1.2 - 
node-fetch-native@1.0.2: {} + node-fetch-native@1.6.4: {} node-fetch@2.6.12(encoding@0.1.13): dependencies: @@ -31761,6 +32256,8 @@ snapshots: optionalDependencies: encoding: 0.1.13 + node-forge@1.3.1: {} + node-gyp-build-optional-packages@5.0.7: optional: true @@ -32803,6 +33300,8 @@ snapshots: prettier: 3.3.3 tslib: 2.7.0 + printable-characters@1.0.42: {} + proc-log@3.0.0: {} process-nextick-args@2.0.1: {} @@ -33478,6 +33977,8 @@ snapshots: resolve.exports@2.0.0: {} + resolve.exports@2.0.2: {} + resolve@1.22.8: dependencies: is-core-module: 2.13.1 @@ -33567,6 +34068,20 @@ snapshots: robust-predicates@3.0.2: {} + rollup-plugin-inject@3.0.2: + dependencies: + estree-walker: 0.6.1 + magic-string: 0.25.9 + rollup-pluginutils: 2.8.2 + + rollup-plugin-node-polyfills@0.2.1: + dependencies: + rollup-plugin-inject: 3.0.2 + + rollup-pluginutils@2.8.2: + dependencies: + estree-walker: 0.6.1 + rollup@4.18.0: dependencies: '@types/estree': 1.0.5 @@ -33695,6 +34210,11 @@ snapshots: secure-json-parse@2.7.0: {} + selfsigned@2.4.1: + dependencies: + '@types/node-forge': 1.3.11 + node-forge: 1.3.1 + semver-compare@1.0.0: {} semver@5.7.1: {} @@ -33994,6 +34514,8 @@ snapshots: dependencies: whatwg-url: 7.1.0 + sourcemap-codec@1.4.8: {} + space-separated-tokens@2.0.1: {} spawndamnit@2.0.0: @@ -34067,6 +34589,11 @@ snapshots: stackback@0.0.2: {} + stacktracey@2.1.8: + dependencies: + as-table: 1.0.55 + get-source: 2.0.12 + standard-as-callback@2.1.0: {} state-local@1.0.7: {} @@ -34077,6 +34604,8 @@ snapshots: std-env@3.7.0: {} + stoppable@1.1.0: {} + storybook@8.2.9(@babel/preset-env@7.24.5(@babel/core@7.24.7)): dependencies: '@babel/core': 7.24.7 @@ -34826,6 +35355,8 @@ snapshots: uc.micro@2.1.0: {} + ufo@1.5.4: {} + uglify-js@3.17.4: {} unbox-primitive@1.0.2: @@ -34841,8 +35372,21 @@ snapshots: undici-types@6.19.6: {} + undici@5.28.4: + dependencies: + '@fastify/busboy': 2.1.1 + undici@6.19.8: {} + unenv-nightly@1.10.0-1717606461.a117952: + dependencies: + consola: 3.2.3 + defu: 
6.1.4 + mime: 3.0.0 + node-fetch-native: 1.6.4 + pathe: 1.1.2 + ufo: 1.5.4 + unicode-canonical-property-names-ecmascript@2.0.0: {} unicode-match-property-ecmascript@2.0.0: @@ -35481,8 +36025,41 @@ snapshots: wordwrap@1.0.0: {} + workerd@1.20240610.1: + optionalDependencies: + '@cloudflare/workerd-darwin-64': 1.20240610.1 + '@cloudflare/workerd-darwin-arm64': 1.20240610.1 + '@cloudflare/workerd-linux-64': 1.20240610.1 + '@cloudflare/workerd-linux-arm64': 1.20240610.1 + '@cloudflare/workerd-windows-64': 1.20240610.1 + workers-loki-logger@0.1.15: {} + wrangler@3.61.0(@cloudflare/workers-types@4.20240821.1): + dependencies: + '@cloudflare/kv-asset-handler': 0.3.3 + '@esbuild-plugins/node-globals-polyfill': 0.2.3(esbuild@0.17.19) + '@esbuild-plugins/node-modules-polyfill': 0.2.2(esbuild@0.17.19) + blake3-wasm: 2.1.5 + chokidar: 3.5.3 + esbuild: 0.17.19 + miniflare: 3.20240610.1 + nanoid: 3.3.7 + path-to-regexp: 6.2.2 + resolve: 1.22.8 + resolve.exports: 2.0.2 + selfsigned: 2.4.1 + source-map: 0.6.1 + unenv: unenv-nightly@1.10.0-1717606461.a117952 + xxhash-wasm: 1.0.2 + optionalDependencies: + '@cloudflare/workers-types': 4.20240821.1 + fsevents: 2.3.3 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + wrap-ansi@6.2.0: dependencies: ansi-styles: 4.3.0 @@ -35528,6 +36105,8 @@ snapshots: xtend@4.0.2: {} + xxhash-wasm@1.0.2: {} + y18n@4.0.3: {} y18n@5.0.8: {} @@ -35608,6 +36187,12 @@ snapshots: yocto-queue@1.1.1: {} + youch@3.3.3: + dependencies: + cookie: 0.5.0 + mustache: 4.2.0 + stacktracey: 2.1.8 + yup@0.29.3: dependencies: '@babel/runtime': 7.24.7 diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index 25fea1f0a..52ce95839 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -1,4 +1,5 @@ packages: + - packages/services/demo/* - packages/services/* - packages/migrations - packages/services/external-composition/*