chore: remove apollo-router-hive-fork from this repo (#7979)

This commit is contained in:
Dotan Simha 2026-04-15 11:50:18 +03:00 committed by GitHub
parent 7d4ef94432
commit e3d9750cc9
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
33 changed files with 7 additions and 17969 deletions

View file

@ -1,3 +0,0 @@
# The following are aliases you can use with "cargo command_name"
[alias]
"clippy:fix" = "clippy --all --fix --allow-dirty --allow-staged"

View file

@ -1,169 +0,0 @@
name: apollo-router-release
on:
# For PRs, this pipeline will use the commit ID as Docker image tag and R2 artifact prefix.
pull_request:
branches:
- main
paths:
- 'packages/libraries/router/**'
- 'packages/libraries/sdk-rs/**'
- 'docker/router.dockerfile'
- 'scripts/compress/**'
- 'configs/cargo/Cargo.lock'
- 'Cargo.lock'
- 'Cargo.toml'
# For `main` changes, this pipeline will look for changes in Rust crates or plugin versioning, and
# publish them only if changes are found and the image does not exist in GH Packages.
push:
paths:
- 'packages/libraries/router/**'
- 'packages/libraries/sdk-rs/**'
- 'docker/router.dockerfile'
- 'scripts/compress/**'
- 'configs/cargo/Cargo.lock'
- 'Cargo.lock'
- 'Cargo.toml'
branches:
- main
jobs:
# This script is doing the following:
# 1. Get the version of the apollo-router and the plugin from the Cargo.toml and package.json files
# 2. Check if there are changes in the Cargo.toml and package.json files in the current commit
# 3. If there are changes, check if the image tag exists in the GitHub Container Registry
find-changes:
runs-on: ubuntu-22.04
if: ${{ !github.event.pull_request.head.repo.fork }}
outputs:
should_release: ${{ steps.find_changes.outputs.should_release }}
release_version: ${{ steps.find_changes.outputs.release_version }}
release_latest: ${{ steps.find_changes.outputs.release_latest }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
fetch-depth: 2
- name: find changes in versions
id: find_changes
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
if [ "$GITHUB_EVENT_NAME" == "pull_request" ]; then
echo "Running in a PR, using commit ID as tag"
echo "should_release=true" >> $GITHUB_OUTPUT
echo "release_latest=false" >> $GITHUB_OUTPUT
echo "release_version=$GITHUB_SHA" >> $GITHUB_OUTPUT
exit 0
fi
echo "Running on push event, looking for changes in Rust crates or plugin versioning"
image_name="apollo-router"
github_org="graphql-hive"
router_version=$(cargo tree -i apollo-router --quiet | head -n 1 | awk -F" v" '{print $2}')
plugin_version=$(jq -r '.version' packages/libraries/router/package.json)
has_changes=$(git diff HEAD~ HEAD --name-only -- 'packages/libraries/router/Cargo.toml' 'packages/libraries/router/package.json' 'packages/libraries/sdk-rs/Cargo.toml' 'packages/libraries/sdk-rs/package.json' 'Cargo.lock' 'configs/cargo/Cargo.lock')
if [ "$has_changes" ]; then
image_tag_version="router${router_version}-plugin${plugin_version}"
response=$(curl -L \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${GITHUB_TOKEN}" \
-H "X-GitHub-Api-Version: 2022-11-28" \
-s \
https://api.github.com/orgs/${github_org}/packages/container/${image_name}/versions)
tag_exists=$(echo "$response" | jq -r ".[] | .metadata.container.tags[] | select(. | contains(\"${image_tag_version}\"))")
if [ ! "$tag_exists" ]; then
echo "Found changes in version $image_tag_version"
echo "release_version=$image_tag_version" >> $GITHUB_OUTPUT
echo "should_release=true" >> $GITHUB_OUTPUT
echo "release_latest=true" >> $GITHUB_OUTPUT
else
echo "No changes found in version $image_tag_version"
fi
fi
# Builds Rust crates, and creates Docker images
dockerize:
uses: ./.github/workflows/build-and-dockerize.yaml
name: image-build
needs:
- find-changes
if: ${{ needs.find-changes.outputs.should_release == 'true' }}
with:
imageTag: ${{ needs.find-changes.outputs.release_version }}
publishLatest: ${{ needs.find-changes.outputs.release_latest == 'true' }}
targets: apollo-router-hive-build
build: false
publishPrComment: true
secrets: inherit
# Test the Docker image, if it was published
test-image:
name: test apollo-router docker image
needs:
- dockerize
- find-changes
runs-on: ubuntu-22.04
env:
HIVE_TOKEN: ${{ secrets.HIVE_TOKEN }}
steps:
- name: Run Docker image
run: |
# Create router.yaml
cat << EOF > router.yaml
supergraph:
listen: 0.0.0.0:4000
health_check:
listen: 0.0.0.0:8088
enabled: true
path: /health
plugins:
hive.usage:
enabled: false
EOF
# Download supergraph
curl -sSL https://supergraph.demo.starstuff.dev/ > ./supergraph.graphql
# Run Docker image
docker run -p 4000:4000 -p 8088:8088 --name apollo_router_test -d \
--env HIVE_TOKEN="fake" \
--mount "type=bind,source=/$(pwd)/router.yaml,target=/dist/config/router.yaml" \
--mount "type=bind,source=/$(pwd)/supergraph.graphql,target=/dist/config/supergraph.graphql" \
ghcr.io/graphql-hive/apollo-router:${{ needs.find-changes.outputs.release_version }} \
--log debug \
--supergraph /dist/config/supergraph.graphql \
--config /dist/config/router.yaml
# Wait for the container to be ready
echo "Waiting for the container to be ready..."
sleep 20
HTTP_RESPONSE=$(curl --retry 5 --retry-delay 5 --max-time 30 --write-out "%{http_code}" --silent --output /dev/null "http://127.0.0.1:8088/health")
# Check if the HTTP response code is 200 (OK)
if [ $HTTP_RESPONSE -eq 200 ]; then
echo "Health check successful."
docker stop apollo_router_test
docker rm apollo_router_test
exit 0
else
echo "Health check failed with HTTP status code $HTTP_RESPONSE."
docker stop apollo_router_test
docker rm apollo_router_test
exit 1
fi
# Build and publish Rust crates and binaries
binary:
uses: ./.github/workflows/publish-rust.yaml
secrets: inherit
needs:
- find-changes
if: ${{ needs.find-changes.outputs.should_release == 'true' }}
with:
publish: true
latest: ${{ needs.find-changes.outputs.release_latest == 'true' }}
version: ${{ needs.find-changes.outputs.release_version }}

View file

@ -1,57 +0,0 @@
name: Apollo Router Updater
on:
schedule:
# Every 2 hours
- cron: '0 */2 * * *'
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch: {}
jobs:
update:
runs-on: ubuntu-22.04
permissions:
issues: write
pull-requests: write
contents: write
steps:
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
fetch-depth: 1
token: ${{ secrets.BOT_GITHUB_TOKEN }}
- name: setup environment
uses: ./.github/actions/setup
with:
codegen: false
actor: apollo-router-updater
- name: Install Rust
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1
with:
toolchain: '1.94.1'
default: true
override: true
- name: Check for updates
id: check
run: |
pnpm tsx ./scripts/apollo-router-action.ts
- name: Run updates
if: steps.check.outputs.update == 'true'
run: cargo update -p apollo-router --precise ${{ steps.check.outputs.version }}
- name: Create Pull Request
if: steps.check.outputs.update == 'true'
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7
with:
token: ${{ secrets.BOT_GITHUB_TOKEN }}
commit-message: Update apollo-router to version ${{ steps.check.outputs.version }}
branch: apollo-router-update-${{ steps.check.outputs.version }}
delete-branch: true
title: ${{ steps.check.outputs.title }}
body: |
Automatic update of apollo-router to version ${{ steps.check.outputs.version }}.
assignees: kamilkisiela,dotansimha
reviewers: kamilkisiela,dotansimha

View file

@ -1,169 +0,0 @@
on:
workflow_call:
inputs:
publish:
default: false
type: boolean
required: true
latest:
default: false
type: boolean
required: true
version:
default: ${{ github.sha }}
type: string
required: true
jobs:
detect-changes:
runs-on: ubuntu-22.04
outputs:
rust_changed: ${{ steps.rust_changed.outputs.rust_changed }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
fetch-depth: 2
- name: Look for changes
id: rust_changed
run: |
lines=$( git diff HEAD~ HEAD --name-only -- 'packages/libraries/router' 'packages/libraries/sdk-rs' 'Cargo.toml' 'configs/cargo/Cargo.lock' | wc -l )
if [ $lines -gt 0 ]; then
echo 'rust_changed=true' >> $GITHUB_OUTPUT
fi
test-rust:
needs: detect-changes
if: needs.detect-changes.outputs.rust_changed == 'true'
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
fetch-depth: 2
- name: setup environment
uses: ./.github/actions/setup
with:
actor: test-rust
codegen: false
- name: Install Protoc
uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Rust
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1
with:
toolchain: '1.94.1'
default: true
override: true
- name: Cache Rust
uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2
- name: Run tests
run: cargo test
publish-rust:
needs: [detect-changes, test-rust]
if: needs.detect-changes.outputs.rust_changed == 'true'
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
runs-on: ${{ matrix.os }}
timeout-minutes: 60
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
fetch-depth: 2
- name: setup environment
uses: ./.github/actions/setup
with:
actor: publish-rust
codegen: false
- name: Prepare MacOS
if: ${{ matrix.os == 'macos-latest' }}
run: |
echo "RUST_TARGET=x86_64-apple-darwin" >> $GITHUB_ENV
echo "RUST_TARGET_FILE=router" >> $GITHUB_ENV
echo "RUST_TARGET_OS=macos" >> $GITHUB_ENV
- name: Prepare Linux
if: ${{ matrix.os == 'ubuntu-latest' }}
run: |
echo "RUST_TARGET=x86_64-unknown-linux-gnu" >> $GITHUB_ENV
echo "RUST_TARGET_FILE=router" >> $GITHUB_ENV
echo "RUST_TARGET_OS=linux" >> $GITHUB_ENV
- name: Prepare Windows
if: ${{ matrix.os == 'windows-latest' }}
run: |
echo "RUST_TARGET=x86_64-pc-windows-msvc" | Out-File -FilePath $env:GITHUB_ENV -Append
echo "RUST_TARGET_FILE=router.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
echo "RUST_TARGET_OS=win" | Out-File -FilePath $env:GITHUB_ENV -Append
npm run cargo:fix
- name: Install Protoc
uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Rust
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1
with:
toolchain: '1.94.1'
target: ${{ env.RUST_TARGET }}
default: true
override: true
- name: Cache Rust
uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2
- name: Build
uses: actions-rs/cargo@844f36862e911db73fe0815f00a4a2602c279505 # v1
with:
command: build
args: --release
- name: Strip binary from debug symbols
if: ${{ matrix.os == 'ubuntu-latest' }}
run: strip target/release/${{ env.RUST_TARGET_FILE }}
- name: Compress
run: |
./target/release/compress ./target/release/${{ env.RUST_TARGET_FILE }} ./router.tar.gz
- name: Upload artifact
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4
with:
name: router-${{ env.RUST_TARGET_OS }}
path: router.tar.gz
- name: Upload to R2 (${{ inputs.version }})
if: ${{ inputs.publish }}
uses: randomairborne/r2-release@9cbc35a2039ee2ef453a6988cd2a85bb2d7ba8af # v1.0.2
with:
endpoint: https://6d5bc18cd8d13babe7ed321adba3d8ae.r2.cloudflarestorage.com
accesskeyid: ${{ secrets.R2_ACCESS_KEY_ID }}
secretaccesskey: ${{ secrets.R2_SECRET_ACCESS_KEY }}
bucket: apollo-router
file: router.tar.gz
destination: ${{ inputs.version }}/${{ env.RUST_TARGET_OS }}/router.tar.gz
- name: Upload to R2 (latest)
if: ${{ inputs.publish && inputs.latest }}
uses: randomairborne/r2-release@9cbc35a2039ee2ef453a6988cd2a85bb2d7ba8af # v1.0.2
with:
endpoint: https://6d5bc18cd8d13babe7ed321adba3d8ae.r2.cloudflarestorage.com
accesskeyid: ${{ secrets.R2_ACCESS_KEY_ID }}
secretaccesskey: ${{ secrets.R2_SECRET_ACCESS_KEY }}
bucket: apollo-router
file: router.tar.gz
destination: latest/${{ env.RUST_TARGET_OS }}/router.tar.gz

View file

@ -118,11 +118,3 @@ jobs:
env: env:
VERSION: ${{ steps.cli.outputs.version }} VERSION: ${{ steps.cli.outputs.version }}
run: pnpm oclif promote --no-xz --sha ${GITHUB_SHA:0:7} --version $VERSION run: pnpm oclif promote --no-xz --sha ${GITHUB_SHA:0:7} --version $VERSION
- name: release to Crates.io
if:
steps.changesets.outputs.published && contains(steps.changesets.outputs.publishedPackages,
'"hive-apollo-router-plugin"')
run: |
cargo login ${{ secrets.CARGO_REGISTRY_TOKEN }}
cargo publish --allow-dirty --no-verify

View file

@ -26,7 +26,7 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
# Divide integration tests into 3 shards, to run them in parallel. # Divide integration tests into 3 shards, to run them in parallel.
shardIndex: [1, 2, 3, 'apollo-router'] shardIndex: [1, 2, 3]
env: env:
DOCKER_REGISTRY: ${{ inputs.registry }}/${{ inputs.imageName }}/ DOCKER_REGISTRY: ${{ inputs.registry }}/${{ inputs.imageName }}/
@ -78,68 +78,7 @@ jobs:
run: | run: |
docker compose -f docker/docker-compose.community.yml -f ./integration-tests/docker-compose.integration.yaml --env-file ./integration-tests/.env ps docker compose -f docker/docker-compose.community.yml -f ./integration-tests/docker-compose.integration.yaml --env-file ./integration-tests/.env ps
## ---- START ---- Apollo Router specific steps - name: run integration tests
- if: matrix.shardIndex == 'apollo-router'
name: Install Protoc
uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- if: matrix.shardIndex == 'apollo-router'
name: Install Rust
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1
with:
toolchain: '1.94.1'
default: true
override: true
- if: matrix.shardIndex == 'apollo-router'
name: Cache Rust
uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2
# ---- START ---- Disk space cleanup before apollo router tests
- if: matrix.shardIndex == 'apollo-router'
name: Before cleanup disk space
run: df -h
- if: matrix.shardIndex == 'apollo-router'
name: Cleanup disk space
run: |
sudo rm -rf \
/usr/lib/jvm \
/usr/share/swift \
/usr/local/julia* \
/usr/local/share/chromium \
/opt/az \
/usr/local/share/powershell \
/opt/microsoft /opt/google \
/usr/local/lib/android \
/usr/local/.ghcup \
/usr/share/dotnet \
/usr/local/lib/android \
/opt/ghc /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
sudo docker builder prune -a
- if: matrix.shardIndex == 'apollo-router'
name: After cleanup disk space
run: df -h
# ---- END ---- Disk space cleanup before apollo router tests
- if: matrix.shardIndex == 'apollo-router'
name: build apollo router
run: |
cargo build
- if: matrix.shardIndex == 'apollo-router'
name: run apollo router integration tests
timeout-minutes: 30
run: |
pnpm test:integration:apollo-router
## ---- END ---- Apollo Router specific steps
- if: matrix.shardIndex != 'apollo-router'
name: run integration tests
timeout-minutes: 30 timeout-minutes: 30
run: | run: |
pnpm test:integration --shard=${{ matrix.shardIndex }}/3 pnpm test:integration --shard=${{ matrix.shardIndex }}/3

25
.gitignore vendored
View file

@ -110,9 +110,6 @@ integration-tests/testkit/gql/
npm-shrinkwrap.json npm-shrinkwrap.json
# Rust
/target
# bob # bob
.bob/ .bob/
@ -127,6 +124,7 @@ packages/web/app/next.config.mjs
packages/web/app/environment-*.mjs packages/web/app/environment-*.mjs
packages/web/app/src/gql/*.ts packages/web/app/src/gql/*.ts
packages/web/app/src/gql/*.json packages/web/app/src/gql/*.json
# Changelog # Changelog
packages/web/app/src/components/ui/changelog/generated-changelog.ts packages/web/app/src/components/ui/changelog/generated-changelog.ts
@ -142,22 +140,5 @@ resolvers.generated.ts
docker/docker-compose.override.yml docker/docker-compose.override.yml
test-results/ test-results/
Cargo.lock
Cargo.lock target
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock
Cargo.lock

7626
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,3 +0,0 @@
[workspace]
resolver = "2"
members = ["packages/libraries/router", "scripts/compress"]

7313
configs/cargo/Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -337,25 +337,6 @@ target "app" {
] ]
} }
target "apollo-router" {
inherits = ["router-base", get_target()]
contexts = {
router_pkg = "${PWD}/packages/libraries/router"
config = "${PWD}/configs/cargo"
}
args = {
IMAGE_TITLE = "graphql-hive/apollo-router"
PORT = "4000"
IMAGE_DESCRIPTION = "Apollo Router for GraphQL Hive."
}
tags = [
local_image_tag("apollo-router"),
stable_image_tag("apollo-router"),
image_tag("apollo-router", COMMIT_SHA),
image_tag("apollo-router", BRANCH_NAME)
]
}
target "otel-collector" { target "otel-collector" {
inherits = ["otel-collector-base", get_target()] inherits = ["otel-collector-base", get_target()]
context = "${PWD}/docker/configs/otel-collector" context = "${PWD}/docker/configs/otel-collector"
@ -421,12 +402,6 @@ group "integration-tests" {
] ]
} }
group "apollo-router-hive-build" {
targets = [
"apollo-router"
]
}
group "cli" { group "cli" {
targets = [ targets = [
"cli" "cli"

View file

@ -1,62 +0,0 @@
# syntax=docker/dockerfile:1
FROM scratch AS router_pkg
FROM scratch AS config
FROM rust:1.94.1-slim-bookworm AS build
# Required by Apollo Router
RUN apt-get update
RUN apt-get -y install npm protobuf-compiler cmake
RUN rm -rf /var/lib/apt/lists/*
RUN update-ca-certificates
RUN rustup component add rustfmt
WORKDIR /usr/src
# Create blank projects
RUN USER=root cargo new router
# Copy Cargo files
COPY --from=router_pkg Cargo.toml /usr/src/router/
COPY --from=config Cargo.lock /usr/src/router/
WORKDIR /usr/src/router
# Get the dependencies cached, so we can use dummy input files so Cargo won't fail
RUN echo 'fn main() { println!(""); }' > ./src/main.rs
RUN echo 'fn main() { println!(""); }' > ./src/lib.rs
RUN cargo build --release
# Copy in the actual source code
COPY --from=router_pkg src ./src
RUN touch ./src/main.rs
RUN touch ./src/lib.rs
# Real build this time
RUN cargo build --release
# Runtime
FROM debian:bookworm-slim AS runtime
RUN apt-get update
RUN apt-get -y install ca-certificates
RUN rm -rf /var/lib/apt/lists/*
LABEL org.opencontainers.image.title=$IMAGE_TITLE
LABEL org.opencontainers.image.version=$RELEASE
LABEL org.opencontainers.image.description=$IMAGE_DESCRIPTION
LABEL org.opencontainers.image.authors="The Guild"
LABEL org.opencontainers.image.vendor="Kamil Kisiela"
LABEL org.opencontainers.image.url="https://github.com/graphql-hive/console"
LABEL org.opencontainers.image.source="https://github.com/graphql-hive/console"
RUN mkdir -p /dist/config
RUN mkdir /dist/schema
# Copy in the required files from our build image
COPY --from=build --chown=root:root /usr/src/router/target/release/router /dist
COPY --from=router_pkg router.yaml /dist/config/router.yaml
WORKDIR /dist
ENV APOLLO_ROUTER_CONFIG_PATH="/dist/config/router.yaml"
ENTRYPOINT ["./router"]

View file

@ -7,7 +7,6 @@
"prepare:env": "cd ../ && pnpm build:libraries && pnpm build:services", "prepare:env": "cd ../ && pnpm build:libraries && pnpm build:services",
"start": "./local.sh", "start": "./local.sh",
"test:integration": "vitest", "test:integration": "vitest",
"test:integration:apollo-router": "TEST_APOLLO_ROUTER=1 vitest tests/apollo-router",
"typecheck": "tsc --noEmit" "typecheck": "tsc --noEmit"
}, },
"devDependencies": { "devDependencies": {

View file

@ -1,161 +0,0 @@
import { existsSync, rmSync, writeFileSync } from 'node:fs';
import { createServer } from 'node:http';
import { tmpdir } from 'node:os';
import { join } from 'node:path';
import { ProjectType } from 'testkit/gql/graphql';
import { initSeed } from 'testkit/seed';
import { getServiceHost } from 'testkit/utils';
import { execa } from '@esm2cjs/execa';
type MaybePromise<T> = T | Promise<T>;
describe('Apollo Router Integration', () => {
const getAvailablePort = () =>
new Promise<number>(resolve => {
const server = createServer();
server.listen(0, () => {
const address = server.address();
if (address && typeof address === 'object') {
const port = address.port;
server.close(() => resolve(port));
} else {
throw new Error('Could not get available port');
}
});
});
function defer(deferFn: () => MaybePromise<void>) {
return {
async [Symbol.asyncDispose]() {
return deferFn();
},
};
}
it('fetches the supergraph and sends usage reports', async () => {
const { createOrg } = await initSeed().createOwner();
const { createProject } = await createOrg();
const { createTargetAccessToken, createCdnAccess, target, waitForOperationsCollected } =
await createProject(ProjectType.Federation);
const writeToken = await createTargetAccessToken({});
// Publish Schema
const publishSchemaResult = await writeToken
.publishSchema({
author: 'Arda',
commit: 'abc123',
sdl: /* GraphQL */ `
type Query {
me: User
}
type User {
id: ID!
name: String!
}
`,
service: 'users',
url: 'https://federation-demo.theguild.workers.dev/users',
})
.then(r => r.expectNoGraphQLErrors());
expect(publishSchemaResult.schemaPublish.__typename).toBe('SchemaPublishSuccess');
const cdnAccessResult = await createCdnAccess();
const usageAddress = await getServiceHost('usage', 8081);
const routerBinPath = join(__dirname, '../../../target/debug/router');
if (!existsSync(routerBinPath)) {
throw new Error(
`Apollo Router binary not found at path: ${routerBinPath}, make sure to build it first with 'cargo build'`,
);
}
const routerPort = await getAvailablePort();
const routerConfigContent = `
supergraph:
listen: 0.0.0.0:${routerPort}
plugins:
hive.usage: {}
`.trim();
const routerConfigPath = join(tmpdir(), `apollo-router-config-${Date.now()}.yaml`);
writeFileSync(routerConfigPath, routerConfigContent, 'utf-8');
const cdnEndpoint = await getServiceHost('server', 8082).then(
v => `http://${v}/artifacts/v1/${target.id}`,
);
const routerProc = execa(routerBinPath, ['--dev', '--config', routerConfigPath], {
all: true,
env: {
HIVE_CDN_ENDPOINT: cdnEndpoint,
HIVE_CDN_KEY: cdnAccessResult.secretAccessToken,
HIVE_ENDPOINT: `http://${usageAddress}`,
HIVE_TOKEN: writeToken.secret,
},
});
let log = '';
await new Promise((resolve, reject) => {
routerProc.catch(err => {
if (!err.isCanceled) {
reject(err);
}
});
const routerProcOut = routerProc.all;
if (!routerProcOut) {
return reject(new Error('No stdout from Apollo Router process'));
}
routerProcOut.on('data', data => {
log += data.toString();
if (log.includes('GraphQL endpoint exposed at')) {
resolve(true);
}
process.stdout.write(log);
});
});
await using _ = defer(() => {
rmSync(routerConfigPath);
routerProc.cancel();
});
const url = `http://localhost:${routerPort}/`;
async function sendOperation(i: number) {
const response = await fetch(url, {
method: 'POST',
headers: {
accept: 'application/json',
'content-type': 'application/json',
},
body: JSON.stringify({
query: `
query Query${i} {
me {
id
name
}
}
`,
}),
});
expect(response.status).toBe(200);
const result = await response.json();
expect(result).toEqual({
data: {
me: {
id: '1',
name: 'Ada Lovelace',
},
},
});
}
const cnt = 1000;
const jobs = [];
for (let i = 0; i < cnt; i++) {
if (i % 100 === 0) {
await new Promise(res => setTimeout(res, 500));
}
jobs.push(sendOperation(i));
}
await Promise.all(jobs);
await waitForOperationsCollected(cnt);
});
});

View file

@ -37,8 +37,6 @@ export default defineConfig({
}, },
setupFiles, setupFiles,
testTimeout: 90_000, testTimeout: 90_000,
exclude: process.env.TEST_APOLLO_ROUTER exclude: defaultExclude,
? defaultExclude
: [...defaultExclude, 'tests/apollo-router/**'],
}, },
}); });

View file

@ -42,7 +42,7 @@
"prerelease": "pnpm build:libraries", "prerelease": "pnpm build:libraries",
"prettier": "prettier --cache --write --list-different --ignore-unknown \"**/*\"", "prettier": "prettier --cache --write --list-different --ignore-unknown \"**/*\"",
"release": "pnpm build:libraries && changeset publish", "release": "pnpm build:libraries && changeset publish",
"release:version": "changeset version && pnpm --filter hive-apollo-router-plugin sync-cargo-file && pnpm build:libraries && pnpm --filter @graphql-hive/cli oclif:readme", "release:version": "changeset version && pnpm build:libraries && pnpm --filter @graphql-hive/cli oclif:readme",
"seed:app-deployments": "tsx scripts/seed-app-deployments.mts", "seed:app-deployments": "tsx scripts/seed-app-deployments.mts",
"seed:insights": "tsx scripts/seed-insights.mts", "seed:insights": "tsx scripts/seed-insights.mts",
"seed:org": "tsx scripts/seed-organization.mts", "seed:org": "tsx scripts/seed-organization.mts",
@ -54,7 +54,6 @@
"test:e2e:local": "CYPRESS_BASE_URL=http://localhost:3000 RUN_AGAINST_LOCAL_SERVICES=1 cypress open --browser chrome", "test:e2e:local": "CYPRESS_BASE_URL=http://localhost:3000 RUN_AGAINST_LOCAL_SERVICES=1 cypress open --browser chrome",
"test:e2e:open": "CYPRESS_BASE_URL=$HIVE_APP_BASE_URL cypress open", "test:e2e:open": "CYPRESS_BASE_URL=$HIVE_APP_BASE_URL cypress open",
"test:integration": "cd integration-tests && pnpm test:integration", "test:integration": "cd integration-tests && pnpm test:integration",
"test:integration:apollo-router": "cd integration-tests && pnpm test:integration:apollo-router",
"typecheck": "pnpm run -r --filter '!hive' typecheck", "typecheck": "pnpm run -r --filter '!hive' typecheck",
"upload-sourcemaps": "./scripts/upload-sourcemaps.sh", "upload-sourcemaps": "./scripts/upload-sourcemaps.sh",
"workspace": "pnpm run --filter $1 $2" "workspace": "pnpm run --filter $1 $2"

View file

@ -1 +0,0 @@
target/**

View file

@ -1,352 +0,0 @@
# 16.10.2024
## 3.0.3
### Patch Changes
- [#7815](https://github.com/graphql-hive/console/pull/7815)
[`078e661`](https://github.com/graphql-hive/console/commit/078e6611cbbd94b2ba325dc35bfbf636d2458f24)
Thanks [@ardatan](https://github.com/ardatan)! - Bump `hive-console-sdk` to `0.3.7` to pin
`graphql-tools` to a compatible version. The previous `hive-console-sdk@0.3.5` allowed
`graphql-tools@^0.5` which resolves to `0.5.2`, a version that removes public API traits
(`SchemaDocumentExtension`, `FieldByNameExtension`, etc.) that `hive-console-sdk` depends on.
## 3.0.2
### Patch Changes
- [#7585](https://github.com/graphql-hive/console/pull/7585)
[`9a6e8a9`](https://github.com/graphql-hive/console/commit/9a6e8a9fe7f337c4a2ee6b7375281f5ae42a38e3)
Thanks [@dotansimha](https://github.com/dotansimha)! - Upgrade to latest `hive-console-sdk` and
drop direct dependency on `graphql-tools`
## 3.0.1
### Patch Changes
- [#7476](https://github.com/graphql-hive/console/pull/7476)
[`f4d5f7e`](https://github.com/graphql-hive/console/commit/f4d5f7ee5bf50bc8b621b011696d43757de2e071)
Thanks [@kamilkisiela](https://github.com/kamilkisiela)! - Updated `hive-apollo-router-plugin` to
use `hive-console-sdk` from crates.io instead of a local dependency. The plugin now uses
`graphql-tools::parser` instead of `graphql-parser` to leverage the parser we now ship in
`graphql-tools` crate.
## 3.0.0
### Major Changes
- [#7379](https://github.com/graphql-hive/console/pull/7379)
[`b134461`](https://github.com/graphql-hive/console/commit/b13446109d9663ccabef07995eb25cf9dff34f37)
Thanks [@ardatan](https://github.com/ardatan)! - - Multiple endpoints support for `HiveRegistry`
and `PersistedOperationsPlugin`
Breaking Changes:
- Now there is no `endpoint` field in the configuration, it has been replaced with `endpoints`,
which is an array of strings. You are not affected if you use environment variables to set the
endpoint.
```diff
HiveRegistry::new(
Some(
HiveRegistryConfig {
- endpoint: String::from("CDN_ENDPOINT"),
+ endpoints: vec![String::from("CDN_ENDPOINT1"), String::from("CDN_ENDPOINT2")],
)
)
```
### Patch Changes
- [#7479](https://github.com/graphql-hive/console/pull/7479)
[`382b481`](https://github.com/graphql-hive/console/commit/382b481e980e588e3e6cf7831558b2d0811253f5)
Thanks [@ardatan](https://github.com/ardatan)! - Update dependencies
- Updated dependencies
[[`b134461`](https://github.com/graphql-hive/console/commit/b13446109d9663ccabef07995eb25cf9dff34f37),
[`b134461`](https://github.com/graphql-hive/console/commit/b13446109d9663ccabef07995eb25cf9dff34f37)]:
- hive-console-sdk-rs@0.3.0
## 2.3.6
### Patch Changes
- Updated dependencies
[[`0ac2e06`](https://github.com/graphql-hive/console/commit/0ac2e06fd6eb94c9d9817f78faf6337118f945eb),
[`4b796f9`](https://github.com/graphql-hive/console/commit/4b796f95bbc0fc37aac2c3a108a6165858b42b49),
[`a9905ec`](https://github.com/graphql-hive/console/commit/a9905ec7198cf1bec977a281c5021e0ef93c2c34)]:
- hive-console-sdk-rs@0.2.3
## 2.3.5
### Patch Changes
- Updated dependencies
[[`24c0998`](https://github.com/graphql-hive/console/commit/24c099818e4dfec43feea7775e8189d0f305a10c)]:
- hive-console-sdk-rs@0.2.2
## 2.3.4
### Patch Changes
- Updated dependencies
[[`69e2f74`](https://github.com/graphql-hive/console/commit/69e2f74ab867ee5e97bbcfcf6a1b69bb23ccc7b2)]:
- hive-console-sdk-rs@0.2.1
## 2.3.3
### Patch Changes
- Updated dependencies
[[`cc6cd28`](https://github.com/graphql-hive/console/commit/cc6cd28eb52d774683c088ce456812d3541d977d)]:
- hive-console-sdk-rs@0.2.0
## 2.3.2
### Patch Changes
- Updated dependencies
[[`d8f6e25`](https://github.com/graphql-hive/console/commit/d8f6e252ee3cd22948eb0d64b9d25c9b04dba47c)]:
- hive-console-sdk-rs@0.1.1
## 2.3.1
### Patch Changes
- [#7196](https://github.com/graphql-hive/console/pull/7196)
[`7878736`](https://github.com/graphql-hive/console/commit/7878736643578ab23d95412b893c091e32691e60)
Thanks [@ardatan](https://github.com/ardatan)! - Breaking:
- `UsageAgent` now accepts `Duration` for `connect_timeout` and `request_timeout` instead of
`u64`.
- `SupergraphFetcher` now accepts `Duration` for `connect_timeout` and `request_timeout` instead
of `u64`.
- `PersistedDocumentsManager` now accepts `Duration` for `connect_timeout` and `request_timeout`
instead of `u64`.
- Use original `graphql-parser` and `graphql-tools` crates instead of forked versions.
- Updated dependencies
[[`7878736`](https://github.com/graphql-hive/console/commit/7878736643578ab23d95412b893c091e32691e60)]:
- hive-console-sdk-rs@0.1.0
## 2.3.0
### Minor Changes
- [#7143](https://github.com/graphql-hive/console/pull/7143)
[`b80e896`](https://github.com/graphql-hive/console/commit/b80e8960f492e3bcfe1012caab294d9066d86fe3)
Thanks [@ardatan](https://github.com/ardatan)! - Extract Hive Console integration implementation
into a new package `hive-console-sdk` which can be used by any Rust library for Hive Console
integration
It also includes a refactor to use less Mutexes like replacing `lru` + `Mutex` with the
thread-safe `moka` package. Only one place that handles queueing uses `Mutex` now.
### Patch Changes
- [#7143](https://github.com/graphql-hive/console/pull/7143)
[`b80e896`](https://github.com/graphql-hive/console/commit/b80e8960f492e3bcfe1012caab294d9066d86fe3)
Thanks [@ardatan](https://github.com/ardatan)! - Fixes a bug when Persisted Operations are enabled
by default which should be explicitly enabled
- Updated dependencies
[[`b80e896`](https://github.com/graphql-hive/console/commit/b80e8960f492e3bcfe1012caab294d9066d86fe3)]:
- hive-console-sdk-rs@0.0.1
## 2.2.0
### Minor Changes
- [#6906](https://github.com/graphql-hive/console/pull/6906)
[`7fe1c27`](https://github.com/graphql-hive/console/commit/7fe1c271a596353d23ad770ce667f7781be6cc13)
Thanks [@egoodwinx](https://github.com/egoodwinx)! - Advanced breaking change detection for inputs
and arguments.
With this change, inputs and arguments will now be collected from the GraphQL operations executed
by the router, and will be reported to Hive Console.
Additional references:
- https://github.com/graphql-hive/console/pull/6764
- https://github.com/graphql-hive/console/issues/6649
### Patch Changes
- [#7173](https://github.com/graphql-hive/console/pull/7173)
[`eba62e1`](https://github.com/graphql-hive/console/commit/eba62e13f658f00a4a8f6db6b4d8501070fbed45)
Thanks [@dotansimha](https://github.com/dotansimha)! - Use the correct plugin version in the
User-Agent header used for Console requests
- [#6906](https://github.com/graphql-hive/console/pull/6906)
[`7fe1c27`](https://github.com/graphql-hive/console/commit/7fe1c271a596353d23ad770ce667f7781be6cc13)
Thanks [@egoodwinx](https://github.com/egoodwinx)! - Update Rust version to 1.90
## 2.1.3
### Patch Changes
- [#6753](https://github.com/graphql-hive/console/pull/6753)
[`7ef800e`](https://github.com/graphql-hive/console/commit/7ef800e8401a4e3fda4e8d1208b940ad6743449e)
Thanks [@Intellicode](https://github.com/Intellicode)! - fix tmp dir filename
## 2.1.2
### Patch Changes
- [#6788](https://github.com/graphql-hive/console/pull/6788)
[`6f0af0e`](https://github.com/graphql-hive/console/commit/6f0af0eb712ce358b212b335f11d4a86ede08931)
Thanks [@dotansimha](https://github.com/dotansimha)! - Bump version to trigger release, fix
lockfile
## 2.1.1
### Patch Changes
- [#6714](https://github.com/graphql-hive/console/pull/6714)
[`3f823c9`](https://github.com/graphql-hive/console/commit/3f823c9e1f3bd5fd8fde4e375a15f54a9d5b4b4e)
Thanks [@github-actions](https://github.com/apps/github-actions)! - Updated internal Apollo crates
to get downstream fix for advisories. See
https://github.com/apollographql/router/releases/tag/v2.1.1
## 2.1.0
### Minor Changes
- [#6577](https://github.com/graphql-hive/console/pull/6577)
[`c5d7822`](https://github.com/graphql-hive/console/commit/c5d78221b6c088f2377e6491b5bd3c7799d53e94)
Thanks [@dotansimha](https://github.com/dotansimha)! - Add support for providing a target for
usage reporting with organization access tokens.
This can either be a slug following the format `$organizationSlug/$projectSlug/$targetSlug` (e.g
`the-guild/graphql-hive/staging`) or an UUID (e.g. `a0f4c605-6541-4350-8cfe-b31f21a4bf80`).
```yaml
# ... other apollo-router configuration
plugins:
hive.usage:
enabled: true
registry_token: 'ORGANIZATION_ACCESS_TOKEN'
target: 'my-org/my-project/my-target'
```
## 2.0.0
### Major Changes
- [#6549](https://github.com/graphql-hive/console/pull/6549)
[`158b63b`](https://github.com/graphql-hive/console/commit/158b63b4f217bf08f59dbef1fa14553106074cc9)
Thanks [@dotansimha](https://github.com/dotansimha)! - Updated core dependencies (body, http) to
match apollo-router v2
### Patch Changes
- [#6549](https://github.com/graphql-hive/console/pull/6549)
[`158b63b`](https://github.com/graphql-hive/console/commit/158b63b4f217bf08f59dbef1fa14553106074cc9)
Thanks [@dotansimha](https://github.com/dotansimha)! - Updated thiserror, jsonschema, lru, rand to
latest and adjust the code
## 1.1.1
### Patch Changes
- [#6383](https://github.com/graphql-hive/console/pull/6383)
[`ec356a7`](https://github.com/graphql-hive/console/commit/ec356a7784d1f59722f80a69f501f1f250b2f6b2)
Thanks [@kamilkisiela](https://github.com/kamilkisiela)! - Collect custom scalars from arguments
and input object fields
## 1.1.0
### Minor Changes
- [#5732](https://github.com/graphql-hive/console/pull/5732)
[`1d3c566`](https://github.com/graphql-hive/console/commit/1d3c566ddcf5eb31c68545931da32bcdf4b8a047)
Thanks [@dotansimha](https://github.com/dotansimha)! - Updated Apollo-Router custom plugin for
Hive to use Usage reporting spec v2.
[Learn more](https://the-guild.dev/graphql/hive/docs/specs/usage-reports)
- [#5732](https://github.com/graphql-hive/console/pull/5732)
[`1d3c566`](https://github.com/graphql-hive/console/commit/1d3c566ddcf5eb31c68545931da32bcdf4b8a047)
Thanks [@dotansimha](https://github.com/dotansimha)! - Add support for persisted documents using
Hive App Deployments.
[Learn more](https://the-guild.dev/graphql/hive/product-updates/2024-07-30-persisted-documents-app-deployments-preview)
## 1.0.1
### Patch Changes
- [#6057](https://github.com/graphql-hive/console/pull/6057)
[`e4f8b0a`](https://github.com/graphql-hive/console/commit/e4f8b0a51d1158da966a719f321bc13e5af39ea0)
Thanks [@kamilkisiela](https://github.com/kamilkisiela)! - Explain what Hive is in README
## 1.0.0
### Major Changes
- [#5941](https://github.com/graphql-hive/console/pull/5941)
[`762bcd8`](https://github.com/graphql-hive/console/commit/762bcd83941d7854873f6670580ae109c4901dea)
Thanks [@dotansimha](https://github.com/dotansimha)! - Release v1 of Hive plugin for apollo-router
## 0.1.2
### Patch Changes
- [#5991](https://github.com/graphql-hive/console/pull/5991)
[`1ea4df9`](https://github.com/graphql-hive/console/commit/1ea4df95b5fcef85f19caf682a827baf1849a28d)
Thanks [@dotansimha](https://github.com/dotansimha)! - Improvements to release pipeline and added
missing metadata to Cargo file
## 0.1.1
### Patch Changes
- [#5930](https://github.com/graphql-hive/console/pull/5930)
[`1b7acd6`](https://github.com/graphql-hive/console/commit/1b7acd6978391e402fe04cc752b5e61ec05d0f03)
Thanks [@dotansimha](https://github.com/dotansimha)! - Fixes for Crate publishing flow
## 0.1.0
### Minor Changes
- [#5922](https://github.com/graphql-hive/console/pull/5922)
[`28c6da8`](https://github.com/graphql-hive/console/commit/28c6da8b446d62dcc4460be946fe3aecdbed858d)
Thanks [@dotansimha](https://github.com/dotansimha)! - Initial release of Hive plugin for
Apollo-Router
## 0.0.1
### Patch Changes
- [#5898](https://github.com/graphql-hive/console/pull/5898)
[`1a92d7d`](https://github.com/graphql-hive/console/commit/1a92d7decf9d0593450e81b394d12c92f40c2b3d)
Thanks [@dotansimha](https://github.com/dotansimha)! - Initial release of
hive-apollo-router-plugin crate
- Report enum values when an enum is used as an output type and align with JS implementation
# 19.07.2024
- Writes `supergraph-schema.graphql` file to a temporary directory (the path depends on OS), and
this is now the default of `HIVE_CDN_SCHEMA_FILE_PATH`.
# 10.04.2024
- `HIVE_CDN_ENDPOINT` and `endpoint` accept an URL with and without the `/supergraph` part
# 09.01.2024
- Introduce `HIVE_CDN_SCHEMA_FILE_PATH` environment variable to specify where to download the
supergraph schema (default is `./supergraph-schema.graphql`)
# 11.07.2023
- Use debug level when logging dropped operations
# 07.06.2023
- Introduce `enabled` flag (Usage Plugin)
# 23.08.2022
- Don't panic on scalars used as variable types
- Introduce `buffer_size`
- Ignore operations including `__schema` or `__type`

View file

@ -1,44 +0,0 @@
[package]
name = "hive-apollo-router-plugin"
authors = ["Kamil Kisiela <kamil.kisiela@gmail.com>"]
repository = "https://github.com/graphql-hive/console/"
edition = "2024"
license = "MIT"
publish = true
version = "3.0.3"
description = "Apollo-Router Plugin for Hive"
[[bin]]
name = "router"
path = "src/main.rs"
[lib]
name = "hive_apollo_router_plugin"
path = "src/lib.rs"
[dependencies]
apollo-router = { version = "^2.0.0" }
axum-core = "0.5"
hive-console-sdk = "=0.3.7"
sha2 = { version = "0.10.8", features = ["std"] }
anyhow = "1"
tracing = "0.1"
bytes = "1.11.1"
async-trait = "0.1.77"
futures = { version = "0.3.30", features = ["thread-pool"] }
schemars = { version = "1.0.4", features = ["url2"] }
serde = "1"
serde_json = "1"
tokio = { version = "1.36.0", features = ["full"] }
tower = { version = "0.5", features = ["full"] }
http = "1"
http-body-util = "0.1"
rand = "0.9.0"
tokio-util = "0.7.16"
[dev-dependencies]
httpmock = "0.7.0"
jsonschema = { version = "0.29.0", default-features = false, features = [
"resolve-file",
] }
lazy_static = "1.5.0"

View file

@ -1,86 +0,0 @@
# Hive plugin for Apollo-Router
[Hive](https://the-guild.dev/graphql/hive) is a fully open-source schema registry, analytics,
metrics and gateway for [GraphQL federation](https://the-guild.dev/graphql/hive/federation) and
other GraphQL APIs.
---
This project includes a Hive integration plugin for Apollo-Router.
At the moment, the following are implemented:
- [Fetching Supergraph from Hive CDN](https://the-guild.dev/graphql/hive/docs/high-availability-cdn)
- [Sending usage information](https://the-guild.dev/graphql/hive/docs/schema-registry/usage-reporting)
from a running Apollo Router instance to Hive
- Persisted Operations using Hive's
[App Deployments](https://the-guild.dev/graphql/hive/docs/schema-registry/app-deployments)
This project is constructed as a Rust project that implements the Apollo-Router plugin interface.
The build of this project creates an artifact identical to Apollo-Router releases, with additional
features provided by Hive.
## Getting Started
### Binary/Docker
We provide a custom build of Apollo-Router that acts as a drop-in replacement, and adds Hive
integration to Apollo-Router.
[Please follow this guide and documentation for integrating Hive with Apollo Router](https://the-guild.dev/graphql/hive/docs/other-integrations/apollo-router)
### As a Library
If you are
[building a custom Apollo-Router with your own native plugins](https://www.apollographql.com/docs/graphos/routing/customization/native-plugins),
you can use the Hive plugin as a dependency from Crates.io:
```toml
[dependencies]
hive-apollo-router-plugin = "..."
```
And then in your codebase, make sure to import and register the Hive plugin:
```rs
use apollo_router::register_plugin;
// import the registry instance and the plugin registration function
use hive_apollo_router_plugin::registry::HiveRegistry;
// Import the usage plugin
use hive_apollo_router_plugin::usage::UsagePlugin;
// Import persisted documents plugin, if needed
use persisted_documents::PersistedDocumentsPlugin;
// In your main function, make sure to register the plugin before you create or initialize Apollo-Router
fn main() {
// Register the Hive usage_reporting plugin
register_plugin!("hive", "usage", UsagePlugin);
// Register the persisted documents plugin, if needed
register_plugin!("hive", "persisted_documents", PersistedDocumentsPlugin);
// Initialize the Hive Registry instance and start the Apollo Router
match HiveRegistry::new(None).and(apollo_router::main()) {
Ok(_) => {}
Err(e) => {
eprintln!("{}", e);
std::process::exit(1);
}
}
}
```
## Development
0. Install latest version of Rust
1. To get started with development, it is recommended to ensure Rust-analyzer extension is enabled
on your VSCode instance.
2. Validate project status by running `cargo check`
3. To start the server with the demo config file (`./router.yaml`), use
`cargo run -- --config router.yaml`. Make sure to set environment variables required for your
setup and development process
([docs](https://the-guild.dev/graphql/hive/docs/other-integrations/apollo-router#configuration)).
4. You can also just run
`cargo run -- --config router.yaml --log debug --dev --supergraph some.supergraph.graphql` for
running it with a test supergraph file.

View file

@ -1,8 +0,0 @@
{
"name": "hive-apollo-router-plugin",
"version": "3.0.3",
"private": true,
"scripts": {
"sync-cargo-file": "ls -l sync-cargo-file.sh && bash ./sync-cargo-file.sh"
}
}

View file

@ -1,10 +0,0 @@
sandbox:
enabled: true
homepage:
enabled: false
supergraph:
listen: 0.0.0.0:4000
introspection: true
plugins:
hive.usage: {}
hive.persisted_documents: {}

View file

@ -1 +0,0 @@
pub const PLUGIN_VERSION: &str = env!("CARGO_PKG_VERSION");

View file

@ -1,5 +0,0 @@
// Public modules exposed by the `hive_apollo_router_plugin` library crate.
pub mod consts; // crate-wide constants (e.g. `PLUGIN_VERSION`)
pub mod persisted_documents; // persisted documents plugin (`hive.persisted_documents`)
pub mod registry; // `HiveRegistry` supergraph integration
pub mod registry_logger; // logging support used by the registry
pub mod usage; // usage-reporting plugin (`hive.usage`)

View file

@ -1,32 +0,0 @@
// Specify the modules our binary should include -- https://twitter.com/YassinEldeeb7/status/1468680104243077128
mod consts;
mod persisted_documents;
mod registry;
mod registry_logger;
mod usage;
use apollo_router::register_plugin;
use persisted_documents::PersistedDocumentsPlugin;
use registry::HiveRegistry;
use usage::UsagePlugin;
/// Registers both Hive plugins — `hive.usage` and `hive.persisted_documents` —
/// with Apollo Router's global plugin registry. Must be called before the
/// router is started (see `main`).
pub fn register_plugins() {
    register_plugin!("hive", "usage", UsagePlugin);
    register_plugin!("hive", "persisted_documents", PersistedDocumentsPlugin);
}
fn main() {
// Register the Hive plugins in Apollo Router
register_plugins();
// Initialize the Hive Registry and start the Apollo Router
// TODO: Look at builder pattern in Executable::builder().start()
match HiveRegistry::new(None).and(apollo_router::main()) {
Ok(_) => {}
Err(e) => {
eprintln!("{}", e);
std::process::exit(1);
}
}
}

View file

@ -1,746 +0,0 @@
use apollo_router::graphql;
use apollo_router::graphql::Error;
use apollo_router::layers::ServiceBuilderExt;
use apollo_router::plugin::Plugin;
use apollo_router::plugin::PluginInit;
use apollo_router::services::router;
use apollo_router::services::router::Body;
use apollo_router::Context;
use bytes::Bytes;
use core::ops::Drop;
use futures::FutureExt;
use hive_console_sdk::persisted_documents::PersistedDocumentsError;
use hive_console_sdk::persisted_documents::PersistedDocumentsManager;
use http::StatusCode;
use http_body_util::combinators::UnsyncBoxBody;
use http_body_util::BodyExt;
use http_body_util::Full;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::env;
use std::ops::ControlFlow;
use std::sync::Arc;
use std::time::Duration;
use tower::{BoxError, ServiceBuilder, ServiceExt};
use tracing::{debug, info, warn};
use crate::consts::PLUGIN_VERSION;
pub static PERSISTED_DOCUMENT_HASH_KEY: &str = "hive::persisted_document_hash";
/// Configuration for the persisted documents plugin (`hive.persisted_documents`).
#[derive(Clone, Debug, Deserialize, JsonSchema, Default)]
pub struct Config {
    /// Whether the plugin is enabled.
    /// Default: true (see `from_config`).
    pub enabled: Option<bool>,
    /// GraphQL Hive persisted documents CDN endpoint URL.
    /// Falls back to the `HIVE_CDN_ENDPOINT` environment variable when unset.
    pub endpoint: Option<EndpointConfig>,
    /// GraphQL Hive persisted documents CDN access token.
    /// Falls back to the `HIVE_CDN_KEY` environment variable when unset.
    pub key: Option<String>,
    /// Whether arbitrary documents should be allowed along-side persisted documents.
    /// default: false
    pub allow_arbitrary_documents: Option<bool>,
    /// A timeout for only the connect phase of a request to GraphQL Hive
    /// Unit: seconds
    /// Default: 5
    pub connect_timeout: Option<u64>,
    /// Retry count for the request to CDN request
    /// Default: 3
    pub retry_count: Option<u32>,
    /// A timeout for the entire request to GraphQL Hive
    /// Unit: seconds
    /// Default: 15
    pub request_timeout: Option<u64>,
    /// Accept invalid SSL certificates
    /// default: false
    pub accept_invalid_certs: Option<bool>,
    /// Configuration for the size of the in-memory caching of persisted documents.
    /// Default: 1000
    pub cache_size: Option<u64>,
}
/// CDN endpoint configuration: a single URL or a list of URLs.
/// `#[serde(untagged)]` lets the config value be written either as a plain
/// string or as an array of strings.
#[derive(Clone, Debug, Deserialize, JsonSchema)]
#[serde(untagged)]
pub enum EndpointConfig {
    /// One endpoint URL.
    Single(String),
    /// Several endpoint URLs (all are registered with the manager — see `from_config`).
    Multiple(Vec<String>),
}
impl From<&str> for EndpointConfig {
fn from(value: &str) -> Self {
EndpointConfig::Single(value.into())
}
}
impl From<&[&str]> for EndpointConfig {
fn from(value: &[&str]) -> Self {
EndpointConfig::Multiple(value.iter().map(|s| s.to_string()).collect())
}
}
/// Apollo Router plugin that resolves persisted documents via the Hive CDN.
pub struct PersistedDocumentsPlugin {
    /// Manager that fetches and caches persisted documents;
    /// `None` when the plugin is disabled (see `from_config`).
    persisted_documents_manager: Option<Arc<PersistedDocumentsManager>>,
    /// Whether inline (non-persisted) GraphQL documents are also accepted.
    allow_arbitrary_documents: bool,
}
impl PersistedDocumentsPlugin {
    /// Builds the plugin from its configuration.
    ///
    /// Resolution rules:
    /// - `enabled` defaults to `true`; when disabled, no CDN manager is built.
    /// - `endpoint` falls back to the `HIVE_CDN_ENDPOINT` environment variable;
    ///   it is an error if neither is provided.
    /// - `key` falls back to the `HIVE_CDN_KEY` environment variable;
    ///   it is an error if neither is provided.
    /// - Every other optional field is forwarded to the
    ///   `PersistedDocumentsManager` builder only when present, so the
    ///   builder's own defaults apply otherwise.
    fn from_config(config: Config) -> Result<Self, BoxError> {
        let enabled = config.enabled.unwrap_or(true);
        let allow_arbitrary_documents = config.allow_arbitrary_documents.unwrap_or(false);
        // Disabled: keep the `allow_arbitrary_documents` flag but skip the manager.
        if !enabled {
            return Ok(PersistedDocumentsPlugin {
                persisted_documents_manager: None,
                allow_arbitrary_documents,
            });
        }
        // Endpoint(s): explicit config wins, then HIVE_CDN_ENDPOINT, else error.
        let endpoints = match &config.endpoint {
            Some(ep) => match ep {
                EndpointConfig::Single(url) => vec![url.clone()],
                EndpointConfig::Multiple(urls) => urls.clone(),
            },
            None => {
                if let Ok(ep) = env::var("HIVE_CDN_ENDPOINT") {
                    vec![ep]
                } else {
                    return Err(
                        "Endpoint for persisted documents CDN is not configured. Please set it via the plugin configuration or HIVE_CDN_ENDPOINT environment variable."
                        .into(),
                    );
                }
            }
        };
        // Access token: explicit config wins, then HIVE_CDN_KEY, else error.
        let key = match &config.key {
            Some(k) => k.clone(),
            None => {
                if let Ok(key) = env::var("HIVE_CDN_KEY") {
                    key
                } else {
                    return Err(
                        "Access token for persisted documents CDN is not configured. Please set it via the plugin configuration or HIVE_CDN_KEY environment variable."
                        .into(),
                    );
                }
            }
        };
        let mut persisted_documents_manager = PersistedDocumentsManager::builder()
            .key(key)
            .user_agent(format!("hive-apollo-router/{}", PLUGIN_VERSION));
        for endpoint in endpoints {
            persisted_documents_manager = persisted_documents_manager.add_endpoint(endpoint);
        }
        // Timeouts are configured in whole seconds.
        if let Some(connect_timeout) = config.connect_timeout {
            persisted_documents_manager =
                persisted_documents_manager.connect_timeout(Duration::from_secs(connect_timeout));
        }
        if let Some(request_timeout) = config.request_timeout {
            persisted_documents_manager =
                persisted_documents_manager.request_timeout(Duration::from_secs(request_timeout));
        }
        if let Some(retry_count) = config.retry_count {
            persisted_documents_manager = persisted_documents_manager.max_retries(retry_count);
        }
        if let Some(accept_invalid_certs) = config.accept_invalid_certs {
            persisted_documents_manager =
                persisted_documents_manager.accept_invalid_certs(accept_invalid_certs);
        }
        if let Some(cache_size) = config.cache_size {
            persisted_documents_manager = persisted_documents_manager.cache_size(cache_size);
        }
        let persisted_documents_manager = persisted_documents_manager.build()?;
        Ok(PersistedDocumentsPlugin {
            persisted_documents_manager: Some(Arc::new(persisted_documents_manager)),
            allow_arbitrary_documents,
        })
    }
}
#[async_trait::async_trait]
impl Plugin for PersistedDocumentsPlugin {
    type Config = Config;

    /// Standard Apollo Router plugin constructor; delegates to `from_config`.
    async fn new(init: PluginInit<Config>) -> Result<Self, BoxError> {
        PersistedDocumentsPlugin::from_config(init.config)
    }

    /// Wraps the router service with an async checkpoint that resolves
    /// persisted documents before the request enters the router pipeline.
    ///
    /// Per request:
    /// 1. Buffer the whole request body and parse it as JSON.
    /// 2. If the body carries an inline `query`, continue only when
    ///    `allow_arbitrary_documents` is set; otherwise reject.
    /// 3. Otherwise resolve `documentId` through the CDN manager, substitute
    ///    the resolved document into the request body, and continue.
    ///
    /// When the plugin is disabled (no manager), the service is returned untouched.
    fn router_service(&self, service: router::BoxService) -> router::BoxService {
        if let Some(mgr) = &self.persisted_documents_manager {
            let mgr = mgr.clone();
            let allow_arbitrary_documents = self.allow_arbitrary_documents;
            ServiceBuilder::new()
                .checkpoint_async(move |req: router::Request| {
                    let mgr = mgr.clone();
                    async move {
                        // Consume the body; it is re-materialized below before continuing.
                        let (parts, body) = req.router_request.into_parts();
                        let bytes = body
                            .collect()
                            .await
                            .map_err(|err| PersistedDocumentsError::FailedToReadBody(err.to_string()))?
                            .to_bytes();
                        let payload = extract_document_id(&bytes);
                        let mut payload = match payload {
                            Ok(payload) => payload,
                            Err(e) => {
                                // Unparseable JSON body: short-circuit with a GraphQL error.
                                return Ok(ControlFlow::Break(
                                    to_router_response(e, req.context),
                                ));
                            }
                        };
                        if payload.original_req.query.is_some() {
                            if allow_arbitrary_documents {
                                // Rebuild the request from the original bytes and pass it through.
                                let roll_req: router::Request = (
                                    http::Request::<Body>::from_parts(
                                        parts,
                                        body_from_bytes(bytes),
                                    ),
                                    req.context,
                                )
                                    .into();
                                return Ok(ControlFlow::Continue(roll_req));
                            } else {
                                return Ok(ControlFlow::Break(
                                    to_router_response(PersistedDocumentsError::PersistedDocumentRequired, req.context)
                                ));
                            }
                        }
                        if payload.document_id.is_none() {
                            return Ok(ControlFlow::Break(
                                to_router_response(PersistedDocumentsError::KeyNotFound, req.context)
                            ));
                        }
                        match payload.document_id.as_ref() {
                            // NOTE(review): this arm is unreachable — the `is_none()` guard
                            // above already broke out of the checkpoint. Kept only for
                            // match exhaustiveness; consider collapsing.
                            None => {
                                Ok(ControlFlow::Break(
                                    to_router_response(PersistedDocumentsError::PersistedDocumentRequired, req.context)
                                ))
                            }
                            Some(document_id) => match mgr.resolve_document(document_id).await {
                                Ok(document) => {
                                    info!("Document found in persisted documents: {}", document);
                                    // Record the document id on the router context so other
                                    // plugins can read it (presumably usage reporting, via
                                    // PERSISTED_DOCUMENT_HASH_KEY — TODO confirm).
                                    if req
                                        .context
                                        .insert(PERSISTED_DOCUMENT_HASH_KEY, document_id.clone())
                                        .is_err()
                                    {
                                        warn!("failed to extend router context with persisted document hash key");
                                    }
                                    // Substitute the resolved document and re-serialize the body.
                                    payload.original_req.query = Some(document);
                                    let mut bytes: Vec<u8> = Vec::new();
                                    serde_json::to_writer(&mut bytes, &payload).unwrap();
                                    let roll_req: router::Request = (
                                        http::Request::<Body>::from_parts(parts, body_from_bytes(bytes)),
                                        req.context,
                                    )
                                        .into();
                                    Ok(ControlFlow::Continue(roll_req))
                                }
                                Err(e) => {
                                    Ok(ControlFlow::Break(
                                        to_router_response(e, req.context),
                                    ))
                                }
                            },
                        }
                    }
                    .boxed()
                })
                .buffered()
                .service(service)
                .boxed()
        } else {
            service
        }
    }
}
/// Wraps raw bytes into the boxed body type expected by `http::Request<Body>`.
/// The `map_err` is only there to convert the `Infallible` error of `Full`
/// into the body's error type; the closure can never run.
fn body_from_bytes<T: Into<Bytes>>(chunk: T) -> UnsyncBoxBody<Bytes, axum_core::Error> {
    let full_body = Full::new(chunk.into());
    full_body.map_err(|infallible| match infallible {}).boxed_unsync()
}
impl Drop for PersistedDocumentsPlugin {
    /// Logs at debug level when the plugin instance is dropped.
    fn drop(&mut self) {
        debug!("PersistedDocumentsPlugin has been dropped!");
    }
}
fn to_router_response(err: PersistedDocumentsError, ctx: Context) -> router::Response {
let errors = vec![Error::builder()
.message(err.message())
.extension_code(err.code())
.build()];
router::Response::error_builder()
.errors(errors)
.status_code(StatusCode::OK)
.context(ctx)
.build()
.unwrap()
}
/// Expected body structure for the router incoming requests
/// This is used to extract the document id and the original request as-is (see `flatten` attribute)
#[derive(Debug, Serialize, Deserialize, Clone)]
struct ExpectedBodyStructure {
    /// This field is set to optional in order to prevent parsing errors
    /// At runtime later, the plugin will double check the value.
    /// Never serialized back out (`skip_serializing`) — only `original_req`
    /// is forwarded once the document is resolved.
    #[serde(rename = "documentId")]
    #[serde(skip_serializing)]
    document_id: Option<String>,
    /// The rest of the GraphQL request, flattened to keep the original structure.
    #[serde(flatten)]
    original_req: graphql::Request,
}
/// Parses the raw request body as JSON into `ExpectedBodyStructure`,
/// mapping any parse failure to `PersistedDocumentsError::FailedToParseBody`.
fn extract_document_id(
    body: &bytes::Bytes,
) -> Result<ExpectedBodyStructure, PersistedDocumentsError> {
    match serde_json::from_slice::<ExpectedBodyStructure>(body) {
        Ok(parsed) => Ok(parsed),
        Err(parse_error) => Err(PersistedDocumentsError::FailedToParseBody(parse_error)),
    }
}
/// To test this plugin, we do the following:
/// 1. Create the plugin instance
/// 2. Link it to a mocked router service that reflects
/// back the body (to validate that the plugin is working and passes the body correctly)
/// 3. Run HTTP mock to create a mock Hive CDN server
#[cfg(test)]
mod hive_persisted_documents_tests {
use apollo_router::plugin::test::MockRouterService;
use futures::executor::block_on;
use http::Method;
use httpmock::{Method::GET, Mock, MockServer};
use serde_json::json;
use super::*;
/// Creates a regular GraphQL request with a very simple GraphQL query:
/// { "query": "query { __typename }" }
fn create_regular_request() -> router::Request {
let mut r = graphql::Request::default();
r.query = Some("query { __typename }".into());
router::Request::fake_builder()
.method(Method::POST)
.body(serde_json::to_string(&r).unwrap())
.header("content-type", "application/json")
.build()
.unwrap()
}
/// Creates a persisted document request with a document id and optional variables.
/// The document id is used to fetch the persisted document from the CDN.
/// { "documentId": "123", "variables": { ... } }
fn create_persisted_request(
document_id: &str,
variables: Option<serde_json::Value>,
) -> router::Request {
let body = json!({
"documentId": document_id,
"variables": variables,
});
let body_str = serde_json::to_string(&body).unwrap();
router::Request::fake_builder()
.body(body_str)
.header("content-type", "application/json")
.build()
.unwrap()
}
/// Creates an "invalid" persisted request with an empty JSON object body.
fn create_invalid_req() -> router::Request {
router::Request::fake_builder()
.method(Method::POST)
.body(serde_json::to_string(&json!({})).unwrap())
.header("content-type", "application/json")
.build()
.unwrap()
}
struct PersistedDocumentsCDNMock {
server: MockServer,
}
impl PersistedDocumentsCDNMock {
fn new() -> Self {
let server = MockServer::start();
Self { server }
}
fn endpoint(&self) -> EndpointConfig {
EndpointConfig::Single(self.server.url(""))
}
/// Registers a valid artifact URL with an actual GraphQL document
fn add_valid(&'_ self, document_id: &str) -> Mock<'_> {
let valid_artifact_url = format!("/apps/{}", str::replace(document_id, "~", "/"));
let document = "query { __typename }";
let mock = self.server.mock(|when, then| {
when.method(GET).path(valid_artifact_url);
then.status(200)
.header("content-type", "text/plain")
.body(document);
});
mock
}
}
async fn get_body(router_req: router::Request) -> String {
let (_parts, body) = router_req.router_request.into_parts();
let body = body.collect().await.unwrap().to_bytes();
String::from_utf8(body.to_vec()).unwrap()
}
/// Creates a mocked router service that reflects the incoming body
/// back to the client.
/// We are using this mocked router in order to make sure that the Persisted Documents layer
/// is able to resolve, fetch and pass the document to the next layer.
fn create_reflecting_mocked_router() -> MockRouterService {
let mut mocked_execution: MockRouterService = MockRouterService::new();
mocked_execution
.expect_call()
.times(1)
.returning(move |req| {
let incoming_body = block_on(get_body(req));
Ok(router::Response::fake_builder()
.data(json!({
"incomingBody": incoming_body,
}))
.build()
.unwrap())
});
mocked_execution
}
/// Creates a mocked router service that returns a fake GraphQL response.
fn create_dummy_mocked_router() -> MockRouterService {
let mut mocked_execution = MockRouterService::new();
mocked_execution.expect_call().times(1).returning(move |_| {
Ok(router::Response::fake_builder()
.data(json!({
"__typename": "Query"
}))
.build()
.unwrap())
});
mocked_execution
}
#[tokio::test]
async fn should_allow_arbitrary_when_regular_req_is_sent() {
let service = create_reflecting_mocked_router();
let service_stack = PersistedDocumentsPlugin::from_config(Config {
enabled: Some(true),
endpoint: Some("https://cdn.example.com".into()),
key: Some("123".into()),
allow_arbitrary_documents: Some(true),
..Default::default()
})
.expect("Failed to create PersistedDocumentsPlugin")
.router_service(service.boxed());
let request = create_regular_request();
let mut response = service_stack.oneshot(request).await.unwrap();
let response_inner = response.next_response().await.unwrap().unwrap();
assert_eq!(response.response.status(), StatusCode::OK);
assert_eq!(
response_inner,
json!({
"data": {
"incomingBody": "{\"query\":\"query { __typename }\"}"
}
})
.to_string()
.as_bytes()
);
}
#[tokio::test]
async fn should_disallow_arbitrary_when_regular_req_sent() {
let service_stack = PersistedDocumentsPlugin::from_config(Config {
enabled: Some(true),
endpoint: Some("https://cdn.example.com".into()),
key: Some("123".into()),
allow_arbitrary_documents: Some(false),
..Default::default()
})
.expect("Failed to create PersistedDocumentsPlugin")
.router_service(MockRouterService::new().boxed());
let request = create_regular_request();
let mut response = service_stack.oneshot(request).await.unwrap();
let response_inner = response.next_response().await.unwrap().unwrap();
assert_eq!(response.response.status(), StatusCode::OK);
assert_eq!(
response_inner,
json!({
"errors": [
{
"message": "No persisted document provided, or document id cannot be resolved.",
"extensions": {
"code": "PERSISTED_DOCUMENT_REQUIRED"
}
}
]
})
.to_string()
.as_bytes()
);
}
#[tokio::test]
async fn returns_not_found_error_for_missing_persisted_query() {
let cdn_mock = PersistedDocumentsCDNMock::new();
let service_stack = PersistedDocumentsPlugin::from_config(Config {
enabled: Some(true),
endpoint: Some(cdn_mock.endpoint()),
key: Some("123".into()),
allow_arbitrary_documents: Some(true),
..Default::default()
})
.expect("Failed to create PersistedDocumentsPlugin")
.router_service(MockRouterService::new().boxed());
let request = create_persisted_request("123", None);
let mut response = service_stack.oneshot(request).await.unwrap();
let response_inner = response.next_response().await.unwrap().unwrap();
assert_eq!(response.response.status(), StatusCode::OK);
assert_eq!(
response_inner,
json!({
"errors": [
{
"message": "Persisted document not found.",
"extensions": {
"code": "PERSISTED_DOCUMENT_NOT_FOUND"
}
}
]
})
.to_string()
.as_bytes()
);
}
#[tokio::test]
async fn returns_key_not_found_error_for_missing_input() {
    // Arbitrary documents are allowed, but the request carries no
    // recognizable persisted-document key at all.
    let plugin = PersistedDocumentsPlugin::from_config(Config {
        enabled: Some(true),
        endpoint: Some("https://cdn.example.com".into()),
        key: Some("123".into()),
        allow_arbitrary_documents: Some(true),
        ..Default::default()
    })
    .expect("Failed to create PersistedDocumentsPlugin");
    let service_stack = plugin.router_service(MockRouterService::new().boxed());

    let mut response = service_stack.oneshot(create_invalid_req()).await.unwrap();
    let body = response.next_response().await.unwrap().unwrap();

    // Key-extraction failure surfaces as a GraphQL error with HTTP 200.
    assert_eq!(response.response.status(), StatusCode::OK);
    let expected = json!({
        "errors": [
            {
                "message": "Failed to locate the persisted document key in request.",
                "extensions": {
                    "code": "PERSISTED_DOCUMENT_KEY_NOT_FOUND"
                }
            }
        ]
    });
    assert_eq!(body, expected.to_string().as_bytes());
}
// The CDN endpoint is unreachable: the plugin fails closed with a fetch
// error instead of forwarding the request upstream.
#[tokio::test]
async fn rejects_req_when_cdn_not_available() {
    let service_stack = PersistedDocumentsPlugin::from_config(Config {
        enabled: Some(true),
        endpoint: Some("https://127.0.0.1:9999".into()), // Invalid endpoint
        key: Some("123".into()),
        allow_arbitrary_documents: Some(false),
        ..Default::default()
    })
    .expect("Failed to create PersistedDocumentsPlugin")
    .router_service(MockRouterService::new().boxed());
    let request = create_persisted_request("123", None);
    let mut response = service_stack.oneshot(request).await.unwrap();
    let response_inner = response.next_response().await.unwrap().unwrap();
    // GraphQL-style error payload delivered with HTTP 200.
    assert_eq!(response.response.status(), StatusCode::OK);
    assert_eq!(
        response_inner,
        json!({
            "errors": [
                {
                    "message": "Failed to validate persisted document",
                    "extensions": {
                        "code": "FAILED_TO_FETCH_FROM_CDN"
                    }
                }
            ]
        })
        .to_string()
        .as_bytes()
    );
}
// Happy path: the document id exists on the CDN, so the plugin resolves it
// and forwards the request to the (mocked) upstream router service.
#[tokio::test]
async fn should_return_valid_response() {
    let cdn_mock = PersistedDocumentsCDNMock::new();
    // Register the document id so the CDN lookup succeeds.
    cdn_mock.add_valid("my-app~cacb95c69ba4684aec972777a38cd106740c6453~04bfa72dfb83b297dd8a5b6fed9bafac2b395a0f");
    let upstream = create_dummy_mocked_router();
    let service_stack = PersistedDocumentsPlugin::from_config(Config {
        enabled: Some(true),
        endpoint: Some(cdn_mock.endpoint()),
        key: Some("123".into()),
        allow_arbitrary_documents: Some(false),
        ..Default::default()
    })
    .expect("Failed to create PersistedDocumentsPlugin")
    .router_service(upstream.boxed());
    let request = create_persisted_request(
        "my-app~cacb95c69ba4684aec972777a38cd106740c6453~04bfa72dfb83b297dd8a5b6fed9bafac2b395a0f",
        None,
    );
    let mut response = service_stack.oneshot(request).await.unwrap();
    let response_inner = response.next_response().await.unwrap().unwrap();
    assert_eq!(response.response.status(), StatusCode::OK);
    // The mocked upstream answers the resolved document normally.
    assert_eq!(
        response_inner,
        json!({
            "data": {
                "__typename": "Query"
            }
        })
        .to_string()
        .as_bytes()
    );
}
// Extra request parameters (e.g. `variables`) must survive the rewrite from
// a persisted-document request into a regular GraphQL request body.
#[tokio::test]
async fn should_passthrough_additional_req_params() {
    let cdn_mock = PersistedDocumentsCDNMock::new();
    cdn_mock.add_valid("my-app~cacb95c69ba4684aec972777a38cd106740c6453~04bfa72dfb83b297dd8a5b6fed9bafac2b395a0f");
    // Upstream echoes the incoming request body so we can inspect it.
    let upstream = create_reflecting_mocked_router();
    let service_stack = PersistedDocumentsPlugin::from_config(Config {
        enabled: Some(true),
        endpoint: Some(cdn_mock.endpoint()),
        key: Some("123".into()),
        allow_arbitrary_documents: Some(false),
        ..Default::default()
    })
    .expect("Failed to create PersistedDocumentsPlugin")
    .router_service(upstream.boxed());
    let request = create_persisted_request(
        "my-app~cacb95c69ba4684aec972777a38cd106740c6453~04bfa72dfb83b297dd8a5b6fed9bafac2b395a0f",
        Some(json!({"var": "value"})),
    );
    let mut response = service_stack.oneshot(request).await.unwrap();
    let response_inner = response.next_response().await.unwrap().unwrap();
    assert_eq!(response.response.status(), StatusCode::OK);
    // The echoed body contains both the resolved query and the variables.
    assert_eq!(
        response_inner,
        "{\"data\":{\"incomingBody\":\"{\\\"query\\\":\\\"query { __typename }\\\",\\\"variables\\\":{\\\"var\\\":\\\"value\\\"}}\"}}"
    );
}
// Two service stacks built from the SAME plugin instance must share the
// resolved-document cache: the CDN is expected to be hit exactly once.
#[tokio::test]
async fn should_use_caching_for_documents() {
    let cdn_mock = PersistedDocumentsCDNMock::new();
    let cdn_req_mock = cdn_mock.add_valid("my-app~cacb95c69ba4684aec972777a38cd106740c6453~04bfa72dfb83b297dd8a5b6fed9bafac2b395a0f");
    let p = PersistedDocumentsPlugin::from_config(Config {
        enabled: Some(true),
        endpoint: Some(cdn_mock.endpoint()),
        key: Some("123".into()),
        allow_arbitrary_documents: Some(false),
        ..Default::default()
    })
    .expect("Failed to create PersistedDocumentsPlugin");
    // Two independent service stacks over the same plugin (shared cache).
    let s1 = p.router_service(create_dummy_mocked_router().boxed());
    let s2 = p.router_service(create_dummy_mocked_router().boxed());
    // first call
    let request = create_persisted_request(
        "my-app~cacb95c69ba4684aec972777a38cd106740c6453~04bfa72dfb83b297dd8a5b6fed9bafac2b395a0f",
        None,
    );
    let mut response = s1.oneshot(request).await.unwrap();
    let response_inner = response.next_response().await.unwrap().unwrap();
    assert_eq!(response.response.status(), StatusCode::OK);
    assert_eq!(
        response_inner,
        json!({
            "data": {
                "__typename": "Query"
            }
        })
        .to_string()
        .as_bytes()
    );
    // second call
    let request = create_persisted_request(
        "my-app~cacb95c69ba4684aec972777a38cd106740c6453~04bfa72dfb83b297dd8a5b6fed9bafac2b395a0f",
        None,
    );
    let mut response = s2.oneshot(request).await.unwrap();
    let response_inner = response.next_response().await.unwrap().unwrap();
    assert_eq!(response.response.status(), StatusCode::OK);
    assert_eq!(
        response_inner,
        json!({
            "data": {
                "__typename": "Query"
            }
        })
        .to_string()
        .as_bytes()
    );
    // makes sure cdn called only once. If called more than once, it will fail with 404 -> leading to error (and the above assertion will fail...)
    cdn_req_mock.assert();
}
}

View file

@ -1,212 +0,0 @@
use crate::consts::PLUGIN_VERSION;
use crate::registry_logger::Logger;
use anyhow::{anyhow, Result};
use hive_console_sdk::supergraph_fetcher::sync_fetcher::SupergraphFetcherSyncState;
use hive_console_sdk::supergraph_fetcher::SupergraphFetcher;
use sha2::Digest;
use sha2::Sha256;
use std::env;
use std::io::Write;
use std::thread;
/// Polls the Hive CDN for the supergraph SDL and keeps a local schema file
/// up to date so Apollo Router's hot-reload mechanism picks up changes.
#[derive(Debug)]
pub struct HiveRegistry {
    // Path of the file the fetched supergraph is written to
    // (also exported as APOLLO_ROUTER_SUPERGRAPH_PATH).
    file_name: String,
    // Blocking (sync) supergraph fetcher from the Hive console SDK.
    fetcher: SupergraphFetcher<SupergraphFetcherSyncState>,
    pub logger: Logger,
}
/// User-supplied registry configuration. Any unset field falls back to the
/// corresponding `HIVE_CDN_*` environment variable in `HiveRegistry::new`.
pub struct HiveRegistryConfig {
    // CDN endpoints to try (fallback: HIVE_CDN_ENDPOINT).
    endpoints: Vec<String>,
    // CDN access key (fallback: HIVE_CDN_KEY).
    key: Option<String>,
    // Poll interval in seconds (fallback: HIVE_CDN_POLL_INTERVAL); default 10.
    poll_interval: Option<u64>,
    // Skip TLS certificate validation (fallback: HIVE_CDN_ACCEPT_INVALID_CERTS).
    accept_invalid_certs: Option<bool>,
    // Where to write the fetched supergraph (fallback: HIVE_CDN_SCHEMA_FILE_PATH).
    schema_file_path: Option<String>,
}
impl HiveRegistry {
    /// Resolves configuration (explicit values first, `HIVE_CDN_*` environment
    /// variables as fallback), fetches the initial supergraph, points Apollo
    /// Router at the schema file, and spawns a polling thread that keeps the
    /// file in sync with the CDN.
    ///
    /// Returns `Ok(())` without starting anything when neither an endpoint nor
    /// a key is configured — Hive is then simply not used as the schema source.
    #[allow(clippy::new_ret_no_self)]
    pub fn new(user_config: Option<HiveRegistryConfig>) -> Result<()> {
        let mut config = HiveRegistryConfig {
            endpoints: vec![],
            key: None,
            poll_interval: None,
            // Bug fix: this was hard-coded to `Some(true)`, which (a) made the
            // HIVE_CDN_ACCEPT_INVALID_CERTS fallback below dead code and
            // (b) silently accepted invalid TLS certificates by default.
            // Leaving it unset restores the secure `false` default applied
            // further down via `unwrap_or(false)`.
            accept_invalid_certs: None,
            schema_file_path: None,
        };
        // Explicit user configuration takes precedence over the environment.
        if let Some(user_config) = user_config {
            config.endpoints = user_config.endpoints;
            config.key = user_config.key;
            config.poll_interval = user_config.poll_interval;
            config.accept_invalid_certs = user_config.accept_invalid_certs;
            config.schema_file_path = user_config.schema_file_path;
        }
        // Environment-variable fallbacks for anything still unset.
        if config.endpoints.is_empty() {
            if let Ok(endpoint) = env::var("HIVE_CDN_ENDPOINT") {
                config.endpoints.push(endpoint);
            }
        }
        if config.key.is_none() {
            if let Ok(key) = env::var("HIVE_CDN_KEY") {
                config.key = Some(key);
            }
        }
        if config.poll_interval.is_none() {
            if let Ok(poll_interval) = env::var("HIVE_CDN_POLL_INTERVAL") {
                config.poll_interval = Some(
                    poll_interval
                        .parse()
                        .expect("failed to parse HIVE_CDN_POLL_INTERVAL"),
                );
            }
        }
        if config.accept_invalid_certs.is_none() {
            if let Ok(accept_invalid_certs) = env::var("HIVE_CDN_ACCEPT_INVALID_CERTS") {
                // "1", "true" and "on" (case-insensitive) count as truthy.
                config.accept_invalid_certs = Some(
                    accept_invalid_certs.eq("1")
                        || accept_invalid_certs.to_lowercase().eq("true")
                        || accept_invalid_certs.to_lowercase().eq("on"),
                );
            }
        }
        if config.schema_file_path.is_none() {
            if let Ok(schema_file_path) = env::var("HIVE_CDN_SCHEMA_FILE_PATH") {
                config.schema_file_path = Some(schema_file_path);
            }
        }
        // Resolve final values and defaults.
        let endpoints = config.endpoints;
        let key = config.key.unwrap_or_default();
        let poll_interval: u64 = config.poll_interval.unwrap_or(10);
        let accept_invalid_certs = config.accept_invalid_certs.unwrap_or(false);
        let logger = Logger::new();
        // In case of an endpoint and a key being empty, we don't start the polling and skip the registry
        if endpoints.is_empty() && key.is_empty() {
            logger.info("You're not using GraphQL Hive as the source of schema.");
            logger.info(
                "Reason: could not find HIVE_CDN_KEY and HIVE_CDN_ENDPOINT environment variables.",
            );
            return Ok(());
        }
        // Only one of the two being present is a configuration error.
        if endpoints.is_empty() {
            return Err(anyhow!("environment variable HIVE_CDN_ENDPOINT not found",));
        }
        if key.is_empty() {
            return Err(anyhow!("environment variable HIVE_CDN_KEY not found"));
        }
        // A hacky way to force the router to use GraphQL Hive CDN as the source of schema.
        // Our plugin does the polling and saves the supergraph to a file.
        // It also enables hot-reloading to make sure Apollo Router watches the file.
        let file_name = config.schema_file_path.unwrap_or(
            env::temp_dir()
                .join("supergraph-schema.graphql")
                .to_string_lossy()
                .to_string(),
        );
        unsafe {
            env::set_var("APOLLO_ROUTER_SUPERGRAPH_PATH", file_name.clone());
            env::set_var("APOLLO_ROUTER_HOT_RELOAD", "true");
        }
        let mut fetcher = SupergraphFetcher::builder()
            .key(key)
            .user_agent(format!("hive-apollo-router/{}", PLUGIN_VERSION))
            .accept_invalid_certs(accept_invalid_certs);
        for ep in endpoints {
            fetcher = fetcher.add_endpoint(ep);
        }
        let fetcher = fetcher
            .build_sync()
            .map_err(|e| anyhow!("Failed to create SupergraphFetcher: {}", e))?;
        let registry = HiveRegistry {
            fetcher,
            file_name,
            logger,
        };
        // The initial fetch must succeed; the router cannot start without a schema.
        match registry.initial_supergraph() {
            Ok(_) => {
                registry
                    .logger
                    .info("Successfully fetched and saved supergraph from GraphQL Hive");
            }
            Err(e) => {
                registry.logger.error(&e);
                std::process::exit(1);
            }
        }
        // Background polling loop; runs for the lifetime of the process.
        thread::spawn(move || loop {
            thread::sleep(std::time::Duration::from_secs(poll_interval));
            registry.poll()
        });
        Ok(())
    }

    /// Fetches the supergraph once and writes it to `self.file_name`.
    /// Errors are stringified so the caller can log them and exit.
    fn initial_supergraph(&self) -> Result<(), String> {
        let mut file = std::fs::File::create(self.file_name.clone()).map_err(|e| e.to_string())?;
        let resp = self
            .fetcher
            .fetch_supergraph()
            .map_err(|err| err.to_string())?;
        match resp {
            Some(supergraph) => {
                file.write_all(supergraph.as_bytes())
                    .map_err(|e| e.to_string())?;
            }
            None => {
                return Err("Failed to fetch supergraph".to_string());
            }
        }
        Ok(())
    }

    /// One polling iteration: fetch the supergraph and rewrite the schema file
    /// only when its SHA-256 differs from what is on disk, to avoid triggering
    /// needless hot-reloads.
    fn poll(&self) {
        match self.fetcher.fetch_supergraph() {
            Ok(new_supergraph) => {
                if let Some(new_supergraph) = new_supergraph {
                    let current_file = std::fs::read_to_string(self.file_name.clone())
                        .expect("Could not read file");
                    let current_supergraph_hash = hash(current_file.as_bytes());
                    let new_supergraph_hash = hash(new_supergraph.as_bytes());
                    if current_supergraph_hash != new_supergraph_hash {
                        self.logger.info("New supergraph detected!");
                        std::fs::write(self.file_name.clone(), new_supergraph)
                            .expect("Could not write file");
                    }
                }
            }
            Err(e) => self.logger.error(&e.to_string()),
        }
    }
}
/// Returns the uppercase-hex SHA-256 digest of `bytes`.
fn hash(bytes: &[u8]) -> String {
    let digest = Sha256::digest(bytes);
    format!("{:X}", digest)
}

View file

@ -1,94 +0,0 @@
use std::env;
// Level names indexed by the numeric discriminant of `LogLevel`.
static LOG_LEVEL_NAMES: [&str; 5] = ["ERROR", "WARN", "INFO", "DEBUG", "TRACE"];

/// Verbosity levels, ordered from least (`Error`) to most (`Trace`) verbose.
/// The `usize` discriminant doubles as an index into `LOG_LEVEL_NAMES`.
#[repr(usize)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Debug)]
pub enum LogLevel {
    Error,
    Warn,
    Info,
    Debug,
    Trace,
}
impl LogLevel {
    /// Maps a numeric discriminant back to its variant; `None` when out of range.
    fn from_usize(u: usize) -> Option<LogLevel> {
        match u {
            0 => Some(LogLevel::Error),
            1 => Some(LogLevel::Warn),
            2 => Some(LogLevel::Info),
            3 => Some(LogLevel::Debug),
            4 => Some(LogLevel::Trace),
            _ => None,
        }
    }

    /// Parses a level name case-insensitively (e.g. "info", "DEBUG") by its
    /// position in `LOG_LEVEL_NAMES`. Panics on an unrecognized name.
    fn from_str(s: &str) -> LogLevel {
        LOG_LEVEL_NAMES
            .iter()
            .position(|&name| name.eq_ignore_ascii_case(s))
            .map(|p| LogLevel::from_usize(p).expect("Hive failed to read the log level"))
            .expect("Hive failed to parse the log level filter")
    }
}
/// Minimal stdout logger filtered by a maximum verbosity level.
#[derive(Clone, Debug)]
pub struct Logger {
    // Most verbose level that will actually be printed.
    max_level: LogLevel,
}
impl Default for Logger {
fn default() -> Self {
Self::new()
}
}
impl Logger {
pub fn new() -> Logger {
Self {
max_level: LogLevel::from_str(
env::var("HIVE_REGISTRY_LOG")
.unwrap_or_else(|_| "info".to_string())
.as_str(),
),
}
}
fn should_log(&self, level: LogLevel) -> bool {
self.max_level >= level
}
#[allow(dead_code)]
pub fn trace(&self, message: &str) {
if self.should_log(LogLevel::Trace) {
println!("TRACE: {}", message);
}
}
#[allow(dead_code)]
pub fn debug(&self, message: &str) {
if self.should_log(LogLevel::Debug) {
println!("DEBUG: {}", message);
}
}
pub fn info(&self, message: &str) {
if self.should_log(LogLevel::Info) {
println!("INFO: {}", message);
}
}
#[allow(dead_code)]
pub fn warn(&self, message: &str) {
if self.should_log(LogLevel::Warn) {
println!("WARNING: {}", message);
}
}
pub fn error(&self, message: &str) {
if self.should_log(LogLevel::Error) {
println!("ERROR: {}", message);
}
}
}

View file

@ -1,558 +0,0 @@
use crate::consts::PLUGIN_VERSION;
use apollo_router::layers::ServiceBuilderExt;
use apollo_router::plugin::Plugin;
use apollo_router::plugin::PluginInit;
use apollo_router::services::*;
use apollo_router::Context;
use core::ops::Drop;
use futures::StreamExt;
use hive_console_sdk::agent::usage_agent::UsageAgentExt;
use hive_console_sdk::agent::usage_agent::{ExecutionReport, UsageAgent};
use hive_console_sdk::graphql_tools::parser::parse_schema;
use hive_console_sdk::graphql_tools::parser::schema::Document;
use http::HeaderValue;
use rand::Rng;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::env;
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::time::{SystemTime, UNIX_EPOCH};
use tokio_util::sync::CancellationToken;
use tower::BoxError;
use tower::ServiceBuilder;
use tower::ServiceExt;
use crate::persisted_documents::PERSISTED_DOCUMENT_HASH_KEY;
// Router-context key under which the per-request `OperationContext` is stored.
pub(crate) static OPERATION_CONTEXT: &str = "hive::operation_context";

/// Per-request data captured before execution and consumed after the response
/// to build the usage report.
#[derive(Serialize, Deserialize, Debug)]
struct OperationContext {
    pub(crate) client_name: Option<String>,
    pub(crate) client_version: Option<String>,
    // Request start time, unix epoch milliseconds.
    pub(crate) timestamp: u64,
    pub(crate) operation_body: String,
    pub(crate) operation_name: Option<String>,
    // True when the operation was excluded by sampling or by the exclude list.
    pub(crate) dropped: bool,
}
/// Resolved (defaulted) subset of `Config` needed on every request.
#[derive(Clone, Debug)]
struct OperationConfig {
    // Probability in [0.0, 1.0] that an operation is reported.
    sample_rate: f64,
    // Operation names that are never reported.
    exclude: Option<Vec<String>>,
    client_name_header: String,
    client_version_header: String,
}
/// Apollo Router plugin that reports operation usage to GraphQL Hive.
pub struct UsagePlugin {
    config: OperationConfig,
    // `None` when the plugin is disabled; the service then passes through.
    agent: Option<UsageAgent>,
    // Parsed supergraph SDL attached to every execution report.
    schema: Arc<Document<'static, String>>,
    // Cancels the agent's background flush task when the plugin is dropped.
    cancellation_token: Arc<CancellationToken>,
}
/// User-facing configuration of the usage plugin (deserialized from the
/// router YAML; several fields also have environment-variable fallbacks).
#[derive(Clone, Debug, Deserialize, JsonSchema, Default)]
pub struct Config {
    /// Default: true
    enabled: Option<bool>,
    /// Hive token, can also be set using the HIVE_TOKEN environment variable.
    /// The token can be a registry access token, or a organization access token.
    registry_token: Option<String>,
    /// Hive registry token. Set to your `/usage` endpoint if you are self-hosting.
    /// Default: https://app.graphql-hive.com/usage
    /// When `target` is set and organization access token is in use, the target ID is appended to the endpoint,
    /// so usage endpoint becomes `https://app.graphql-hive.com/usage/<target_id>`
    registry_usage_endpoint: Option<String>,
    /// The target to which the usage data should be reported to.
    /// This can either be a slug following the format "$organizationSlug/$projectSlug/$targetSlug" (e.g "the-guild/graphql-hive/staging")
    /// or an UUID (e.g. "a0f4c605-6541-4350-8cfe-b31f21a4bf80").
    target: Option<String>,
    /// Sample rate to determine sampling.
    /// 0.0 = 0% chance of being sent
    /// 1.0 = 100% chance of being sent.
    /// Default: 1.0
    sample_rate: Option<f64>,
    /// A list of operations (by name) to be ignored by GraphQL Hive.
    exclude: Option<Vec<String>>,
    /// HTTP header the client name is read from.
    /// Default: graphql-client-name
    client_name_header: Option<String>,
    /// HTTP header the client version is read from.
    /// Default: graphql-client-version
    client_version_header: Option<String>,
    /// A maximum number of operations to hold in a buffer before sending to GraphQL Hive
    /// Default: 1000
    buffer_size: Option<usize>,
    /// A timeout for only the connect phase of a request to GraphQL Hive
    /// Unit: seconds
    /// Default: 5 (s)
    connect_timeout: Option<u64>,
    /// A timeout for the entire request to GraphQL Hive
    /// Unit: seconds
    /// Default: 15 (s)
    request_timeout: Option<u64>,
    /// Accept invalid SSL certificates
    /// Default: false
    accept_invalid_certs: Option<bool>,
    /// Frequency of flushing the buffer to the server
    /// Default: 5 seconds
    flush_interval: Option<u64>,
}
impl UsagePlugin {
    /// Captures per-request metadata (client headers, operation name/body,
    /// timestamp) into the router `Context` under `OPERATION_CONTEXT`, and
    /// decides up-front whether the operation is dropped by sampling or by
    /// the exclude list.
    fn populate_context(config: OperationConfig, req: &supergraph::Request) {
        let context = &req.context;
        let http_request = &req.supergraph_request;
        let headers = http_request.headers();
        // A missing header is treated as the empty string.
        let get_header_value = |key: &str| {
            headers
                .get(key)
                .cloned()
                .unwrap_or_else(|| HeaderValue::from_static(""))
                .to_str()
                .ok()
                .map(|v| v.to_string())
        };
        let client_name = get_header_value(&config.client_name_header);
        let client_version = get_header_value(&config.client_version_header);
        let operation_name = req.supergraph_request.body().operation_name.clone();
        // NOTE(review): panics when the request body has no `query` string.
        // This assumes every request reaching the supergraph stage carries one
        // (e.g. after persisted-document resolution) — confirm upstream.
        let operation_body = req
            .supergraph_request
            .body()
            .query
            .clone()
            .expect("operation body should not be empty");
        let excluded_operation_names: HashSet<String> = config
            .exclude
            .unwrap_or_default()
            .clone()
            .into_iter()
            .collect();
        // Sampling: keep the operation with probability `sample_rate`.
        let mut rng = rand::rng();
        let sampled = rng.random::<f64>() < config.sample_rate;
        let mut dropped = !sampled;
        // Named operations on the exclude list are always dropped.
        if !dropped {
            if let Some(name) = &operation_name {
                if excluded_operation_names.contains(name) {
                    dropped = true;
                }
            }
        }
        let _ = context.insert(
            OPERATION_CONTEXT,
            OperationContext {
                dropped,
                client_name,
                client_version,
                operation_name,
                operation_body,
                // Unix epoch milliseconds (whole-second precision).
                timestamp: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs()
                    * 1000,
            },
        );
    }
}
#[async_trait::async_trait]
impl Plugin for UsagePlugin {
    type Config = Config;

    /// Builds the plugin: configures the `UsageAgent` (explicit config first,
    /// `HIVE_*` environment variables as fallback), spawns its periodic flush
    /// task, and parses the supergraph SDL used when building reports.
    async fn new(init: PluginInit<Config>) -> Result<Self, BoxError> {
        let user_config = init.config;
        let enabled = user_config.enabled.unwrap_or(true);
        if enabled {
            tracing::info!("Starting GraphQL Hive Usage plugin");
        }
        // Used to stop the background flush task when the plugin is dropped.
        let cancellation_token = Arc::new(CancellationToken::new());
        let agent = if enabled {
            let mut agent =
                UsageAgent::builder().user_agent(format!("hive-apollo-router/{}", PLUGIN_VERSION));
            // Each setting: explicit config wins, environment variable is the fallback.
            if let Some(endpoint) = user_config.registry_usage_endpoint {
                agent = agent.endpoint(endpoint);
            } else if let Ok(env_endpoint) = env::var("HIVE_ENDPOINT") {
                agent = agent.endpoint(env_endpoint);
            }
            if let Some(token) = user_config.registry_token {
                agent = agent.token(token);
            } else if let Ok(env_token) = env::var("HIVE_TOKEN") {
                agent = agent.token(env_token);
            }
            if let Some(target_id) = user_config.target {
                agent = agent.target_id(target_id);
            } else if let Ok(env_target) = env::var("HIVE_TARGET_ID") {
                agent = agent.target_id(env_target);
            }
            if let Some(buffer_size) = user_config.buffer_size {
                agent = agent.buffer_size(buffer_size);
            }
            if let Some(connect_timeout) = user_config.connect_timeout {
                agent = agent.connect_timeout(Duration::from_secs(connect_timeout));
            }
            if let Some(request_timeout) = user_config.request_timeout {
                agent = agent.request_timeout(Duration::from_secs(request_timeout));
            }
            if let Some(accept_invalid_certs) = user_config.accept_invalid_certs {
                agent = agent.accept_invalid_certs(accept_invalid_certs);
            }
            if let Some(flush_interval) = user_config.flush_interval {
                agent = agent.flush_interval(Duration::from_secs(flush_interval));
            }
            let agent = agent.build().map_err(Box::new)?;
            // Periodically flush buffered reports until cancellation.
            let cancellation_token_for_interval = cancellation_token.clone();
            let agent_for_interval = agent.clone();
            tokio::task::spawn(async move {
                agent_for_interval
                    .start_flush_interval(&cancellation_token_for_interval)
                    .await;
            });
            Some(agent)
        } else {
            None
        };
        let schema = parse_schema(&init.supergraph_sdl)
            .expect("Failed to parse schema")
            .into_static();
        Ok(UsagePlugin {
            schema: Arc::new(schema),
            config: OperationConfig {
                sample_rate: user_config.sample_rate.unwrap_or(1.0),
                exclude: user_config.exclude,
                client_name_header: user_config
                    .client_name_header
                    .unwrap_or("graphql-client-name".to_string()),
                client_version_header: user_config
                    .client_version_header
                    .unwrap_or("graphql-client-version".to_string()),
            },
            agent,
            cancellation_token,
        })
    }

    /// Wraps the supergraph service: records the operation context before the
    /// request runs and submits an `ExecutionReport` once the outcome is known.
    /// A plain passthrough when the plugin is disabled (no agent).
    fn supergraph_service(&self, service: supergraph::BoxService) -> supergraph::BoxService {
        let config = self.config.clone();
        let schema = self.schema.clone();
        match self.agent.clone() {
            None => ServiceBuilder::new().service(service).boxed(),
            Some(agent) => {
                ServiceBuilder::new()
                    .map_future_with_request_data(
                        move |req: &supergraph::Request| {
                            // Runs before the request: capture metadata into the context.
                            Self::populate_context(config.clone(), req);
                            req.context.clone()
                        },
                        move |ctx: Context, fut| {
                            let agent = agent.clone();
                            let schema = schema.clone();
                            async move {
                                let start: Instant = Instant::now();
                                // nested async block, bc async is unstable with closures that receive arguments
                                let operation_context = ctx
                                    .get::<_, OperationContext>(OPERATION_CONTEXT)
                                    .unwrap_or_default()
                                    .unwrap();
                                // Injected by the persisted document plugin, if it was activated
                                // and discovered document id.
                                // NOTE(review): `.ok().unwrap()` yields Option<String> when the
                                // key is absent, but panics if the stored value fails to
                                // deserialize as String; `.ok().flatten()` would be safer.
                                let persisted_document_hash = ctx
                                    .get::<_, String>(PERSISTED_DOCUMENT_HASH_KEY)
                                    .ok()
                                    .unwrap();
                                let result: supergraph::ServiceResult = fut.await;
                                // Sampled-out / excluded operations are never reported.
                                if operation_context.dropped {
                                    tracing::debug!(
                                        "Dropping operation (phase: SAMPLING): {}",
                                        operation_context
                                            .operation_name
                                            .clone()
                                            .or_else(|| Some("anonymous".to_string()))
                                            .unwrap()
                                    );
                                    return result;
                                }
                                let OperationContext {
                                    client_name,
                                    client_version,
                                    operation_name,
                                    timestamp,
                                    operation_body,
                                    ..
                                } = operation_context;
                                let duration = start.elapsed();
                                match result {
                                    // Service-level failure: report a single errored execution.
                                    Err(e) => {
                                        tokio::spawn(async move {
                                            let res = agent
                                                .add_report(ExecutionReport {
                                                    schema,
                                                    client_name,
                                                    client_version,
                                                    timestamp,
                                                    duration,
                                                    ok: false,
                                                    errors: 1,
                                                    operation_body,
                                                    operation_name,
                                                    persisted_document_hash,
                                                })
                                                .await;
                                            if let Err(e) = res {
                                                tracing::error!("Error adding report: {}", e);
                                            }
                                        });
                                        Err(e)
                                    }
                                    Ok(router_response) => {
                                        let is_failure =
                                            !router_response.response.status().is_success();
                                        Ok(router_response.map(move |response_stream| {
                                            let res = response_stream
                                                .map(move |response| {
                                                    // make sure we send a single report, not for each chunk
                                                    // NOTE(review): this closure runs once per chunk of the
                                                    // response stream; confirm that multi-chunk (deferred)
                                                    // responses do not produce multiple reports.
                                                    let response_has_errors =
                                                        !response.errors.is_empty();
                                                    let agent = agent.clone();
                                                    let execution_report = ExecutionReport {
                                                        schema: schema.clone(),
                                                        client_name: client_name.clone(),
                                                        client_version: client_version.clone(),
                                                        timestamp,
                                                        duration,
                                                        ok: !is_failure && !response_has_errors,
                                                        errors: response.errors.len(),
                                                        operation_body: operation_body.clone(),
                                                        operation_name: operation_name.clone(),
                                                        persisted_document_hash:
                                                            persisted_document_hash.clone(),
                                                    };
                                                    // Fire-and-forget so reporting never blocks the response.
                                                    tokio::spawn(async move {
                                                        let res = agent
                                                            .add_report(execution_report)
                                                            .await;
                                                        if let Err(e) = res {
                                                            tracing::error!(
                                                                "Error adding report: {}",
                                                                e
                                                            );
                                                        }
                                                    });
                                                    response
                                                })
                                                .boxed();
                                            res
                                        }))
                                    }
                                }
                            }
                        },
                    )
                    .service(service)
                    .boxed()
            }
        }
    }
}
impl Drop for UsagePlugin {
    /// Stops the background flush task; the agent's own `Drop` performs the
    /// final flush of buffered reports.
    fn drop(&mut self) {
        self.cancellation_token.cancel();
        // Flush already done by UsageAgent's Drop impl
    }
}
#[cfg(test)]
mod hive_usage_tests {
    use apollo_router::{
        plugin::{test::MockSupergraphService, Plugin, PluginInit},
        services::supergraph,
    };
    use http::header::{AUTHORIZATION, CONTENT_TYPE, USER_AGENT};
    use httpmock::{Method::POST, Mock, MockServer};
    use jsonschema::Validator;
    use serde_json::json;
    use tower::ServiceExt;

    use crate::consts::PLUGIN_VERSION;

    use super::{Config, UsagePlugin};

    lazy_static::lazy_static! {
        // Validates reported payloads against the usage-report JSON schema.
        static ref SCHEMA_VALIDATOR: Validator =
            jsonschema::validator_for(&serde_json::from_str(&std::fs::read_to_string("../../services/usage/usage-report-v2.schema.json").expect("can't load json schema file")).expect("failed to parse json schema")).expect("failed to parse schema");
    }

    /// Spins up a mock usage endpoint and a plugin instance wired to it.
    struct UsageTestHelper {
        mocked_upstream: MockServer,
        plugin: UsagePlugin,
    }

    impl UsageTestHelper {
        async fn new() -> Self {
            let server: MockServer = MockServer::start();
            let usage_endpoint = server.url("/usage");
            // Struct literal instead of field-by-field reassignment of
            // `Config::default()` (clippy::field_reassign_with_default);
            // also matches the config style used by the other plugin tests.
            let config = Config {
                enabled: Some(true),
                registry_usage_endpoint: Some(usage_endpoint.to_string()),
                registry_token: Some("123".into()),
                // Tiny buffer + 1s flush so reports are delivered quickly.
                buffer_size: Some(1),
                flush_interval: Some(1),
                ..Default::default()
            };
            let plugin_service = UsagePlugin::new(
                PluginInit::fake_builder()
                    .config(config)
                    .supergraph_sdl("type Query { dummy: String! }".to_string().into())
                    .build(),
            )
            .await
            .expect("failed to init plugin");
            UsageTestHelper {
                mocked_upstream: server,
                plugin: plugin_service,
            }
        }

        /// Gives the background flush task time to deliver the report.
        fn wait_for_processing(&self) -> tokio::time::Sleep {
            tokio::time::sleep(tokio::time::Duration::from_secs(2))
        }

        /// Registers the expected usage POST, including JSON-schema validation
        /// of the reported body.
        fn activate_usage_mock(&'_ self) -> Mock<'_> {
            self.mocked_upstream.mock(|when, then| {
                when.method(POST)
                    .path("/usage")
                    .header(CONTENT_TYPE.as_str(), "application/json")
                    .header(
                        USER_AGENT.as_str(),
                        format!("hive-apollo-router/{}", PLUGIN_VERSION),
                    )
                    .header(AUTHORIZATION.as_str(), "Bearer 123")
                    .header("X-Usage-API-Version", "2")
                    .matches(|r| {
                        // This mock also validates that the content of the reported usage is valid
                        // when it comes to the JSON schema validation.
                        // if it does not match, the request matching will fail and this will lead
                        // to a failed assertion
                        let body = r.body.as_ref().unwrap();
                        let body = String::from_utf8(body.to_vec()).unwrap();
                        let body = serde_json::from_str(&body).unwrap();
                        SCHEMA_VALIDATOR.is_valid(&body)
                    });
                then.status(200);
            })
        }

        /// Runs `req` through the plugin with a mocked supergraph service
        /// that always returns a fixed successful response.
        async fn execute_operation(&self, req: supergraph::Request) -> supergraph::Response {
            let mut supergraph_service_mock = MockSupergraphService::new();
            supergraph_service_mock
                .expect_call()
                .times(1)
                .returning(move |_| {
                    Ok(supergraph::Response::fake_builder()
                        .data(json!({
                            "data": { "hello": "world" },
                        }))
                        .build()
                        .unwrap())
                });
            let tower_service = self
                .plugin
                .supergraph_service(supergraph_service_mock.boxed());
            let response = tower_service
                .oneshot(req)
                .await
                .expect("failed to execute operation");
            response
        }
    }

    #[tokio::test]
    async fn should_work_correctly_for_simple_query() {
        let instance = UsageTestHelper::new().await;
        let req = supergraph::Request::fake_builder()
            .query("query test { hello }")
            .operation_name("test")
            .build()
            .unwrap();
        let mock = instance.activate_usage_mock();
        instance.execute_operation(req).await.next_response().await;
        instance.wait_for_processing().await;
        mock.assert();
        mock.assert_hits(1);
    }

    #[tokio::test]
    async fn without_operation_name() {
        let instance = UsageTestHelper::new().await;
        let req = supergraph::Request::fake_builder()
            .query("query { hello }")
            .build()
            .unwrap();
        let mock = instance.activate_usage_mock();
        instance.execute_operation(req).await.next_response().await;
        instance.wait_for_processing().await;
        mock.assert();
        mock.assert_hits(1);
    }

    #[tokio::test]
    async fn multiple_operations() {
        let instance = UsageTestHelper::new().await;
        let req = supergraph::Request::fake_builder()
            .query("query test { hello } query test2 { hello }")
            .operation_name("test")
            .build()
            .unwrap();
        let mock = instance.activate_usage_mock();
        instance.execute_operation(req).await.next_response().await;
        instance.wait_for_processing().await;
        println!("Waiting done");
        mock.assert();
        mock.assert_hits(1);
    }
}

View file

@ -1,14 +0,0 @@
#!/bin/bash
# Fail fast on errors, unset variables and pipeline failures.
# (The original shebang was "#/bin/bash" — missing the "!", so the file was
# run by whatever shell invoked it rather than guaranteed bash.)
set -euo pipefail

# The following script syncs the "version" field in package.json to the "package.version" field in Cargo.toml
# This main versioning flow is managed by Changeset.
# This file is executed during "changeset version" (when the version is bumped and release PR is created)
# to sync the version in Cargo.toml
# References:
#   .github/workflows/publish-rust.yaml - The GitHub action that runs this script (after "changeset version")
#   .github/workflows/main-rust.yaml - The GitHub action that does the actual publishing, if a changeset is declared to this package/crate

npm_version=$(node -p "require('./package.json').version")
cargo install set-cargo-version
# Quote the version so an empty/odd value fails loudly instead of silently
# shifting arguments.
set-cargo-version ./Cargo.toml "$npm_version"

View file

@ -1,4 +0,0 @@
# Pins the Rust toolchain for this crate; picked up automatically by rustup.
[toolchain]
channel = "1.94.1"
# Extra components installed alongside the compiler.
components = ["rustfmt", "clippy"]
profile = "minimal"

View file

@ -1,100 +0,0 @@
import { readFile } from 'node:fs/promises';
import { join } from 'node:path';
import { setOutput } from '@actions/core';
// Compares the locally pinned apollo-router version (from Cargo.lock) against
// the latest stable GitHub release, and emits GitHub Actions step outputs
// (`update`, `version`, `title`) that drive the update workflow.
const [localVersion, latestStableVersion] = await Promise.all([
  fetchLocalVersion(),
  fetchLatestVersion(),
]);

console.log(`Latest stable version: ${latestStableVersion}`);
console.log(`Local version: ${localVersion}`);

if (localVersion === latestStableVersion) {
  console.log('Local version is up to date');
  setOutput('update', 'false');
  process.exit(0);
}

console.log('Local version is out of date');

// Skip when a PR for this exact version bump is already open.
if (await isPullRequestOpen(latestStableVersion)) {
  console.log(`PR already exists`);
  setOutput('update', 'false');
} else {
  console.log('PR does not exist.');
  console.log(`Run: cargo update -p apollo-router --precise ${latestStableVersion}`);
  console.log('Then commit and push the changes.');
  setOutput('update', 'true');
  setOutput('version', latestStableVersion);
}
/**
 * Reads a required environment variable, failing loudly when it is unset
 * or empty.
 * @throws Error when the variable is missing or empty.
 */
function ensureEnv(name: string): string {
  const value = process.env[name] ?? '';
  if (value === '') {
    throw new Error(`Missing ${name} environment variable`);
  }
  return value;
}
/**
 * Fetches the latest stable apollo-router release from the GitHub API.
 * @returns the release version, i.e. the tag name with a leading "v" stripped.
 * @throws Error when the API call fails or no tag name is present.
 */
async function fetchLatestVersion(): Promise<string> {
  const latestResponse = await fetch(
    'https://api.github.com/repos/apollographql/router/releases/latest',
    {
      method: 'GET',
    },
  );
  if (!latestResponse.ok) {
    throw new Error('Failed to fetch versions');
  }
  const latest = (await latestResponse.json()) as { tag_name?: string };
  // Strip only a LEADING "v" (e.g. "v1.2.3" -> "1.2.3"). The previous
  // `replace('v', '')` removed the first "v" anywhere in the tag, which
  // would mangle tags containing a "v" elsewhere. Optional chaining also
  // routes a missing tag_name into the error below instead of a TypeError.
  const latestStableVersion = latest.tag_name?.replace(/^v/, '');
  if (!latestStableVersion) {
    throw new Error('Failed to find latest stable version');
  }
  return latestStableVersion;
}
/**
 * Reads the pinned apollo-router version from ./Cargo.lock in the current
 * working directory.
 * @throws Error when the package entry or its version line cannot be found.
 */
async function fetchLocalVersion(): Promise<string> {
  const lockFile = await readFile(join(process.cwd(), './Cargo.lock'), 'utf-8');
  const packageSections = lockFile.split('[[package]]');
  const apolloRouterPackage = packageSections.find(pkg =>
    pkg.includes('name = "apollo-router"'),
  );
  if (!apolloRouterPackage) {
    throw new Error('Failed to find apollo-router package in Cargo.lock');
  }
  const versionMatch = apolloRouterPackage.match(/version = "(.*)"/);
  if (versionMatch === null) {
    throw new Error('Failed to find version of apollo-router package in Cargo.lock');
  }
  return versionMatch[1];
}
/**
 * Checks whether an update PR with the expected title is already open.
 * Also exports the computed PR title as the `title` step output (side effect
 * used by the calling workflow even when this returns true).
 *
 * NOTE(review): this lists pull requests on apollographql/router, but the
 * update PR titled "Update apollo-router to X" is presumably opened against
 * THIS repository — verify the repo in this URL against the workflow that
 * opens the PR.
 */
async function isPullRequestOpen(latestStableVersion: string) {
  const prTitle = `Update apollo-router to ${latestStableVersion}`;
  setOutput('title', prTitle);
  const prResponse = await fetch(`https://api.github.com/repos/apollographql/router/pulls`);
  if (!prResponse.ok) {
    throw new Error('Failed to fetch PRs');
  }
  const prs: Array<{
    title: string;
    html_url: string;
  }> = await prResponse.json();
  return prs.some(pr => pr.title === prTitle);
}

View file

@ -1,15 +0,0 @@
# Internal helper binary (not published to crates.io).
[package]
name = "compress"
version = "0.0.1"
authors = ["Kamil Kisiela <kamil.kisiela@gmail.com>"]
edition = "2021"
license = "MIT"
publish = false

[[bin]]
name = "compress"
path = "src/main.rs"

# gzip (flate2) and tar archive support.
[dependencies]
flate2 = "1.0.30"
tar = "0.4"