Move hive-console-sdk-rs to Hive Router repository (#7476)

Kamil Kisiela 2026-01-20 17:27:47 +01:00 committed by GitHub
parent 6274fa3595
commit f4d5f7ee5b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
29 changed files with 40 additions and 4515 deletions

View file

@@ -0,0 +1,5 @@
---
'hive-apollo-router-plugin': patch
---
Updated `hive-apollo-router-plugin` to use `hive-console-sdk` from crates.io instead of a local dependency. The plugin now uses `graphql-tools::parser` instead of `graphql-parser`, to leverage the parser we now ship in the `graphql-tools` crate.
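A minimal sketch of the import change on the consumer side (assuming the re-exported parser keeps `graphql-parser`'s 0.4 API, as the plugin diff below shows):

```rust
// Before: the standalone graphql-parser crate
// use graphql_parser::{parse_schema, schema::Document};

// After: the same parser, re-exported by the graphql-tools crate
use graphql_tools::parser::parse_schema;
use graphql_tools::parser::schema::Document;

fn parse(sdl: &str) -> Document<'_, String> {
    parse_schema(sdl).expect("valid SDL")
}
```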

View file

@@ -1,3 +1,3 @@
[workspace]
resolver = "2"
members = ["packages/libraries/router", "scripts/compress", "packages/libraries/sdk-rs"]
members = ["packages/libraries/router", "scripts/compress"]

configs/cargo/Cargo.lock (generated, 116 lines changed)
View file

@@ -1240,9 +1240,9 @@ dependencies = [
[[package]]
name = "cc"
version = "1.2.43"
version = "1.2.52"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "739eb0f94557554b3ca9a86d2d37bebd49c5e6d0c1d2bda35ba5bdac830befc2"
checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3"
dependencies = [
"find-msvc-tools",
"jobserver",
@@ -1325,15 +1325,6 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
[[package]]
name = "colored"
version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e"
dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "combine"
version = "4.6.7"
@@ -2039,9 +2030,9 @@ dependencies = [
[[package]]
name = "find-msvc-tools"
version = "0.1.4"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127"
checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41"
[[package]]
name = "fixedbitset"
@@ -2414,15 +2405,18 @@ dependencies = [
[[package]]
name = "graphql-tools"
version = "0.4.0"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68fb22726aceab7a8933cdcff4201e1cdbcc7c7394df5bc1ebdcf27b44376433"
checksum = "9513662272317e955f5d72b13b4015ba31c84d68225f4b5d0a2ad6ffceec1258"
dependencies = [
"graphql-parser",
"combine",
"itoa",
"lazy_static",
"ryu",
"serde",
"serde_json",
"serde_with",
"thiserror 2.0.17",
]
[[package]]
@@ -2600,7 +2594,7 @@ dependencies = [
[[package]]
name = "hive-apollo-router-plugin"
version = "2.3.6"
version = "3.0.0"
dependencies = [
"anyhow",
"apollo-router",
@@ -2608,7 +2602,7 @@ dependencies = [
"axum-core 0.5.5",
"bytes",
"futures",
"graphql-parser",
"graphql-tools",
"hive-console-sdk",
"http 1.3.1",
"http-body-util",
@@ -2628,18 +2622,17 @@ dependencies = [
[[package]]
name = "hive-console-sdk"
version = "0.2.3"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f450613f002f14d421378aa8dde956b55c91db4a7b66d41b8c18bc8d1d51e671"
dependencies = [
"anyhow",
"async-dropper-simple",
"async-trait",
"axum-core 0.5.5",
"futures-util",
"graphql-parser",
"graphql-tools",
"lazy_static",
"md5",
"mockito",
"moka",
"once_cell",
"recloser",
@@ -2933,7 +2926,7 @@ dependencies = [
"js-sys",
"log",
"wasm-bindgen",
"windows-core 0.62.2",
"windows-core 0.61.2",
]
[[package]]
@@ -3174,9 +3167,9 @@ dependencies = [
[[package]]
name = "itoa"
version = "1.0.15"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
[[package]]
name = "jobserver"
@@ -3493,9 +3486,9 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"
[[package]]
name = "md5"
version = "0.7.0"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
checksum = "ae960838283323069879657ca3de837e9f7bbb4c7bf6ea7f1b290d5e9476d2e0"
[[package]]
name = "mediatype"
@@ -3588,31 +3581,6 @@ dependencies = [
"syn 2.0.108",
]
[[package]]
name = "mockito"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e0603425789b4a70fcc4ac4f5a46a566c116ee3e2a6b768dc623f7719c611de"
dependencies = [
"assert-json-diff",
"bytes",
"colored",
"futures-core",
"http 1.3.1",
"http-body 1.0.1",
"http-body-util",
"hyper 1.7.0",
"hyper-util",
"log",
"pin-project-lite",
"rand 0.9.2",
"regex",
"serde_json",
"serde_urlencoded",
"similar",
"tokio",
]
[[package]]
name = "moka"
version = "0.12.11"
@@ -4783,6 +4751,7 @@ dependencies = [
"futures-channel",
"futures-core",
"futures-util",
"h2",
"http 1.3.1",
"http-body 1.0.1",
"http-body-util",
@@ -5082,9 +5051,9 @@ dependencies = [
[[package]]
name = "ryu"
version = "1.0.20"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984"
[[package]]
name = "same-file"
@@ -6041,9 +6010,9 @@ dependencies = [
[[package]]
name = "tower-http"
version = "0.6.6"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8"
dependencies = [
"async-compression",
"base64 0.22.1",
@@ -6707,7 +6676,7 @@ checksum = "810ce18ed2112484b0d4e15d022e5f598113e220c53e373fb31e67e21670c1ce"
dependencies = [
"windows-implement 0.59.0",
"windows-interface",
"windows-result 0.3.4",
"windows-result",
"windows-strings 0.3.1",
"windows-targets 0.53.5",
]
@@ -6721,23 +6690,10 @@ dependencies = [
"windows-implement 0.60.2",
"windows-interface",
"windows-link 0.1.3",
"windows-result 0.3.4",
"windows-result",
"windows-strings 0.4.2",
]
[[package]]
name = "windows-core"
version = "0.62.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb"
dependencies = [
"windows-implement 0.60.2",
"windows-interface",
"windows-link 0.2.1",
"windows-result 0.4.1",
"windows-strings 0.5.1",
]
[[package]]
name = "windows-future"
version = "0.2.1"
@@ -6813,15 +6769,6 @@ dependencies = [
"windows-link 0.1.3",
]
[[package]]
name = "windows-result"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5"
dependencies = [
"windows-link 0.2.1",
]
[[package]]
name = "windows-strings"
version = "0.3.1"
@@ -6840,15 +6787,6 @@ dependencies = [
"windows-link 0.1.3",
]
[[package]]
name = "windows-strings"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091"
dependencies = [
"windows-link 0.2.1",
]
[[package]]
name = "windows-sys"
version = "0.45.0"

View file

@@ -341,8 +341,6 @@ target "apollo-router" {
inherits = ["router-base", get_target()]
contexts = {
router_pkg = "${PWD}/packages/libraries/router"
sdk_rs_pkg = "${PWD}/packages/libraries/sdk-rs"
usage_service = "${PWD}/packages/services/usage"
config = "${PWD}/configs/cargo"
}
args = {

View file

@@ -1,6 +1,5 @@
# syntax=docker/dockerfile:1
FROM scratch AS router_pkg
FROM scratch AS sdk_rs_pkg
FROM scratch AS config
FROM rust:1.91.1-slim-bookworm AS build
@@ -15,29 +14,11 @@ RUN rustup component add rustfmt
WORKDIR /usr/src
# Create blank projects
RUN USER=root cargo new router
RUN USER=root cargo new sdk-rs
# Copy Cargo files
COPY --from=router_pkg Cargo.toml /usr/src/router/
COPY --from=sdk_rs_pkg Cargo.toml /usr/src/sdk-rs/
COPY --from=config Cargo.lock /usr/src/router/
# Copy usage report schema
# `agent.rs` uses it
# So we need to place it accordingly
COPY --from=usage_service usage-report-v2.schema.json /usr/src/sdk-rs/
WORKDIR /usr/src/sdk-rs
# Get the dependencies cached, so we can use dummy input files so Cargo won't fail
RUN echo 'fn main() { println!(""); }' > ./src/main.rs
RUN echo 'fn main() { println!(""); }' > ./src/lib.rs
RUN cargo build --release
# Copy in the actual source code
COPY --from=sdk_rs_pkg src ./src
RUN touch ./src/main.rs
RUN touch ./src/lib.rs
WORKDIR /usr/src/router
# Get the dependencies cached, so we can use dummy input files so Cargo won't fail
RUN echo 'fn main() { println!(""); }' > ./src/main.rs

View file

@@ -43,7 +43,7 @@
"prettier": "prettier --cache --write --list-different --ignore-unknown \"**/*\"",
"release": "pnpm build:libraries && changeset publish",
"release:docs:update-version": "tsx scripts/sync-docker-image-tag-docs.ts",
"release:version": "changeset version && pnpm --filter hive-apollo-router-plugin --filter hive-console-sdk-rs sync-cargo-file && pnpm build:libraries && pnpm --filter @graphql-hive/cli oclif:readme && pnpm run release:docs:update-version",
"release:version": "changeset version && pnpm --filter hive-apollo-router-plugin sync-cargo-file && pnpm build:libraries && pnpm --filter @graphql-hive/cli oclif:readme && pnpm run release:docs:update-version",
"seed:org": "tsx scripts/seed-organization.mts",
"seed:schemas": "tsx scripts/seed-schemas.ts",
"seed:usage": "tsx scripts/seed-usage.ts",

View file

@@ -19,7 +19,7 @@ path = "src/lib.rs"
[dependencies]
apollo-router = { version = "^2.0.0" }
axum-core = "0.5"
hive-console-sdk = { path = "../sdk-rs", version = "0" }
hive-console-sdk = "0.3.3"
sha2 = { version = "0.10.8", features = ["std"] }
anyhow = "1"
tracing = "0.1"
@@ -33,7 +33,7 @@ tokio = { version = "1.36.0", features = ["full"] }
tower = { version = "0.5", features = ["full"] }
http = "1"
http-body-util = "0.1"
graphql-parser = "0.4.1"
graphql-tools = "0.4.2"
rand = "0.9.0"
tokio-util = "0.7.16"

View file

@@ -4,8 +4,5 @@
"private": true,
"scripts": {
"sync-cargo-file": "./sync-cargo-file.sh"
},
"dependencies": {
"hive-console-sdk-rs": "workspace:*"
}
}

View file

@@ -1,7 +1,7 @@
use crate::consts::PLUGIN_VERSION;
use crate::registry_logger::Logger;
use anyhow::{anyhow, Result};
use hive_console_sdk::supergraph_fetcher::sync::SupergraphFetcherSyncState;
use hive_console_sdk::supergraph_fetcher::sync_fetcher::SupergraphFetcherSyncState;
use hive_console_sdk::supergraph_fetcher::SupergraphFetcher;
use sha2::Digest;
use sha2::Sha256;

View file

@@ -6,8 +6,8 @@ use apollo_router::services::*;
use apollo_router::Context;
use core::ops::Drop;
use futures::StreamExt;
use graphql_parser::parse_schema;
use graphql_parser::schema::Document;
use graphql_tools::parser::parse_schema;
use graphql_tools::parser::schema::Document;
use hive_console_sdk::agent::usage_agent::UsageAgentExt;
use hive_console_sdk::agent::usage_agent::{ExecutionReport, UsageAgent};
use http::HeaderValue;

View file

@@ -1 +0,0 @@
target/**

View file

@@ -1,228 +0,0 @@
# hive-console-sdk-rs
## 0.3.0
### Minor Changes
- [#7379](https://github.com/graphql-hive/console/pull/7379)
[`b134461`](https://github.com/graphql-hive/console/commit/b13446109d9663ccabef07995eb25cf9dff34f37)
Thanks [@ardatan](https://github.com/ardatan)! - Breaking changes, made to avoid future breaking
changes:
Switch to the [Builder](https://rust-unofficial.github.io/patterns/patterns/creational/builder.html)
pattern for the `SupergraphFetcher`, `PersistedDocumentsManager` and `UsageAgent` structs.
The `try_new`, `try_new_async` and `try_new_sync` functions are gone; use the
`SupergraphFetcherBuilder`, `PersistedDocumentsManagerBuilder` and `UsageAgentBuilder` structs to
create instances instead.
Benefits:
- No need to provide every parameter when creating an instance; omitted options fall back to their defaults.
Example:
```rust
// Before
let fetcher = SupergraphFetcher::try_new_async(
"SOME_ENDPOINT", // endpoint
"SOME_KEY",
"MyUserAgent/1.0".to_string(),
Duration::from_secs(5), // connect_timeout
Duration::from_secs(10), // request_timeout
false, // accept_invalid_certs
3, // retry_count
)?;
// After
// No need to provide all parameters at once, can use default values
let fetcher = SupergraphFetcherBuilder::new()
.endpoint("SOME_ENDPOINT".to_string())
.key("SOME_KEY".to_string())
.build_async()?;
```
- Easier to add new configuration options in the future without breaking existing code.
Example:
```rust
let fetcher = SupergraphFetcher::try_new_async(
"SOME_ENDPOINT", // endpoint
"SOME_KEY",
"MyUserAgent/1.0".to_string(),
Duration::from_secs(5), // connect_timeout
Duration::from_secs(10), // request_timeout
false, // accept_invalid_certs
3, // retry_count
circuit_breaker_config, // Breaking Change -> new parameter added
)?;
let fetcher = SupergraphFetcherBuilder::new()
.endpoint("SOME_ENDPOINT".to_string())
.key("SOME_KEY".to_string())
.build_async()?; // No breaking change, circuit_breaker_config can be added later if needed
```
### Patch Changes
- [#7379](https://github.com/graphql-hive/console/pull/7379)
[`b134461`](https://github.com/graphql-hive/console/commit/b13446109d9663ccabef07995eb25cf9dff34f37)
Thanks [@ardatan](https://github.com/ardatan)! - Circuit Breaker Implementation and Multiple
Endpoints Support
Circuit breakers are now implemented in the Hive Console Rust SDK; you can learn more
[here](https://the-guild.dev/graphql/hive/product-updates/2025-12-04-cdn-mirror-and-circuit-breaker)
Breaking Changes:
The `endpoint` configuration now accepts multiple endpoints as an array for `SupergraphFetcherBuilder`
and `PersistedDocumentsManager`.
```diff
SupergraphFetcherBuilder::default()
- .endpoint(endpoint)
+ .add_endpoint(endpoint1)
+ .add_endpoint(endpoint2)
```
This change requires updating the configuration structure to accommodate multiple endpoints.
## 0.2.3
### Patch Changes
- [#7446](https://github.com/graphql-hive/console/pull/7446)
[`0ac2e06`](https://github.com/graphql-hive/console/commit/0ac2e06fd6eb94c9d9817f78faf6337118f945eb)
Thanks [@ardatan](https://github.com/ardatan)! - Fixed a stack overflow error when collecting
schema coordinates from recursive input object types.
Let's consider the following schema:
```graphql
input RecursiveInput {
field: String
nested: RecursiveInput
}
```
And you have an operation that uses this input type:
```graphql
query UserQuery($input: RecursiveInput!) {
user(input: $input) {
id
}
}
```
When collecting schema coordinates from operations that use this input type, the previous
implementation could enter an infinite recursion when traversing the nested `RecursiveInput` type.
This would lead to a stack overflow error.
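The fix boils down to a cycle guard during traversal. A minimal sketch of the idea, with a hypothetical `fields_of` lookup (not the SDK's actual code):

```rust
use std::collections::HashSet;

/// Collects "Type.field" coordinates from an input object type, using a
/// visited set so recursive inputs such as `RecursiveInput` terminate.
fn collect_input_coordinates(
    type_name: &str,
    fields_of: &impl Fn(&str) -> Vec<(String, String)>, // (field name, named type); hypothetical
    visited: &mut HashSet<String>,
    out: &mut Vec<String>,
) {
    // Already expanded this input type: stop instead of recursing forever.
    if !visited.insert(type_name.to_string()) {
        return;
    }
    for (field, field_type) in fields_of(type_name) {
        out.push(format!("{type_name}.{field}"));
        collect_input_coordinates(&field_type, fields_of, visited, out);
    }
}
```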
- [#7448](https://github.com/graphql-hive/console/pull/7448)
[`4b796f9`](https://github.com/graphql-hive/console/commit/4b796f95bbc0fc37aac2c3a108a6165858b42b49)
Thanks [@kamilkisiela](https://github.com/kamilkisiela)! - export `minify_query` and
`normalize_operation` functions (mainly for Hive Router)
- [#7439](https://github.com/graphql-hive/console/pull/7439)
[`a9905ec`](https://github.com/graphql-hive/console/commit/a9905ec7198cf1bec977a281c5021e0ef93c2c34)
Thanks [@jdolle](https://github.com/jdolle)! - Remove the usage flag (`!`) from non-null but unused
variables to match the JS SDK
## 0.2.2
### Patch Changes
- [#7405](https://github.com/graphql-hive/console/pull/7405)
[`24c0998`](https://github.com/graphql-hive/console/commit/24c099818e4dfec43feea7775e8189d0f305a10c)
Thanks [@ardatan](https://github.com/ardatan)! - Generate the usage-report Rust structs directly
from the JSON Schema specification, making the schema the source of truth instead of manually written types
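The deleted `usage_agent.rs` below shows the mechanism: one `typify` macro call generates the report types from the schema file at compile time.

```rust
// Generates Rust types (Report, RequestOperation, Metadata, ...) directly
// from the JSON Schema, so the schema file is the single source of truth.
typify::import_types!(schema = "./usage-report-v2.schema.json");
```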
## 0.2.1
### Patch Changes
- [#7364](https://github.com/graphql-hive/console/pull/7364)
[`69e2f74`](https://github.com/graphql-hive/console/commit/69e2f74ab867ee5e97bbcfcf6a1b69bb23ccc7b2)
Thanks [@ardatan](https://github.com/ardatan)! - Fix the bug where reports were not being sent
correctly due to missing headers
## 0.2.0
### Minor Changes
- [#7246](https://github.com/graphql-hive/console/pull/7246)
[`cc6cd28`](https://github.com/graphql-hive/console/commit/cc6cd28eb52d774683c088ce456812d3541d977d)
Thanks [@ardatan](https://github.com/ardatan)! - Breaking:
- `SupergraphFetcher` now has two different modes: async and sync. You can choose between
`SupergraphFetcherAsyncClient` and `SupergraphFetcherSyncClient` based on your needs. See the
examples at the bottom.
- `SupergraphFetcher` now has a new `retry_count` parameter to specify how many times to retry
fetching the supergraph in case of failures.
- `PersistedDocumentsManager` now needs a `user_agent` parameter, which is sent to Hive Console when
fetching persisted queries.
- `UsageAgent::new` is now `UsageAgent::try_new` and it returns a `Result` with `Arc`, so you can
freely clone it across threads. This change was made to handle potential errors during the
creation of the HTTP client. Make sure to handle the `Result` when creating a `UsageAgent`.
```rust
// Sync Mode
let fetcher = SupergraphFetcher::try_new_sync(/* params */)
.map_err(|e| anyhow!("Failed to create SupergraphFetcher: {}", e))?;
// Use the fetcher to fetch the supergraph (Sync)
let supergraph = fetcher
.fetch_supergraph()
.map_err(|e| anyhow!("Failed to fetch supergraph: {}", e))?;
// Async Mode
let fetcher = SupergraphFetcher::try_new_async(/* params */)
.map_err(|e| anyhow!("Failed to create SupergraphFetcher: {}", e))?;
// Use the fetcher to fetch the supergraph (Async)
let supergraph = fetcher
.fetch_supergraph()
.await
.map_err(|e| anyhow!("Failed to fetch supergraph: {}", e))?;
```
## 0.1.1
### Patch Changes
- [#7248](https://github.com/graphql-hive/console/pull/7248)
[`d8f6e25`](https://github.com/graphql-hive/console/commit/d8f6e252ee3cd22948eb0d64b9d25c9b04dba47c)
Thanks [@n1ru4l](https://github.com/n1ru4l)! - Support project and personal access tokens (`hvp1/`
and `hvu1/`).
## 0.1.0
### Minor Changes
- [#7196](https://github.com/graphql-hive/console/pull/7196)
[`7878736`](https://github.com/graphql-hive/console/commit/7878736643578ab23d95412b893c091e32691e60)
Thanks [@ardatan](https://github.com/ardatan)! - Breaking:
- `UsageAgent` now accepts `Duration` for `connect_timeout` and `request_timeout` instead of
`u64` (see the sketch after this list).
- `SupergraphFetcher` now accepts `Duration` for `connect_timeout` and `request_timeout` instead
of `u64`.
- `PersistedDocumentsManager` now accepts `Duration` for `connect_timeout` and `request_timeout`
instead of `u64`.
- Use original `graphql-parser` and `graphql-tools` crates instead of forked versions.
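A minimal sketch of the migration (values are illustrative):

```rust
use std::time::Duration;

fn main() {
    // Before: plain integers, e.g. connect_timeout: 5 (implicitly seconds)
    // After: explicit durations, so the unit is part of the type
    let connect_timeout = Duration::from_secs(5);
    let request_timeout = Duration::from_secs(60);
    assert_eq!(connect_timeout.as_secs(), 5);
    assert_eq!(request_timeout.as_secs(), 60);
}
```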
## 0.0.1
### Patch Changes
- [#7143](https://github.com/graphql-hive/console/pull/7143)
[`b80e896`](https://github.com/graphql-hive/console/commit/b80e8960f492e3bcfe1012caab294d9066d86fe3)
Thanks [@ardatan](https://github.com/ardatan)! - Extract the Hive Console integration implementation
into a new package, `hive-console-sdk`, which any Rust library can use for Hive Console
integration.
It also includes a refactor that uses fewer Mutexes, such as replacing `lru` + `Mutex` with the
thread-safe `moka` crate. Only the one place that handles queueing still uses a `Mutex`.
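A minimal sketch of that swap, assuming `moka`'s synchronous cache API:

```rust
use moka::sync::Cache;

fn main() {
    // Thread-safe out of the box, unlike `lru::LruCache` wrapped in a `Mutex`.
    let cache: Cache<String, String> = Cache::new(10_000);
    cache.insert("sdl".into(), "type Query { ok: Boolean }".into());
    assert_eq!(cache.get("sdl").as_deref(), Some("type Query { ok: Boolean }"));
}
```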

View file

@@ -1,46 +0,0 @@
[package]
name = "hive-console-sdk"
repository = "https://github.com/graphql-hive/console/"
edition = "2021"
license = "MIT"
publish = true
version = "0.3.0"
description = "Rust SDK for Hive Console"
[lib]
name = "hive_console_sdk"
path = "src/lib.rs"
[dependencies]
async-trait = "0.1.77"
axum-core = "0.5"
thiserror = "2.0.11"
reqwest = { version = "0.12.24", default-features = false, features = [
"rustls-tls",
"blocking",
] }
reqwest-retry = "0.8.0"
reqwest-middleware = { version = "0.4.2", features = ["json"]}
anyhow = "1"
tracing = "0.1"
serde = "1"
tokio = { version = "1.36.0", features = ["full"] }
graphql-tools = "0.4.0"
graphql-parser = "0.4.1"
md5 = "0.7.0"
serde_json = "1"
moka = { version = "0.12.10", features = ["future", "sync"] }
sha2 = { version = "0.10.8", features = ["std"] }
tokio-util = "0.7.16"
regex-automata = "0.4.10"
once_cell = "1.21.3"
retry-policies = "0.5.0"
recloser = "1.3.1"
futures-util = "0.3.31"
typify = "0.5.0"
regress = "0.10.5"
lazy_static = "1.5.0"
async-dropper-simple = { version = "0.2.6", features = ["tokio", "no-default-bound"] }
[dev-dependencies]
mockito = "1.7.0"

View file

@@ -1,8 +0,0 @@
{
"name": "hive-console-sdk-rs",
"version": "0.3.0",
"private": true,
"scripts": {
"sync-cargo-file": "./sync-cargo-file.sh"
}
}

View file

@@ -1,39 +0,0 @@
use std::collections::VecDeque;
use tokio::sync::Mutex;
pub struct Buffer<T> {
max_size: usize,
queue: Mutex<VecDeque<T>>,
}
pub enum AddStatus<T> {
Full { drained: Vec<T> },
Ok,
}
impl<T> Buffer<T> {
pub fn new(max_size: usize) -> Self {
Self {
queue: Mutex::new(VecDeque::with_capacity(max_size)),
max_size,
}
}
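/// Adds an item. If the buffer is already at capacity, everything is drained,
/// the new item is appended to the drained batch, and `Full` is returned.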
pub async fn add(&self, item: T) -> AddStatus<T> {
let mut queue = self.queue.lock().await;
if queue.len() >= self.max_size {
let mut drained: Vec<T> = queue.drain(..).collect();
drained.push(item);
AddStatus::Full { drained }
} else {
queue.push_back(item);
AddStatus::Ok
}
}
pub async fn drain(&self) -> Vec<T> {
let mut queue = self.queue.lock().await;
queue.drain(..).collect()
}
}

View file

@@ -1,229 +0,0 @@
use std::{sync::Arc, time::Duration};
use async_dropper_simple::AsyncDropper;
use once_cell::sync::Lazy;
use recloser::AsyncRecloser;
use reqwest::header::{HeaderMap, HeaderValue};
use reqwest_middleware::ClientBuilder;
use reqwest_retry::RetryTransientMiddleware;
use crate::agent::buffer::Buffer;
use crate::agent::usage_agent::{non_empty_string, AgentError, UsageAgent, UsageAgentInner};
use crate::agent::utils::OperationProcessor;
use crate::circuit_breaker;
use retry_policies::policies::ExponentialBackoff;
pub struct UsageAgentBuilder {
token: Option<String>,
endpoint: String,
target_id: Option<String>,
buffer_size: usize,
connect_timeout: Duration,
request_timeout: Duration,
accept_invalid_certs: bool,
flush_interval: Duration,
retry_policy: ExponentialBackoff,
user_agent: Option<String>,
circuit_breaker: Option<AsyncRecloser>,
}
pub static DEFAULT_HIVE_USAGE_ENDPOINT: &str = "https://app.graphql-hive.com/usage";
impl Default for UsageAgentBuilder {
fn default() -> Self {
Self {
endpoint: DEFAULT_HIVE_USAGE_ENDPOINT.to_string(),
token: None,
target_id: None,
buffer_size: 1000,
connect_timeout: Duration::from_secs(5),
request_timeout: Duration::from_secs(15),
accept_invalid_certs: false,
flush_interval: Duration::from_secs(5),
retry_policy: ExponentialBackoff::builder().build_with_max_retries(3),
user_agent: None,
circuit_breaker: None,
}
}
}
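// Access tokens without an `hvo1/`, `hvu1/`, or `hvp1/` prefix are legacy registry tokens.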
fn is_legacy_token(token: &str) -> bool {
!token.starts_with("hvo1/") && !token.starts_with("hvu1/") && !token.starts_with("hvp1/")
}
impl UsageAgentBuilder {
/// Your [Registry Access Token](https://the-guild.dev/graphql/hive/docs/management/targets#registry-access-tokens) with write permission.
pub fn token(mut self, token: String) -> Self {
if let Some(token) = non_empty_string(Some(token)) {
self.token = Some(token);
}
self
}
/// For self-hosting, you can override the `/usage` endpoint (defaults to `https://app.graphql-hive.com/usage`).
pub fn endpoint(mut self, endpoint: String) -> Self {
if let Some(endpoint) = non_empty_string(Some(endpoint)) {
self.endpoint = endpoint;
}
self
}
/// A target ID. This can be either a slug in the format “$organizationSlug/$projectSlug/$targetSlug” (e.g. “the-guild/graphql-hive/staging”) or a UUID (e.g. “a0f4c605-6541-4350-8cfe-b31f21a4bf80”). To be used when the token is configured with an organization access token.
pub fn target_id(mut self, target_id: String) -> Self {
if let Some(target_id) = non_empty_string(Some(target_id)) {
self.target_id = Some(target_id);
}
self
}
/// The maximum number of operations to hold in the buffer before sending to Hive Console
/// Default: 1000
pub fn buffer_size(mut self, buffer_size: usize) -> Self {
self.buffer_size = buffer_size;
self
}
/// A timeout for only the connect phase of a request to Hive Console
/// Default: 5 seconds
pub fn connect_timeout(mut self, connect_timeout: Duration) -> Self {
self.connect_timeout = connect_timeout;
self
}
/// A timeout for the entire request to Hive Console
/// Default: 15 seconds
pub fn request_timeout(mut self, request_timeout: Duration) -> Self {
self.request_timeout = request_timeout;
self
}
/// Accepts invalid SSL certificates
/// Default: false
pub fn accept_invalid_certs(mut self, accept_invalid_certs: bool) -> Self {
self.accept_invalid_certs = accept_invalid_certs;
self
}
/// Frequency of flushing the buffer to the server
/// Default: 5 seconds
pub fn flush_interval(mut self, flush_interval: Duration) -> Self {
self.flush_interval = flush_interval;
self
}
/// User-Agent header to be sent with each request
pub fn user_agent(mut self, user_agent: String) -> Self {
if let Some(user_agent) = non_empty_string(Some(user_agent)) {
self.user_agent = Some(user_agent);
}
self
}
/// Retry policy for sending reports
/// Default: ExponentialBackoff with max 3 retries
pub fn retry_policy(mut self, retry_policy: ExponentialBackoff) -> Self {
self.retry_policy = retry_policy;
self
}
/// Maximum number of retries for sending reports
/// Default: ExponentialBackoff with max 3 retries
pub fn max_retries(mut self, max_retries: u32) -> Self {
self.retry_policy = ExponentialBackoff::builder().build_with_max_retries(max_retries);
self
}
pub(crate) fn build_agent(self) -> Result<UsageAgentInner, AgentError> {
let mut default_headers = HeaderMap::new();
default_headers.insert("X-Usage-API-Version", HeaderValue::from_static("2"));
let token = match self.token {
Some(token) => token,
None => return Err(AgentError::MissingToken),
};
let mut authorization_header = HeaderValue::from_str(&format!("Bearer {}", token))
.map_err(|_| AgentError::InvalidToken)?;
authorization_header.set_sensitive(true);
default_headers.insert(reqwest::header::AUTHORIZATION, authorization_header);
default_headers.insert(
reqwest::header::CONTENT_TYPE,
HeaderValue::from_static("application/json"),
);
let mut reqwest_agent = reqwest::Client::builder()
.danger_accept_invalid_certs(self.accept_invalid_certs)
.connect_timeout(self.connect_timeout)
.timeout(self.request_timeout)
.default_headers(default_headers);
if let Some(user_agent) = &self.user_agent {
reqwest_agent = reqwest_agent.user_agent(user_agent);
}
let reqwest_agent = reqwest_agent
.build()
.map_err(AgentError::HTTPClientCreationError)?;
let client = ClientBuilder::new(reqwest_agent)
.with(RetryTransientMiddleware::new_with_policy(self.retry_policy))
.build();
let mut endpoint = self.endpoint;
match self.target_id {
Some(_) if is_legacy_token(&token) => return Err(AgentError::TargetIdWithLegacyToken),
Some(target_id) if !is_legacy_token(&token) => {
let target_id = validate_target_id(&target_id)?;
endpoint.push_str(&format!("/{}", target_id));
}
None if !is_legacy_token(&token) => return Err(AgentError::MissingTargetId),
_ => {}
}
let circuit_breaker = if let Some(cb) = self.circuit_breaker {
cb
} else {
circuit_breaker::CircuitBreakerBuilder::default()
.build_async()
.map_err(AgentError::CircuitBreakerCreationError)?
};
let buffer = Buffer::new(self.buffer_size);
Ok(UsageAgentInner {
endpoint,
buffer,
processor: OperationProcessor::new(),
client,
flush_interval: self.flush_interval,
circuit_breaker,
})
}
pub fn build(self) -> Result<UsageAgent, AgentError> {
let agent = self.build_agent()?;
Ok(Arc::new(AsyncDropper::new(agent)))
}
}
// Target ID regexp for validation: slug format
static SLUG_REGEX: Lazy<regex_automata::meta::Regex> = Lazy::new(|| {
regex_automata::meta::Regex::new(r"^[a-zA-Z0-9-_]+\/[a-zA-Z0-9-_]+\/[a-zA-Z0-9-_]+$").unwrap()
});
// Target ID regexp for validation: UUID format
static UUID_REGEX: Lazy<regex_automata::meta::Regex> = Lazy::new(|| {
regex_automata::meta::Regex::new(
r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$",
)
.unwrap()
});
fn validate_target_id(target_id: &str) -> Result<&str, AgentError> {
let trimmed_s = target_id.trim();
if trimmed_s.is_empty() {
Err(AgentError::InvalidTargetId("<empty>".to_string()))
} else {
if SLUG_REGEX.is_match(trimmed_s) {
return Ok(trimmed_s);
}
if UUID_REGEX.is_match(trimmed_s) {
return Ok(trimmed_s);
}
Err(AgentError::InvalidTargetId(format!(
"Invalid target_id format: '{}'. It must be either in slug format '$organizationSlug/$projectSlug/$targetSlug' or UUID format 'a0f4c605-6541-4350-8cfe-b31f21a4bf80'",
trimmed_s
)))
}
}

View file

@@ -1,4 +0,0 @@
pub mod buffer;
pub mod builder;
pub mod usage_agent;
pub mod utils;

View file

@@ -1,453 +0,0 @@
use async_dropper_simple::{AsyncDrop, AsyncDropper};
use graphql_parser::schema::Document;
use recloser::AsyncRecloser;
use reqwest_middleware::ClientWithMiddleware;
use std::{
collections::{hash_map::Entry, HashMap},
sync::Arc,
time::Duration,
};
use thiserror::Error;
use tokio_util::sync::CancellationToken;
use crate::agent::{buffer::AddStatus, utils::OperationProcessor};
use crate::agent::{buffer::Buffer, builder::UsageAgentBuilder};
#[derive(Debug, Clone)]
pub struct ExecutionReport {
pub schema: Arc<Document<'static, String>>,
pub client_name: Option<String>,
pub client_version: Option<String>,
pub timestamp: u64,
pub duration: Duration,
pub ok: bool,
pub errors: usize,
pub operation_body: String,
pub operation_name: Option<String>,
pub persisted_document_hash: Option<String>,
}
typify::import_types!(schema = "./usage-report-v2.schema.json");
pub struct UsageAgentInner {
pub(crate) endpoint: String,
pub(crate) buffer: Buffer<ExecutionReport>,
pub(crate) processor: OperationProcessor,
pub(crate) client: ClientWithMiddleware,
pub(crate) flush_interval: Duration,
pub(crate) circuit_breaker: AsyncRecloser,
}
pub fn non_empty_string(value: Option<String>) -> Option<String> {
value.filter(|str| !str.is_empty())
}
#[derive(Error, Debug)]
pub enum AgentError {
#[error("unable to acquire lock: {0}")]
Lock(String),
#[error("unable to send report: unauthorized")]
Unauthorized,
#[error("unable to send report: no access")]
Forbidden,
#[error("unable to send report: rate limited")]
RateLimited,
#[error("missing token")]
MissingToken,
#[error("your access token requires providing a 'target_id' option.")]
MissingTargetId,
#[error("using 'target_id' with legacy tokens is not supported")]
TargetIdWithLegacyToken,
#[error("invalid token provided")]
InvalidToken,
#[error("invalid target id provided: {0}, it should be either a slug like \"$organizationSlug/$projectSlug/$targetSlug\" or an UUID")]
InvalidTargetId(String),
#[error("unable to instantiate the http client for reports sending: {0}")]
HTTPClientCreationError(reqwest::Error),
#[error("unable to create circuit breaker: {0}")]
CircuitBreakerCreationError(#[from] crate::circuit_breaker::CircuitBreakerError),
#[error("rejected by the circuit breaker")]
CircuitBreakerRejected,
#[error("unable to send report: {0}")]
Unknown(String),
}
pub type UsageAgent = Arc<AsyncDropper<UsageAgentInner>>;
#[async_trait::async_trait]
pub trait UsageAgentExt {
fn builder() -> UsageAgentBuilder {
UsageAgentBuilder::default()
}
async fn flush(&self) -> Result<(), AgentError>;
async fn start_flush_interval(&self, token: &CancellationToken);
async fn add_report(&self, execution_report: ExecutionReport) -> Result<(), AgentError>;
}
impl UsageAgentInner {
fn produce_report(&self, reports: Vec<ExecutionReport>) -> Result<Report, AgentError> {
let mut report = Report {
size: 0,
map: HashMap::new(),
operations: Vec::new(),
subscription_operations: Vec::new(),
};
// iterate over reports and check if they are valid
for op in reports {
let operation = self.processor.process(&op.operation_body, &op.schema);
match operation {
Err(e) => {
tracing::warn!(
"Dropping operation \"{}\" (phase: PROCESSING): {}",
op.operation_name
.clone()
.unwrap_or_else(|| "anonymous".to_string()),
e
);
continue;
}
Ok(operation) => match operation {
Some(operation) => {
let hash = operation.hash;
let client_name = non_empty_string(op.client_name);
let client_version = non_empty_string(op.client_version);
let metadata: Option<Metadata> =
if client_name.is_some() || client_version.is_some() {
Some(Metadata {
client: Some(Client {
name: client_name.unwrap_or_default(),
version: client_version.unwrap_or_default(),
}),
})
} else {
None
};
report.operations.push(RequestOperation {
operation_map_key: hash.clone(),
timestamp: op.timestamp,
execution: Execution {
ok: op.ok,
/*
The conversion from u128 (from op.duration.as_nanos()) to u64 using try_into().unwrap() can panic if the duration is longer than u64::MAX nanoseconds (over 584 years).
While highly unlikely, it's safer to handle this potential overflow gracefully in library code to prevent panics.
A safe alternative is to convert the Result to an Option and provide a fallback value on failure,
effectively saturating at u64::MAX.
*/
duration: op
.duration
.as_nanos()
.try_into()
.ok()
.unwrap_or(u64::MAX),
errors_total: op.errors.try_into().unwrap(),
},
persisted_document_hash: op
.persisted_document_hash
.map(PersistedDocumentHash),
metadata,
});
if let Entry::Vacant(e) = report.map.entry(ReportMapKey(hash)) {
e.insert(OperationMapRecord {
operation: operation.operation,
operation_name: non_empty_string(op.operation_name),
fields: operation.coordinates,
});
}
report.size += 1;
}
None => {
tracing::debug!(
"Dropping operation (phase: PROCESSING): probably introspection query"
);
}
},
}
}
Ok(report)
}
async fn send_report(&self, report: Report) -> Result<(), AgentError> {
if report.size == 0 {
return Ok(());
}
// Based on https://the-guild.dev/graphql/hive/docs/specs/usage-reports#data-structure
let resp_fut = self.client.post(&self.endpoint).json(&report).send();
let resp = self
.circuit_breaker
.call(resp_fut)
.await
.map_err(|e| match e {
recloser::Error::Inner(e) => AgentError::Unknown(e.to_string()),
recloser::Error::Rejected => AgentError::CircuitBreakerRejected,
})?;
match resp.status() {
reqwest::StatusCode::OK => Ok(()),
reqwest::StatusCode::UNAUTHORIZED => Err(AgentError::Unauthorized),
reqwest::StatusCode::FORBIDDEN => Err(AgentError::Forbidden),
reqwest::StatusCode::TOO_MANY_REQUESTS => Err(AgentError::RateLimited),
_ => Err(AgentError::Unknown(format!(
"({}) {}",
resp.status(),
resp.text().await.unwrap_or_default()
))),
}
}
async fn handle_drained(&self, drained: Vec<ExecutionReport>) -> Result<(), AgentError> {
if drained.is_empty() {
return Ok(());
}
let report = self.produce_report(drained)?;
self.send_report(report).await
}
async fn flush(&self) -> Result<(), AgentError> {
let execution_reports = self.buffer.drain().await;
self.handle_drained(execution_reports).await?;
Ok(())
}
}
#[async_trait::async_trait]
impl UsageAgentExt for UsageAgent {
async fn flush(&self) -> Result<(), AgentError> {
self.inner().flush().await
}
async fn start_flush_interval(&self, token: &CancellationToken) {
loop {
tokio::time::sleep(self.inner().flush_interval).await;
if token.is_cancelled() {
println!("Shutting down.");
return;
}
self.flush()
.await
.unwrap_or_else(|e| tracing::error!("Failed to flush usage reports: {}", e));
}
}
async fn add_report(&self, execution_report: ExecutionReport) -> Result<(), AgentError> {
if let AddStatus::Full { drained } = self.inner().buffer.add(execution_report).await {
self.inner().handle_drained(drained).await?;
}
Ok(())
}
}
#[async_trait::async_trait]
impl AsyncDrop for UsageAgentInner {
async fn async_drop(&mut self) {
if let Err(e) = self.flush().await {
tracing::error!("Failed to flush usage reports during drop: {}", e);
}
}
}
#[cfg(test)]
mod tests {
use std::{sync::Arc, time::Duration};
use graphql_parser::{parse_query, parse_schema};
use reqwest::header::{AUTHORIZATION, CONTENT_TYPE, USER_AGENT};
use crate::agent::usage_agent::{ExecutionReport, Report, UsageAgent, UsageAgentExt};
const CONTENT_TYPE_VALUE: &'static str = "application/json";
const GRAPHQL_CLIENT_NAME: &'static str = "Hive Client";
const GRAPHQL_CLIENT_VERSION: &'static str = "1.0.0";
#[tokio::test(flavor = "multi_thread")]
async fn should_send_data_to_hive() -> Result<(), Box<dyn std::error::Error>> {
let token = "Token";
let mut server = mockito::Server::new_async().await;
let server_url = server.url();
let timestamp = 1625247600;
let duration = Duration::from_millis(20);
let user_agent = "hive-router-sdk-test";
let mock = server
.mock("POST", "/200")
.match_header(AUTHORIZATION, format!("Bearer {}", token).as_str())
.match_header(CONTENT_TYPE, CONTENT_TYPE_VALUE)
.match_header(USER_AGENT, user_agent)
.match_header("X-Usage-API-Version", "2")
.match_request(move |request| {
let request_body = request.body().expect("Failed to extract body");
let report: Report = serde_json::from_slice(request_body)
.expect("Failed to parse request body as JSON");
assert_eq!(report.size, 1);
let record = report.map.values().next().expect("No operation record");
// operation
assert!(record.operation.contains("mutation deleteProject"));
assert_eq!(record.operation_name.as_deref(), Some("deleteProject"));
// fields
let expected_fields = vec![
"Mutation.deleteProject",
"Mutation.deleteProject.selector",
"DeleteProjectPayload.selector",
"ProjectSelector.organization",
"ProjectSelector.project",
"DeleteProjectPayload.deletedProject",
"Project.id",
"Project.cleanId",
"Project.name",
"Project.type",
"ProjectType.FEDERATION",
"ProjectType.STITCHING",
"ProjectType.SINGLE",
"ProjectType.CUSTOM",
"ProjectSelectorInput.organization",
"ID",
"ProjectSelectorInput.project",
];
for field in &expected_fields {
assert!(
record.fields.contains(&field.to_string()),
"Missing field: {}",
field
);
}
assert_eq!(
record.fields.len(),
expected_fields.len(),
"Unexpected number of fields"
);
// Operations
let operations = report.operations;
assert_eq!(operations.len(), 1); // one operation
let operation = &operations[0];
let key = report.map.keys().next().expect("No operation key");
assert_eq!(operation.operation_map_key, key.0);
assert_eq!(operation.timestamp, timestamp);
assert_eq!(operation.execution.duration, duration.as_nanos() as u64);
assert_eq!(operation.execution.ok, true);
assert_eq!(operation.execution.errors_total, 0);
true
})
.expect(1)
.with_status(200)
.create_async()
.await;
let schema: graphql_tools::static_graphql::schema::Document = parse_schema(
r#"
type Query {
project(selector: ProjectSelectorInput!): Project
projectsByType(type: ProjectType!): [Project!]!
projects(filter: FilterInput): [Project!]!
}
type Mutation {
deleteProject(selector: ProjectSelectorInput!): DeleteProjectPayload!
}
input ProjectSelectorInput {
organization: ID!
project: ID!
}
input FilterInput {
type: ProjectType
pagination: PaginationInput
}
input PaginationInput {
limit: Int
offset: Int
}
type ProjectSelector {
organization: ID!
project: ID!
}
type DeleteProjectPayload {
selector: ProjectSelector!
deletedProject: Project!
}
type Project {
id: ID!
cleanId: ID!
name: String!
type: ProjectType!
buildUrl: String
validationUrl: String
}
enum ProjectType {
FEDERATION
STITCHING
SINGLE
CUSTOM
}
"#,
)?;
let op: graphql_tools::static_graphql::query::Document = parse_query(
r#"
mutation deleteProject($selector: ProjectSelectorInput!) {
deleteProject(selector: $selector) {
selector {
organization
project
}
deletedProject {
...ProjectFields
}
}
}
fragment ProjectFields on Project {
id
cleanId
name
type
}
"#,
)?;
// Testing async drop
{
let usage_agent = UsageAgent::builder()
.token(token.into())
.endpoint(format!("{}/200", server_url))
.user_agent(user_agent.into())
.build()?;
usage_agent
.add_report(ExecutionReport {
schema: Arc::new(schema),
operation_body: op.to_string(),
operation_name: Some("deleteProject".to_string()),
client_name: Some(GRAPHQL_CLIENT_NAME.to_string()),
client_version: Some(GRAPHQL_CLIENT_VERSION.to_string()),
timestamp,
duration,
ok: true,
errors: 0,
persisted_document_hash: None,
})
.await?;
}
mock.assert_async().await;
Ok(())
}
}

File diff suppressed because it is too large

View file

@@ -1,67 +0,0 @@
use std::time::Duration;
use recloser::{AsyncRecloser, Recloser};
#[derive(Clone)]
pub struct CircuitBreakerBuilder {
error_threshold: f32,
volume_threshold: usize,
reset_timeout: Duration,
}
impl Default for CircuitBreakerBuilder {
fn default() -> Self {
Self {
error_threshold: 0.5,
volume_threshold: 5,
reset_timeout: Duration::from_secs(30),
}
}
}
#[derive(Debug, thiserror::Error)]
pub enum CircuitBreakerError {
#[error("Invalid error threshold: {0}. It must be between 0.0 and 1.0")]
InvalidErrorThreshold(f32),
}
impl CircuitBreakerBuilder {
/// Error percentage after which the circuit breaker kicks in.
/// Default: 0.5
pub fn error_threshold(mut self, percentage: f32) -> Self {
self.error_threshold = percentage;
self
}
/// Number of requests observed before the error rate starts being evaluated.
/// Default: 5
pub fn volume_threshold(mut self, threshold: usize) -> Self {
self.volume_threshold = threshold;
self
}
/// How long the circuit breaker waits before attempting to send requests again.
/// Default: 30 seconds
pub fn reset_timeout(mut self, timeout: Duration) -> Self {
self.reset_timeout = timeout;
self
}
pub fn build_async(self) -> Result<AsyncRecloser, CircuitBreakerError> {
let recloser = self.build_sync()?;
Ok(AsyncRecloser::from(recloser))
}
pub fn build_sync(self) -> Result<Recloser, CircuitBreakerError> {
let error_threshold = if self.error_threshold < 0.0 || self.error_threshold > 1.0 {
return Err(CircuitBreakerError::InvalidErrorThreshold(
self.error_threshold,
));
} else {
self.error_threshold
};
let recloser = Recloser::custom()
.error_rate(error_threshold)
.closed_len(self.volume_threshold)
.open_wait(self.reset_timeout)
.build();
Ok(recloser)
}
}

View file

@@ -1,4 +0,0 @@
pub mod agent;
pub mod circuit_breaker;
pub mod persisted_documents;
pub mod supergraph_fetcher;

View file

@@ -1,326 +0,0 @@
use std::time::Duration;
use crate::agent::usage_agent::non_empty_string;
use crate::circuit_breaker::CircuitBreakerBuilder;
use moka::future::Cache;
use recloser::AsyncRecloser;
use reqwest::header::HeaderMap;
use reqwest::header::HeaderValue;
use reqwest_middleware::ClientBuilder;
use reqwest_middleware::ClientWithMiddleware;
use reqwest_retry::RetryTransientMiddleware;
use retry_policies::policies::ExponentialBackoff;
use tracing::{debug, info, warn};
#[derive(Debug)]
pub struct PersistedDocumentsManager {
client: ClientWithMiddleware,
cache: Cache<String, String>,
endpoints_with_circuit_breakers: Vec<(String, AsyncRecloser)>,
}
#[derive(Debug, thiserror::Error)]
pub enum PersistedDocumentsError {
#[error("Failed to read body: {0}")]
FailedToReadBody(String),
#[error("Failed to parse body: {0}")]
FailedToParseBody(serde_json::Error),
#[error("Persisted document not found.")]
DocumentNotFound,
#[error("Failed to locate the persisted document key in request.")]
KeyNotFound,
#[error("Failed to validate persisted document")]
FailedToFetchFromCDN(reqwest_middleware::Error),
#[error("Failed to read CDN response body")]
FailedToReadCDNResponse(reqwest::Error),
#[error("No persisted document provided, or document id cannot be resolved.")]
PersistedDocumentRequired,
#[error("Missing required configuration option: {0}")]
MissingConfigurationOption(String),
#[error("Invalid CDN key {0}")]
InvalidCDNKey(String),
#[error("Failed to create HTTP client: {0}")]
HTTPClientCreationError(reqwest::Error),
#[error("unable to create circuit breaker: {0}")]
CircuitBreakerCreationError(#[from] crate::circuit_breaker::CircuitBreakerError),
#[error("rejected by the circuit breaker")]
CircuitBreakerRejected,
#[error("unknown error")]
Unknown,
}
impl PersistedDocumentsError {
pub fn message(&self) -> String {
self.to_string()
}
pub fn code(&self) -> String {
match self {
PersistedDocumentsError::FailedToReadBody(_) => "FAILED_TO_READ_BODY".into(),
PersistedDocumentsError::FailedToParseBody(_) => "FAILED_TO_PARSE_BODY".into(),
PersistedDocumentsError::DocumentNotFound => "PERSISTED_DOCUMENT_NOT_FOUND".into(),
PersistedDocumentsError::KeyNotFound => "PERSISTED_DOCUMENT_KEY_NOT_FOUND".into(),
PersistedDocumentsError::FailedToFetchFromCDN(_) => "FAILED_TO_FETCH_FROM_CDN".into(),
PersistedDocumentsError::FailedToReadCDNResponse(_) => {
"FAILED_TO_READ_CDN_RESPONSE".into()
}
PersistedDocumentsError::PersistedDocumentRequired => {
"PERSISTED_DOCUMENT_REQUIRED".into()
}
PersistedDocumentsError::MissingConfigurationOption(_) => {
"MISSING_CONFIGURATION_OPTION".into()
}
PersistedDocumentsError::InvalidCDNKey(_) => "INVALID_CDN_KEY".into(),
PersistedDocumentsError::HTTPClientCreationError(_) => {
"HTTP_CLIENT_CREATION_ERROR".into()
}
PersistedDocumentsError::CircuitBreakerCreationError(_) => {
"CIRCUIT_BREAKER_CREATION_ERROR".into()
}
PersistedDocumentsError::CircuitBreakerRejected => "CIRCUIT_BREAKER_REJECTED".into(),
PersistedDocumentsError::Unknown => "UNKNOWN_ERROR".into(),
}
}
}
impl PersistedDocumentsManager {
pub fn builder() -> PersistedDocumentsManagerBuilder {
PersistedDocumentsManagerBuilder::default()
}
async fn resolve_from_endpoint(
&self,
endpoint: &str,
document_id: &str,
circuit_breaker: &AsyncRecloser,
) -> Result<String, PersistedDocumentsError> {
let cdn_document_id = str::replace(document_id, "~", "/");
let cdn_artifact_url = format!("{}/apps/{}", endpoint, cdn_document_id);
info!(
"Fetching document {} from CDN: {}",
document_id, cdn_artifact_url
);
let response_fut = self.client.get(cdn_artifact_url).send();
let response = circuit_breaker
.call(response_fut)
.await
.map_err(|e| match e {
recloser::Error::Inner(e) => PersistedDocumentsError::FailedToFetchFromCDN(e),
recloser::Error::Rejected => PersistedDocumentsError::CircuitBreakerRejected,
})?;
if response.status().is_success() {
let document = response
.text()
.await
.map_err(PersistedDocumentsError::FailedToReadCDNResponse)?;
debug!(
"Document fetched from CDN: {}, storing in local cache",
document
);
self.cache
.insert(document_id.into(), document.clone())
.await;
return Ok(document);
}
warn!(
"Document fetch from CDN failed: HTTP {}, Body: {:?}",
response.status(),
response
.text()
.await
.unwrap_or_else(|_| "Unavailable".to_string())
);
Err(PersistedDocumentsError::DocumentNotFound)
}
/// Resolves the document from the cache, or from the CDN
pub async fn resolve_document(
&self,
document_id: &str,
) -> Result<String, PersistedDocumentsError> {
let cached_record = self.cache.get(document_id).await;
match cached_record {
Some(document) => {
debug!("Document {} found in cache: {}", document_id, document);
Ok(document)
}
None => {
debug!(
"Document {} not found in cache. Fetching from CDN",
document_id
);
let mut last_error: Option<PersistedDocumentsError> = None;
for (endpoint, circuit_breaker) in &self.endpoints_with_circuit_breakers {
let result = self
.resolve_from_endpoint(endpoint, document_id, circuit_breaker)
.await;
match result {
Ok(document) => return Ok(document),
Err(e) => {
last_error = Some(e);
}
}
}
match last_error {
Some(e) => Err(e),
None => Err(PersistedDocumentsError::Unknown),
}
}
}
}
}
pub struct PersistedDocumentsManagerBuilder {
key: Option<String>,
endpoints: Vec<String>,
accept_invalid_certs: bool,
connect_timeout: Duration,
request_timeout: Duration,
retry_policy: ExponentialBackoff,
cache_size: u64,
user_agent: Option<String>,
circuit_breaker: CircuitBreakerBuilder,
}
impl Default for PersistedDocumentsManagerBuilder {
fn default() -> Self {
Self {
key: None,
endpoints: vec![],
accept_invalid_certs: false,
connect_timeout: Duration::from_secs(5),
request_timeout: Duration::from_secs(15),
retry_policy: ExponentialBackoff::builder().build_with_max_retries(3),
cache_size: 10_000,
user_agent: None,
circuit_breaker: CircuitBreakerBuilder::default(),
}
}
}
impl PersistedDocumentsManagerBuilder {
/// The CDN access token from the Hive Console target.
pub fn key(mut self, key: String) -> Self {
self.key = non_empty_string(Some(key));
self
}
/// The CDN endpoint from the Hive Console target.
pub fn add_endpoint(mut self, endpoint: String) -> Self {
if let Some(endpoint) = non_empty_string(Some(endpoint)) {
self.endpoints.push(endpoint);
}
self
}
/// Accept invalid SSL certificates
/// default: false
pub fn accept_invalid_certs(mut self, accept_invalid_certs: bool) -> Self {
self.accept_invalid_certs = accept_invalid_certs;
self
}
/// Connection timeout for the Hive Console CDN requests.
/// Default: 5 seconds
pub fn connect_timeout(mut self, connect_timeout: Duration) -> Self {
self.connect_timeout = connect_timeout;
self
}
/// Request timeout for the Hive Console CDN requests.
/// Default: 15 seconds
pub fn request_timeout(mut self, request_timeout: Duration) -> Self {
self.request_timeout = request_timeout;
self
}
/// Retry policy for fetching persisted documents
/// Default: ExponentialBackoff with max 3 retries
pub fn retry_policy(mut self, retry_policy: ExponentialBackoff) -> Self {
self.retry_policy = retry_policy;
self
}
/// Maximum number of retries for fetching persisted documents
/// Default: ExponentialBackoff with max 3 retries
pub fn max_retries(mut self, max_retries: u32) -> Self {
self.retry_policy = ExponentialBackoff::builder().build_with_max_retries(max_retries);
self
}
/// Size of the in-memory cache for persisted documents
/// Default: 10,000 entries
pub fn cache_size(mut self, cache_size: u64) -> Self {
self.cache_size = cache_size;
self
}
/// User-Agent header to be sent with each request
pub fn user_agent(mut self, user_agent: String) -> Self {
self.user_agent = non_empty_string(Some(user_agent));
self
}
pub fn build(self) -> Result<PersistedDocumentsManager, PersistedDocumentsError> {
let mut default_headers = HeaderMap::new();
let key = match self.key {
Some(key) => key,
None => {
return Err(PersistedDocumentsError::MissingConfigurationOption(
"key".to_string(),
));
}
};
default_headers.insert(
"X-Hive-CDN-Key",
HeaderValue::from_str(&key)
.map_err(|e| PersistedDocumentsError::InvalidCDNKey(e.to_string()))?,
);
let mut reqwest_agent = reqwest::Client::builder()
.danger_accept_invalid_certs(self.accept_invalid_certs)
.connect_timeout(self.connect_timeout)
.timeout(self.request_timeout)
.default_headers(default_headers);
if let Some(user_agent) = self.user_agent {
reqwest_agent = reqwest_agent.user_agent(user_agent);
}
let reqwest_agent = reqwest_agent
.build()
.map_err(PersistedDocumentsError::HTTPClientCreationError)?;
let client = ClientBuilder::new(reqwest_agent)
.with(RetryTransientMiddleware::new_with_policy(self.retry_policy))
.build();
let cache = Cache::<String, String>::new(self.cache_size);
if self.endpoints.is_empty() {
return Err(PersistedDocumentsError::MissingConfigurationOption(
"endpoints".to_string(),
));
}
Ok(PersistedDocumentsManager {
client,
cache,
endpoints_with_circuit_breakers: self
.endpoints
.into_iter()
.map(move |endpoint| {
let circuit_breaker = self
.circuit_breaker
.clone()
.build_async()
.map_err(PersistedDocumentsError::CircuitBreakerCreationError)?;
Ok((endpoint, circuit_breaker))
})
.collect::<Result<Vec<(String, AsyncRecloser)>, PersistedDocumentsError>>()?,
})
}
}

View file

@@ -1,141 +0,0 @@
use futures_util::TryFutureExt;
use recloser::AsyncRecloser;
use reqwest::header::{HeaderValue, IF_NONE_MATCH};
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
use reqwest_retry::RetryTransientMiddleware;
use tokio::sync::RwLock;
use crate::supergraph_fetcher::{
builder::SupergraphFetcherBuilder, SupergraphFetcher, SupergraphFetcherError,
};
#[derive(Debug)]
pub struct SupergraphFetcherAsyncState {
endpoints_with_circuit_breakers: Vec<(String, AsyncRecloser)>,
reqwest_client: ClientWithMiddleware,
}
impl SupergraphFetcher<SupergraphFetcherAsyncState> {
pub async fn fetch_supergraph(&self) -> Result<Option<String>, SupergraphFetcherError> {
let mut last_error: Option<SupergraphFetcherError> = None;
let mut last_resp = None;
for (endpoint, circuit_breaker) in &self.state.endpoints_with_circuit_breakers {
let mut req = self.state.reqwest_client.get(endpoint);
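// Send If-None-Match with the last seen ETag so an unchanged supergraph is not re-downloaded.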
let etag = self.get_latest_etag().await;
if let Some(etag) = etag {
req = req.header(IF_NONE_MATCH, etag);
}
let resp_fut = async {
let mut resp = req.send().await.map_err(SupergraphFetcherError::Network);
// Server errors (5xx) are considered errors
if let Ok(ok_res) = resp {
resp = if ok_res.status().is_server_error() {
return Err(SupergraphFetcherError::Network(
reqwest_middleware::Error::Middleware(anyhow::anyhow!(
"Server error: {}",
ok_res.status()
)),
));
} else {
Ok(ok_res)
}
}
resp
};
let resp = circuit_breaker
.call(resp_fut)
// Map recloser errors to SupergraphFetcherError
.map_err(|e| match e {
recloser::Error::Inner(e) => e,
recloser::Error::Rejected => SupergraphFetcherError::RejectedByCircuitBreaker,
})
.await;
match resp {
Err(err) => {
last_error = Some(err);
continue;
}
Ok(resp) => {
last_resp = Some(resp);
break;
}
}
}
if let Some(last_resp) = last_resp {
let etag = last_resp.headers().get("etag");
self.update_latest_etag(etag).await;
let text = last_resp
.text()
.await
.map_err(SupergraphFetcherError::ResponseParse)?;
Ok(Some(text))
} else if let Some(error) = last_error {
Err(error)
} else {
Ok(None)
}
}
async fn get_latest_etag(&self) -> Option<HeaderValue> {
let guard = self.etag.read().await;
guard.clone()
}
async fn update_latest_etag(&self, etag: Option<&HeaderValue>) {
let mut guard = self.etag.write().await;
*guard = etag.cloned();
}
}
impl SupergraphFetcherBuilder {
/// Builds an asynchronous SupergraphFetcher
pub fn build_async(
self,
) -> Result<SupergraphFetcher<SupergraphFetcherAsyncState>, SupergraphFetcherError> {
self.validate_endpoints()?;
let headers = self.prepare_headers()?;
let mut reqwest_agent = reqwest::Client::builder()
.danger_accept_invalid_certs(self.accept_invalid_certs)
.connect_timeout(self.connect_timeout)
.timeout(self.request_timeout)
.default_headers(headers);
if let Some(user_agent) = self.user_agent {
reqwest_agent = reqwest_agent.user_agent(user_agent);
}
let reqwest_agent = reqwest_agent
.build()
.map_err(SupergraphFetcherError::HTTPClientCreation)?;
let reqwest_client = ClientBuilder::new(reqwest_agent)
.with(RetryTransientMiddleware::new_with_policy(self.retry_policy))
.build();
Ok(SupergraphFetcher {
state: SupergraphFetcherAsyncState {
reqwest_client,
endpoints_with_circuit_breakers: self
.endpoints
.into_iter()
.map(|endpoint| {
let circuit_breaker = self
.circuit_breaker
.clone()
.unwrap_or_default()
.build_async()
.map_err(SupergraphFetcherError::CircuitBreakerCreation);
circuit_breaker.map(|cb| (endpoint, cb))
})
.collect::<Result<Vec<_>, _>>()?,
},
etag: RwLock::new(None),
})
}
}

View file

@@ -1,135 +0,0 @@
use std::time::Duration;
use reqwest::header::{HeaderMap, HeaderValue};
use retry_policies::policies::ExponentialBackoff;
use crate::{
agent::usage_agent::non_empty_string, circuit_breaker::CircuitBreakerBuilder,
supergraph_fetcher::SupergraphFetcherError,
};
pub struct SupergraphFetcherBuilder {
pub(crate) endpoints: Vec<String>,
pub(crate) key: Option<String>,
pub(crate) user_agent: Option<String>,
pub(crate) connect_timeout: Duration,
pub(crate) request_timeout: Duration,
pub(crate) accept_invalid_certs: bool,
pub(crate) retry_policy: ExponentialBackoff,
pub(crate) circuit_breaker: Option<CircuitBreakerBuilder>,
}
impl Default for SupergraphFetcherBuilder {
fn default() -> Self {
Self {
endpoints: vec![],
key: None,
user_agent: None,
connect_timeout: Duration::from_secs(5),
request_timeout: Duration::from_secs(60),
accept_invalid_certs: false,
retry_policy: ExponentialBackoff::builder().build_with_max_retries(3),
circuit_breaker: None,
}
}
}
impl SupergraphFetcherBuilder {
pub fn new() -> Self {
Self::default()
}
/// The CDN endpoint from the Hive Console target.
pub fn add_endpoint(mut self, endpoint: String) -> Self {
if let Some(mut endpoint) = non_empty_string(Some(endpoint)) {
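// Normalize the endpoint so it always targets the CDN's `/supergraph` artifact.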
if !endpoint.ends_with("/supergraph") {
if endpoint.ends_with("/") {
endpoint.push_str("supergraph");
} else {
endpoint.push_str("/supergraph");
}
}
self.endpoints.push(endpoint);
}
self
}
/// The CDN access token from the Hive Console target.
pub fn key(mut self, key: String) -> Self {
self.key = Some(key);
self
}
/// User-Agent header to be sent with each request
pub fn user_agent(mut self, user_agent: String) -> Self {
self.user_agent = Some(user_agent);
self
}
/// Connection timeout for the Hive Console CDN requests.
/// Default: 5 seconds
pub fn connect_timeout(mut self, timeout: Duration) -> Self {
self.connect_timeout = timeout;
self
}
/// Request timeout for the Hive Console CDN requests.
/// Default: 60 seconds
pub fn request_timeout(mut self, timeout: Duration) -> Self {
self.request_timeout = timeout;
self
}
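/// Accept invalid SSL certificates
/// Default: false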
pub fn accept_invalid_certs(mut self, accept: bool) -> Self {
self.accept_invalid_certs = accept;
self
}
/// Policy for retrying failed requests.
///
/// By default, an exponential backoff retry policy is used, with a maximum of 3 retries.
pub fn retry_policy(mut self, retry_policy: ExponentialBackoff) -> Self {
self.retry_policy = retry_policy;
self
}
    /// Maximum number of retries for failed requests.
    ///
    /// Convenience over `retry_policy`: replaces the policy with an exponential backoff capped at `max_retries` (default: 3).
pub fn max_retries(mut self, max_retries: u32) -> Self {
self.retry_policy = ExponentialBackoff::builder().build_with_max_retries(max_retries);
self
}
    /// Circuit breaker configuration applied to each CDN endpoint.
    pub fn circuit_breaker(mut self, builder: CircuitBreakerBuilder) -> Self {
        self.circuit_breaker = Some(builder);
        self
    }
pub(crate) fn validate_endpoints(&self) -> Result<(), SupergraphFetcherError> {
if self.endpoints.is_empty() {
return Err(SupergraphFetcherError::MissingConfigurationOption(
"endpoint".to_string(),
));
}
Ok(())
}
pub(crate) fn prepare_headers(&self) -> Result<HeaderMap, SupergraphFetcherError> {
let key = match &self.key {
Some(key) => key,
None => {
return Err(SupergraphFetcherError::MissingConfigurationOption(
"key".to_string(),
))
}
};
let mut headers = HeaderMap::new();
let mut cdn_key_header =
HeaderValue::from_str(key).map_err(SupergraphFetcherError::InvalidKey)?;
cdn_key_header.set_sensitive(true);
headers.insert("X-Hive-CDN-Key", cdn_key_header);
Ok(headers)
}
}
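Worth noting about `add_endpoint`: empty strings are dropped, and a missing `/supergraph` path segment is appended, so the following hypothetical inputs (placeholder URLs) all normalize to the same endpoint.

// Each of these three calls stores "https://cdn.example.com/foo/supergraph".
let builder = SupergraphFetcherBuilder::new()
    .add_endpoint("https://cdn.example.com/foo".to_string())
    .add_endpoint("https://cdn.example.com/foo/".to_string())
    .add_endpoint("https://cdn.example.com/foo/supergraph".to_string());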

View file

@ -1,51 +0,0 @@
use tokio::sync::RwLock;
use tokio::sync::TryLockError;
use crate::circuit_breaker::CircuitBreakerError;
use crate::supergraph_fetcher::async_::SupergraphFetcherAsyncState;
use reqwest::header::HeaderValue;
use reqwest::header::InvalidHeaderValue;
pub mod async_;
pub mod builder;
pub mod sync;
#[derive(Debug)]
pub struct SupergraphFetcher<State> {
state: State,
etag: RwLock<Option<HeaderValue>>,
}
// It doesn't matter which state we implement this for; both share the same builder.
impl SupergraphFetcher<SupergraphFetcherAsyncState> {
pub fn builder() -> builder::SupergraphFetcherBuilder {
builder::SupergraphFetcherBuilder::default()
}
}
pub enum LockErrorType {
Read,
Write,
}
#[derive(Debug, thiserror::Error)]
pub enum SupergraphFetcherError {
#[error("Creating HTTP Client failed: {0}")]
HTTPClientCreation(reqwest::Error),
#[error("Network error: {0}")]
Network(reqwest_middleware::Error),
#[error("Parsing response failed: {0}")]
ResponseParse(reqwest::Error),
#[error("Reading the etag record failed: {0:?}")]
ETagRead(TryLockError),
#[error("Updating the etag record failed: {0:?}")]
ETagWrite(TryLockError),
#[error("Invalid CDN key: {0}")]
InvalidKey(InvalidHeaderValue),
#[error("Missing configuration option: {0}")]
MissingConfigurationOption(String),
#[error("Request rejected by circuit breaker")]
RejectedByCircuitBreaker,
#[error("Creating circuit breaker failed: {0}")]
CircuitBreakerCreation(CircuitBreakerError),
}
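Because the error type is a plain enum, callers can branch on individual failure modes. A hypothetical helper (not part of the SDK) might treat network errors and circuit-breaker rejections as transient:

fn is_transient(err: &SupergraphFetcherError) -> bool {
    matches!(
        err,
        SupergraphFetcherError::Network(_)
            | SupergraphFetcherError::RejectedByCircuitBreaker
    )
}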

View file

@ -1,191 +0,0 @@
use std::time::SystemTime;
use recloser::Recloser;
use reqwest::header::{HeaderValue, IF_NONE_MATCH};
use reqwest_retry::{RetryDecision, RetryPolicy};
use retry_policies::policies::ExponentialBackoff;
use tokio::sync::RwLock;
use crate::supergraph_fetcher::{
builder::SupergraphFetcherBuilder, SupergraphFetcher, SupergraphFetcherError,
};
#[derive(Debug)]
pub struct SupergraphFetcherSyncState {
endpoints_with_circuit_breakers: Vec<(String, Recloser)>,
reqwest_client: reqwest::blocking::Client,
retry_policy: ExponentialBackoff,
}
impl SupergraphFetcher<SupergraphFetcherSyncState> {
pub fn fetch_supergraph(&self) -> Result<Option<String>, SupergraphFetcherError> {
let mut last_error: Option<SupergraphFetcherError> = None;
let mut last_resp = None;
for (endpoint, circuit_breaker) in &self.state.endpoints_with_circuit_breakers {
let resp = {
circuit_breaker
.call(|| {
let request_start_time = SystemTime::now();
                        // Retry logic implemented by hand: the retry middleware
                        // used by the async fetcher only wraps the non-blocking client.
let mut n_past_retries = 0;
loop {
let mut req = self.state.reqwest_client.get(endpoint);
let etag = self.get_latest_etag()?;
if let Some(etag) = etag {
req = req.header(IF_NONE_MATCH, etag);
}
let mut response = req.send().map_err(|err| {
SupergraphFetcherError::Network(reqwest_middleware::Error::Reqwest(
err,
))
});
// Server errors (5xx) are considered retryable
if let Ok(ok_res) = response {
response = if ok_res.status().is_server_error() {
Err(SupergraphFetcherError::Network(
reqwest_middleware::Error::Middleware(anyhow::anyhow!(
"Server error: {}",
ok_res.status()
)),
))
} else {
Ok(ok_res)
}
}
match response {
Ok(resp) => break Ok(resp),
Err(e) => {
match self
.state
.retry_policy
.should_retry(request_start_time, n_past_retries)
{
RetryDecision::DoNotRetry => {
return Err(e);
}
                                        RetryDecision::Retry { execute_after } => {
                                            n_past_retries += 1;
                                            // `SystemTime::elapsed()` errors for timestamps in
                                            // the future, so compute the wait as the distance
                                            // from now to `execute_after`; if that moment has
                                            // already passed, retry immediately.
                                            if let Ok(duration) =
                                                execute_after.duration_since(SystemTime::now())
                                            {
                                                std::thread::sleep(duration);
                                            }
                                        }
}
}
}
}
})
// Map recloser errors to SupergraphFetcherError
.map_err(|e| match e {
recloser::Error::Inner(e) => e,
recloser::Error::Rejected => {
SupergraphFetcherError::RejectedByCircuitBreaker
}
})
};
match resp {
Err(e) => {
last_error = Some(e);
continue;
}
Ok(resp) => {
last_resp = Some(resp);
break;
}
}
}
if let Some(last_resp) = last_resp {
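            // 304 Not Modified: the CDN copy matches our stored ETag.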
if last_resp.status().as_u16() == 304 {
return Ok(None);
}
self.update_latest_etag(last_resp.headers().get("etag"))?;
let text = last_resp
.text()
.map_err(SupergraphFetcherError::ResponseParse)?;
Ok(Some(text))
} else if let Some(error) = last_error {
Err(error)
} else {
Ok(None)
}
}
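    // The ETag lock is accessed with try_read/try_write so a fetch never
    // blocks; contention surfaces as ETagRead/ETagWrite errors instead.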
fn get_latest_etag(&self) -> Result<Option<HeaderValue>, SupergraphFetcherError> {
let guard = self
.etag
.try_read()
.map_err(SupergraphFetcherError::ETagRead)?;
Ok(guard.clone())
}
fn update_latest_etag(&self, etag: Option<&HeaderValue>) -> Result<(), SupergraphFetcherError> {
let mut guard = self
.etag
.try_write()
.map_err(SupergraphFetcherError::ETagWrite)?;
if let Some(etag_value) = etag {
*guard = Some(etag_value.clone());
} else {
*guard = None;
}
Ok(())
}
}
impl SupergraphFetcherBuilder {
/// Builds a synchronous SupergraphFetcher
pub fn build_sync(
self,
) -> Result<SupergraphFetcher<SupergraphFetcherSyncState>, SupergraphFetcherError> {
self.validate_endpoints()?;
let headers = self.prepare_headers()?;
let mut reqwest_client = reqwest::blocking::Client::builder()
.danger_accept_invalid_certs(self.accept_invalid_certs)
.connect_timeout(self.connect_timeout)
.timeout(self.request_timeout)
.default_headers(headers);
if let Some(user_agent) = &self.user_agent {
reqwest_client = reqwest_client.user_agent(user_agent);
}
let reqwest_client = reqwest_client
.build()
.map_err(SupergraphFetcherError::HTTPClientCreation)?;
let fetcher = SupergraphFetcher {
state: SupergraphFetcherSyncState {
reqwest_client,
retry_policy: self.retry_policy,
endpoints_with_circuit_breakers: self
.endpoints
.into_iter()
.map(|endpoint| {
let circuit_breaker = self
.circuit_breaker
.clone()
.unwrap_or_default()
.build_sync()
.map_err(SupergraphFetcherError::CircuitBreakerCreation);
circuit_breaker.map(|cb| (endpoint, cb))
})
.collect::<Result<Vec<_>, _>>()?,
},
etag: RwLock::new(None),
};
Ok(fetcher)
}
}
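Putting the sync pieces together, a hypothetical polling loop could look like the sketch below; `apply_supergraph` and the ten-second interval are illustrative, not part of the SDK. Note that `fetch_supergraph()` returns `Ok(None)` when the CDN answers 304 Not Modified, so only `Some` payloads should trigger a schema reload.

fn poll_supergraph(fetcher: &SupergraphFetcher<SupergraphFetcherSyncState>) {
    loop {
        match fetcher.fetch_supergraph() {
            // Fresh supergraph SDL: hand it to the router.
            Ok(Some(sdl)) => apply_supergraph(&sdl),
            // 304 Not Modified (or no endpoint yielded a response):
            // keep serving the current schema.
            Ok(None) => {}
            Err(err) => eprintln!("supergraph fetch failed: {err}"),
        }
        std::thread::sleep(std::time::Duration::from_secs(10));
    }
}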

View file

@ -1,14 +0,0 @@
#!/bin/bash
# This script syncs the "version" field in package.json to the "package.version" field in Cargo.toml.
# The main versioning flow is managed by Changesets; this script runs during "changeset version"
# (when the version is bumped and the release PR is created) to keep Cargo.toml in sync.
# References:
# .github/workflows/publish-rust.yaml - The GitHub action that runs this script (after "changeset version")
# .github/workflows/main-rust.yaml - The GitHub action that does the actual publishing, if a changeset is declared for this package/crate
npm_version=$(node -p "require('./package.json').version")
cargo install set-cargo-version
set-cargo-version ./Cargo.toml "$npm_version"

View file

@ -1 +0,0 @@
../../services/usage/usage-report-v2.schema.json

View file

@ -618,13 +618,7 @@ importers:
version: 16.9.0
publishDirectory: dist
packages/libraries/router:
dependencies:
hive-console-sdk-rs:
specifier: workspace:*
version: link:../sdk-rs
packages/libraries/sdk-rs: {}
packages/libraries/router: {}
packages/libraries/yoga:
dependencies: