Initial version of a plugin for Apollo Router (#266)

This commit is contained in:
Kamil Kisiela 2022-08-08 14:58:22 +02:00 committed by GitHub
parent 206f1ddcb4
commit 449b0f6e37
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
18 changed files with 6018 additions and 41 deletions

View file

@ -83,6 +83,200 @@ jobs:
VERSION: ${{ steps.cli.outputs.version }}
run: yarn oclif promote --no-xz --sha ${GITHUB_SHA:0:7} --version $VERSION || yarn oclif promote --no-xz --sha ${GITHUB_SHA:0:8} --version $VERSION
# Gate job: detects whether the router package changed in this push and
# exposes the answer to the per-platform publish jobs.
publish_rust:
  name: Publish Rust
  runs-on: ubuntu-latest
  # BUG FIX: without this job-level outputs mapping, the step output never
  # propagates, so `needs.publish_rust.outputs.rust_changed` is always empty
  # and the publish_rust_{windows,macos,linux} jobs would never run.
  outputs:
    rust_changed: ${{ steps.rust_changed.outputs.rust_changed }}
  steps:
    - uses: actions/checkout@v3
      with:
        # Need the previous commit so `git diff HEAD~ HEAD` works.
        fetch-depth: 2
    - name: Look for changes
      id: rust_changed
      run: |
        lines=$( git diff HEAD~ HEAD --name-only -- 'packages/libraries/router' | wc -l )
        if [ $lines -gt 0 ]; then
          # NOTE: ::set-output is deprecated by GitHub; when upgrading, use
          # echo "rust_changed=true" >> "$GITHUB_OUTPUT" instead.
          echo '::set-output name=rust_changed::true'
        fi
publish_rust_windows:
needs: publish_rust
if: needs.publish_rust.outputs.rust_changed == 'true'
name: Publish for Windows
runs-on: windows-latest
timeout-minutes: 40
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Cache Rust
uses: Swatinem/rust-cache@v1
- name: Build
uses: actions-rs/cargo@v1
with:
command: build
args: --release
- name: Compress
run: ./target/release/compress ./target/release/router.exe ./router.tar.gz
- name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: router-win
path: router.tar.gz
- name: Upload to R2
uses: randomairborne/r2-release@v1.0.2
with:
endpoint: https://6d5bc18cd8d13babe7ed321adba3d8ae.r2.cloudflarestorage.com
accesskeyid: ${{ secrets.R2_ACCESS_KEY_ID }}
secretaccesskey: ${{ secrets.R2_SECRET_ACCESS_KEY }}
bucket: apollo-router
file: router.tar.gz
destination: ${{ github.sha }}/win/router.tar.gz
- name: Upload to R2 as latest
uses: randomairborne/r2-release@v1.0.2
with:
endpoint: https://6d5bc18cd8d13babe7ed321adba3d8ae.r2.cloudflarestorage.com
accesskeyid: ${{ secrets.R2_ACCESS_KEY_ID }}
secretaccesskey: ${{ secrets.R2_SECRET_ACCESS_KEY }}
bucket: apollo-router
file: router.tar.gz
destination: latest/win/router.tar.gz
publish_rust_macos:
needs: publish_rust
if: needs.publish_rust.outputs.rust_changed == 'true'
name: Publish for MacOS
runs-on: macos-latest
timeout-minutes: 40
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Node
uses: actions/setup-node@v2
with:
node-version: 16
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Cache Rust
uses: Swatinem/rust-cache@v1
- name: Build
uses: actions-rs/cargo@v1
with:
command: build
args: --release
- name: Strip binary from debug symbols
run: strip target/release/router
- name: Compress
run: ./target/release/compress ./target/release/router ./router.tar.gz
- name: Upload artifact
uses: actions/upload-artifact@v3
with:
name: router-macos
path: router.tar.gz
- name: Upload to R2
uses: randomairborne/r2-release@v1.0.2
with:
endpoint: https://6d5bc18cd8d13babe7ed321adba3d8ae.r2.cloudflarestorage.com
accesskeyid: ${{ secrets.R2_ACCESS_KEY_ID }}
secretaccesskey: ${{ secrets.R2_SECRET_ACCESS_KEY }}
bucket: apollo-router
file: router.tar.gz
destination: ${{ github.sha }}/macos/router.tar.gz
- name: Upload to R2 as latest
uses: randomairborne/r2-release@v1.0.2
with:
endpoint: https://6d5bc18cd8d13babe7ed321adba3d8ae.r2.cloudflarestorage.com
accesskeyid: ${{ secrets.R2_ACCESS_KEY_ID }}
secretaccesskey: ${{ secrets.R2_SECRET_ACCESS_KEY }}
bucket: apollo-router
file: router.tar.gz
destination: latest/macos/router.tar.gz
publish_rust_linux:
needs: publish_rust
if: needs.publish_rust.outputs.rust_changed == 'true'
name: Publish for Linux
runs-on: ubuntu-latest
timeout-minutes: 40
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Node
uses: actions/setup-node@v2
with:
node-version: 16
- name: Install Rust
run: |
curl https://sh.rustup.rs -sSf | sh -s -- -y
rustup target add x86_64-unknown-linux-gnu
- name: Cache Rust
uses: Swatinem/rust-cache@v1
with:
key: rust_linux_cross
- name: Build
run: cargo build --target x86_64-unknown-linux-gnu --release
- name: Strip binary from debug symbols
run: strip target/x86_64-unknown-linux-gnu/release/router
- name: Compress
run: ./target/x86_64-unknown-linux-gnu/release/compress ./target/x86_64-unknown-linux-gnu/release/router ./router.tar.gz
- name: Upload artifact
uses: actions/upload-artifact@v3
with:
name: router-linux
path: router.tar.gz
- name: Upload to R2
uses: randomairborne/r2-release@v1.0.2
with:
endpoint: https://6d5bc18cd8d13babe7ed321adba3d8ae.r2.cloudflarestorage.com
accesskeyid: ${{ secrets.R2_ACCESS_KEY_ID }}
secretaccesskey: ${{ secrets.R2_SECRET_ACCESS_KEY }}
bucket: apollo-router
file: router.tar.gz
destination: ${{ github.sha }}/linux/router.tar.gz
- name: Upload to R2 as latest
uses: randomairborne/r2-release@v1.0.2
with:
endpoint: https://6d5bc18cd8d13babe7ed321adba3d8ae.r2.cloudflarestorage.com
accesskeyid: ${{ secrets.R2_ACCESS_KEY_ID }}
secretaccesskey: ${{ secrets.R2_SECRET_ACCESS_KEY }}
bucket: apollo-router
file: router.tar.gz
destination: latest/linux/router.tar.gz
deploy:
name: 'deploy to staging'
needs: publish

View file

@ -37,7 +37,8 @@ jobs:
with:
fetch-depth: 1
- uses: actions/setup-node@v2
- name: Install Node
uses: actions/setup-node@v2
with:
node-version: 16
@ -83,7 +84,8 @@ jobs:
with:
fetch-depth: 5
- uses: actions/setup-node@v2
- name: Install Node
uses: actions/setup-node@v2
with:
node-version: 16
@ -142,7 +144,8 @@ jobs:
with:
fetch-depth: 5
- uses: actions/setup-node@v2
- name: Install Node
uses: actions/setup-node@v2
with:
node-version: 16
@ -181,7 +184,8 @@ jobs:
with:
fetch-depth: 1
- uses: actions/setup-node@v2
- name: Install Node
uses: actions/setup-node@v2
with:
node-version: 16
@ -201,7 +205,7 @@ jobs:
name: Build
runs-on: ubuntu-latest
needs: setup
timeout-minutes: 5
timeout-minutes: 10
env:
TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }}
@ -215,7 +219,8 @@ jobs:
with:
fetch-depth: 5
- uses: actions/setup-node@v2
- name: Install Node
uses: actions/setup-node@v2
with:
node-version: 16
@ -234,11 +239,52 @@ jobs:
- name: Build
run: yarn build
# CI job: cross-compile the Rust router plugin for x86_64 Linux and publish
# the result as a workflow artifact.
rust:
  name: Build Rust
  runs-on: ubuntu-latest
  timeout-minutes: 40
  steps:
    - name: Checkout
      uses: actions/checkout@v3
      with:
        fetch-depth: 2
    - name: Install Node
      uses: actions/setup-node@v2
      with:
        node-version: 16
    - name: Install Rust
      run: |
        curl https://sh.rustup.rs -sSf | sh -s -- -y
        rustup target add x86_64-unknown-linux-gnu
    - name: Cache Rust
      uses: Swatinem/rust-cache@v1
      with:
        # Separate cache key from native builds since this is a cross target.
        key: rust_linux_cross
    - name: Build
      run: cargo build --target x86_64-unknown-linux-gnu --release
    - name: Strip binary from debug symbols
      run: strip target/x86_64-unknown-linux-gnu/release/router
    - name: Compress
      run: ./target/x86_64-unknown-linux-gnu/release/compress ./target/x86_64-unknown-linux-gnu/release/router ./router.tar.gz
    - name: Upload artifact
      uses: actions/upload-artifact@v3
      with:
        name: router-linux
        # NOTE(review): the Compress step above produces ./router.tar.gz, but
        # this uploads the raw binary, so the compressed archive goes unused
        # here — confirm which artifact is intended (the publish workflow
        # uploads router.tar.gz).
        path: target/x86_64-unknown-linux-gnu/release/router
lint:
name: Lint
runs-on: ubuntu-latest
needs: setup
timeout-minutes: 15
timeout-minutes: 25
env:
TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }}

3
.gitignore vendored
View file

@ -112,3 +112,6 @@ integration-tests/testkit/gql/
*.pem
/.husky/_/
# Rust
/target

4307
Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

5
Cargo.toml Normal file
View file

@ -0,0 +1,5 @@
[workspace]
members = [
"packages/libraries/router",
"scripts/compress"
]

View file

@ -80,37 +80,39 @@ The `collectUsage` method accepts the same arguments as execute function of grap
- `finish(result)` (function returned by `collectUsage(args)`) - has to be invoked right after execution finishes.
```ts
import express from 'express';
import { graphqlHTTP } from 'express-graphql';
import { createHive } from '@graphql-hive/client';
import express from 'express'
import { graphqlHTTP } from 'express-graphql'
import { createHive } from '@graphql-hive/client'
const app = express();
const app = express()
const hive = createHive({
enabled: true, // Enable/Disable Hive Client
debug: true, // Debugging mode
token: 'YOUR-TOKEN',
reporting: {
// feel free to set dummy values here
// feel free to set dummy values here
author: 'Author of the latest change',
commit: 'git sha or any identifier',
commit: 'git sha or any identifier'
},
usage: true, // Collects schema usage based operations
},
});
usage: true // Collects schema usage based operations
})
// Report Schema
hive.reportSchema({ schema: yourSchema });
hive.reportSchema({ schema: yourSchema })
app.post("/graphql", graphqlHTTP({
schema: yourSchema,
async customExecuteFn(args) {
// Collecting usage
const finish = hive.collectUsage(args);
const result = await execute(args);
finish(result);
return result;
}
}));
app.post(
'/graphql',
graphqlHTTP({
schema: yourSchema,
async customExecuteFn(args) {
// Collecting usage
const finish = hive.collectUsage(args)
const result = await execute(args)
finish(result)
return result
}
})
)
```
#### Using the registry when Stitching
@ -154,21 +156,20 @@ startMyGraphQLGateway({
You can connect your Apollo Gateway with Hive client.
The `experimental_pollInterval` value is up to you. Apollo Gateway uses 10s (10_000 ms) by default but we think it's better to fetch a supergraph more often.
- `HIVE_CDN_ENDPOINT` - the endpoint Hive generated for you in the previous step
- `HIVE_CDN_KEY` - the access key
```ts
import { createSupergraphSDLFetcher } from '@graphql-hive/client'
import { createSupergraphManager } from '@graphql-hive/client'
import { ApolloGateway } from '@apollo/gateway'
import { ApolloServer } from 'apollo-server'
const gateway = new ApolloGateway({
experimental_pollInterval: 10_000, // define the poll interval (in ms)
experimental_updateSupergraphSdl: createSupergraphFetcher({
// Apollo Gateway will fetch Supergraph from GraphQL Hive CDN
supergraphSdl: createSupergraphManager({
endpoint: HIVE_CDN_ENDPOINT,
key: HIVE_CDN_KEY
key: HIVE_CDN_KEY,
pollIntervalInMs: 15_000
})
})

View file

@ -0,0 +1,31 @@
[package]
name = "graphql-hive-router"
version = "0.0.1"
authors = ["Kamil Kisiela <kamil.kisiela@gmail.com>"]
edition = "2021"
license = "MIT"
publish = false
[[bin]]
name = "router"
path = "src/main.rs"
[dependencies]
apollo-router = { git = "https://github.com/apollographql/router.git", branch = "main" }
reqwest = { version = "0.11.11", default-features = false, features = ["rustls-tls", "blocking", "json"] }
sha2 = { version = "0.10.2", features = ["std"] }
anyhow = "1"
tracing = "0.1"
async-trait = "0.1"
futures = "0.3"
schemars = { version = "0.8", features = ["url"] }
serde = "1"
serde_json = "1"
tokio = { version = "1", features = ["full"] }
tower = { version = "0.4", features = ["full"] }
http = "0.2"
graphql-parser = { git = "https://github.com/kamilkisiela/graphql-parser.git", branch = "kamil-minifier" }
graphql-tools = { git = "https://github.com/dotansimha/graphql-tools-rs.git", branch = "kamil-transformer" }
lru = "0.7.8"
md5 = "0.7.0"
rand = "0.8.5"

View file

@ -0,0 +1,295 @@
use super::graphql::OperationProcessor;
use graphql_parser::schema::{parse_schema, Document};
use serde::Serialize;
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio::time::sleep;
static COMMIT: Option<&'static str> = option_env!("GITHUB_SHA");
#[derive(Serialize, Debug)]
struct Report {
size: usize,
map: HashMap<String, OperationMapRecord>,
operations: Vec<Operation>,
}
#[allow(non_snake_case)]
#[derive(Serialize, Debug)]
struct OperationMapRecord {
operation: String,
operationName: Option<String>,
fields: Vec<String>,
}
#[allow(non_snake_case)]
#[derive(Serialize, Debug)]
struct Operation {
operationMapKey: String,
timestamp: u64,
execution: Execution,
metadata: Option<Metadata>,
}
#[allow(non_snake_case)]
#[derive(Serialize, Debug)]
struct Execution {
ok: bool,
duration: u128,
errorsTotal: usize,
}
#[derive(Serialize, Debug)]
struct Metadata {
client: Option<ClientInfo>,
}
#[derive(Serialize, Debug)]
struct ClientInfo {
name: Option<String>,
version: Option<String>,
}
#[derive(Debug, Clone)]
pub struct ExecutionReport {
pub client_name: Option<String>,
pub client_version: Option<String>,
pub timestamp: u64,
pub duration: Duration,
pub ok: bool,
pub errors: usize,
pub operation_body: String,
pub operation_name: Option<String>,
}
#[derive(Debug, Clone)]
pub struct State {
buffer: VecDeque<ExecutionReport>,
schema: Document<'static, String>,
}
impl State {
    /// Create an empty report buffer bound to the parsed supergraph schema.
    fn new(schema: Document<'static, String>) -> Self {
        Self {
            buffer: VecDeque::new(),
            schema,
        }
    }
    /// Append a report and return the new buffer length; the caller uses the
    /// returned size to decide whether to flush early.
    pub fn push(&mut self, report: ExecutionReport) -> usize {
        self.buffer.push_back(report);
        self.buffer.len()
    }
    /// Remove and return every buffered report, leaving the buffer empty.
    pub fn drain(&mut self) -> Vec<ExecutionReport> {
        self.buffer.drain(0..).collect::<Vec<ExecutionReport>>()
    }
}
#[derive(Clone)]
pub struct UsageAgent {
pub sender: mpsc::Sender<ExecutionReport>,
token: String,
endpoint: String,
state: Arc<Mutex<State>>,
processor: Arc<Mutex<OperationProcessor>>,
}
/// Collapse `Some("")` to `None`; every other value passes through unchanged.
fn non_empty_string(value: Option<String>) -> Option<String> {
    value.filter(|s| !s.is_empty())
}
impl UsageAgent {
pub fn new(
schema: String,
token: String,
endpoint: String,
_shutdown_signal: Option<oneshot::Receiver<()>>,
) -> Self {
let (tx, mut rx) = mpsc::channel::<ExecutionReport>(1024);
let schema = parse_schema::<String>(&schema)
.expect("parsed schema")
.into_static();
let state = Arc::new(Mutex::new(State::new(schema)));
let processor = Arc::new(Mutex::new(OperationProcessor::new()));
let agent = Self {
sender: tx,
state,
processor,
endpoint,
token,
};
let agent_for_report_receiver = agent.clone();
let agent_for_interval = agent.clone();
// TODO: make this working
// tokio::task::spawn(async move {
// if let Some(shutdown_signal) = shutdown_signal {
// tracing::info!("waiting for shutdown signal");
// shutdown_signal.await.expect("shutdown signal");
// tracing::info!("Flushing reports because of shutdown signal");
// // agent_for_shutdown_signal.clone().flush().await;
// // tracing::info!("Flushed because of shutdown");
// }
// tracing::info!("Closing spawn for shutdown signal receiver");
// });
tokio::task::spawn(async move {
while let Some(execution_report) = rx.recv().await {
agent_for_report_receiver
.clone()
.add_report(execution_report)
.await
}
});
tokio::task::spawn(async move {
loop {
sleep(Duration::from_secs(5)).await;
agent_for_interval.clone().flush().await;
}
});
agent
}
fn produce_report(&mut self, reports: Vec<ExecutionReport>) -> Report {
let mut report = Report {
size: 0,
map: HashMap::new(),
operations: Vec::new(),
};
let schema = self.state.lock().unwrap().schema.clone();
// iterate over reports and check if they are valid
for op in reports {
let operation = self
.processor
.lock()
.expect("lock normalizer")
.process(&op.operation_body, &schema);
match operation {
Err(e) => {
tracing::warn!("Dropping operation: {}", e);
continue;
}
Ok(operation) => {
let hash = operation.hash;
report.operations.push(Operation {
operationMapKey: hash.clone(),
timestamp: op.timestamp,
execution: Execution {
ok: op.ok,
duration: op.duration.as_nanos(),
errorsTotal: op.errors,
},
metadata: Some(Metadata {
client: Some(ClientInfo {
name: non_empty_string(op.client_name),
version: non_empty_string(op.client_version),
}),
}),
});
if !report.map.contains_key(&hash) {
report.map.insert(
hash,
OperationMapRecord {
operation: operation.operation,
operationName: non_empty_string(op.operation_name),
fields: operation.coordinates,
},
);
}
report.size += 1;
}
}
}
report
}
async fn send_report(&self, report: Report) -> Result<(), String> {
let client = reqwest::Client::new();
let mut delay = Duration::from_millis(0);
let mut error_message = "Unexpected error".to_string();
for _ in 0..3 {
let resp = client
.post(self.endpoint.clone())
.header(
reqwest::header::AUTHORIZATION,
format!("Bearer {}", self.token.clone()),
)
.header(
reqwest::header::USER_AGENT,
format!("graphql-hive-router@{}", COMMIT.unwrap_or_else(|| "local")),
)
.json(&report)
.send()
.await
.map_err(|e| e.to_string())?;
match resp.status() {
reqwest::StatusCode::OK => {
return Ok(());
}
reqwest::StatusCode::BAD_REQUEST => {
return Err("Token is missing".to_string());
}
reqwest::StatusCode::FORBIDDEN => {
return Err("No access".to_string());
}
_ => {
error_message = format!(
"Could not send usage report: ({}) {}",
resp.status().as_str(),
resp.text().await.unwrap_or_default()
);
}
}
delay += Duration::from_millis(500);
tokio::time::sleep(delay).await;
}
Err(error_message)
}
async fn add_report(&mut self, execution_report: ExecutionReport) {
let size = self
.state
.lock()
.expect("lock state")
.push(execution_report);
self.flush_if_full(size).await;
}
async fn flush_if_full(&mut self, size: usize) {
if size >= 5 {
self.flush().await;
}
}
pub async fn flush(&mut self) {
let execution_reports = self.state.clone().lock().unwrap().drain();
let size = execution_reports.len();
if size > 0 {
let report = self.produce_report(execution_reports);
match self.send_report(report).await {
Ok(_) => tracing::debug!("Reported {} operations", size),
Err(e) => tracing::error!("{}", e),
}
}
}
}

View file

@ -0,0 +1,477 @@
use graphql_tools::ast::ext::SchemaDocumentExtension;
use graphql_tools::ast::TypeDefinitionExtension;
use lru::LruCache;
use md5;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use graphql_parser::minify_query;
use graphql_parser::parse_query;
use graphql_parser::query::{
Directive, Document, Field, FragmentDefinition, Number, Selection, SelectionSet, Text, Type,
Value, VariableDefinition,
};
use graphql_parser::schema::{Document as SchemaDocument, TypeDefinition};
use graphql_tools::ast::{
visit_document, OperationTransformer, OperationVisitor, OperationVisitorContext, Transformed,
TransformedValue,
};
struct SchemaCoordinatesContext {
pub schema_coordinates: HashSet<String>,
pub input_types_to_collect: HashSet<String>,
}
/// Walk an operation `document` and collect every schema coordinate
/// ("Type.field", "Input.field", "Enum.VALUE", "Type.field.arg") it touches,
/// resolved against `schema`.
pub fn collect_schema_coordinates(
    document: &Document<'static, String>,
    schema: &SchemaDocument<'static, String>,
) -> HashSet<String> {
    let mut ctx = SchemaCoordinatesContext {
        schema_coordinates: HashSet::new(),
        input_types_to_collect: HashSet::new(),
    };
    let mut visit_context = OperationVisitorContext::new(document, schema);
    let mut visitor = SchemaCoordinatesVisitor {};
    visit_document(&mut visitor, &document, &mut visit_context, &mut ctx);

    // Expand every referenced input object type into per-field coordinates.
    for input_type_name in ctx.input_types_to_collect {
        // FIX: the original `.unwrap()` panicked when an operation referenced
        // a type name the (possibly stale) schema does not define; silently
        // skipping unresolved names just drops those coordinates instead.
        match schema.type_by_name(&input_type_name) {
            Some(TypeDefinition::InputObject(input_type)) => {
                for field in input_type.fields {
                    ctx.schema_coordinates
                        .insert(format!("{}.{}", input_type_name, field.name));
                }
            }
            _ => {}
        }
    }

    ctx.schema_coordinates
}
struct SchemaCoordinatesVisitor {}
impl SchemaCoordinatesVisitor {
fn resolve_type_name<'a>(&self, t: Type<'a, String>) -> String {
match t {
Type::NamedType(value) => return value,
Type::ListType(t) => return self.resolve_type_name(*t),
Type::NonNullType(t) => return self.resolve_type_name(*t),
}
}
}
impl<'a> OperationVisitor<'a, SchemaCoordinatesContext> for SchemaCoordinatesVisitor {
fn enter_field(
&mut self,
info: &mut OperationVisitorContext<'a>,
ctx: &mut SchemaCoordinatesContext,
field: &Field<'static, String>,
) {
let parent_name = info.current_parent_type().unwrap().name();
let field_name = field.name.to_string();
ctx.schema_coordinates
.insert(format!("{}.{}", parent_name, field_name));
}
fn enter_variable_definition(
&mut self,
_: &mut OperationVisitorContext<'a>,
ctx: &mut SchemaCoordinatesContext,
var: &graphql_tools::static_graphql::query::VariableDefinition,
) {
ctx.input_types_to_collect
.insert(self.resolve_type_name(var.var_type.clone()));
}
fn enter_argument(
&mut self,
info: &mut OperationVisitorContext<'a>,
ctx: &mut SchemaCoordinatesContext,
arg: &(String, Value<'static, String>),
) {
let type_name = info.current_parent_type().unwrap().name();
let field = info.current_field();
if let Some(field) = field {
let field_name = field.name.clone();
let arg_name = arg.0.clone();
ctx.schema_coordinates
.insert(format!("{type_name}.{field_name}.{arg_name}").to_string());
let arg_value = arg.1.clone();
match info.current_input_type() {
Some(input_type) => {
let input_type_name = input_type.name();
match arg_value {
Value::Enum(value) => {
let value_str = value.to_string();
ctx.schema_coordinates
.insert(format!("{input_type_name}.{value_str}").to_string());
}
Value::List(_) => {
// handled by enter_list_value
}
Value::Object(_) => {
// handled by enter_object_field
}
_ => {
ctx.input_types_to_collect.insert(input_type_name);
}
}
}
None => {}
}
}
}
fn enter_list_value(
&mut self,
info: &mut OperationVisitorContext<'a>,
ctx: &mut SchemaCoordinatesContext,
values: &Vec<Value<'static, String>>,
) {
if let Some(input_type) = info.current_input_type() {
for value in values {
match value {
Value::Object(_) => {
// object fields are handled by enter_object_field
}
_ => {
ctx.input_types_to_collect.insert(input_type.name());
}
}
}
}
}
fn enter_object_field(
&mut self,
info: &mut OperationVisitorContext<'a>,
ctx: &mut SchemaCoordinatesContext,
(name, value): &(String, Value<'static, String>),
) {
let input_type = info.current_input_type();
if let Some(input_type) = input_type {
ctx.schema_coordinates
.insert(format!("{}.{}", input_type.name(), name));
let input_type_name = input_type.name();
match value {
Value::Enum(value) => {
// Collect only a specific enum value
let value_str = value.to_string();
ctx.schema_coordinates
.insert(format!("{input_type_name}.{value_str}").to_string());
}
Value::List(_) => {
// handled by enter_list_value
}
Value::Object(_) => {
// handled by enter_object_field
}
_ => {
ctx.input_types_to_collect.insert(input_type_name);
}
}
}
}
}
struct StripLiteralsTransformer {}
impl<'a, T: Text<'a> + Clone> OperationTransformer<'a, T> for StripLiteralsTransformer {
fn transform_value(&mut self, node: &Value<'a, T>) -> TransformedValue<Value<'a, T>> {
match node {
Value::Float(_) => TransformedValue::Replace(Value::Float(0.0)),
Value::Int(_) => TransformedValue::Replace(Value::Int(Number::from(0))),
Value::String(_) => TransformedValue::Replace(Value::String(String::from(""))),
Value::Variable(_) => TransformedValue::Keep,
Value::Boolean(_) => TransformedValue::Keep,
Value::Null => TransformedValue::Keep,
Value::Enum(_) => TransformedValue::Keep,
Value::List(val) => {
let items: Vec<Value<'a, T>> = val
.iter()
.map(|item| self.transform_value(item).replace_or_else(|| item.clone()))
.collect();
TransformedValue::Replace(Value::List(items))
}
Value::Object(fields) => {
let fields: BTreeMap<T::Value, Value<'a, T>> = fields
.iter()
.map(|field| {
let (name, value) = field;
let new_value = self
.transform_value(value)
.replace_or_else(|| value.clone());
(name.clone(), new_value)
})
.collect();
TransformedValue::Replace(Value::Object(fields))
}
}
}
fn transform_field(
&mut self,
field: &graphql_parser::query::Field<'a, T>,
) -> Transformed<graphql_parser::query::Selection<'a, T>> {
let selection_set = self.transform_selection_set(&field.selection_set);
let arguments = self.transform_arguments(&field.arguments);
let directives = self.transform_directives(&field.directives);
Transformed::Replace(Selection::Field(Field {
arguments: arguments.replace_or_else(|| field.arguments.clone()),
directives: directives.replace_or_else(|| field.directives.clone()),
selection_set: SelectionSet {
items: selection_set.replace_or_else(|| field.selection_set.items.clone()),
span: field.selection_set.span,
},
position: field.position,
alias: None,
name: field.name.clone(),
}))
}
}
/// Identity key for an AST node based on its memory address, used to memoize
/// the transform of a node within one traversal.
#[derive(Hash, Eq, PartialEq, Clone, Copy)]
pub struct PointerAddress(usize);

impl PointerAddress {
    /// Capture the address of `ptr`.
    ///
    /// FIX: the original used `unsafe { std::mem::transmute(ptr) }` to turn a
    /// reference into a `usize`; a plain pointer-to-integer cast is equivalent,
    /// safe, and expresses the intent directly — no `unsafe` needed.
    pub fn new<T>(ptr: &T) -> Self {
        Self(ptr as *const T as usize)
    }
}
type Seen<'s, T> = HashMap<PointerAddress, Transformed<Selection<'s, T>>>;
pub struct SortSelectionsTransform<'s, T: Text<'s> + Clone> {
seen: Seen<'s, T>,
}
impl<'s, T: Text<'s> + Clone> SortSelectionsTransform<'s, T> {
pub fn new() -> Self {
Self {
seen: Default::default(),
}
}
}
impl<'s, T: Text<'s> + Clone> OperationTransformer<'s, T> for SortSelectionsTransform<'s, T> {
fn transform_selection_set(
&mut self,
selections: &SelectionSet<'s, T>,
) -> TransformedValue<Vec<Selection<'s, T>>> {
let mut next_selections = self
.transform_list(&selections.items, Self::transform_selection)
.replace_or_else(|| selections.items.to_vec());
next_selections.sort_unstable_by(|a, b| self.compare_selections(a, b));
TransformedValue::Replace(next_selections)
}
fn transform_directives(
&mut self,
directives: &Vec<Directive<'s, T>>,
) -> TransformedValue<Vec<Directive<'s, T>>> {
let mut next_directives = self
.transform_list(&directives, Self::transform_directive)
.replace_or_else(|| directives.to_vec());
next_directives.sort_unstable_by(|a, b| self.compare_directives(a, b));
TransformedValue::Replace(next_directives)
}
fn transform_arguments(
&mut self,
arguments: &[(T::Value, Value<'s, T>)],
) -> TransformedValue<Vec<(T::Value, Value<'s, T>)>> {
let mut next_arguments = self
.transform_list(&arguments, Self::transform_argument)
.replace_or_else(|| arguments.to_vec());
next_arguments.sort_unstable_by(|a, b| self.compare_arguments(a, b));
TransformedValue::Replace(next_arguments)
}
fn transform_variable_definitions(
&mut self,
variable_definitions: &Vec<VariableDefinition<'s, T>>,
) -> TransformedValue<Vec<VariableDefinition<'s, T>>> {
let mut next_variable_definitions = self
.transform_list(&variable_definitions, Self::transform_variable_definition)
.replace_or_else(|| variable_definitions.to_vec());
next_variable_definitions.sort_unstable_by(|a, b| self.compare_variable_definitions(a, b));
TransformedValue::Replace(next_variable_definitions)
}
fn transform_fragment(
&mut self,
fragment: &FragmentDefinition<'s, T>,
) -> Transformed<FragmentDefinition<'s, T>> {
let mut directives = fragment.directives.clone();
directives.sort_unstable_by_key(|var| var.name.clone());
let selections = self.transform_selection_set(&fragment.selection_set);
Transformed::Replace(FragmentDefinition {
selection_set: SelectionSet {
items: selections.replace_or_else(|| fragment.selection_set.items.clone()),
span: fragment.selection_set.span.clone(),
},
directives,
name: fragment.name.clone(),
position: fragment.position.clone(),
type_condition: fragment.type_condition.clone(),
})
}
fn transform_selection(
&mut self,
selection: &Selection<'s, T>,
) -> Transformed<Selection<'s, T>> {
match selection {
Selection::InlineFragment(selection) => {
let key = PointerAddress::new(selection);
if let Some(prev) = self.seen.get(&key) {
return prev.clone();
}
let transformed = self.transform_inline_fragment(selection);
self.seen.insert(key, transformed.clone());
transformed
}
Selection::Field(field) => {
let key = PointerAddress::new(field);
if let Some(prev) = self.seen.get(&key) {
return prev.clone();
}
let transformed = self.transform_field(field);
self.seen.insert(key, transformed.clone());
transformed
}
Selection::FragmentSpread(_) => Transformed::Keep,
}
}
}
impl<'s, T: Text<'s> + Clone> SortSelectionsTransform<'s, T> {
fn compare_selections(&self, a: &Selection<'s, T>, b: &Selection<'s, T>) -> Ordering {
match (a, b) {
(Selection::Field(a), Selection::Field(b)) => a.name.cmp(&b.name),
(Selection::FragmentSpread(a), Selection::FragmentSpread(b)) => {
a.fragment_name.cmp(&b.fragment_name)
}
_ => {
let a_ordering = selection_kind_ordering(a);
let b_ordering = selection_kind_ordering(b);
assert!(
a_ordering != b_ordering,
"expected different ordering, got {} == {}",
a_ordering,
b_ordering
);
a_ordering.cmp(&b_ordering)
}
}
}
fn compare_directives(&self, a: &Directive<'s, T>, b: &Directive<'s, T>) -> Ordering {
a.name.cmp(&b.name)
}
fn compare_arguments(
&self,
a: &(T::Value, Value<'s, T>),
b: &(T::Value, Value<'s, T>),
) -> Ordering {
a.0.cmp(&b.0)
}
fn compare_variable_definitions(
&self,
a: &VariableDefinition<'s, T>,
b: &VariableDefinition<'s, T>,
) -> Ordering {
a.name.cmp(&b.name)
}
}
/// Assigns an order to different variants of Selection.
fn selection_kind_ordering<'s, T: Text<'s>>(selection: &Selection<'s, T>) -> u8 {
match selection {
Selection::FragmentSpread(_) => 1,
Selection::InlineFragment(_) => 2,
Selection::Field(_) => 3,
}
}
#[derive(Clone)]
pub struct ProcessedOperation {
pub operation: String,
pub hash: String,
pub coordinates: Vec<String>,
}
pub struct OperationProcessor {
cache: LruCache<String, ProcessedOperation>,
}
impl OperationProcessor {
    pub fn new() -> OperationProcessor {
        OperationProcessor {
            // Bounded cache: a flood of unique queries cannot grow memory unboundedly.
            cache: LruCache::new(1000),
        }
    }

    /// Normalize `query` against `schema`, memoizing the result keyed by the
    /// raw query string. Returns the processor's cached result on a hit.
    pub fn process(
        &mut self,
        query: &str,
        schema: &SchemaDocument<'static, String>,
    ) -> Result<ProcessedOperation, String> {
        let key = query.to_string();
        // FIX: the original did `contains()` followed by `get()` — two hash
        // lookups and two LRU-order touches per hit. A single `get` probe is
        // sufficient and equivalent.
        if let Some(cached) = self.cache.get(&key) {
            return Ok(cached.clone());
        }
        let result = self.transform(query, schema)?;
        self.cache.put(key, result.clone());
        Ok(result)
    }

    /// Parse the operation, strip literal values, sort selections/arguments/
    /// directives deterministically, minify, and fingerprint it with md5.
    /// Also collects the schema coordinates the operation touches.
    fn transform(
        &self,
        operation: &str,
        schema: &SchemaDocument<'static, String>,
    ) -> Result<ProcessedOperation, String> {
        let mut strip_literals_transformer = StripLiteralsTransformer {};
        let parsed = parse_query(operation)
            .map_err(|e| e.to_string())?
            .into_static();
        let schema_coordinates: Vec<String> =
            Vec::from_iter(collect_schema_coordinates(&parsed, schema));
        let normalized = strip_literals_transformer
            .transform_document(&parsed)
            .replace_or_else(|| parsed.clone());
        let normalized = SortSelectionsTransform::new()
            .transform_document(&normalized)
            .replace_or_else(|| normalized.clone());
        // The minified, normalized text is what gets hashed — the hash keys
        // the usage report's operation map.
        let printed = minify_query(format!("{}", normalized)).map_err(|e| e.to_string())?;
        let hash = format!("{:x}", md5::compute(&printed));
        Ok(ProcessedOperation {
            operation: printed,
            hash,
            coordinates: schema_coordinates,
        })
    }
}

View file

@ -0,0 +1,3 @@
mod agent;
mod graphql;
pub mod usage;

View file

@ -0,0 +1,24 @@
use registry::HiveRegistry;
mod agent;
mod graphql;
mod registry;
mod usage;
/// Entry point: bootstrap the Hive registry (supergraph fetch + polling),
/// then hand control to the Apollo Router. Any initialization error is
/// printed to stderr and terminates the process with exit code 1.
fn main() {
    if let Err(e) = HiveRegistry::new() {
        eprintln!("{}", e);
        std::process::exit(1);
    }
    if let Err(e) = apollo_router::main() {
        eprintln!("{}", e);
        std::process::exit(1);
    }
}

View file

@ -0,0 +1,113 @@
use anyhow::Result;
use sha2::Digest;
use sha2::Sha256;
use std::env;
use std::io::Write;
use std::thread;
#[derive(Debug, Clone)]
pub struct HiveRegistry {
endpoint: String,
key: String,
file_name: String,
}
impl HiveRegistry {
    /// Reads the `HIVE_CDN_*` environment variables, fetches the initial
    /// supergraph to disk, points Apollo Router at that file (with hot reload
    /// enabled), and spawns a background thread that re-polls the CDN.
    ///
    /// Returns `Ok(())` on success and also when NEITHER variable is set
    /// (Hive is then simply not used); returns `Err` when only one of the
    /// two variables is present. Exits the process if the initial fetch fails.
    pub fn new() -> Result<(), String> {
        let endpoint = env::var("HIVE_CDN_ENDPOINT").unwrap_or_default();
        let key = env::var("HIVE_CDN_KEY").unwrap_or_default();
        //.map_err(|_| "environment variable HIVE_CDN_KEY not found")?;
        // .map_err(|_| "environment variable HIVE_CDN_ENDPOINT not found")?;
        // Neither variable set: opting out of Hive is not an error.
        if endpoint.is_empty() && key.is_empty() {
            tracing::info!("You're not using GraphQL Hive as the source of schema.");
            tracing::info!(
                "Reason: could not find HIVE_CDN_KEY and HIVE_CDN_ENDPOINT environment variables."
            );
            return Ok(());
        }
        // Exactly one of the two set: configuration mistake, report it.
        if endpoint.is_empty() {
            return Err("environment variable HIVE_CDN_ENDPOINT not found".to_string());
        }
        if key.is_empty() {
            return Err("environment variable HIVE_CDN_KEY not found".to_string());
        }
        let file_name = "supergraph-schema.graphql".to_string();
        // Poll interval in seconds; defaults to 10. Panics on a non-numeric
        // HIVE_CDN_POLL_INTERVAL value.
        let poll_interval: u64 = env::var("HIVE_CDN_POLL_INTERVAL")
            // .or_else::<String, std::env::VarError>(Ok("10".to_string()))?
            .unwrap_or_else(|_| "10".to_string())
            .parse()
            .expect("failed to parse HIVE_CDN_POLL_INTERVAL");
        // Tell Apollo Router where the schema lives and to watch it for changes.
        env::set_var("APOLLO_ROUTER_SUPERGRAPH_PATH", file_name.clone());
        env::set_var("APOLLO_ROUTER_HOT_RELOAD", "true");
        let registry = HiveRegistry {
            endpoint,
            key,
            file_name,
        };
        // The first fetch is mandatory: without a schema on disk the router
        // cannot start, so a failure here aborts the process.
        match registry.initial_supergraph() {
            Ok(_) => {
                tracing::info!("Successfully fetched and saved supergraph");
            }
            Err(e) => {
                eprintln!("{}", e);
                std::process::exit(1);
            }
        }
        // Detached polling thread; runs for the lifetime of the process.
        thread::spawn(move || loop {
            thread::sleep(std::time::Duration::from_secs(poll_interval));
            registry.poll()
        });
        Ok(())
    }
    /// Performs a blocking GET of `<endpoint>/supergraph` authenticated with
    /// the CDN key and returns the response body as a string.
    ///
    /// NOTE(review): the HTTP status is not checked — a non-2xx body (e.g. an
    /// error page) would be returned as if it were SDL; TODO confirm upstream
    /// always responds 200 or fails the connection.
    fn fetch_supergraph(&self) -> Result<String, String> {
        let client = reqwest::blocking::Client::new();
        let resp = client
            .get(format!("{}/supergraph", self.endpoint))
            .header("X-Hive-CDN-Key", self.key.to_string())
            .send()
            .map_err(|e| e.to_string())?;
        Ok(resp.text().map_err(|e| e.to_string())?)
    }
    /// Fetches the supergraph once and writes it to `self.file_name`,
    /// creating (or truncating) the file.
    fn initial_supergraph(&self) -> Result<(), String> {
        let mut file = std::fs::File::create(self.file_name.clone()).map_err(|e| e.to_string())?;
        let resp = self.fetch_supergraph()?;
        file.write_all(resp.as_bytes()).map_err(|e| e.to_string())?;
        Ok(())
    }
    /// One polling step: compares the SHA-256 of the on-disk schema with a
    /// freshly fetched one and rewrites the file only when they differ, so the
    /// router's hot reload is not triggered needlessly.
    ///
    /// Panics if the local file cannot be read or written; fetch errors are
    /// only logged so a transient CDN outage does not kill the poll loop.
    fn poll(&self) {
        let current_file =
            std::fs::read_to_string(self.file_name.clone()).expect("Could not read file");
        let current_supergraph_hash = hash(current_file.as_bytes());
        match self.fetch_supergraph() {
            Ok(new_supergraph) => {
                let new_supergraph_hash = hash(new_supergraph.as_bytes());
                if current_supergraph_hash != new_supergraph_hash {
                    tracing::info!("New supergraph detected!");
                    std::fs::write(self.file_name.clone(), new_supergraph)
                        .expect("Could not write file");
                }
            }
            Err(e) => tracing::error!("{}", e),
        }
    }
}
/// Returns the SHA-256 digest of `bytes` as an uppercase hex string.
fn hash(bytes: &[u8]) -> String {
    // One-shot digest; `{:X}` renders the output array as uppercase hex,
    // exactly like hashing via update()/finalize() would.
    format!("{:X}", Sha256::digest(bytes))
}

View file

@ -0,0 +1,293 @@
use apollo_router::layers::ServiceBuilderExt;
use apollo_router::plugin::Plugin;
use apollo_router::plugin::PluginInit;
use apollo_router::register_plugin;
use apollo_router::services::RouterRequest;
use apollo_router::services::RouterResponse;
use apollo_router::Context;
use core::ops::Drop;
use futures::StreamExt;
use http::HeaderValue;
use rand::Rng;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::env;
use std::time::Instant;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tower::util::BoxService;
use tower::BoxError;
use tower::ServiceBuilder;
use tower::ServiceExt;
use crate::agent::{ExecutionReport, UsageAgent};
// Router `Context` key under which the per-request `OperationContext` is stored.
pub(crate) static OPERATION_CONTEXT: &str = "hive::operation_context";
// Per-request metadata captured before execution and read back when the
// response completes; stored in the router `Context`, hence Serialize/Deserialize.
#[derive(Serialize, Deserialize)]
struct OperationContext {
    pub(crate) client_name: Option<String>,
    pub(crate) client_version: Option<String>,
    // Unix timestamp in milliseconds (second precision, multiplied by 1000).
    pub(crate) timestamp: u64,
    // The GraphQL document (query/mutation/subscription) being executed.
    pub(crate) operation_body: String,
    pub(crate) operation_name: Option<String>,
    // True when the operation was sampled out or excluded by name; such
    // operations are forwarded but not reported to Hive.
    pub(crate) dropped: bool,
}
// Apollo Router plugin that reports operation usage to GraphQL Hive.
struct UsagePlugin {
    #[allow(dead_code)]
    config: Config,
    // Background agent that receives `ExecutionReport`s over a channel.
    agent: UsageAgent,
    // Fired once in `Drop` to signal the agent to shut down.
    shutdown_signal: Option<oneshot::Sender<()>>,
}
#[derive(Clone, Debug, Deserialize, JsonSchema)]
struct Config {
    /// Sample rate to determine sampling.
    /// 0.0 = 0% chance of being sent
    /// 1.0 = 100% chance of being sent.
    /// Default: 1.0
    sample_rate: Option<f64>,
    /// A list of operations (by name) to be ignored by Hive.
    exclude: Option<Vec<String>>,
    /// Header carrying the client name. Default: `graphql-client-name`.
    client_name_header: Option<String>,
    /// Header carrying the client version. Default: `graphql-client-version`.
    client_version_header: Option<String>,
}
impl Default for Config {
    /// Defaults: report every operation (sample rate 1.0), exclude none, and
    /// fall back to Hive's standard client headers (resolved at request time).
    fn default() -> Self {
        Self {
            sample_rate: Some(1.0),
            exclude: None,
            client_name_header: None,
            client_version_header: None,
        }
    }
}
impl UsagePlugin {
    /// Builds an `OperationContext` from the incoming request and stores it in
    /// the router `Context` under `OPERATION_CONTEXT` so the response path can
    /// emit a usage report.
    fn populate_context(config: Config, req: &RouterRequest) {
        let context = &req.context;
        let http_request = &req.originating_request;
        let headers = http_request.headers();
        // Header names are configurable; these are the Hive defaults.
        let client_name_header = config
            .client_name_header
            .unwrap_or("graphql-client-name".to_string());
        let client_version_header = config
            .client_version_header
            .unwrap_or("graphql-client-version".to_string());
        let client_name = headers
            .get(client_name_header)
            .cloned()
            .unwrap_or_else(|| HeaderValue::from_static(""))
            .to_str()
            .ok()
            .map(|v| v.to_string());
        let client_version = headers
            .get(client_version_header)
            .cloned()
            .unwrap_or_else(|| HeaderValue::from_static(""))
            .to_str()
            .ok()
            .map(|v| v.to_string());
        let operation_name = req.originating_request.body().operation_name.clone();
        // NOTE(review): panics when the request carries no `query` — TODO
        // confirm the router guarantees a body at this stage.
        let operation_body = req
            .originating_request
            .body()
            .query
            .clone()
            .expect("operation body");
        let sample_rate = config.sample_rate;
        let excluded_operation_names: HashSet<String> = config
            .exclude
            .unwrap_or_default()
            .into_iter()
            .collect();
        let mut rng = rand::thread_rng();
        // `sample_rate` is the probability of an operation being SENT
        // (0.0 = never, 1.0 = always), so we drop it when the random draw
        // exceeds the rate. The previous comparison (`num <= rate`) inverted
        // this and dropped everything at the default rate of 1.0.
        let mut dropped = match sample_rate {
            Some(rate) => {
                let num: f64 = rng.gen();
                num > rate
            }
            None => false,
        };
        // Operations excluded by name are never reported.
        if !dropped {
            if let Some(name) = operation_name.clone() {
                if excluded_operation_names.contains(&name) {
                    dropped = true;
                }
            }
        }
        if dropped {
            tracing::debug!("Dropped the operation");
        }
        let _ = context.insert(
            OPERATION_CONTEXT,
            OperationContext {
                dropped,
                client_name,
                client_version,
                operation_name,
                operation_body,
                // Unix time in milliseconds (second precision).
                timestamp: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs()
                    * 1000,
            },
        );
    }
    /// Queues an `ExecutionReport` on the agent channel without blocking;
    /// logs an error when the buffer is full or the receiver is gone.
    pub fn add_report(sender: mpsc::Sender<ExecutionReport>, report: ExecutionReport) {
        // `try_send` takes `&self`; no need to clone the sender first.
        if let Err(e) = sender.try_send(report) {
            tracing::error!("Failed to send report: {}", e);
        }
    }
}
#[async_trait::async_trait]
impl Plugin for UsagePlugin {
    type Config = Config;
    /// Creates the plugin. Requires the `HIVE_TOKEN` environment variable;
    /// `HIVE_ENDPOINT` is optional and defaults to the hosted Hive usage API.
    /// Starts a `UsageAgent` wired to a oneshot shutdown channel that is
    /// fired from `Drop`.
    async fn new(init: PluginInit<Config>) -> Result<Self, BoxError> {
        tracing::debug!("Starting GraphQL Hive Usage plugin");
        let token =
            env::var("HIVE_TOKEN").map_err(|_| "environment variable HIVE_TOKEN not found")?;
        let endpoint = env::var("HIVE_ENDPOINT");
        let endpoint = match endpoint {
            Ok(endpoint) => endpoint,
            Err(_) => "https://app.graphql-hive.com/usage".to_string(),
        };
        let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>();
        Ok(UsagePlugin {
            config: init.config,
            agent: UsageAgent::new(
                init.supergraph_sdl.to_string(),
                token,
                endpoint,
                Some(shutdown_rx),
            ),
            shutdown_signal: Some(shutdown_tx),
        })
    }
    /// Wraps the router service: `populate_context` records operation metadata
    /// on the way in, and on the way out one `ExecutionReport` is emitted per
    /// completed response (or per error).
    fn router_service(
        &self,
        service: BoxService<RouterRequest, RouterResponse, BoxError>,
    ) -> BoxService<RouterRequest, RouterResponse, BoxError> {
        let config = self.config.clone();
        let report_sender = self.agent.sender.clone();
        ServiceBuilder::new()
            .map_future_with_context(
                // Runs before the inner service: capture the operation context.
                move |req: &RouterRequest| {
                    Self::populate_context(config.clone(), req);
                    req.context.clone()
                },
                // Runs around the inner service's future: time it and report.
                move |ctx: Context, fut| {
                    let start = Instant::now();
                    let sender = report_sender.clone();
                    async move {
                        // NOTE(review): `unwrap_or_default().unwrap()` panics when
                        // the context entry is missing — relies on populate_context
                        // having run first; TODO confirm that invariant.
                        let operation_context = ctx
                            .get::<_, OperationContext>(OPERATION_CONTEXT)
                            .unwrap_or_default()
                            .unwrap();
                        // Sampled-out / excluded operations: forward, don't report.
                        if operation_context.dropped {
                            let result: Result<RouterResponse, BoxError> = fut.await;
                            return result;
                        }
                        let result: Result<RouterResponse, BoxError> = fut.await;
                        let client_name = operation_context.client_name;
                        let client_version = operation_context.client_version;
                        let operation_name = operation_context.operation_name;
                        let operation_body = operation_context.operation_body;
                        let timestamp = operation_context.timestamp;
                        let duration = start.elapsed();
                        match result {
                            // Transport-level failure: report a single error.
                            Err(e) => {
                                Self::add_report(
                                    sender.clone(),
                                    ExecutionReport {
                                        client_name,
                                        client_version,
                                        timestamp,
                                        duration,
                                        ok: false,
                                        errors: 1,
                                        operation_body,
                                        operation_name,
                                    },
                                );
                                Err(e)
                            }
                            Ok(router_response) => {
                                let is_failure = !router_response.response.status().is_success();
                                Ok(router_response.map(move |response_stream| {
                                    let sender = sender.clone();
                                    let client_name = client_name.clone();
                                    let client_version = client_version.clone();
                                    let operation_body = operation_body.clone();
                                    let operation_name = operation_name.clone();
                                    response_stream
                                        .map(move |response| {
                                            // NOTE(review): intent was "a single report,
                                            // not for each chunk", but this closure runs
                                            // for every item of the stream — deferred
                                            // responses may emit multiple reports; confirm.
                                            let response_has_errors = !response.errors.is_empty();
                                            Self::add_report(
                                                sender.clone(),
                                                ExecutionReport {
                                                    client_name: client_name.clone(),
                                                    client_version: client_version.clone(),
                                                    timestamp,
                                                    duration,
                                                    ok: !is_failure && !response_has_errors,
                                                    errors: response.errors.len(),
                                                    operation_body: operation_body.clone(),
                                                    operation_name: operation_name.clone(),
                                                },
                                            );
                                            response
                                        })
                                        .boxed()
                                }))
                            }
                        }
                    }
                },
            )
            .service(service)
            .boxed()
    }
}
impl Drop for UsagePlugin {
    /// Signals the usage agent to shut down exactly once; the send result is
    /// deliberately ignored (the agent may already be gone).
    fn drop(&mut self) {
        match self.shutdown_signal.take() {
            Some(tx) => {
                let _ = tx.send(());
            }
            None => {}
        }
    }
}
register_plugin!("hive", "usage", UsagePlugin);

View file

@ -65,6 +65,47 @@ const envelopProxy = envelop({
})
```
### Apollo Router
GraphQL Hive ships a custom version of [Apollo Router](https://www.apollographql.com/docs/router/). The reason is that in order to extend Apollo Router, we need to write [a native Rust plugin](https://www.apollographql.com/docs/router/customizations/native).
Download Apollo Router for Linux (x86_64), MacOS (x86_64) or Windows (x86_64):
```bash
curl -fsSL https://graphql-hive.com/apollo-router-download.sh | bash
```
Write `router.yaml` file and enable `hive.usage` plugin:
```yaml
plugins:
hive.usage:
{}
# Sample rate to determine sampling.
# 0.0 = 0% chance of being sent
# 1.0 = 100% chance of being sent.
# Default: 1.0
# sample_rate: 0.5
#
# A list of operations (by name) to be ignored by Hive.
# exclude: ["IntrospectionQuery", "MeQuery"]
#
# Uses graphql-client-name by default
# client_name_header: "x-client-name"
# Uses graphql-client-version by default
# client_version_header: "x-client-version"
```
You can also enable other features, like sampling.
Now, start the router:
```bash
HIVE_TOKEN="your-token" ./router
```
If everything worked correctly, you should see the first operations in GraphQL Hive in 1-2 minutes.
### Other servers
The `createHive` function creates a generic Hive Client to be used with any GraphQL flow.

View file

@ -16,23 +16,24 @@ GraphQL Hive will now generate a unique key:
## Apollo Federation
You're a programmer (or if you prefer a Software Engineer) so no need to explain the code snippet below. Just two things:
There are gateways maintained by Apollo team, [Apollo Gateway](#apollo-gateway) and [Apollo Router](#apollo-router). GraphQL Hive supports both!
### Apollo Gateway
- `HIVE_CDN_ENDPOINT` - the endpoint Hive generated for you in the previous step
- `HIVE_CDN_KEY` - the access key
The `experimental_pollInterval` value is up to you. Apollo Gateway uses 10 s (10,000 ms) by default, but we think it's better to fetch the supergraph more often.
```typescript
import { createSupergraphSDLFetcher } from '@graphql-hive/client'
import { createSupergraphManager } from '@graphql-hive/client'
import { ApolloGateway } from '@apollo/gateway'
import { ApolloServer } from 'apollo-server'
const gateway = new ApolloGateway({
experimental_pollInterval: 10_000, // define the poll interval (in ms)
experimental_updateSupergraphSdl: createSupergraphFetcher({
endpoint: HIVE_CDN_ENDPOINT,
key: HIVE_CDN_KEY
// Apollo Gateway will fetch Supergraph from GraphQL Hive CDN
supergraphSdl: createSupergraphManager({
endpoint: process.env.HIVE_CDN_ENDPOINT,
key: process.env.HIVE_CDN_KEY,
pollIntervalInMs: 15_000
})
})
@ -45,6 +46,27 @@ server.listen().then(({ url }) => {
})
```
### Apollo Router
GraphQL Hive ships a custom version of [Apollo Router](https://www.apollographql.com/docs/router/). The reason is that in order to extend Apollo Router, we need to write [a native Rust plugin](https://www.apollographql.com/docs/router/customizations/native).
Download Apollo Router for Linux (x86_64), MacOS (x86_64) or Windows (x86_64):
```bash
curl -fsSL https://graphql-hive.com/apollo-router-download.sh | bash
```
Start the router:
```bash
HIVE_CDN_ENDPOINT="..." HIVE_CDN_KEY="..." ./router
```
- `HIVE_CDN_ENDPOINT` - the endpoint Hive generated for you in the previous step
- `HIVE_CDN_KEY` - the access key
> Apollo Router polls schema from the registry every 10 seconds. In order to change it to 15 seconds pass: `HIVE_CDN_POLL_INTERVAL=15`.
## Schema Stitching
Stitching could be done in many ways, that's why `@graphql-hive/client` provides generic functions, not something dedicated for stitching. Unfortunately the implementation of gateway + polling is up to you.

View file

@ -0,0 +1,68 @@
#!/bin/bash
# Downloads the GraphQL Hive build of Apollo Router for the current platform
# and extracts it into the working directory.
set -u
COMMIT="latest" # build to fetch; override with -c/--commit
OS="$(uname -s)" # consumed by set_target to pick the artifact
# Parse Flags
# Parse command-line flags. Only -c/--commit <sha|latest> is recognized;
# anything else aborts with an error.
parse_args() {
    while [[ $# -gt 0 ]]; do
        case "$1" in
            -c | --commit)
                COMMIT="$2"
                shift # past commit flag
                shift # past commit value
                ;;
            *)
                echo "Unrecognized argument $1"
                exit 1
                ;;
        esac
    done
}
# Map the `uname -s` value in $OS to the artifact platform ($TARGET) and the
# binary extension ($EXT); exits for unsupported platforms.
# Fix: assignments now quote their values consistently (previously a mix of
# TARGET="linux" and TARGET=macos).
set_target() {
    case "$OS" in
        Linux)
            TARGET="linux"
            EXT=""
            ;;
        Darwin)
            TARGET="macos"
            EXT=""
            ;;
        MINGW* | MSYS* | CYGWIN*)
            # Windows shells (Git Bash, MSYS2, Cygwin) all report one of these.
            TARGET="win"
            EXT=".exe"
            ;;
        *)
            echo "OS $OS is not supported."
            echo "If you think that's a bug - please file an issue to https://github.com/kamilkisiela/graphql-hive/issues"
            exit 1
            ;;
    esac
}
# Fetch the router tarball for $TARGET/$COMMIT into a fresh temp dir and
# extract it into the current working directory. Exits non-zero on a
# failed download.
download() {
    DOWNLOAD_DIR=$(mktemp -d)
    URL="https://apollo-router.theguild.workers.dev/$TARGET/$COMMIT"
    echo "Downloading $URL"
    curl --progress-bar --fail -L "$URL" -o "$DOWNLOAD_DIR/router.tar.gz" || {
        echo "Download failed."
        exit 1
    }
    tar xzf "$DOWNLOAD_DIR/router.tar.gz"
}
# Entry point: parse flags, detect the platform, then fetch and extract.
parse_args "$@"
set_target
download

View file

@ -0,0 +1,15 @@
# Manifest of the `compress` helper: a small CLI that tars and gzips a single
# file (used by CI to package the router binary before upload).
[package]
name = "compress"
version = "0.0.1"
authors = ["Kamil Kisiela <kamil.kisiela@gmail.com>"]
edition = "2021"
license = "MIT"
# Internal tool — never published to crates.io.
publish = false
[[bin]]
name = "compress"
path = "src/main.rs"
[dependencies]
flate2 = "1.0.24"
tar = "0.4"

View file

@ -0,0 +1,39 @@
extern crate flate2;
use flate2::write::GzEncoder;
use flate2::Compression;
use std::env::args;
use std::fs::File;
use std::io::BufWriter;
use std::path::Path;
use std::time::Instant;
/// Usage: `./compress <source> <target>` — wraps `source` in a tar archive
/// whose single entry is the file's base name, gzip-compresses it, and writes
/// the result to `target`. Prints the elapsed time on success.
fn main() {
    use std::io::Write;
    if args().len() != 3 {
        eprintln!("Usage: ./compress `source` `target`");
        return;
    }
    let start = Instant::now();
    let input_path = args().nth(1).unwrap();
    let output_path = args().nth(2).unwrap();
    // The archive entry is named after the source file, not its full path.
    let cloned_input_path = input_path.clone();
    let input_filename = Path::new(&cloned_input_path)
        .file_name()
        .unwrap()
        .to_str()
        .unwrap();
    let mut input_file = File::open(input_path).expect("Failed to open the source file");
    let encoder = GzEncoder::new(
        BufWriter::new(File::create(&output_path).expect("Failed to create tgz")),
        Compression::default(),
    );
    let mut ar = tar::Builder::new(encoder);
    ar.append_file(input_filename, &mut input_file)
        .expect("Failed to add the source file");
    // Finish the tar stream, then the gzip stream, then flush the buffered
    // writer explicitly. The previous version relied on `Drop` for the last
    // two steps, which silently ignores I/O errors and could leave a
    // truncated archive on disk.
    let encoder = ar.into_inner().expect("Failed to archive");
    let mut writer = encoder.finish().expect("Failed to finish compression");
    writer.flush().expect("Failed to flush output");
    println!("Elapsed: {:?}", start.elapsed());
}