This commit is contained in:
Kamil Kisiela 2022-05-18 09:26:57 +02:00
commit dc3bc1ec87
860 changed files with 87123 additions and 0 deletions

8
.changeset/README.md Normal file
View file

@ -0,0 +1,8 @@
# Changesets
Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works
with multi-package repos, or single-package repos to help you version and publish your code. You can
find the full documentation for it [in our repository](https://github.com/changesets/changesets)
We have a quick list of common questions to get you started engaging with this project in
[our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md)

10
.changeset/config.json Normal file
View file

@ -0,0 +1,10 @@
{
"$schema": "https://unpkg.com/@changesets/config@1.6.0/schema.json",
"changelog": "@changesets/cli/changelog",
"commit": false,
"linked": [],
"access": "restricted",
"baseBranch": "main",
"updateInternalDependencies": "patch",
"ignore": []
}

46
.eslintrc.cjs Normal file
View file

@ -0,0 +1,46 @@
/* eslint-env node */
// Root ESLint configuration for the monorepo (TypeScript + import hygiene).
module.exports = {
  // Report `eslint-disable` comments that no longer suppress anything.
  reportUnusedDisableDirectives: true,
  // Generated or build-output paths that must not be linted.
  ignorePatterns: [
    'scripts',
    'out',
    'public',
    'packages/web/app/src/graphql/index.ts',
    'packages/libraries/cli/src/sdk.ts',
  ],
  parserOptions: {
    ecmaVersion: 2020,
    sourceType: 'module',
  },
  parser: '@typescript-eslint/parser',
  plugins: ['@typescript-eslint', 'import'],
  extends: ['eslint:recommended', 'plugin:@typescript-eslint/recommended'],
  rules: {
    '@typescript-eslint/no-unused-vars': [
      'error',
      // Allow intentionally-unused args prefixed with `_` and rest-sibling omissions.
      { argsIgnorePattern: '^_', ignoreRestSiblings: true },
    ],
    'no-empty': ['error', { allowEmptyCatch: true }],
    'import/no-absolute-path': 'error',
    'import/no-self-import': 'error',
    'import/no-extraneous-dependencies': [
      'error',
      {
        devDependencies: ['packages/services/storage/tools/*.js'],
        optionalDependencies: false,
      },
    ],
    // Packages must be imported via their package name, not cross-package relative paths.
    'no-restricted-imports': ['error', { patterns: ['packages/*'] }],
    // 🚨 The following rules need to be fixed and were temporarily disabled to avoid printing warnings
    '@typescript-eslint/no-explicit-any': 'off',
    '@typescript-eslint/explicit-module-boundary-types': 'off',
    '@typescript-eslint/no-non-null-assertion': 'off',
    '@typescript-eslint/no-namespace': 'off',
    '@typescript-eslint/no-empty-function': 'off',
    '@typescript-eslint/ban-types': 'off',
    '@typescript-eslint/triple-slash-reference': 'off',
  },
};

55
.github/workflows/integration.yaml vendored Normal file
View file

@ -0,0 +1,55 @@
# Runs the dockerized integration-test suite against every PR targeting main.
name: Integration Tests
on:
  pull_request:
    branches:
      - main
jobs:
  integration-tests:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository code
        uses: actions/checkout@v3
      # v3 for consistency with actions/checkout@v3 and actions/cache@v3 used in this workflow.
      - uses: actions/setup-node@v3
        with:
          node-version: 16
      - name: Install Dependencies
        run: yarn --frozen-lockfile
      - uses: actions/cache@v3
        name: Turbo cache
        with:
          path: node_modules/.cache/turbo
          key: ${{ runner.os }}-turbo-cache-v1-${{ hashFiles('yarn.lock') }}
      - name: Generate Types
        run: yarn graphql:generate
      - name: Build
        run: yarn workspace integration-tests run build-and-pack
        env:
          NEXT_PUBLIC_STRIPE_PUBLIC_KEY: ${{ secrets.TEST_STRIPE_PUBLIC_KEY }}
      - name: Pull images
        run: docker-compose -f integration-tests/docker-compose.yml pull
      - name: Integration Tests
        run: yarn workspace integration-tests run dockest
        env:
          AUTH0_DOMAIN: ${{ secrets.TEST_AUTH0_DOMAIN }}
          AUTH0_CLIENT_ID: ${{ secrets.TEST_AUTH0_CLIENT_ID }}
          AUTH0_CLIENT_SECRET: ${{ secrets.TEST_AUTH0_CLIENT_SECRET }}
          AUTH0_USER_PASSWORD: ${{ secrets.AUTH0_TESTING_USER_PASSWORD }}
          AUTH0_USER_MAIN_EMAIL: contact@the-guild.dev
          AUTH0_USER_EXTRA_EMAIL: contact+extra@the-guild.dev
          AUTH0_SECRET: ${{ secrets.TEST_AUTH0_SECRET }}
          AUTH0_AUDIENCE: ${{ secrets.TEST_AUTH0_AUDIENCE }}
          AUTH0_CONNECTION: Username-Password-Authentication
          STRIPE_SECRET_KEY: ${{ secrets.TEST_STRIPE_SECRET_KEY }}
      - name: Dockest logs
        # Print container logs even (especially) when the test step failed.
        if: always()
        run: cat integration-tests/*.log

77
.github/workflows/pr.yaml vendored Normal file
View file

@ -0,0 +1,77 @@
# Build, test, type-check and schema-check every PR targeting main.
name: PR Checks
on:
  pull_request:
    branches:
      - main
jobs:
  pr-checks:
    runs-on: ubuntu-latest
    services:
      # Throw-away Postgres for the storage-package migration/typegen steps.
      postgres:
        image: postgres:13.1-alpine
        ports:
          - 5432:5432
        env:
          POSTGRES_PASSWORD: postgres
          POSTGRES_USER: postgres
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    env:
      POSTGRES_HOST: localhost
      POSTGRES_PORT: 5432
      POSTGRES_PASSWORD: postgres
      POSTGRES_USER: postgres
      HIVE_TOKEN: ${{ secrets.HIVE_TOKEN }}
    steps:
      - name: Checkout
        # v3 for consistency with the release and integration workflows.
        uses: actions/checkout@v3
      - uses: actions/setup-node@v3
        with:
          node-version: 16
      - name: Install Dependencies
        run: yarn --frozen-lockfile
      - uses: actions/cache@v3
        name: Turbo cache
        with:
          path: node_modules/.cache/turbo
          key: ${{ runner.os }}-turbo-cache-v1-${{ hashFiles('yarn.lock') }}
      - name: Generate Types
        run: yarn graphql:generate
      - name: Check PR label
        if: contains(github.event.pull_request.labels.*.name, 'non-breaking')
        # `::set-output` is deprecated; write step outputs to $GITHUB_OUTPUT instead.
        run: echo "SAFE_FLAG=--forceSafe" >> "$GITHUB_OUTPUT"
        id: pr-label-check
      - name: Schema Check
        run: ./packages/libraries/cli/bin/dev schema:check "packages/services/api/src/modules/*/module.graphql.ts" ${{ steps.pr-label-check.outputs.SAFE_FLAG }} --github
      - name: Create Database
        working-directory: packages/services/storage
        run: yarn db:create
      - name: Migrate Database
        working-directory: packages/services/storage
        run: yarn db:migrator up
      - name: Generate Database Types
        working-directory: packages/services/storage
        run: yarn db:generate
      - name: Build
        run: yarn build
      - name: Test
        run: yarn test
      - name: Type Check
        run: yarn typecheck

107
.github/workflows/release.yaml vendored Normal file
View file

@ -0,0 +1,107 @@
# On push to main: publish packages via changesets, upload CLI tarballs,
# then dispatch a staging deployment to the private infrastructure repo.
name: ci/cd
on:
  push:
    branches:
      - main
jobs:
  publish:
    name: 'build'
    runs-on: ubuntu-latest
    env:
      HIVE_TOKEN: ${{ secrets.HIVE_TOKEN }}
      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          # Full history so changesets can compute versions from past commits/tags.
          fetch-depth: 0
      - uses: actions/setup-node@v3
        with:
          node-version: 16
      - name: Install Dependencies
        run: yarn --frozen-lockfile
      - uses: actions/cache@v3
        name: Turbo cache
        with:
          path: node_modules/.cache/turbo
          key: ${{ runner.os }}-turbo-cache-v1-${{ hashFiles('yarn.lock') }}
      - name: Generate GraphQL Types
        run: yarn graphql:generate
      - name: Build
        run: yarn build:libraries
      - name: Schema Publish
        run: ./packages/libraries/cli/bin/dev schema:publish "packages/services/api/src/modules/*/module.graphql.ts" --force --github
      - name: Prepare NPM Credentials
        run: |
          echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> $HOME/.npmrc
          npm config set always-auth true
        env:
          NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
      - name: Create Release Pull Request or Publish packages
        id: changesets
        # Pin to the v1 release tag instead of the mutable `master` branch.
        uses: changesets/action@v1
        with:
          publish: yarn release
          commit: 'chore(release): update monorepo packages versions'
          title: 'Upcoming Release Changes'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract published version
        if: steps.changesets.outputs.published && contains(steps.changesets.outputs.publishedPackages, '"@graphql-hive/cli"')
        id: cli
        # `::set-output` is deprecated; write step outputs to $GITHUB_OUTPUT instead.
        run: |
          echo '${{steps.changesets.outputs.publishedPackages}}' > cli-ver.json
          VERSION=`echo $(jq -r '.[] | select(.name | contains("@graphql-hive/cli")).version' cli-ver.json)`
          echo "version=$VERSION" >> "$GITHUB_OUTPUT"
          echo "publish=true" >> "$GITHUB_OUTPUT"
      - name: Pack tarballs
        if: steps.cli.outputs.publish == 'true'
        working-directory: packages/libraries/cli
        run: yarn oclif pack tarballs --no-xz
      - name: Upload tarballs
        if: steps.cli.outputs.publish == 'true'
        working-directory: packages/libraries/cli
        run: yarn oclif upload tarballs --no-xz
      - name: Promote tarballs
        if: steps.cli.outputs.publish == 'true'
        working-directory: packages/libraries/cli
        env:
          VERSION: ${{ steps.cli.outputs.version }}
        # Try both 7- and 8-character short SHAs since oclif's sha length varies.
        run: yarn oclif promote --no-xz --sha ${GITHUB_SHA:0:7} --version $VERSION || yarn oclif promote --no-xz --sha ${GITHUB_SHA:0:8} --version $VERSION
  deploy:
    name: 'deploy to staging'
    needs: publish
    runs-on: ubuntu-latest
    steps:
      - name: Dispatch Deployment
        # Trigger the private infra repo's `deploy` repository_dispatch event.
        run: |
          curl --request POST \
            --url 'https://api.github.com/repos/${{ secrets.PRIVATE_REPO_OWNER }}/${{ secrets.PRIVATE_REPO_NAME }}/dispatches' \
            --header 'Accept: application/vnd.github.everest-preview+json' \
            --header 'Authorization: token ${{ secrets.GH_PAT }}' \
            --header 'Content-Type: application/json' \
            --data '{
              "event_type": "deploy",
              "client_payload": {
                "environment": "staging",
                "ref": "${{ github.sha }}"
              }
            }'

111
.gitignore vendored Normal file
View file

@ -0,0 +1,111 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# TypeScript v1 declaration files
typings/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
.env.test
# parcel-bundler cache (https://parceljs.org/)
.cache
# Next.js build output
.next
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and *not* Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
temp
.DS_STORE
__generated__
integration-tests/testkit/gql
.turbo
.turbo/config.json
# IntelliJ's project specific settings files
.idea/

1
.husky/.gitignore vendored Normal file
View file

@ -0,0 +1 @@
_

4
.husky/pre-commit Executable file
View file

@ -0,0 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"

# Run the repository's pre-commit checks via the root package script.
yarn pre-commit

1
.nvmrc Normal file
View file

@ -0,0 +1 @@
16

15
.prettierignore Normal file
View file

@ -0,0 +1,15 @@
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
coverage
*.lcov
.env
.env.test
.next
out
dist
temp
__generated__

0
.turbo/.gitkeep Normal file
View file

7
.vscode/extensions.json vendored Normal file
View file

@ -0,0 +1,7 @@
{
"recommendations": [
"fabiospampinato.vscode-terminals",
"fabiospampinato.vscode-commands",
"esbenp.prettier-vscode"
]
}

24
.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,24 @@
{
"commands.commands": [
{
"command": "terminals.runTerminals",
"color": "#eab308",
"text": "$(rocket) Start Hive $(rocket)",
"tooltip": "Start dev environment"
}
],
"files.associations": {
"*.env.template": "dotenv"
},
"editor.codeActionsOnSave": {
"source.organizeImports": false
},
"files.autoSave": "onFocusChange",
"eslint.format.enable": true,
"editor.formatOnSave": true,
"editor.defaultFormatter": "esbenp.prettier-vscode",
"yaml.schemas": {
"https://json.schemastore.org/github-workflow.json": ".github/workflows/deploy.yaml"
},
"typescript.tsdk": "node_modules/typescript/lib"
}

83
.vscode/terminals.json vendored Normal file
View file

@ -0,0 +1,83 @@
{
"autorun": false,
"terminals": [
{
"name": "server:dev",
"description": "Run server",
"focus": true,
"open": true,
"cwd": "packages/services/server",
"command": "yarn dev"
},
{
"name": "app:dev",
"description": "Run application",
"open": true,
"cwd": "packages/web/app",
"command": "yarn dev"
},
{
"name": "tokens:dev",
"description": "Run tokens service",
"open": true,
"cwd": "packages/services/tokens",
"command": "yarn dev"
},
{
"name": "schema:dev",
"description": "Run schema service",
"open": true,
"cwd": "packages/services/schema",
"command": "yarn dev"
},
{
"name": "usage-estimator:dev",
"description": "Run Usage Estimator Service",
"open": true,
"cwd": "packages/services/usage-estimator",
"command": "yarn dev"
},
{
"name": "rate-limit:dev",
"description": "Run Rate Limiter Service",
"open": true,
"cwd": "packages/services/rate-limit",
"command": "yarn dev"
},
{
"name": "workdir",
"description": "Run empty",
"open": true,
"cwd": "./",
"command": ""
},
{
"name": "cdn:dev",
"description": "Run Local CDN",
"open": true,
"cwd": "packages/services/cdn-worker",
"command": "yarn dev"
},
{
"name": "billing:dev",
"description": "Run Billing Service",
"open": true,
"cwd": "packages/services/stripe-billing",
"command": "yarn dev"
},
{
"name": "usage:dev",
"description": "Run Usage Service",
"open": true,
"cwd": "packages/services/usage",
"command": "yarn dev"
},
{
"name": "usage-ingestor:dev",
"description": "Run Usage Ingestor",
"open": true,
"cwd": "packages/services/usage-ingestor",
"command": "yarn dev"
}
]
}

17
README.md Normal file
View file

@ -0,0 +1,17 @@
# GraphQL Hive
## Project Stack
- General: Auth0, TypeScript, GraphQL, GraphQL-Codegen
- Server: NodeJS, GraphQL-Modules
- App: React, NextJS, Tailwind, Twin.Macro
- CLI: Oclif
- Deployment: Pulumi, K8s, Nginx Proxy, Azure Cloud, CloudFlare Workers + KV Cache
- Monitoring: Prometheus, Grafana (+Logz.io), Sentry
- DB: Postgres, Redis, ClickHouse
## Docs
- [Deployment](./docs/DEPLOYMENT.md)
- [Development](./docs/DEVELOPMENT.md)
- [Testing](./docs/TESTING.md)

103
codegen.yml Normal file
View file

@ -0,0 +1,103 @@
schema: ./packages/services/api/src/modules/*/module.graphql.ts
generates:
# API
./packages/services/api/src/modules:
preset: graphql-modules
presetConfig:
baseTypesPath: ../__generated__/types.ts
filename: __generated__/types.ts
encapsulateModuleTypes: namespace
config:
immutableTypes: true
contextType: GraphQLModules.ModuleContext
enumValues:
OrganizationType: ../shared/entities#OrganizationType
ProjectType: ../shared/entities#ProjectType
TargetAccessScope: ../modules/auth/providers/target-access#TargetAccessScope
ProjectAccessScope: ../modules/auth/providers/project-access#ProjectAccessScope
OrganizationAccessScope: ../modules/auth/providers/organization-access#OrganizationAccessScope
scalars:
DateTime: string
SafeInt: number
mappers:
SchemaChangeConnection: ../shared/mappers#SchemaChangeConnection as SchemaChangeConnectionMapper
SchemaErrorConnection: ../shared/mappers#SchemaErrorConnection as SchemaErrorConnectionMapper
OrganizationConnection: ../shared/mappers#OrganizationConnection as OrganizationConnectionMapper
UserConnection: ../shared/mappers#UserConnection as UserConnectionMapper
ActivityConnection: ../shared/mappers#ActivityConnection as ActivityConnectionMapper
MemberConnection: ../shared/mappers#MemberConnection as MemberConnectionMapper
ProjectConnection: ../shared/mappers#ProjectConnection as ProjectConnectionMapper
TargetConnection: ../shared/mappers#TargetConnection as TargetConnectionMapper
SchemaConnection: ../shared/mappers#SchemaConnection as SchemaConnectionMapper
TokenConnection: ../shared/mappers#TokenConnection as TokenConnectionMapper
OperationStatsConnection: ../shared/mappers#OperationStatsConnection as OperationStatsConnectionMapper
ClientStatsConnection: ../shared/mappers#ClientStatsConnection as ClientStatsConnectionMapper
OperationsStats: ../shared/mappers#OperationsStats as OperationsStatsMapper
DurationStats: ../shared/mappers#DurationStats as DurationStatsMapper
SchemaComparePayload: ../shared/mappers#SchemaComparePayload as SchemaComparePayloadMapper
SchemaCompareResult: ../shared/mappers#SchemaCompareResult as SchemaCompareResultMapper
SchemaVersionConnection: ../shared/mappers#SchemaVersionConnection as SchemaVersionConnectionMapper
SchemaVersion: ../shared/mappers#SchemaVersion as SchemaVersionMapper
Schema: ../shared/mappers#Schema as SchemaMapper
PersistedOperationConnection: ../shared/mappers#PersistedOperationConnection as PersistedOperationMapper
Organization: ../shared/entities#Organization as OrganizationMapper
Project: ../shared/entities#Project as ProjectMapper
Target: ../shared/entities#Target as TargetMapper
Member: ../shared/entities#Member as MemberMapper
Token: ../shared/entities#Token as TokenMapper
TokenInfo: ../shared/entities#Token as TokenInfoMapper
Activity: ../shared/entities#ActivityObject as ActivityMapper
AlertChannel: ../shared/entities#AlertChannel as AlertChannelMapper
AlertSlackChannel: AlertChannelMapper
AlertWebhookChannel: AlertChannelMapper
Alert: ../shared/entities#Alert as AlertMapper
AdminQuery: '{}'
AdminStats: '{ daysLimit?: number | null }'
AdminGeneralStats: '{ daysLimit?: number | null }'
AdminOrganizationStats: ../shared/entities#AdminOrganizationStats as AdminOrganizationStatsMapper
UsageEstimation: '../shared/mappers#TargetsEstimationFilter'
UsageEstimationScope: '../shared/mappers#TargetsEstimationDateFilter'
BillingPaymentMethod: 'StripeTypes.PaymentMethod.Card'
BillingDetails: 'StripeTypes.PaymentMethod.BillingDetails'
BillingInvoice: 'StripeTypes.Invoice'
plugins:
- add:
content: "import { StripeTypes } from '@hive/stripe-billing';"
- typescript
- typescript-resolvers
# App
./packages/web/app/src/graphql/index.ts:
documents: ./packages/web/app/src/graphql/*.graphql
config:
dedupeFragments: true
scalars:
DateTime: string
SafeInt: number
plugins:
- typescript
- typescript-operations
- typed-document-node
./packages/web/app/src/gql/:
documents:
- './packages/web/app/src/(components|lib)/**/*.ts(x)?'
preset: gql-tag-operations-preset
presetConfig:
augmentedModuleName: '@urql/core'
# CLI
packages/libraries/cli/src/sdk.ts:
documents: ./packages/libraries/cli/src/**/*.graphql
config:
flattenGeneratedTypes: true
plugins:
- typescript
- typescript-operations
- typescript-graphql-request
# Integration tests
./integration-tests/testkit/gql:
documents: ./integration-tests/**/*.ts
preset: gql-tag-operations-preset

2
deployment/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
/bin/
/node_modules/

207
deployment/index.ts Normal file
View file

@ -0,0 +1,207 @@
import * as pulumi from '@pulumi/pulumi';
import { DeploymentEnvironment } from './types';
import { deployDbMigrations } from './services/db-migrations';
import { deployTokens } from './services/tokens';
import { deployWebhooks } from './services/webhooks';
import { deploySchema } from './services/schema';
import { deployUsage } from './services/usage';
import { deployUsageIngestor } from './services/usage-ingestor';
import { deployGraphQL } from './services/graphql';
import { deployApp } from './services/app';
import { deployLandingPage } from './services/landing-page';
import { deployDocs } from './services/docs';
import { deployRedis } from './services/redis';
import { deployKafka } from './services/kafka';
import { deployMetrics } from './services/observability';
import { deployCloudflare } from './services/cloudflare';
import { deployCloudflarePolice } from './services/police';
import { deployBotKube } from './services/bot-kube';
import { deployProxy } from './services/proxy';
import { deployClickhouse } from './services/clickhouse';
import { deployUsageEstimation } from './services/usage-estimation';
import { createPackageHelper } from './utils/pack';
import * as azure from '@pulumi/azure';
import { optimizeAzureCluster } from './utils/azure-helpers';
import { deployRateLimit } from './services/rate-limit';
import { deployStripeBilling } from './services/billing';
// Packages and uploads service artifacts; shared by every deployment below.
const packageHelper = createPackageHelper();
optimizeAzureCluster();
// The Pulumi stack name doubles as the environment name (e.g. "staging").
const envName = pulumi.getStack();
const commonConfig = new pulumi.Config('common');
const appDns = 'app';
const docsDns = 'docs';
const rootDns = commonConfig.require('dnsZone');
const appHostname = `${appDns}.${rootDns}`;
const docsHostname = `${docsDns}.${rootDns}`;
const resourceGroup = new azure.core.ResourceGroup(`hive-${envName}-rg`, {
  location: azure.Locations.EastUS,
});
const storageAccount = new azure.storage.Account(`hive${envName}`, {
  resourceGroupName: resourceGroup.name,
  accountReplicationType: 'LRS',
  accountTier: 'Standard',
  accountKind: 'StorageV2',
  allowBlobPublicAccess: true,
});
// Blob container holding the packaged artifacts consumed by service deployments.
const storageContainer = new azure.storage.Container('deploy-artifacts', {
  storageAccountName: storageAccount.name,
  containerAccessType: 'blob',
});
// Environment variables shared by every deployed service.
const deploymentEnv: DeploymentEnvironment = {
  ENVIRONMENT: envName,
  NODE_ENV: 'production',
  DEPLOYED_DNS: appHostname,
};
deployBotKube({ envName });
deployMetrics({ envName });
const cloudflare = deployCloudflare({
  envName,
  rootDns,
});
deployCloudflarePolice({ envName, rootDns });
// Stateful backing services first — application services below depend on them.
const redisApi = deployRedis({ deploymentEnv });
const kafkaApi = deployKafka();
const clickhouseApi = deployClickhouse();
// DB migrations run as a job; most services take it as a dependency so they
// only start once the schema is up to date.
const dbMigrations = deployDbMigrations({
  storageContainer,
  packageHelper,
  clickhouse: clickhouseApi,
  kafka: kafkaApi,
  deploymentEnv,
});
const tokensApi = deployTokens({
  packageHelper,
  storageContainer,
  deploymentEnv,
  dbMigrations,
});
const webhooksApi = deployWebhooks({
  packageHelper,
  storageContainer,
  deploymentEnv,
  redis: redisApi,
});
const usageEstimationApi = deployUsageEstimation({
  packageHelper,
  storageContainer,
  deploymentEnv,
  clickhouse: clickhouseApi,
  dbMigrations,
});
const billingApi = deployStripeBilling({
  packageHelper,
  storageContainer,
  deploymentEnv,
  dbMigrations,
  usageEstimator: usageEstimationApi,
});
const rateLimitApi = deployRateLimit({
  packageHelper,
  storageContainer,
  deploymentEnv,
  dbMigrations,
  usageEstimator: usageEstimationApi,
});
const usageApi = deployUsage({
  packageHelper,
  storageContainer,
  deploymentEnv,
  tokens: tokensApi,
  kafka: kafkaApi,
  dbMigrations,
  rateLimit: rateLimitApi,
});
const usageIngestorApi = deployUsageIngestor({
  clickhouse: clickhouseApi,
  kafka: kafkaApi,
  packageHelper,
  storageContainer,
  deploymentEnv,
  dbMigrations,
});
const schemaApi = deploySchema({
  packageHelper,
  storageContainer,
  deploymentEnv,
  redis: redisApi,
});
// Central GraphQL API — wired to nearly everything above.
const graphqlApi = deployGraphQL({
  clickhouse: clickhouseApi,
  packageHelper,
  storageContainer,
  deploymentEnv,
  tokens: tokensApi,
  webhooks: webhooksApi,
  schema: schemaApi,
  dbMigrations,
  redis: redisApi,
  usage: usageApi,
  cloudflare,
  usageEstimator: usageEstimationApi,
  rateLimit: rateLimitApi,
  billing: billingApi,
});
const app = deployApp({
  deploymentEnv,
  graphql: graphqlApi,
  dbMigrations,
  packageHelper,
  storageContainer,
});
const landingPage = deployLandingPage({
  rootDns,
  packageHelper,
  storageContainer,
});
const docs = deployDocs({
  rootDns,
  packageHelper,
  storageContainer,
});
// Reverse proxy fronting the web apps and public endpoints.
const proxy = deployProxy({
  rootDns,
  appHostname,
  docsHostname,
  app,
  landingPage,
  docs,
  graphql: graphqlApi,
  usage: usageApi,
});
// Stack outputs — consumed by tooling/other stacks.
export const graphqlApiServiceId = graphqlApi.service.id;
export const usageApiServiceId = usageApi.service.id;
export const usageIngestorApiServiceId = usageIngestorApi.service.id;
export const tokensApiServiceId = tokensApi.service.id;
export const schemaApiServiceId = schemaApi.service.id;
export const webhooksApiServiceId = webhooksApi.service.id;
export const appId = app.deployment.id;
// NOTE(review): non-null assertion — deploy fails here if the proxy has no
// load-balancer ingress yet; confirm deployProxy always returns one.
export const publicIp = proxy!.status.loadBalancer.ingress[0].ip;

21
deployment/package.json Normal file
View file

@ -0,0 +1,21 @@
{
"name": "@hive/deployment",
"scripts": {
"test": "jest"
},
"devDependencies": {
"@types/mime-types": "2.1.1",
"@types/node": "17.0.17",
"typescript": "4.6.4"
},
"dependencies": {
"@manypkg/get-packages": "1.1.3",
"@pulumi/azure": "4.37.0",
"@pulumi/azure-native": "1.56.0",
"@pulumi/cloudflare": "4.3.0",
"@pulumi/kubernetes": "3.15.2",
"@pulumi/kubernetesx": "0.1.6",
"@pulumi/pulumi": "3.24.1",
"@pulumi/random": "4.3.1"
}
}

117
deployment/services/app.ts Normal file
View file

@ -0,0 +1,117 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { GraphQL } from './graphql';
import { DbMigrations } from './db-migrations';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { serviceLocalEndpoint } from '../utils/local-endpoint';
import { DeploymentEnvironment } from '../types';
import { PackageHelper } from '../utils/pack';
const appConfig = new pulumi.Config('app');
const commonConfig = new pulumi.Config('common');
const githubAppConfig = new pulumi.Config('ghapp');
const appEnv = appConfig.requireObject<Record<string, string>>('env');
const commonEnv = commonConfig.requireObject<Record<string, string>>('env');
export type App = ReturnType<typeof deployApp>;
/**
 * Deploys the web application (packaged `@hive/app` artifact, port 3000)
 * behind the shared remote-artifact deployment helper. Waits on the GraphQL
 * service and on DB migrations before rolling out.
 */
export function deployApp({
  deploymentEnv,
  graphql,
  dbMigrations,
  storageContainer,
  packageHelper,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  deploymentEnv: DeploymentEnvironment;
  graphql: GraphQL;
  dbMigrations: DbMigrations;
}) {
  const appRelease = packageHelper.currentReleaseId();
  return new RemoteArtifactAsServiceDeployment(
    'app',
    {
      storageContainer,
      packageInfo: packageHelper.npmPack('@hive/app'),
      readinessProbe: '/api/health',
      livenessProbe: '/api/health',
      env: [
        { name: 'DEPLOYED_DNS', value: deploymentEnv.DEPLOYED_DNS },
        { name: 'NODE_ENV', value: 'production' },
        { name: 'ENVIRONMENT', value: deploymentEnv.ENVIRONMENT },
        // NEXT_PUBLIC_* duplicates are needed so Next.js exposes them to the browser.
        {
          name: 'NEXT_PUBLIC_ENVIRONMENT',
          value: deploymentEnv.ENVIRONMENT,
        },
        {
          name: 'RELEASE',
          value: appRelease,
        },
        {
          name: 'NEXT_PUBLIC_RELEASE',
          value: appRelease,
        },
        // Auth0 authentication settings.
        { name: 'AUTH0_DOMAIN', value: commonConfig.require('auth0Domain') },
        {
          name: 'AUTH0_CLIENT_ID',
          value: commonConfig.require('auth0ClientId'),
        },
        {
          name: 'AUTH0_CLIENT_SECRET',
          value: commonConfig.requireSecret('auth0ClientSecret'),
        },
        {
          name: 'AUTH0_BASE_URL',
          value: `https://${deploymentEnv.DEPLOYED_DNS}/`,
        },
        {
          name: 'AUTH0_AUDIENCE',
          value: `https://${commonConfig.require('auth0Domain')}/api/v2/`,
        },
        {
          name: 'AUTH0_ISSUER_BASE_URL',
          value: `https://${commonConfig.require('auth0Domain')}`,
        },
        { name: 'AUTH0_CALLBACK', value: `/api/callback` },
        {
          name: 'POST_LOGOUT_REDIRECT_URI',
          value: `https://${deploymentEnv.DEPLOYED_DNS}/`,
        },
        {
          name: 'AUTH0_SECRET',
          value: commonConfig.requireSecret('cookieSecret'),
        },
        { name: 'AUTH0_SCOPE', value: 'openid profile offline_access' },
        // Error reporting (server-side and browser-side).
        { name: 'SENTRY_DSN', value: commonEnv.SENTRY_DSN },
        { name: 'NEXT_PUBLIC_SENTRY_DSN', value: commonEnv.SENTRY_DSN },
        {
          // Cluster-internal endpoint of the GraphQL service.
          name: 'GRAPHQL_ENDPOINT',
          value: serviceLocalEndpoint(graphql.service).apply(
            (s) => `${s}/graphql`
          ),
        },
        {
          name: 'APP_BASE_URL',
          value: `https://${deploymentEnv.DEPLOYED_DNS}/`,
        },
        // Slack OAuth integration credentials.
        {
          name: 'SLACK_CLIENT_ID',
          value: appEnv.SLACK_CLIENT_ID,
        },
        {
          name: 'SLACK_CLIENT_SECRET',
          value: appEnv.SLACK_CLIENT_SECRET,
        },
        {
          name: 'GITHUB_APP_NAME',
          value: githubAppConfig.require('name'),
        },
      ],
      port: 3000,
    },
    [graphql.service, graphql.deployment, dbMigrations]
  ).deploy();
}

View file

@ -0,0 +1,53 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { PackageHelper } from '../utils/pack';
import { DeploymentEnvironment } from '../types';
import { DbMigrations } from './db-migrations';
import { UsageEstimator } from './usage-estimation';
import { serviceLocalEndpoint } from '../utils/local-endpoint';
const billingConfig = new pulumi.Config('billing');
const commonConfig = new pulumi.Config('common');
const commonEnv = commonConfig.requireObject<Record<string, string>>('env');
const apiConfig = new pulumi.Config('api');
export type StripeBillingService = ReturnType<typeof deployStripeBilling>;
/**
 * Deploys the Stripe billing service (packaged `@hive/stripe-billing`
 * artifact, port 4000). Rolls out only after DB migrations and the
 * usage-estimator service are up.
 */
export function deployStripeBilling({
  storageContainer,
  packageHelper,
  deploymentEnv,
  dbMigrations,
  usageEstimator,
}: {
  usageEstimator: UsageEstimator;
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  deploymentEnv: DeploymentEnvironment;
  dbMigrations: DbMigrations;
}) {
  return new RemoteArtifactAsServiceDeployment(
    'stripe-billing',
    {
      storageContainer,
      // Single replica — presumably to avoid concurrent billing processing;
      // TODO(review): confirm against the service implementation.
      replicas: 1,
      readinessProbe: '/_readiness',
      livenessProbe: '/_health',
      env: {
        ...deploymentEnv,
        ...commonEnv,
        RELEASE: packageHelper.currentReleaseId(),
        USAGE_ESTIMATOR_ENDPOINT: serviceLocalEndpoint(usageEstimator.service),
        STRIPE_SECRET_KEY: billingConfig.requireSecret('stripePrivateKey'),
        POSTGRES_CONNECTION_STRING: apiConfig.requireSecret(
          'postgresConnectionString'
        ),
      },
      exposesMetrics: true,
      packageInfo: packageHelper.npmPack('@hive/stripe-billing'),
      port: 4000,
    },
    [dbMigrations, usageEstimator.service, usageEstimator.deployment]
  ).deploy();
}

View file

@ -0,0 +1,23 @@
import * as pulumi from '@pulumi/pulumi';
import { BotKube } from '../utils/botkube';
const botkubeConfig = new pulumi.Config('botkube');
/**
 * Deploys BotKube (cluster-event bot reporting to Slack) for this environment.
 *
 * No-op unless `botkube:enabled` is true and both `botkube:slackChannel` and
 * `botkube:slackToken` are present in the stack configuration.
 */
export function deployBotKube({ envName }: { envName: string }) {
  if (!botkubeConfig.getBoolean('enabled')) {
    return;
  }
  // `botkubeConfig` is a module-level const and always defined, so the former
  // truthiness check on the config object itself was redundant and was dropped.
  if (!botkubeConfig.get('slackChannel') || !botkubeConfig.getSecret('slackToken')) {
    return;
  }
  new BotKube().deploy({
    clusterName: envName,
    enableKubectl: true,
    slackChannelName: botkubeConfig.require('slackChannel'),
    slackToken: botkubeConfig.requireSecret('slackToken'),
  });
}

View file

@ -0,0 +1,61 @@
import * as pulumi from '@pulumi/pulumi';
import { serviceLocalHost } from '../utils/local-endpoint';
import { Clickhouse as ClickhouseDeployment } from '../utils/clickhouse';
const clickhouseConfig = new pulumi.Config('clickhouse');
const commonConfig = new pulumi.Config('common');
// NOTE(review): non-null assertion — the program crashes at startup if
// `common:env` is missing from the stack config; confirm that is intended.
const commonEnv = commonConfig.getObject<Record<string, string>>('env')!;
export type Clickhouse = ReturnType<typeof deployClickhouse>;
// Connection settings passed to services that talk to ClickHouse.
type ClickhouseConfig = {
  protocol: pulumi.Output<string> | string;
  host: pulumi.Output<string> | string;
  port: pulumi.Output<string> | string;
  username: pulumi.Output<string> | string;
  password: pulumi.Output<string>;
};
// Reads connection details of an externally-managed ClickHouse from stack config.
function getRemoteClickhouseConfig(): ClickhouseConfig {
  return {
    host: clickhouseConfig.require('host'),
    port: clickhouseConfig.require('port'),
    username: clickhouseConfig.require('username'),
    password: clickhouseConfig.requireSecret('password'),
    protocol: clickhouseConfig.requireSecret('protocol'),
  };
}
/**
 * Either points at a remote ClickHouse instance (when `clickhouse:inCluster`
 * is falsy — deployment/service are null then) or deploys ClickHouse into
 * the cluster and returns its deployment, service and connection config.
 */
export function deployClickhouse() {
  if (!clickhouseConfig.getBoolean('inCluster')) {
    return {
      config: getRemoteClickhouseConfig(),
      deployment: null,
      service: null,
    };
  }
  const password = clickhouseConfig.requireSecret('password');
  const username = clickhouseConfig.requireSecret('username');
  const chApi = new ClickhouseDeployment('clickhouse', {
    env: {
      CLICKHOUSE_USER: username,
      CLICKHOUSE_PASSWORD: password,
    },
    sentryDsn: commonEnv.SENTRY_DSN,
  }).deploy();
  // In-cluster instance is reached over plain HTTP via the cluster-local host.
  const config: ClickhouseConfig = {
    protocol: 'http',
    host: serviceLocalHost(chApi.service),
    port: String(chApi.port),
    password: password,
    username,
  };
  return {
    deployment: chApi.deployment,
    service: chApi.service,
    config,
  };
}

View file

@ -0,0 +1,27 @@
import * as pulumi from '@pulumi/pulumi';
import { CloudflareCDN } from '../utils/cdn';
const commonConfig = new pulumi.Config('common');
const cfConfig = new pulumi.Config('cloudflareCustom');
export type Cloudflare = ReturnType<typeof deployCloudflare>;
/**
 * Provisions the Cloudflare-backed CDN for the current environment and
 * returns the deployed CDN resources.
 */
export function deployCloudflare({
  rootDns,
  envName,
}: {
  rootDns: string;
  envName: string;
}) {
  // We can't use `cdn.staging.graphql-hive.com` for staging env, since CF certificate only covers
  // one level of subdomains. See: https://community.cloudflare.com/t/ssl-handshake-error-cloudflare-proxy/175088
  // So for staging env, we are going to use `cdn-staging` instead of `cdn.staging`.
  const cdnHostname = envName === 'staging' ? `cdn-${rootDns}` : `cdn.${rootDns}`;
  const authPrivateKey = commonConfig.requireSecret('cdnAuthPrivateKey');
  const cdnDeployment = new CloudflareCDN(
    envName,
    cfConfig.require('zoneId'),
    cdnHostname,
    authPrivateKey
  );
  return cdnDeployment.deploy();
}

View file

@ -0,0 +1,50 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { Clickhouse } from './clickhouse';
import { Kafka } from './kafka';
import { PackageHelper } from '../utils/pack';
import { DeploymentEnvironment } from '../types';
const apiConfig = new pulumi.Config('api');
export type DbMigrations = ReturnType<typeof deployDbMigrations>;
/**
 * Runs database migrations (PostgreSQL + ClickHouse) as a one-off job.
 *
 * Other services take the returned job as a dependency so they only start
 * after the schemas are up to date.
 */
export function deployDbMigrations({
  storageContainer,
  packageHelper,
  deploymentEnv,
  clickhouse,
  kafka,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  deploymentEnv: DeploymentEnvironment;
  clickhouse: Clickhouse;
  kafka: Kafka;
}) {
  const { job } = new RemoteArtifactAsServiceDeployment(
    'db-migrations',
    {
      env: {
        POSTGRES_CONNECTION_STRING: apiConfig.requireSecret(
          'postgresConnectionString'
        ),
        // 'up' tells the migrator binaries to apply all pending migrations.
        MIGRATOR: 'up',
        CLICKHOUSE_MIGRATOR: 'up',
        CLICKHOUSE_HOST: clickhouse.config.host,
        CLICKHOUSE_PORT: clickhouse.config.port,
        CLICKHOUSE_USERNAME: clickhouse.config.username,
        CLICKHOUSE_PASSWORD: clickhouse.config.password,
        CLICKHOUSE_PROTOCOL: clickhouse.config.protocol,
        KAFKA_BROKER: kafka.config.endpoint,
        ...deploymentEnv,
      },
      storageContainer,
      packageInfo: packageHelper.npmPack('@hive/storage'),
    },
    // Wait for ClickHouse before attempting its migrations.
    [clickhouse.deployment, clickhouse.service],
    clickhouse.service
  ).deployAsJob();

  return job;
}

View file

@ -0,0 +1,28 @@
import * as azure from '@pulumi/azure';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { PackageHelper } from '../utils/pack';
export type Docs = ReturnType<typeof deployDocs>;
/**
 * Deploys the documentation website (`@hive/docs`) on port 3000.
 */
export function deployDocs({
  rootDns,
  storageContainer,
  packageHelper,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  rootDns: string;
}) {
  // The docs app exposes its health check at /api/health; reuse it for
  // both readiness and liveness.
  const healthEndpoint = '/api/health';
  const docsEnv = [
    { name: 'RELEASE', value: packageHelper.currentReleaseId() },
    { name: 'DEPLOYED_DNS', value: rootDns },
    { name: 'NODE_ENV', value: 'production' },
  ];

  const deployment = new RemoteArtifactAsServiceDeployment('docs', {
    storageContainer,
    readinessProbe: healthEndpoint,
    livenessProbe: healthEndpoint,
    env: docsEnv,
    packageInfo: packageHelper.npmPack('@hive/docs'),
    port: 3000,
  });

  return deployment.deploy();
}

View file

@ -0,0 +1,119 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { Cloudflare } from './cloudflare';
import { Tokens } from './tokens';
import { Webhooks } from './webhooks';
import { Redis } from './redis';
import { DbMigrations } from './db-migrations';
import { Schema } from './schema';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { serviceLocalEndpoint } from '../utils/local-endpoint';
import { DeploymentEnvironment } from '../types';
import { Clickhouse } from './clickhouse';
import { Usage } from './usage';
import { PackageHelper } from '../utils/pack';
import { UsageEstimator } from './usage-estimation';
import { RateLimitService } from './rate-limit';
import { StripeBillingService } from './billing';
const commonConfig = new pulumi.Config('common');
const cloudflareConfig = new pulumi.Config('cloudflare');
const apiConfig = new pulumi.Config('api');
const githubAppConfig = new pulumi.Config('ghapp');
const commonEnv = commonConfig.requireObject<Record<string, string>>('env');
const apiEnv = apiConfig.requireObject<Record<string, string>>('env');
export type GraphQL = ReturnType<typeof deployGraphQL>;
/**
 * Deploys the main GraphQL API service (`@hive/server`).
 *
 * The API talks to almost every other component, so their endpoints and
 * credentials are injected as environment variables, and the deployment
 * waits for db migrations, Redis, ClickHouse and the rate limiter.
 */
export function deployGraphQL({
  clickhouse,
  packageHelper,
  storageContainer,
  deploymentEnv,
  tokens,
  webhooks,
  schema,
  cloudflare,
  redis,
  usage,
  usageEstimator,
  dbMigrations,
  rateLimit,
  billing,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  clickhouse: Clickhouse;
  deploymentEnv: DeploymentEnvironment;
  tokens: Tokens;
  webhooks: Webhooks;
  schema: Schema;
  redis: Redis;
  cloudflare: Cloudflare;
  usage: Usage;
  usageEstimator: UsageEstimator;
  dbMigrations: DbMigrations;
  rateLimit: RateLimitService;
  billing: StripeBillingService;
}) {
  return new RemoteArtifactAsServiceDeployment(
    'graphql-api',
    {
      storageContainer,
      replicas: 1,
      readinessProbe: '/_readiness',
      livenessProbe: '/_health',
      env: {
        // Spread order defines precedence (later wins): per-stack `apiEnv`
        // overrides `deploymentEnv`, and `commonEnv` overrides both.
        // `apiEnv` is the module-level `apiConfig.requireObject('env')`, so
        // the config is read and spread exactly once.
        ...deploymentEnv,
        ...apiEnv,
        ...commonEnv,
        CLICKHOUSE_PROTOCOL: clickhouse.config.protocol,
        CLICKHOUSE_HOST: clickhouse.config.host,
        CLICKHOUSE_PORT: clickhouse.config.port,
        CLICKHOUSE_USERNAME: clickhouse.config.username,
        CLICKHOUSE_PASSWORD: clickhouse.config.password,
        REDIS_HOST: redis.config.host,
        REDIS_PORT: String(redis.config.port),
        REDIS_PASSWORD: redis.config.password,
        RELEASE: packageHelper.currentReleaseId(),
        POSTGRES_CONNECTION_STRING: apiConfig.requireSecret(
          'postgresConnectionString'
        ),
        AUTH0_DOMAIN: commonConfig.require('auth0Domain'),
        AUTH0_CLIENT_ID: commonConfig.require('auth0ClientId'),
        AUTH0_CLIENT_SECRET: commonConfig.requireSecret('auth0ClientSecret'),
        // In-cluster endpoints of the sibling services.
        BILLING_ENDPOINT: serviceLocalEndpoint(billing.service),
        TOKENS_ENDPOINT: serviceLocalEndpoint(tokens.service),
        WEBHOOKS_ENDPOINT: serviceLocalEndpoint(webhooks.service),
        SCHEMA_ENDPOINT: serviceLocalEndpoint(schema.service),
        CF_BASE_PATH: 'https://api.cloudflare.com/client/v4/accounts',
        CF_ACCOUNT_ID: cloudflareConfig.require('accountId'),
        CF_AUTH_TOKEN: cloudflareConfig.requireSecret('apiToken'),
        CF_NAMESPACE_ID: cloudflare.cfStorageNamespaceId,
        CDN_BASE_URL: cloudflare.workerBaseUrl,
        CDN_AUTH_PRIVATE_KEY: cloudflare.authPrivateKey,
        HIVE_USAGE_ENDPOINT: serviceLocalEndpoint(usage.service),
        USAGE_ESTIMATOR_ENDPOINT: serviceLocalEndpoint(usageEstimator.service),
        // The API reports its own usage to itself on localhost.
        HIVE_REPORTING_ENDPOINT: 'http://0.0.0.0:4000/graphql',
        GITHUB_APP_PRIVATE_KEY: githubAppConfig.requireSecret('key'),
        RATE_LIMIT_ENDPOINT: serviceLocalEndpoint(rateLimit.service),
        GITHUB_APP_ID: githubAppConfig.require('id'),
        ENCRYPTION_SECRET: commonConfig.requireSecret('encryptionSecret'),
      },
      packageInfo: packageHelper.npmPack('@hive/server'),
      exposesMetrics: true,
      port: 4000,
    },
    [
      dbMigrations,
      redis.deployment,
      redis.service,
      clickhouse.deployment,
      clickhouse.service,
      rateLimit.deployment,
      rateLimit.service,
    ]
  ).deploy();
}

View file

@ -0,0 +1,18 @@
import * as pulumi from '@pulumi/pulumi';
export type Kafka = ReturnType<typeof deployKafka>;
/**
 * "Deploys" Kafka — nothing is actually created here: the broker is a hosted
 * event hub, so this only assembles the connection/buffering settings that
 * other services consume.
 */
export function deployKafka() {
  const eventhubConfig = new pulumi.Config('eventhub');

  const config = {
    key: eventhubConfig.requireSecret('key'),
    // The hosted broker expects this literal user name.
    user: '$ConnectionString',
    endpoint: eventhubConfig.require('endpoint'),
    bufferSize: eventhubConfig.require('bufferSize'),
    bufferInterval: eventhubConfig.require('bufferInterval'),
    bufferDynamic: eventhubConfig.require('bufferDynamic'),
  };

  return { config };
}

View file

@ -0,0 +1,28 @@
import * as azure from '@pulumi/azure';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { PackageHelper } from '../utils/pack';
export type LandingPage = ReturnType<typeof deployLandingPage>;
/**
 * Deploys the public landing page (`@hive/landing-page`) on port 3000.
 * Mirrors the docs deployment: same probes, same env shape.
 */
export function deployLandingPage({
  rootDns,
  storageContainer,
  packageHelper,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  rootDns: string;
}) {
  return new RemoteArtifactAsServiceDeployment('landing-page', {
    storageContainer,
    // The app serves its health check at /api/health.
    readinessProbe: '/api/health',
    livenessProbe: '/api/health',
    env: [
      { name: 'RELEASE', value: packageHelper.currentReleaseId() },
      { name: 'DEPLOYED_DNS', value: rootDns },
      { name: 'NODE_ENV', value: 'production' },
    ],
    packageInfo: packageHelper.npmPack('@hive/landing-page'),
    port: 3000,
  }).deploy();
}

View file

@ -0,0 +1,25 @@
import * as pulumi from '@pulumi/pulumi';
import { Observability } from '../utils/observability';
const observabilityConfig = new pulumi.Config('observability');
export function deployMetrics(config: { envName: string }) {
if (!observabilityConfig.getBoolean('enabled')) {
return;
}
const observability = new Observability(config.envName, {
prom: {
endpoint: observabilityConfig.require('promEndpoint'),
username: observabilityConfig.require('promUsername'),
password: observabilityConfig.requireSecret('promPassword'),
},
loki: {
endpoint: observabilityConfig.require('lokiEndpoint'),
username: observabilityConfig.require('lokiUsername'),
password: observabilityConfig.requireSecret('lokiPassword'),
},
});
// logging.deployMetrics(logzioConfig.requireSecret('metricsSecret'));
observability.deploy();
}

View file

@ -0,0 +1,21 @@
import * as pulumi from '@pulumi/pulumi';
import { HivePolice } from '../utils/police';
const cfCustomConfig = new pulumi.Config('cloudflareCustom');
/**
 * Deploys the "HivePolice" Cloudflare integration for the given zone/DNS.
 */
export function deployCloudflarePolice({
  envName,
  rootDns,
}: {
  envName: string;
  rootDns: string;
}) {
  const zoneId = cfCustomConfig.require('zoneId');
  const policeApiToken = cfCustomConfig.requireSecret('policeApiToken');

  return new HivePolice(envName, zoneId, policeApiToken, rootDns).deploy();
}

View file

@ -0,0 +1,85 @@
import * as pulumi from '@pulumi/pulumi';
import { Proxy } from '../utils/reverse-proxy';
import { CertManager } from '../utils/cert-manager';
import { GraphQL } from './graphql';
import { LandingPage } from './landing-page';
import { App } from './app';
import { Usage } from './usage';
import { Docs } from './docs';
const commonConfig = new pulumi.Config('common');
/**
 * Deploys the ingress reverse-proxy and registers every public route:
 *
 *   <rootDns>/            -> landing page (apex)
 *   <docsHostname>/       -> docs
 *   <appHostname>/        -> web app
 *   <appHostname>/server  -> GraphQL API
 *   <appHostname>/registry[/...] -> GraphQL API (rewritten paths)
 *   <appHostname>/usage   -> usage service
 *
 * Also installs cert-manager and a TLS issuer used by the proxy.
 */
export function deployProxy({
  appHostname,
  docsHostname,
  rootDns,
  graphql,
  app,
  docs,
  usage,
  landingPage,
}: {
  appHostname: string;
  docsHostname: string;
  rootDns: string;
  graphql: GraphQL;
  app: App;
  usage: Usage;
  docs: Docs;
  landingPage: LandingPage;
}) {
  const { tlsIssueName } = new CertManager().deployCertManagerAndIssuer();
  return new Proxy(tlsIssueName, {
    // Optional pre-allocated static IP for the load balancer.
    address: commonConfig.get('staticIp'),
  })
    .deployProxy({ replicas: 2 })
    .registerService({ record: rootDns, apex: true }, [
      {
        name: 'landing-page',
        path: '/',
        service: landingPage.service,
      },
    ])
    .registerService(
      {
        record: docsHostname,
      },
      [
        {
          name: 'docs',
          path: '/',
          service: docs.service,
        },
      ]
    )
    .registerService({ record: appHostname }, [
      {
        name: 'app',
        path: '/',
        service: app.service,
      },
      {
        name: 'server',
        path: '/server',
        service: graphql.service,
      },
      // Registry routes are rewritten to the API's internal paths.
      {
        name: 'registry-api-health',
        path: '/registry/_health',
        customRewrite: '/_health',
        service: graphql.service,
      },
      {
        name: 'registry-api',
        path: '/registry',
        customRewrite: '/graphql',
        service: graphql.service,
      },
      {
        name: 'usage',
        path: '/usage',
        service: usage.service,
      },
    ])
    .get();
}

View file

@ -0,0 +1,54 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { PackageHelper } from '../utils/pack';
import { DeploymentEnvironment } from '../types';
import { DbMigrations } from './db-migrations';
import { UsageEstimator } from './usage-estimation';
import { serviceLocalEndpoint } from '../utils/local-endpoint';
const rateLimitConfig = new pulumi.Config('rateLimit');
const commonConfig = new pulumi.Config('common');
const commonEnv = commonConfig.requireObject<Record<string, string>>('env');
const apiConfig = new pulumi.Config('api');
export type RateLimitService = ReturnType<typeof deployRateLimit>;
/**
 * Deploys the rate-limiter service (`@hive/rate-limit`) on port 4000.
 * Reads usage data via the usage estimator and limits from PostgreSQL.
 */
export function deployRateLimit({
  storageContainer,
  packageHelper,
  deploymentEnv,
  dbMigrations,
  usageEstimator,
}: {
  usageEstimator: UsageEstimator;
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  deploymentEnv: DeploymentEnvironment;
  dbMigrations: DbMigrations;
}) {
  return new RemoteArtifactAsServiceDeployment(
    'rate-limiter',
    {
      storageContainer,
      replicas: 1,
      readinessProbe: '/_readiness',
      livenessProbe: '/_health',
      env: {
        ...deploymentEnv,
        ...commonEnv,
        // How often the in-memory limits cache is refreshed.
        LIMIT_CACHE_UPDATE_INTERVAL_MS:
          rateLimitConfig.require('updateIntervalMs'),
        RELEASE: packageHelper.currentReleaseId(),
        USAGE_ESTIMATOR_ENDPOINT: serviceLocalEndpoint(usageEstimator.service),
        POSTGRES_CONNECTION_STRING: apiConfig.requireSecret(
          'postgresConnectionString'
        ),
      },
      exposesMetrics: true,
      packageInfo: packageHelper.npmPack('@hive/rate-limit'),
      port: 4000,
    },
    // Needs migrated tables and a running estimator before serving.
    [dbMigrations, usageEstimator.service, usageEstimator.deployment]
  ).deploy();
}

View file

@ -0,0 +1,40 @@
import * as pulumi from '@pulumi/pulumi';
import { serviceLocalHost } from '../utils/local-endpoint';
import { Redis as RedisStore } from '../utils/redis';
import { isStaging } from '../utils/helpers';
import { DeploymentEnvironment } from '../types';
const redisConfig = new pulumi.Config('redis');
export type Redis = ReturnType<typeof deployRedis>;
/**
 * Deploys a Redis instance and returns its deployment/service handles plus
 * the connection config consumed by dependent services.
 */
export function deployRedis({
  deploymentEnv,
}: {
  deploymentEnv: DeploymentEnvironment;
}) {
  // NOTE(review): `require` (not `requireSecret`) — the password is stored
  // unencrypted in the Pulumi state; confirm whether it should be a secret.
  const redisPassword = redisConfig.require('password');
  const redisApi = new RedisStore({
    password: redisPassword,
  }).deploy({
    // Staging gets a much smaller resource envelope than production.
    limits: isStaging(deploymentEnv)
      ? {
          memory: '80Mi',
          cpu: '50m',
        }
      : {
          memory: '800Mi',
          cpu: '1000m',
        },
  });

  return {
    deployment: redisApi.deployment,
    service: redisApi.service,
    config: {
      host: serviceLocalHost(redisApi.service),
      port: redisApi.port,
      password: redisPassword,
    },
  };
}

View file

@ -0,0 +1,45 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { isProduction } from '../utils/helpers';
import { DeploymentEnvironment } from '../types';
import { Redis } from './redis';
import { PackageHelper } from '../utils/pack';
const commonConfig = new pulumi.Config('common');
const commonEnv = commonConfig.requireObject<Record<string, string>>('env');
export type Schema = ReturnType<typeof deploySchema>;
/**
 * Deploys the schema service (`@hive/schema`). Uses Redis as its backing
 * store and runs 2 replicas in production, 1 elsewhere.
 */
export function deploySchema({
  deploymentEnv,
  redis,
  packageHelper,
  storageContainer,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  deploymentEnv: DeploymentEnvironment;
  redis: Redis;
}) {
  return new RemoteArtifactAsServiceDeployment(
    'schema-service',
    {
      storageContainer,
      env: {
        ...deploymentEnv,
        ...commonEnv,
        RELEASE: packageHelper.currentReleaseId(),
        REDIS_HOST: redis.config.host,
        REDIS_PORT: String(redis.config.port),
        REDIS_PASSWORD: redis.config.password,
      },
      readinessProbe: '/_readiness',
      livenessProbe: '/_health',
      exposesMetrics: true,
      packageInfo: packageHelper.npmPack('@hive/schema'),
      replicas: isProduction(deploymentEnv) ? 2 : 1,
    },
    [redis.deployment, redis.service]
  ).deploy();
}

View file

@ -0,0 +1,44 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { DbMigrations } from './db-migrations';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { DeploymentEnvironment } from '../types';
import { PackageHelper } from '../utils/pack';
const commonConfig = new pulumi.Config('common');
const apiConfig = new pulumi.Config('api');
const commonEnv = commonConfig.requireObject<Record<string, string>>('env');
export type Tokens = ReturnType<typeof deployTokens>;
/**
 * Deploys the tokens service (`@hive/tokens`), backed by PostgreSQL.
 * Depends on db migrations having completed.
 */
export function deployTokens({
  deploymentEnv,
  dbMigrations,
  storageContainer,
  packageHelper,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  deploymentEnv: DeploymentEnvironment;
  dbMigrations: DbMigrations;
}) {
  return new RemoteArtifactAsServiceDeployment(
    'tokens-service',
    {
      storageContainer,
      env: {
        ...deploymentEnv,
        ...commonEnv,
        POSTGRES_CONNECTION_STRING: apiConfig.requireSecret(
          'postgresConnectionString'
        ),
        RELEASE: packageHelper.currentReleaseId(),
      },
      readinessProbe: '/_readiness',
      livenessProbe: '/_health',
      exposesMetrics: true,
      packageInfo: packageHelper.npmPack('@hive/tokens'),
    },
    [dbMigrations]
  ).deploy();
}

View file

@ -0,0 +1,54 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { PackageHelper } from '../utils/pack';
import { DeploymentEnvironment } from '../types';
import { Clickhouse } from './clickhouse';
import { DbMigrations } from './db-migrations';
const commonConfig = new pulumi.Config('common');
const commonEnv = commonConfig.requireObject<Record<string, string>>('env');
const apiConfig = new pulumi.Config('api');
export type UsageEstimator = ReturnType<typeof deployUsageEstimation>;
/**
 * Deploys the usage-estimator service (`@hive/usage-estimator`) on port
 * 4000. Reads operation counts from ClickHouse and org data from PostgreSQL.
 */
export function deployUsageEstimation({
  storageContainer,
  packageHelper,
  deploymentEnv,
  clickhouse,
  dbMigrations,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  deploymentEnv: DeploymentEnvironment;
  clickhouse: Clickhouse;
  dbMigrations: DbMigrations;
}) {
  return new RemoteArtifactAsServiceDeployment(
    'usage-estimator',
    {
      storageContainer,
      replicas: 1,
      readinessProbe: '/_readiness',
      livenessProbe: '/_health',
      env: {
        ...deploymentEnv,
        ...commonEnv,
        CLICKHOUSE_PROTOCOL: clickhouse.config.protocol,
        CLICKHOUSE_HOST: clickhouse.config.host,
        CLICKHOUSE_PORT: clickhouse.config.port,
        CLICKHOUSE_USERNAME: clickhouse.config.username,
        CLICKHOUSE_PASSWORD: clickhouse.config.password,
        RELEASE: packageHelper.currentReleaseId(),
        POSTGRES_CONNECTION_STRING: apiConfig.requireSecret(
          'postgresConnectionString'
        ),
      },
      exposesMetrics: true,
      packageInfo: packageHelper.npmPack('@hive/usage-estimator'),
      port: 4000,
    },
    [dbMigrations]
  ).deploy();
}

View file

@ -0,0 +1,65 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { DbMigrations } from './db-migrations';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { PackageHelper } from '../utils/pack';
import { DeploymentEnvironment } from '../types';
import { Clickhouse } from './clickhouse';
import { Kafka } from './kafka';
import { isProduction } from '../utils/helpers';
const commonConfig = new pulumi.Config('common');
const commonEnv = commonConfig.requireObject<Record<string, string>>('env');
export type UsageIngestor = ReturnType<typeof deployUsageIngestor>;
/**
 * Deploys the usage-ingestor service (`@hive/usage-ingestor`): consumes
 * usage reports from Kafka and writes them into ClickHouse.
 */
export function deployUsageIngestor({
  storageContainer,
  packageHelper,
  deploymentEnv,
  clickhouse,
  kafka,
  dbMigrations,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  deploymentEnv: DeploymentEnvironment;
  clickhouse: Clickhouse;
  kafka: Kafka;
  dbMigrations: DbMigrations;
}) {
  // Presumably the partition count of the consumed topic — TODO confirm.
  const numberOfPartitions = 4;
  const replicas = isProduction(deploymentEnv) ? 2 : 1;
  // Split partitions across replicas. Clamp to >= 1 so a replica count
  // larger than the partition count can never floor to a concurrency of 0.
  const partitionsConsumedConcurrently = Math.max(
    1,
    Math.floor(numberOfPartitions / replicas)
  );
  return new RemoteArtifactAsServiceDeployment(
    'usage-ingestor-service',
    {
      storageContainer,
      replicas,
      readinessProbe: '/_readiness',
      livenessProbe: '/_health',
      env: {
        ...deploymentEnv,
        ...commonEnv,
        KAFKA_CONNECTION_MODE: 'hosted',
        KAFKA_KEY: kafka.config.key,
        KAFKA_USER: kafka.config.user,
        KAFKA_BROKER: kafka.config.endpoint,
        KAFKA_CONCURRENCY: `${partitionsConsumedConcurrently}`,
        CLICKHOUSE_PROTOCOL: clickhouse.config.protocol,
        CLICKHOUSE_HOST: clickhouse.config.host,
        CLICKHOUSE_PORT: clickhouse.config.port,
        CLICKHOUSE_USERNAME: clickhouse.config.username,
        CLICKHOUSE_PASSWORD: clickhouse.config.password,
        RELEASE: packageHelper.currentReleaseId(),
      },
      exposesMetrics: true,
      packageInfo: packageHelper.npmPack('@hive/usage-ingestor'),
      port: 4000,
    },
    [clickhouse.deployment, clickhouse.service, dbMigrations]
  ).deploy();
}

View file

@ -0,0 +1,67 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { Tokens } from './tokens';
import { DbMigrations } from './db-migrations';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { PackageHelper } from '../utils/pack';
import { serviceLocalEndpoint } from '../utils/local-endpoint';
import { DeploymentEnvironment } from '../types';
import { Kafka } from './kafka';
import { RateLimitService } from './rate-limit';
const commonConfig = new pulumi.Config('common');
const commonEnv = commonConfig.requireObject<Record<string, string>>('env');
export type Usage = ReturnType<typeof deployUsage>;
/**
 * Deploys the usage-collection service (`@hive/usage`) on port 4000.
 * Validates tokens/limits via the tokens and rate-limit services and
 * publishes incoming usage reports to Kafka.
 */
export function deployUsage({
  storageContainer,
  packageHelper,
  deploymentEnv,
  tokens,
  kafka,
  dbMigrations,
  rateLimit,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  deploymentEnv: DeploymentEnvironment;
  tokens: Tokens;
  kafka: Kafka;
  dbMigrations: DbMigrations;
  rateLimit: RateLimitService;
}) {
  return new RemoteArtifactAsServiceDeployment(
    'usage-service',
    {
      storageContainer,
      replicas: 1,
      readinessProbe: '/_readiness',
      livenessProbe: '/_health',
      env: {
        ...deploymentEnv,
        ...commonEnv,
        KAFKA_CONNECTION_MODE: 'hosted',
        KAFKA_KEY: kafka.config.key,
        KAFKA_USER: kafka.config.user,
        KAFKA_BROKER: kafka.config.endpoint,
        // Producer buffering knobs, forwarded from the 'eventhub' config.
        KAFKA_BUFFER_SIZE: kafka.config.bufferSize,
        KAFKA_BUFFER_INTERVAL: kafka.config.bufferInterval,
        KAFKA_BUFFER_DYNAMIC: kafka.config.bufferDynamic,
        RELEASE: packageHelper.currentReleaseId(),
        TOKENS_ENDPOINT: serviceLocalEndpoint(tokens.service),
        RATE_LIMIT_ENDPOINT: serviceLocalEndpoint(rateLimit.service),
      },
      exposesMetrics: true,
      packageInfo: packageHelper.npmPack('@hive/usage'),
      port: 4000,
    },
    [
      dbMigrations,
      tokens.deployment,
      tokens.service,
      rateLimit.deployment,
      rateLimit.service,
    ]
  ).deploy();
}

View file

@ -0,0 +1,45 @@
import * as pulumi from '@pulumi/pulumi';
import * as azure from '@pulumi/azure';
import { RemoteArtifactAsServiceDeployment } from '../utils/remote-artifact-as-service';
import { DeploymentEnvironment } from '../types';
import { Redis } from './redis';
import { PackageHelper } from '../utils/pack';
const commonConfig = new pulumi.Config('common');
const commonEnv = commonConfig.requireObject<Record<string, string>>('env');
export type Webhooks = ReturnType<typeof deployWebhooks>;
/**
 * Deploys the webhooks service (`@hive/webhooks`), which queues outgoing
 * webhook deliveries in Redis (BullMQ).
 */
export function deployWebhooks({
  storageContainer,
  packageHelper,
  deploymentEnv,
  redis,
}: {
  storageContainer: azure.storage.Container;
  packageHelper: PackageHelper;
  deploymentEnv: DeploymentEnvironment;
  redis: Redis;
}) {
  return new RemoteArtifactAsServiceDeployment(
    'webhooks-service',
    {
      storageContainer,
      env: {
        ...deploymentEnv,
        ...commonEnv,
        RELEASE: packageHelper.currentReleaseId(),
        REDIS_HOST: redis.config.host,
        REDIS_PORT: String(redis.config.port),
        REDIS_PASSWORD: redis.config.password,
        BULLMQ_COMMANDS_FROM_ROOT: 'true',
      },
      readinessProbe: '/_readiness',
      livenessProbe: '/_health',
      exposesMetrics: true,
      packageInfo: packageHelper.npmPack('@hive/webhooks'),
      replicas: 1,
    },
    [redis.deployment, redis.service]
  ).deploy();
}

16
deployment/tsconfig.json Normal file
View file

@ -0,0 +1,16 @@
{
"compilerOptions": {
"strict": true,
"outDir": "bin",
"target": "es2016",
"module": "commonjs",
"moduleResolution": "node",
"sourceMap": true,
"experimentalDecorators": true,
"pretty": true,
"noFallthroughCasesInSwitch": true,
"noImplicitReturns": true,
"forceConsistentCasingInFileNames": true
},
"files": ["index.ts"]
}

13
deployment/types.ts Normal file
View file

@ -0,0 +1,13 @@
import * as pulumi from '@pulumi/pulumi';
/**
 * Environment variables shared by every deployed service
 * (spread into each service's `env`).
 */
export interface DeploymentEnvironment {
  ENVIRONMENT: string;
  NODE_ENV: string;
  DEPLOYED_DNS: string;
}
/** Package-registry coordinates and credentials. */
export interface RegistryConfig {
  registry: string;
  // Output<string> because the token comes from a Pulumi secret.
  registryToken: pulumi.Output<string>;
  registryScope: string;
}

View file

@ -0,0 +1,193 @@
import * as pulumi from '@pulumi/pulumi';
import * as resources from '@pulumi/azure-native/resources';
import * as storage from '@pulumi/azure-native/storage';
import * as web from '@pulumi/azure-native/web';
import { tmpdir } from 'os';
import {
mkdtempSync,
copyFileSync,
writeFileSync,
mkdirSync,
readFileSync,
} from 'fs';
import { join } from 'path';
import { createHash } from 'crypto';
/**
 * Materializes a minimal Azure Functions app layout in a fresh temp dir:
 *
 *   <tmp>/host.json
 *   <tmp>/<name>/index.js        (copied from `functionFile`)
 *   <tmp>/<name>/function.json   (serialized `functionDefinition`)
 *
 * Returns the directory plus a checksum of code + bindings, used to force
 * a redeploy whenever either changes.
 */
function createFunctionFolder({
  name,
  functionDefinition,
  functionFile,
}: {
  name: string;
  functionDefinition: Record<string, any>;
  functionFile: string;
}) {
  const appDir = mkdtempSync(
    join(tmpdir(), Math.random().toString(16).slice(2))
  );
  const functionDir = join(appDir, name);
  mkdirSync(functionDir);

  const hostJson = JSON.stringify({ version: '2.0' }, null, 2);
  writeFileSync(join(appDir, 'host.json'), hostJson);
  writeFileSync(
    join(functionDir, 'function.json'),
    JSON.stringify(functionDefinition, null, 2)
  );
  copyFileSync(functionFile, join(functionDir, 'index.js'));

  // Hash the source first, then the bindings — order matters for stability.
  const checksum = createHash('sha256')
    .update(readFileSync(functionFile, 'utf-8'))
    .update(JSON.stringify(functionDefinition))
    .digest('hex');

  return { checksum, dir: appDir };
}
/**
 * Deploys a single-function Azure Functions app (Node.js, consumption plan).
 *
 * Provisions: resource group -> storage account -> blob container ->
 * zipped function code blob -> dynamic ("Y1") app service plan -> web app
 * configured to run the package directly from the blob's SAS URL.
 */
export class AzureFunction {
  constructor(
    private config: {
      name: string;
      envName: string;
      // Path to the compiled function entry point (copied as index.js).
      functionFile: string;
      // Contents of function.json (bindings/trigger definition).
      functionDefinition: Record<string, any>;
      env: Record<string, string>;
    }
  ) {}

  deployAsJob() {
    const resourceGroup = new resources.ResourceGroup(
      `hive-${this.config.envName}-fn-rg`
    );
    const storageAccount = new storage.StorageAccount(
      `hive${this.config.envName}fn`,
      {
        resourceGroupName: resourceGroup.name,
        sku: {
          name: storage.SkuName.Standard_LRS,
        },
        kind: storage.Kind.StorageV2,
      }
    );
    const codeContainer = new storage.BlobContainer('functions', {
      resourceGroupName: resourceGroup.name,
      accountName: storageAccount.name,
    });
    // Build the on-disk app layout (host.json + <name>/index.js + function.json).
    const { dir, checksum } = createFunctionFolder({
      name: this.config.name,
      functionDefinition: this.config.functionDefinition,
      functionFile: this.config.functionFile,
    });
    const codeBlob = new storage.Blob(this.config.name, {
      resourceGroupName: resourceGroup.name,
      accountName: storageAccount.name,
      containerName: codeContainer.name,
      source: new pulumi.asset.FileArchive(dir),
    });
    // Y1/Dynamic = serverless consumption plan.
    const plan = new web.AppServicePlan('plan', {
      resourceGroupName: resourceGroup.name,
      sku: {
        name: 'Y1',
        tier: 'Dynamic',
      },
    });
    const storageConnectionString = getConnectionString(
      resourceGroup.name,
      storageAccount.name
    );
    const codeBlobUrl = signedBlobReadUrl(
      codeBlob,
      codeContainer,
      storageAccount,
      resourceGroup
    );
    const app = new web.WebApp(
      `${this.config.name}-${this.config.envName}-fn`,
      {
        resourceGroupName: resourceGroup.name,
        serverFarmId: plan.id,
        kind: 'functionapp',
        siteConfig: {
          appSettings: [
            { name: 'AzureWebJobsStorage', value: storageConnectionString },
            { name: 'FUNCTIONS_EXTENSION_VERSION', value: '~3' },
            { name: 'FUNCTIONS_WORKER_RUNTIME', value: 'node' },
            { name: 'WEBSITE_NODE_DEFAULT_VERSION', value: '~16' },
            // Run the app directly from the uploaded zip blob.
            { name: 'WEBSITE_RUN_FROM_PACKAGE', value: codeBlobUrl },
            {
              // Changing code/bindings changes this value, forcing an update.
              name: 'FUNCTION_CHECKSUM',
              value: checksum,
            },
            ...Object.entries(this.config.env).map(([name, value]) => ({
              name,
              value,
            })),
          ],
          http20Enabled: true,
          nodeVersion: '~16',
        },
      },
      {
        additionalSecretOutputs: [],
      }
    );

    return {
      // Public HTTPS endpoint of the deployed function.
      endpoint: pulumi.interpolate`https://${app.defaultHostName}/api/index`,
    };
  }
}
/**
 * Builds an Azure Storage connection string for the given account using
 * its primary access key.
 */
function getConnectionString(
  resourceGroupName: pulumi.Input<string>,
  accountName: pulumi.Input<string>
): pulumi.Output<string> {
  const accountKeys = storage.listStorageAccountKeysOutput({
    resourceGroupName,
    accountName,
  });
  // keys[0] is the primary key.
  const primaryKey = accountKeys.keys[0].value;

  return pulumi.interpolate`DefaultEndpointsProtocol=https;AccountName=${accountName};AccountKey=${primaryKey}`;
}
/**
 * Produces a read-only SAS URL for a single blob, suitable for
 * WEBSITE_RUN_FROM_PACKAGE.
 *
 * NOTE(review): the SAS validity window is hard-coded (2021-01-01 to
 * 2030-01-01) — deployments relying on this URL will break after expiry.
 */
function signedBlobReadUrl(
  blob: storage.Blob,
  container: storage.BlobContainer,
  account: storage.StorageAccount,
  resourceGroup: resources.ResourceGroup
): pulumi.Output<string> {
  const blobSAS = storage.listStorageAccountServiceSASOutput({
    accountName: account.name,
    protocols: storage.HttpProtocol.Https,
    sharedAccessExpiryTime: '2030-01-01',
    sharedAccessStartTime: '2021-01-01',
    resourceGroupName: resourceGroup.name,
    // Scope: container ('c'), read-only ('r') permission.
    resource: storage.SignedResource.C,
    permissions: storage.Permissions.R,
    canonicalizedResource: pulumi.interpolate`/blob/${account.name}/${container.name}`,
    contentType: 'application/json',
    cacheControl: 'max-age=5',
    contentDisposition: 'inline',
    contentEncoding: 'deflate',
  });
  return pulumi.interpolate`https://${account.name}.blob.core.windows.net/${container.name}/${blob.name}?${blobSAS.serviceSasToken}`;
}

View file

@ -0,0 +1,32 @@
import * as k8s from '@pulumi/kubernetes';
/**
 * Disables the Azure Monitor log-collection features on the cluster.
 * We are not using them, and collecting them costs money/resources.
 */
export function optimizeAzureCluster() {
  // The agent reads this well-known ConfigMap (name and namespace are
  // fixed by Azure) to decide what to collect.
  new k8s.core.v1.ConfigMap('optimize-azure-cluster', {
    metadata: {
      name: 'container-azm-ms-agentconfig',
      namespace: 'kube-system',
    },
    data: {
      'schema-version': 'v1',
      'config-version': 'v1',
      'log-data-collection-settings': `
    [log_collection_settings]
       [log_collection_settings.stdout]
          enabled = false
       [log_collection_settings.stderr]
          enabled = false
       [log_collection_settings.env_var]
          enabled = false
       [log_collection_settings.enrich_container_logs]
          enabled = false
       [log_collection_settings.collect_all_kube_events]
          enabled = false
    `,
    },
  });
}

View file

@ -0,0 +1,93 @@
import * as k8s from '@pulumi/kubernetes';
import { Output } from '@pulumi/pulumi';
/**
 * Deploys BotKube (Slack notifications for Kubernetes events) via its Helm
 * chart, watching deployments and pods in the default and ingress-nginx
 * namespaces. Optionally exposes read-only kubectl commands in Slack.
 */
export class BotKube {
  deploy(config: {
    slackChannelName: string;
    slackToken: Output<string>;
    clusterName: string;
    // When true, the Slack bot can run the whitelisted kubectl commands below.
    enableKubectl: boolean;
  }) {
    const ns = new k8s.core.v1.Namespace('botkube', {
      metadata: {
        name: 'botkube',
      },
    });

    new k8s.helm.v3.Chart(
      'botkube',
      {
        chart: 'botkube',
        version: '0.12.4',
        namespace: ns.metadata.name,
        fetchOpts: {
          repo: 'https://infracloudio.github.io/charts',
        },
        values: {
          communications: {
            slack: {
              enabled: true,
              channel: config.slackChannelName,
              token: config.slackToken,
              notiftype: 'short',
            },
          },
          config: {
            // Resources/namespaces to watch and report on.
            resources: [
              {
                name: 'apps/v1/deployments',
                namespaces: {
                  include: ['default', 'ingress-nginx'],
                },
                events: ['all'],
              },
              {
                name: 'v1/pods',
                namespaces: {
                  include: ['default', 'ingress-nginx'],
                },
                events: ['all'],
              },
            ],
            recommendations: true,
            settings: {
              clustername: config.clusterName,
              kubectl: {
                defaultNamespace: 'default',
                restrictAccess: 'true',
                enabled: String(config.enableKubectl),
                // Whitelist of verbs/resources usable from Slack.
                commands: {
                  verbs: [
                    'cluster-info',
                    'describe',
                    'get',
                    'logs',
                    'top',
                    'restart',
                  ],
                  resources: [
                    'deployments',
                    'pods',
                    'namespaces',
                    'services',
                    'daemonsets',
                    'httpproxy',
                    'statefulsets',
                    'nodes',
                  ],
                },
              },
            },
          },
          image: {
            repository: 'infracloudio/botkube',
            tag: 'v0.12.4',
          },
        },
      },
      {
        dependsOn: [ns],
      }
    );
  }
}

57
deployment/utils/cdn.ts Normal file
View file

@ -0,0 +1,57 @@
import * as cf from '@pulumi/cloudflare';
import * as pulumi from '@pulumi/pulumi';
import { readFileSync } from 'fs';
import { resolve } from 'path';
/**
 * Deploys the Hive CDN as a Cloudflare Worker backed by a Workers KV
 * namespace, routed on `<cdnDnsRecord>/*` in the given zone.
 */
export class CloudflareCDN {
  constructor(
    private envName: string,
    private zoneId: string,
    private cdnDnsRecord: string,
    // HMAC private key the worker uses to validate CDN access tokens.
    private authPrivateKey: pulumi.Output<string>
  ) {}

  deploy() {
    const kvStorage = new cf.WorkersKvNamespace('hive-ha-storage', {
      title: `hive-ha-cdn-${this.envName}`,
    });

    // The worker script is bundled ahead of time by the cdn-worker package.
    const script = new cf.WorkerScript('hive-ha-worker', {
      content: readFileSync(
        resolve(__dirname, '../../packages/services/cdn-worker/dist/worker.js'),
        'utf-8'
      ),
      name: `hive-storage-cdn-${this.envName}`,
      kvNamespaceBindings: [
        {
          // HIVE_DATA is in use in cdn-script.js as well, its the name of the global variable
          name: 'HIVE_DATA',
          namespaceId: kvStorage.id,
        },
      ],
      secretTextBindings: [
        {
          // KEY_DATA is in use in cdn-script.js as well, its the name of the global variable,
          // basically it's the private key for the hmac key.
          name: 'KEY_DATA',
          text: this.authPrivateKey,
        },
      ],
    });

    const workerBase = this.cdnDnsRecord;
    const workerUrl = `https://${workerBase}`;

    // Route all requests under the CDN hostname to the worker.
    new cf.WorkerRoute('cf-hive-worker', {
      scriptName: script.name,
      pattern: `${workerBase}/*`,
      zoneId: this.zoneId,
    });

    return {
      authPrivateKey: this.authPrivateKey,
      workerBaseUrl: workerUrl,
      cfStorageNamespaceId: kvStorage.id,
    };
  }
}

View file

@ -0,0 +1,47 @@
import * as k8s from '@pulumi/kubernetes';
/**
 * Installs cert-manager (v1.8.0, from the upstream release manifest) and a
 * Let's Encrypt production ClusterIssuer using HTTP-01 challenges through
 * the 'contour' ingress class. Returns the issuer name for TLS annotations.
 */
export class CertManager {
  public deployCertManagerAndIssuer() {
    const certManager = new k8s.yaml.ConfigFile('cert-manager', {
      file: 'https://github.com/jetstack/cert-manager/releases/download/v1.8.0/cert-manager.yaml',
    });

    const issuerName = 'letsencrypt-prod';
    new k8s.apiextensions.CustomResource(
      'cert-manager-issuer',
      {
        apiVersion: 'cert-manager.io/v1',
        kind: 'ClusterIssuer',
        metadata: {
          name: issuerName,
        },
        spec: {
          acme: {
            server: 'https://acme-v02.api.letsencrypt.org/directory',
            email: 'contact@the-guild.dev',
            privateKeySecretRef: {
              name: issuerName,
            },
            solvers: [
              {
                http01: {
                  ingress: {
                    class: 'contour',
                  },
                },
              },
            ],
          },
        },
      },
      {
        // The ClusterIssuer CRD must exist before the resource is created.
        dependsOn: [certManager],
      }
    );

    return {
      tlsIssueName: issuerName,
    };
  }
}

View file

@ -0,0 +1,114 @@
import * as kx from '@pulumi/kubernetesx';
import * as k8s from '@pulumi/kubernetes';
import { PodBuilder } from './pod-builder';
/**
 * Deploys a single-replica ClickHouse server (HTTP interface on port 8123)
 * with a custom config mounted from a ConfigMap (async inserts + Sentry
 * crash reporting).
 */
export class Clickhouse {
  constructor(
    protected name: string,
    protected options: {
      // Optional extra container env, as a list or a name->value map.
      env?: kx.types.Container['env'];
      sentryDsn: string;
    }
  ) {}

  deploy() {
    const image = 'clickhouse/clickhouse-server:22.3.3.44-alpine';
    const port = 8123;

    // Normalize `env` to the list form the container spec expects.
    // `env` is optional: default the map form to {} so an omitted value
    // doesn't crash Object.keys().
    const env: any[] = Array.isArray(this.options.env)
      ? this.options.env
      : Object.keys((this.options.env as kx.types.EnvMap) ?? {}).map(
          (name) => ({
            name,
            value: (this.options.env as kx.types.EnvMap)[name],
          })
        );

    const cm = new kx.ConfigMap('clickhouse-config', {
      data: {
        'config.xml': createConfig({
          sentryDsn: this.options.sentryDsn,
        }),
      },
    });

    const pb = new PodBuilder({
      restartPolicy: 'Always',
      containers: [
        {
          name: this.name,
          image,
          env,
          // conf.d overlays the server's default configuration.
          volumeMounts: [cm.mount('/etc/clickhouse-server/conf.d')],
          ports: {
            http: port,
          },
          readinessProbe: {
            initialDelaySeconds: 5,
            periodSeconds: 20,
            failureThreshold: 5,
            timeoutSeconds: 5,
            httpGet: {
              path: '/ping',
              port,
            },
          },
          livenessProbe: {
            initialDelaySeconds: 3,
            periodSeconds: 20,
            failureThreshold: 10,
            timeoutSeconds: 5,
            httpGet: {
              path: '/ping',
              port,
            },
          },
        },
      ],
    });

    const metadata: k8s.types.input.meta.v1.ObjectMeta = {
      annotations: {},
    };

    const deployment = new kx.Deployment(this.name, {
      spec: pb.asExtendedDeploymentSpec(
        {
          replicas: 1,
          strategy: {
            type: 'RollingUpdate',
          },
        },
        {
          annotations: metadata.annotations,
        }
      ),
    });

    const service = deployment.createService({});

    return { deployment, service, port };
  }
}
/**
 * Renders the ClickHouse server XML drop-in: listens on all interfaces,
 * reports crashes to the given Sentry DSN, and enables asynchronous
 * inserts with tuned batching thresholds.
 */
function createConfig({ sentryDsn }: { sentryDsn: string }): string {
  return `<yandex>
<listen_host>::</listen_host>
<interserver_http_host>0.0.0.0</interserver_http_host>
<send_crash_reports>
<enabled>true</enabled>
<anonymize>false</anonymize>
<endpoint>${sentryDsn}</endpoint>
</send_crash_reports>
<profiles>
<default>
<!-- Data is inserted after max_data_size is exceeded or after busy_timeout_ms after first INSERT query -->
<async_insert>1</async_insert>
<!-- The maximum number of threads for background data parsing and insertion. Default is 16 -->
<async_insert_threads>16</async_insert_threads>
<!-- The maximum size of the unparsed data in bytes collected per query before being inserted. -->
<async_insert_max_data_size>5000000</async_insert_max_data_size>
<!-- The maximum timeout in milliseconds since the first INSERT query before inserting collected data. -->
<async_insert_busy_timeout_ms>1000</async_insert_busy_timeout_ms>
<!-- will return OK even if the data wasn't inserted yet -->
<wait_for_async_insert>0</wait_for_async_insert>
</default>
</profiles>
</yandex>
`;
}

View file

@ -0,0 +1,29 @@
import { DeploymentEnvironment } from '../types';
export function isProduction(
deploymentEnv: DeploymentEnvironment | string
): boolean {
return !isStaging(deploymentEnv);
}
/** True when the env object (or raw string) names the staging environment. */
export function isStaging(
  deploymentEnv: DeploymentEnvironment | string
): boolean {
  const environment = isDeploymentEnvironment(deploymentEnv)
    ? deploymentEnv.ENVIRONMENT
    : deploymentEnv;
  return environment === 'staging';
}
/**
 * Type guard: narrows to DeploymentEnvironment when the value is a
 * non-null object carrying a string `ENVIRONMENT` field.
 *
 * Fixed to always return a real boolean — the previous `value && …`
 * chain leaked the falsy input itself (e.g. `null`) despite the
 * declared boolean predicate.
 */
export function isDeploymentEnvironment(
  value: any
): value is DeploymentEnvironment {
  return (
    typeof value === 'object' &&
    value !== null &&
    typeof value['ENVIRONMENT'] === 'string'
  );
}
/** Narrowing helper that filters out both null and undefined. */
export function isDefined<T>(value: T | null | undefined): value is T {
  // Loose != null intentionally matches both null and undefined.
  return value != null;
}

View file

@ -0,0 +1,33 @@
import * as k8s from '@pulumi/kubernetes';
import * as pulumi from '@pulumi/pulumi';
/**
 * Builds the cluster-internal http URL of a Service, appending the
 * first declared port when one exists.
 */
export function serviceLocalEndpoint(service: k8s.types.input.core.v1.Service) {
  return pulumi
    .all([service.metadata, service.spec])
    .apply(([metadata, spec]) => {
      const firstPort = (spec.ports || [])[0];
      const suffix = firstPort ? `:${firstPort.port}` : '';
      const namespace = metadata.namespace || 'default';
      return `http://${metadata.name}.${namespace}.svc.cluster.local${suffix}`;
    });
}
/** Cluster-internal DNS name of a Service (no scheme, no port). */
export function serviceLocalHost(service: k8s.types.input.core.v1.Service) {
  return pulumi.all([service.metadata]).apply(([metadata]) => {
    const namespace = metadata.namespace || 'default';
    return `${metadata.name}.${namespace}.svc.cluster.local`;
  });
}
/** Cluster-internal Prometheus metrics address of a Service (port 10254). */
export function serviceLocalMetricsEndpoint(
  service: k8s.types.input.core.v1.Service
) {
  return pulumi.all([service.metadata]).apply(([metadata]) => {
    const namespace = metadata.namespace || 'default';
    return `${metadata.name}.${namespace}.svc.cluster.local:10254/metrics`;
  });
}

View file

@ -0,0 +1,363 @@
import * as k8s from '@pulumi/kubernetes';
import { Output, interpolate } from '@pulumi/pulumi';
// Credentials and endpoints for shipping telemetry to Grafana Cloud.
export type ObservabilityConfig = {
  // Loki (log) sink used by the Vector agent; basic-auth credentials.
  loki: {
    endpoint: Output<string> | string;
    username: Output<string> | string;
    password: Output<string>;
  };
  // Prometheus remote-write target used by the otel-collector.
  prom: {
    endpoint: Output<string> | string;
    username: Output<string> | string;
    password: Output<string>;
  };
};
/**
 * Cluster observability stack, deployed into its own `observability`
 * namespace:
 *
 *  - an OpenTelemetry Collector (Helm chart) that scrapes Prometheus
 *    metrics from annotated Pods in the `default` namespace and
 *    remote-writes them to Grafana Cloud, and
 *  - a Vector agent (Helm chart) that tails Pod logs and ships them
 *    to Grafana Loki with basic-auth.
 */
export class Observability {
  constructor(private envName: string, private config: ObservabilityConfig) {}

  deploy() {
    const ns = new k8s.core.v1.Namespace('observability', {
      metadata: {
        name: 'observability',
      },
    });

    // We are using otel-collector to scrape metrics from Pods.
    // dotansimha: once Vector supports scraping K8s metrics based on Prom, we can drop this.
    new k8s.helm.v3.Chart('metrics', {
      chart: 'opentelemetry-collector',
      namespace: ns.metadata.name,
      version: '0.16.1',
      fetchOpts: {
        repo: 'https://open-telemetry.github.io/opentelemetry-helm-charts',
      },
      // https://github.com/open-telemetry/opentelemetry-helm-charts/blob/main/charts/opentelemetry-collector/values.yaml
      values: {
        agentCollector: {
          enabled: false,
        },
        standaloneCollector: {
          enabled: true,
          resources: {
            limits: {
              cpu: '256m',
              memory: '512Mi',
            },
          },
        },
        // Read-only access to cluster metadata, required for Pod discovery.
        clusterRole: {
          create: true,
          rules: [
            {
              apiGroups: [''],
              resources: [
                'events',
                'namespaces',
                'namespaces/status',
                'nodes',
                'nodes/spec',
                'pods',
                'pods/metrics',
                'nodes/metrics',
                'pods/status',
                'replicationcontrollers',
                'replicationcontrollers/status',
                'resourcequotas',
                'services',
                'endpoints',
              ],
              verbs: ['get', 'list', 'watch'],
            },
            {
              apiGroups: ['apps'],
              resources: [
                'daemonsets',
                'deployments',
                'replicasets',
                'statefulsets',
              ],
              verbs: ['get', 'list', 'watch'],
            },
            {
              apiGroups: ['extensions'],
              resources: ['daemonsets', 'deployments', 'replicasets'],
              verbs: ['get', 'list', 'watch'],
            },
            {
              apiGroups: ['batch'],
              resources: ['jobs', 'cronjobs'],
              verbs: ['get', 'list', 'watch'],
            },
            {
              apiGroups: ['autoscaling'],
              resources: ['horizontalpodautoscalers'],
              verbs: ['get', 'list', 'watch'],
            },
          ],
        },
        config: {
          exporters: {
            logging: {
              loglevel: 'info',
            },
            // Push everything to Grafana Cloud's Prometheus endpoint.
            prometheusremotewrite: {
              endpoint: interpolate`https://${this.config.prom.username}:${this.config.prom.password}@${this.config.prom.endpoint}`,
            },
          },
          extensions: {
            health_check: {},
          },
          processors: {
            batch: {},
            memory_limiter: {
              check_interval: '5s',
              limit_mib: 409,
              spike_limit_mib: 128,
            },
          },
          receivers: {
            prometheus: {
              config: {
                global: {
                  evaluation_interval: '10s',
                  scrape_interval: '30s',
                  scrape_timeout: '10s',
                },
                scrape_configs: [
                  {
                    honor_labels: true,
                    honor_timestamps: true,
                    job_name: 'service-metrics',
                    kubernetes_sd_configs: [
                      {
                        role: 'pod',
                        namespaces: {
                          names: ['default'],
                        },
                      },
                    ],
                    metrics_path: '/metrics',
                    relabel_configs: [
                      // Only scrape the container port named "metrics"…
                      {
                        source_labels: [
                          '__meta_kubernetes_pod_container_port_name',
                        ],
                        action: 'keep',
                        regex: 'metrics',
                      },
                      // …on Pods that opted in via the standard annotation.
                      {
                        source_labels: [
                          '__meta_kubernetes_pod_annotation_prometheus_io_scrape',
                        ],
                        action: 'keep',
                        regex: true,
                      },
                      {
                        source_labels: [
                          '__meta_kubernetes_pod_annotation_prometheus_io_scheme',
                        ],
                        action: 'replace',
                        target_label: '__scheme__',
                        regex: '(https?)',
                      },
                      {
                        source_labels: [
                          '__meta_kubernetes_pod_annotation_prometheus_io_path',
                        ],
                        action: 'replace',
                        target_label: '__metrics_path__',
                        regex: '(.+)',
                      },
                      // Rewrite the scrape address to the annotated port.
                      // Bugfix: the '\d' escapes were previously lost in the
                      // string literal ('d+'), so this rule never matched.
                      {
                        source_labels: [
                          '__address__',
                          '__meta_kubernetes_pod_annotation_prometheus_io_port',
                        ],
                        action: 'replace',
                        regex: '([^:]+)(?::\\d+)?;(\\d+)',
                        replacement: '$1:$2',
                        target_label: '__address__',
                      },
                      {
                        action: 'labelmap',
                        regex: '__meta_kubernetes_service_label_(.+)',
                      },
                      {
                        action: 'replace',
                        source_labels: ['__meta_kubernetes_namespace'],
                        target_label: 'namespace',
                      },
                      {
                        action: 'replace',
                        source_labels: ['__meta_kubernetes_service_name'],
                        target_label: 'service',
                      },
                      {
                        action: 'replace',
                        source_labels: ['__meta_kubernetes_pod_name'],
                        target_label: 'pod',
                      },
                      {
                        action: 'replace',
                        source_labels: ['__meta_kubernetes_pod_node_name'],
                        target_label: 'kubernetes_node',
                      },
                    ],
                    scheme: 'http',
                  },
                ],
              },
            },
          },
          service: {
            extensions: ['health_check'],
            pipelines: {
              metrics: {
                exporters: ['logging', 'prometheusremotewrite'],
                processors: ['memory_limiter', 'batch'],
                receivers: ['prometheus'],
              },
            },
          },
        },
      },
    });

    // We are using Vector to scrape logs from the K8s Pods, and send it to Grafana Cloud
    new k8s.helm.v3.Chart(
      'vector-logging',
      {
        chart: 'vector',
        version: '0.10.3',
        namespace: ns.metadata.name,
        fetchOpts: {
          repo: 'https://helm.vector.dev',
        },
        // https://vector.dev/docs/reference/configuration/
        values: {
          role: 'Agent',
          customConfig: {
            data_dir: '/vector-data-dir',
            api: {
              enabled: true,
              playground: false,
              address: '127.0.0.1:7676',
            },
            sources: {
              kubernetes_logs: {
                type: 'kubernetes_logs',
                extra_field_selector: 'metadata.namespace=default',
              },
            },
            sinks: {
              // enable if you need to debug the raw vector messages
              // stdout: {
              //   type: 'console',
              //   inputs: ['kubernetes_logs'],
              //   encoding: { codec: 'json' },
              // },
              grafana_lab: {
                type: 'loki',
                inputs: ['kubernetes_logs'],
                endpoint: interpolate`https://${this.config.loki.endpoint}`,
                auth: {
                  strategy: 'basic',
                  user: this.config.loki.username,
                  password: this.config.loki.password,
                },
                // Double braces are escaped for Helm templating; Vector
                // receives `{{ kubernetes.pod_namespace }}` verbatim.
                labels: {
                  namespace: '{{`{{ kubernetes.pod_namespace }}`}}',
                  container_name: '{{`{{ kubernetes.container_name }}`}}',
                  env: this.envName,
                },
                encoding: {
                  codec: 'text',
                },
              },
            },
          },
        },
      },
      {
        dependsOn: [ns],
      }
    );
  }
}

51
deployment/utils/pack.ts Normal file
View file

@ -0,0 +1,51 @@
import { resolve } from 'path';
import { getPackagesSync } from '@manypkg/get-packages';
import { execSync } from 'child_process';
/**
 * Workspace packaging helper: resolves a workspace package by name,
 * runs `npm pack` on its dist output, and returns the metadata needed
 * to deploy the resulting tarball.
 */
export function createPackageHelper(dir = resolve(process.cwd(), '../')) {
  const { packages } = getPackagesSync(dir);

  // Normalize command output into a single clean line.
  const singleLine = (raw: Buffer) =>
    raw.toString().trim().replace(/\r?\n|\r/g, '');

  const revision = singleLine(execSync('git rev-parse HEAD'));

  return {
    currentReleaseId: () => revision,
    npmPack(name: string): PackageInfo {
      const packageDir = packages.find((p) => p.packageJson.name === name)?.dir;

      if (!packageDir) {
        throw new Error(`Failed to find package "${name}" in workspace!`);
      }

      const distDir = resolve(packageDir, './dist/');
      // npm prints the produced tarball filename on stdout.
      const fileName = singleLine(
        execSync('npm pack --pack-destination ../', {
          cwd: distDir,
          stdio: ['ignore', 'pipe', 'ignore'],
        })
      );

      // TODO: maybe manypkg can give it to us?
      const segments = name.split('/');
      const packName = segments.length === 2 ? segments[1] : segments[0];
      const binName = packName.split('@')[0];

      return {
        runtime: 'node',
        name,
        file: resolve(packageDir, fileName),
        bin: binName,
      };
    },
  };
}
// Convenience alias for the object returned by createPackageHelper.
export type PackageHelper = ReturnType<typeof createPackageHelper>;

// Metadata describing one packed artifact, ready for deployment.
export type PackageInfo = {
  // Runtime that launches the artifact: 'node' (installed via yarn)
  // or 'rust' (a standalone binary fetched as-is).
  runtime: 'node' | 'rust';
  // Full (possibly scoped) package name, e.g. '@org/cli'.
  name: string;
  // Absolute path to the packed tarball on disk.
  file: string;
  // Executable / bin entry name used to start the service.
  bin: string;
};

View file

@ -0,0 +1,36 @@
import * as kx from '@pulumi/kubernetesx';
import * as k8s from '@pulumi/kubernetes';
import * as pulumi from '@pulumi/pulumi';
/**
 * Converts a kx env definition (either an EnvVar array or a
 * name -> value map) into a plain EnvVar array.
 *
 * Also accepts `undefined`/`null` — several callers pass an optional
 * env straight through — returning an empty array instead of crashing
 * in Object.keys(undefined).
 */
export function normalizeEnv(env: kx.types.Container['env']): any[] {
  if (env == null) {
    return [];
  }
  if (Array.isArray(env)) {
    return env;
  }
  const map = env as kx.types.EnvMap;
  return Object.keys(map).map((name) => ({
    name,
    value: map[name],
  }));
}
/**
 * kx.PodBuilder extension that can emit a DeploymentSpec carrying
 * custom pod-template metadata (kx's stock asDeploymentSpec does not
 * expose it).
 */
export class PodBuilder extends kx.PodBuilder {
  public asExtendedDeploymentSpec(
    args?: kx.types.PodBuilderDeploymentSpec,
    metadata?: k8s.types.input.meta.v1.ObjectMeta
  ): pulumi.Output<k8s.types.input.apps.v1.DeploymentSpec> {
    // The first container's name doubles as the `app` selector label.
    const firstContainerName = this.podSpec.containers.apply(
      (containers: any) => {
        return pulumi.output(containers[0].name);
      }
    );
    const appLabels = { app: firstContainerName };
    const baseArgs = args || {};

    const spec: k8s.types.input.apps.v1.DeploymentSpec = {
      ...baseArgs,
      selector: { matchLabels: appLabels },
      replicas: baseArgs.replicas ?? 1,
      template: {
        metadata: { labels: appLabels, ...(metadata || {}) },
        spec: this.podSpec,
      },
    };
    return pulumi.output(spec);
  }
}

View file

@ -0,0 +1,68 @@
import * as cf from '@pulumi/cloudflare';
import * as pulumi from '@pulumi/pulumi';
import { readFileSync } from 'fs';
import { resolve } from 'path';
/**
 * Deploys the "police" Cloudflare Worker: a cron-scheduled script that
 * maintains WAF rules for the Hive hostnames, backed by a Workers KV
 * namespace and fed its settings through secret bindings.
 */
export class HivePolice {
  constructor(
    private envName: string,
    private zoneId: string,
    private cfToken: pulumi.Output<string>,
    private rootDns: string
  ) {}

  deploy() {
    const kv = new cf.WorkersKvNamespace('hive-police-kv', {
      title: `hive-police-${this.envName}`,
    });

    // Pre-built worker bundle produced by the police-worker package.
    const workerSource = readFileSync(
      resolve(
        __dirname,
        '../../packages/services/police-worker/dist/worker.js'
      ),
      'utf-8'
    );

    const script = new cf.WorkerScript('hive-police-worker', {
      content: workerSource,
      name: `hive-police-${this.envName}`,
      kvNamespaceBindings: [
        {
          // HIVE_POLICE is in use in police-script js as well, its the name of the global variable
          name: 'HIVE_POLICE',
          namespaceId: kv.id,
        },
      ],
      secretTextBindings: [
        {
          name: 'CF_BEARER_TOKEN',
          text: this.cfToken,
        },
        {
          name: 'ZONE_IDENTIFIER',
          text: this.zoneId,
        },
        {
          name: 'HOSTNAMES',
          text: `${this.rootDns},app.${this.rootDns},cdn.${this.rootDns}`,
        },
        {
          name: 'WAF_RULE_NAME',
          text: `hive-police-rule-${this.envName}`,
        },
      ],
    });

    new cf.WorkerCronTrigger('cf-police-trigger', {
      scriptName: script.name,
      // https://developers.cloudflare.com/workers/platform/cron-triggers/#examples
      schedules: [
        '*/10 * * * *', // every 10 minutes
      ],
    });

    return {
      cfStorageNamespaceId: kv.id,
    };
  }
}

119
deployment/utils/redis.ts Normal file
View file

@ -0,0 +1,119 @@
import * as kx from '@pulumi/kubernetesx';
import * as k8s from '@pulumi/kubernetes';
import { normalizeEnv, PodBuilder } from './pod-builder';
const DEFAULT_IMAGE = 'bitnami/redis:6.2.6';
const PORT = 6379;
export class Redis {
constructor(
protected options: {
env?: kx.types.Container['env'];
password: string;
}
) {}
deploy({
limits,
}: {
limits: k8s.types.input.core.v1.ResourceRequirements['limits'];
}) {
const name = 'redis-store';
const image = DEFAULT_IMAGE;
const env = normalizeEnv(this.options.env ?? {}).concat([
{
name: 'REDIS_PASSWORD',
value: this.options.password,
},
{
name: 'POD_NAME',
valueFrom: {
fieldRef: {
fieldPath: 'metadata.name',
},
},
},
]);
const cm = new kx.ConfigMap('redis-scripts', {
data: {
'readiness.sh': `#!/bin/bash
response=$(timeout -s SIGTERM 3 $1 redis-cli -h localhost -a ${this.options.password} -p ${PORT} ping)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
`,
'liveness.sh': `#!/bin/bash
response=$(timeout -s SIGTERM 3 $1 redis-cli -h localhost -a ${this.options.password} -p ${PORT} ping)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
`,
},
});
const volumeMounts = [cm.mount('/scripts')];
const pb = new PodBuilder({
restartPolicy: 'Always',
containers: [
{
name,
image,
env,
volumeMounts,
ports: [{ containerPort: PORT, hostPort: PORT, protocol: 'TCP' }],
resources: {
limits,
},
livenessProbe: {
initialDelaySeconds: 3,
periodSeconds: 10,
failureThreshold: 10,
timeoutSeconds: 3,
exec: {
command: ['/bin/sh', '/scripts/liveness.sh'],
},
},
readinessProbe: {
initialDelaySeconds: 5,
periodSeconds: 8,
failureThreshold: 5,
timeoutSeconds: 3,
exec: {
command: ['/bin/sh', '/scripts/readiness.sh'],
},
},
},
],
});
const metadata: k8s.types.input.meta.v1.ObjectMeta = {
annotations: {},
};
const deployment = new kx.Deployment(name, {
spec: pb.asExtendedDeploymentSpec(
{
replicas: 1,
strategy: {
type: 'RollingUpdate',
rollingUpdate: {
maxSurge: 1,
maxUnavailable: 0,
},
},
},
{
annotations: metadata.annotations,
}
),
});
const service = deployment.createService({});
return { deployment, service, port: PORT };
}
}

View file

@ -0,0 +1,212 @@
import * as kx from '@pulumi/kubernetesx';
import * as k8s from '@pulumi/kubernetes';
import * as azure from '@pulumi/azure';
import * as pulumi from '@pulumi/pulumi';
import { PodBuilder, normalizeEnv } from './pod-builder';
import { PackageInfo } from './pack';
import { isDefined } from './helpers';
const DEFAULT_IMAGE = 'node:16.13.2-alpine3.15';

/**
 * Deploys a packed workspace artifact (tarball uploaded to Azure Blob
 * Storage) as a k8s Deployment or a one-off Job. An init container
 * installs/downloads the artifact into a shared emptyDir volume, then
 * the main container runs its bin entry point.
 */
export class RemoteArtifactAsServiceDeployment {
  constructor(
    protected name: string,
    protected options: {
      storageContainer: azure.storage.Container;
      env?: kx.types.Container['env'];
      packageInfo: PackageInfo;
      port?: number;
      image?: string;
      // Path of the HTTP liveness endpoint; probe disabled when omitted.
      livenessProbe?: string;
      // Path of the HTTP readiness endpoint; probe disabled when omitted.
      readinessProbe?: string;
      memoryLimit?: string;
      cpuLimit?: string;
      // Overrides packageInfo.bin as the yarn script to run.
      bin?: string;
      /**
       * Enables /metrics endpoint on port 10254
       */
      exposesMetrics?: boolean;
      replicas?: number;
    },
    protected dependencies?: Array<pulumi.Resource | undefined | null>,
    protected parent?: pulumi.Resource | null
  ) {}

  /** Runs the artifact once to completion as a k8s Job. */
  deployAsJob() {
    const artifactUrl = this.makeArtifactUrl();
    const { pb } = this.createPod(artifactUrl, true);
    const job = new kx.Job(
      this.name,
      {
        spec: pb.asJobSpec(),
      },
      { dependsOn: this.dependencies?.filter(isDefined) }
    );
    return { job };
  }

  /** Builds the Pod (init + main container) shared by Job and Deployment. */
  createPod(artifactUrl: pulumi.Output<string>, asJob: boolean) {
    const port = this.options.port || 3000;
    // `env` is optional — default to {} so normalizeEnv never receives
    // undefined (Object.keys(undefined) would throw).
    const additionalEnv: any[] = normalizeEnv(this.options.env ?? {});

    let livenessProbe: k8s.types.input.core.v1.Probe | undefined = undefined;
    let readinessProbe: k8s.types.input.core.v1.Probe | undefined = undefined;

    if (this.options.livenessProbe) {
      livenessProbe = {
        initialDelaySeconds: 3,
        periodSeconds: 20,
        failureThreshold: 10,
        timeoutSeconds: 5,
        httpGet: {
          path: this.options.livenessProbe,
          port,
        },
      };
    }

    if (this.options.readinessProbe) {
      readinessProbe = {
        initialDelaySeconds: 5,
        periodSeconds: 20,
        failureThreshold: 5,
        timeoutSeconds: 5,
        httpGet: {
          path: this.options.readinessProbe,
          port,
        },
      };
    }

    const image = this.options.image || DEFAULT_IMAGE;
    // Shared emptyDir: the init container installs the artifact here and
    // the main container executes from it.
    const appVolume = {
      mountPath: '/app',
      name: 'app',
    };
    const volumeMounts = [appVolume];

    if (this.options.exposesMetrics) {
      additionalEnv.push({ name: 'METRICS_ENABLED', value: 'true' });
    }

    const pb = new PodBuilder({
      restartPolicy: asJob ? 'Never' : 'Always',
      volumes: [
        {
          name: appVolume.name,
          emptyDir: {},
        },
      ],
      initContainers: [
        {
          name: `${this.name}-init`,
          image,
          workingDir: appVolume.mountPath,
          volumeMounts,
          // 'node' artifacts are installed via yarn; 'rust' artifacts are
          // plain binaries fetched with wget.
          command:
            this.options.packageInfo.runtime === 'node'
              ? ['/bin/sh', '-c', artifactUrl.apply((v) => `yarn add ${v}`)]
              : this.options.packageInfo.runtime === 'rust'
              ? ['/bin/sh', '-c', artifactUrl.apply((v) => `wget ${v}`)]
              : // NOTE(review): this fallback is not an executable, so an
                // unknown runtime fails the init container — confirm intended.
                ['echo missing script!'],
        },
      ],
      containers: [
        {
          livenessProbe,
          readinessProbe,
          env: [
            { name: 'PORT', value: String(port) },
            {
              name: 'POD_NAME',
              valueFrom: {
                fieldRef: {
                  fieldPath: 'metadata.name',
                },
              },
            },
          ].concat(additionalEnv),
          name: this.name,
          image,
          workingDir: appVolume.mountPath,
          // Same mounts as the init container (consistency: previously a
          // separate literal with the identical content).
          volumeMounts,
          command:
            this.options.packageInfo.runtime === 'node'
              ? ['yarn', this.options.bin || this.options.packageInfo.bin]
              : this.options.packageInfo.runtime === 'rust'
              ? [this.options.packageInfo.bin]
              : [],
          ports: {
            http: port,
            ...(this.options.exposesMetrics
              ? {
                  metrics: 10254,
                }
              : {}),
          },
        },
      ],
    });
    return { pb };
  }

  /** Uploads the packed tarball to Azure Blob Storage; returns its URL. */
  private makeArtifactUrl() {
    const azureStaticFile = new azure.storage.Blob(`${this.name}-artifact`, {
      storageAccountName: this.options.storageContainer.storageAccountName,
      storageContainerName: this.options.storageContainer.name,
      type: 'Block',
      source: new pulumi.asset.FileAsset(this.options.packageInfo.file),
    });
    return azureStaticFile.url;
  }

  /** Deploys the artifact as a long-running Deployment + Service. */
  deploy() {
    const artifactUrl = this.makeArtifactUrl();
    const { pb } = this.createPod(artifactUrl, false);
    const metadata: k8s.types.input.meta.v1.ObjectMeta = {
      annotations: {},
    };

    if (this.options.exposesMetrics) {
      // Standard annotations so the metrics collector discovers this Pod.
      metadata.annotations = {
        'prometheus.io/port': '10254',
        'prometheus.io/path': '/metrics',
        'prometheus.io/scrape': 'true',
      };
    }

    const deployment = new kx.Deployment(
      this.name,
      {
        spec: pb.asExtendedDeploymentSpec(
          {
            replicas: this.options.replicas ?? 1,
            strategy: {
              type: 'RollingUpdate',
              rollingUpdate: {
                maxSurge: this.options.replicas ?? 1,
                maxUnavailable: 0,
              },
            },
          },
          {
            annotations: metadata.annotations,
          }
        ),
      },
      {
        dependsOn: this.dependencies?.filter(isDefined),
        parent: this.parent ?? undefined,
      }
    );
    const service = deployment.createService({});
    return { deployment, service };
  }
}

View file

@ -0,0 +1,213 @@
import * as k8s from '@pulumi/kubernetes';
import { Output } from '@pulumi/pulumi';
/**
 * Contour-based ingress proxy. `deployProxy` installs the Contour Helm
 * chart (Envoy LoadBalancer) and `registerService` attaches a
 * cert-manager Certificate plus an HTTPProxy route set for one DNS
 * record.
 */
export class Proxy {
  // Envoy LoadBalancer Service, populated by deployProxy().
  private lbService: Output<k8s.core.v1.Service> | null = null;

  constructor(
    private tlsSecretName: string,
    private staticIp?: { address?: string }
  ) {}

  // NOTE(review): `apex`, `virtualHost`, `httpsUpstream` and
  // `withWwwDomain` are accepted but never read below — confirm whether
  // they are dead options or pending features.
  registerService(
    dns: { record: string; apex?: boolean },
    routes: {
      name: string;
      path: string;
      service: k8s.core.v1.Service;
      customRewrite?: string;
      virtualHost?: Output<string>;
      httpsUpstream?: boolean;
      withWwwDomain?: boolean;
    }[]
  ) {
    // TLS certificate for the record, issued by the ClusterIssuer whose
    // name was passed in as tlsSecretName.
    const cert = new k8s.apiextensions.CustomResource(`cert-${dns.record}`, {
      apiVersion: 'cert-manager.io/v1',
      kind: 'Certificate',
      metadata: {
        name: dns.record,
      },
      spec: {
        commonName: dns.record,
        dnsNames: [dns.record],
        issuerRef: {
          name: this.tlsSecretName,
          kind: 'ClusterIssuer',
        },
        secretName: dns.record,
      },
    });

    new k8s.apiextensions.CustomResource(
      `httpproxy-${dns.record}`,
      {
        apiVersion: 'projectcontour.io/v1',
        kind: 'HTTPProxy',
        metadata: {
          annotations: {
            // Force http -> https redirects at the ingress.
            'ingress.kubernetes.io/force-ssl-redirect': 'true',
          },
          name: `ingress-${dns.record}`,
        },
        spec: {
          virtualhost: {
            fqdn: dns.record,
            tls: {
              secretName: dns.record,
            },
            // Browser clients only come from the Hive web app origins.
            corsPolicy: {
              allowOrigin: [
                'https://app.graphql-hive.com',
                'https://graphql-hive.com',
              ],
              allowMethods: ['GET', 'POST', 'OPTIONS'],
              allowHeaders: ['*'],
              exposeHeaders: ['*'],
            },
          },
          // Each route matches on its path prefix; non-root paths are
          // rewritten (default '/', or the route's customRewrite).
          routes: routes.map((route) => ({
            conditions: [
              {
                prefix: route.path,
              },
            ],
            services: [
              {
                name: route.service.metadata.name,
                port: route.service.spec.ports[0].port,
              },
            ],
            ...(route.path === '/'
              ? {}
              : {
                  pathRewritePolicy: {
                    replacePrefix: [
                      {
                        replacement: route.customRewrite || '/',
                      },
                    ],
                  },
                }),
          })),
        },
      },
      {
        // NOTE(review): the non-null assertion assumes deployProxy() was
        // called first; lbService is null otherwise — confirm call order.
        dependsOn: [cert, this.lbService!],
      }
    );

    return this;
  }

  deployProxy(options: { replicas?: number }) {
    const ns = new k8s.core.v1.Namespace('contour', {
      metadata: {
        name: 'contour',
      },
    });

    const proxyController = new k8s.helm.v3.Chart('contour-proxy', {
      chart: 'contour',
      version: '7.8.0',
      namespace: ns.metadata.name,
      fetchOpts: {
        repo: 'https://charts.bitnami.com/bitnami',
      },
      // https://github.com/bitnami/charts/tree/master/bitnami/contour
      values: {
        // Excluded from Vector log collection to avoid access-log noise.
        commonLabels: {
          'vector.dev/exclude': 'true',
        },
        configInline: {
          // https://projectcontour.io/docs/main/configuration/
          'accesslog-format': 'json',
          // https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage
          'json-fields': [
            '@timestamp',
            'bytes_received',
            'bytes_sent',
            'downstream_local_address',
            'duration',
            'method',
            'path',
            'request_id',
            'response_code',
            'response_flags',
            'upstream_cluster',
            'upstream_host',
            'upstream_service_time',
            'user_agent',
            'x_forwarded_for',
          ],
        },
        contour: {
          // Expose controller metrics to the Prometheus scraper.
          podAnnotations: {
            'prometheus.io/scrape': 'true',
            'prometheus.io/port': '8000',
            'prometheus.io/scheme': 'http',
            'prometheus.io/path': '/metrics',
          },
          podLabels: {
            'vector.dev/exclude': 'true',
          },
        },
        envoy: {
          service: {
            // Pin the LB to a pre-allocated static IP when provided.
            loadBalancerIP: this.staticIp?.address,
          },
          podAnnotations: {
            'prometheus.io/scrape': 'true',
            'prometheus.io/port': '8002',
            'prometheus.io/scheme': 'http',
            'prometheus.io/path': '/stats/prometheus',
          },
          podLabels: {
            'vector.dev/exclude': 'true',
          },
          // Autoscale Envoy only when more than one replica was requested.
          autoscaling:
            options?.replicas && options?.replicas > 1
              ? {
                  enabled: true,
                  minReplicas: 1,
                  maxReplicas: options.replicas,
                }
              : {},
        },
      },
    });

    // Keep a handle on the Envoy LB Service; registerService depends on it.
    this.lbService = proxyController.getResource(
      'v1/Service',
      'contour/contour-proxy-envoy'
    );

    // Allow all namespaces to reference the wildcard TLS secret that
    // lives in the cert-manager namespace.
    new k8s.apiextensions.CustomResource(
      'secret-delegation',
      {
        apiVersion: 'projectcontour.io/v1',
        kind: 'TLSCertificateDelegation',
        metadata: {
          name: this.tlsSecretName,
          namespace: 'cert-manager',
        },
        spec: {
          delegations: [
            {
              secretName: this.tlsSecretName,
              targetNamespaces: ['*'],
            },
          ],
        },
      },
      {
        dependsOn: [this.lbService],
      }
    );

    return this;
  }

  // Returns the Envoy LB Service (null until deployProxy has run).
  get() {
    return this.lbService;
  }
}

1903
deployment/yarn.lock Normal file

File diff suppressed because it is too large Load diff

15
docs/DEPLOYMENT.md Normal file
View file

@ -0,0 +1,15 @@
## Deployment
Deployment is based on NPM packages. That means we are bundling (as much as possible) each service or package, and publishing it to the private GitHub Packages registry.
Doing that allows us to have simple, super-fast deployments, because we don't need to deal with Docker images (which are heavy).
We create an executable package (with `bin` entrypoint) and then use `npx PACKAGE_NAME@PACKAGE_VERSION` as command for a base Docker image of NodeJS. So instead of building a Docker image for each change, we build NPM package, and the Docker image we are using in prod is the same.
Think of it as Lambda (bundled JS, runtime is predefined) without all the crap (weird cache, weird pricing, cold start and so on).
### How to deploy?
We are using Pulumi (infrastructure as code) to describe and run our deployment. It's managed as GitHub Actions that runs on every bump release by Changesets.
So changes are aggregated in a Changesets PR, and once it is merged, the deployment manifest `package.json` is updated, leading to a deployment of only the updated packages to production.

55
docs/DEVELOPMENT.md Normal file
View file

@ -0,0 +1,55 @@
# Development
## Setup Instructions
- Clone the repository locally
- Make sure to install the recommended VSCode extensions (defined in `.vscode/extensions.json`)
- In the root of the repo, run `nvm use` to switch to the Node version required by the project
- Run `yarn` at the root to install all the dependencies and run the hooks
- Run `yarn setup` to create and apply migrations on the PostgreSQL database
- Run `yarn generate` to generate the typings from the graphql files (use `yarn graphql:generate` if you only need to run GraphQL Codegen)
- Run `yarn build` to build all services
- Click on `Start Hive` in the bottom bar of VSCode
- Open the UI (`http://localhost:3000` by default) and Sign in with any of the identity provider
- If you are not added to the list of guest users, request access from The Guild maintainers
- Once this is done, you should be able to login and use the project
- Once you generate the token against your organization/personal account in hive, the same can be added locally to `hive.json` within `packages/libraries/cli` which can be used to interact via the hive cli with the registry
## Development Seed
We have a script to feed your local instance of Hive.
1. Use `Start Hive` to run your local Hive instance.
2. Make sure `usage` and `usage-ingestor` are running as well (with `yarn dev`)
3. Open Hive app, create a project and a target, then create a token.
4. Run the seed script: `TOKEN="MY_TOKEN_HERE" yarn seed`
5. This should report a dummy schema and some dummy usage data to your local instance of Hive, allowing you to test features e2e.
> Note: You can set `STAGING=1` in order to target staging env and seed a target there.
> To send more operations and test heavy load on Hive instance, you can also set `OPERATIONS` (amount of operations in each interval round, default is `1`) and `INTERVAL` (frequency of sending operations, default: `1000`ms). For example, using `INTERVAL=1000 OPERATIONS=1000` will send 1000 requests per second.
## Publish your first schema (manually)
1. Start Hive locally
1. Create a project and a target
1. Create a token from that target
1. Go to `packages/libraries/cli` and run `yarn build`
1. Inside `packages/libraries/cli`, run: `yarn start schema:publish --token "YOUR_TOKEN_HERE" --registry "http://localhost:4000/graphql" examples/single.graphql`
### Setting up Slack App for developing
1. [Download](https://loophole.cloud/download) Loophole CLI (same as ngrok but supports non-random urls)
2. Log in to Loophole `$ loophole account login`
3. Start the proxy by running `$ loophole http 3000 --hostname hive-<your-name>` (@kamilkisiela I use `hive-kamil`). It creates `https://hive-<your-name>.loophole.site` endpoint.
4. Message @kamilkisiela and send him the url (He will update the list of accepted redirect urls in both Auth0 and Slack App).
5. Update `APP_BASE_URL` and `AUTH0_BASE_URL` in [`packages/web/app/.env`](./packages/web/app/.env)
6. Run `packages/web/app` and open `https://hive-<your-name>.loophole.site`.
> We have a special slack channel called `#hive-tests` to not spam people :)
### Setting up GitHub App for developing
1. Follow the steps above for Slack App.
2. Update `Setup URL` in [GraphQL Hive Development](https://github.com/organizations/the-guild-org/settings/apps/graphql-hive-development) app and set it to `https://hive-<your-name>.loophole.site/api/github/setup-callback`.

23
docs/TESTING.md Normal file
View file

@ -0,0 +1,23 @@
# Testing
## Unit tests
We are using Jest. Simply run `yarn test` to run all the tests.
## Integration Tests
We are using Dockest to test the following concerns:
1. Main application flows and integration of different services
2. Build and pack process of all packages
3. Containerize execution of all services
4. Cross-service network calls
To run integration tests locally, follow:
1. Make sure you have Docker installed. If you are having issues, try to run `docker system prune` to clean the Docker caches.
2. Install all deps: `yarn install`
3. Generate types: `yarn graphql:generate`
4. Build and pack all services: `yarn workspace integration-tests run build-and-pack`
5. Pull the images: `docker-compose -f integration-tests/docker-compose.yml pull`
6. Run the tests: `yarn workspace integration-tests run dockest`

View file

@ -0,0 +1,12 @@
AUTH0_DOMAIN="<sync>"
AUTH0_CLIENT_ID="<sync>"
AUTH0_CLIENT_SECRET="<sync>"
AUTH0_USER_PASSWORD="<sync>"
AUTH0_USER_MAIN_EMAIL="<sync>"
AUTH0_USER_EXTRA_EMAIL="<sync>"
AUTH0_SECRET="<sync>"
AUTH0_AUDIENCE="<sync>"
AUTH0_CONNECTION="<sync>"
STRIPE_SECRET_KEY="<sync>"

4
integration-tests/.gitignore vendored Normal file
View file

@ -0,0 +1,4 @@
docker-compose.dockest-generated.yml
db-clickhouse
tarballs
volumes

View file

@ -0,0 +1,494 @@
version: '3.8'
services:
db:
image: postgres:13.4-alpine
ports:
- '5432:5432'
environment:
POSTGRES_DB: registry
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
PGDATA: /var/lib/postgresql/data
healthcheck:
test: ['CMD-SHELL', 'pg_isready']
interval: 5s
timeout: 5s
retries: 6
networks:
- 'stack'
clickhouse:
image: clickhouse/clickhouse-server:22.3.5.5-alpine
volumes:
- ../packages/services/storage/configs/clickhouse:/etc/clickhouse-server/conf.d
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:8123/ping']
interval: 5s
timeout: 5s
retries: 6
start_period: 10s
environment:
CLICKHOUSE_USER: test
CLICKHOUSE_PASSWORD: test
KAFKA_BROKER: broker:29092
ports:
- '8123:8123'
networks:
- 'stack'
zookeeper:
image: confluentinc/cp-zookeeper:6.2.2-3-ubi8
hostname: zookeeper
networks:
- 'stack'
ports:
- '2181:2181'
ulimits:
nofile:
soft: 20000
hard: 40000
healthcheck:
test: ['CMD', 'cub', 'zk-ready', '127.0.0.1:2181', '10']
interval: 5s
timeout: 10s
retries: 6
start_period: 15s
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
broker:
image: confluentinc/cp-kafka:6.2.2-3-ubi8
    hostname: broker
depends_on:
zookeeper:
condition: service_healthy
networks:
- 'stack'
ports:
- '29092:29092'
- '9092:9092'
ulimits:
nofile:
soft: 20000
hard: 40000
healthcheck:
test:
[
'CMD',
'cub',
'kafka-ready',
'1',
'5',
'-b',
'127.0.0.1:9092',
'-c',
'/etc/kafka/kafka.properties',
]
interval: 15s
timeout: 10s
retries: 6
start_period: 15s
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
redis:
image: bitnami/redis:6.2
networks:
- 'stack'
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
interval: 5s
timeout: 10s
retries: 6
start_period: 5s
ports:
- '6379:6379'
environment:
- REDIS_PASSWORD=test
- REDIS_DISABLE_COMMANDS=FLUSHDB,FLUSHALL
migrations:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-migrations.sh'
networks:
- 'stack'
depends_on:
clickhouse:
condition: service_healthy
db:
condition: service_healthy
broker:
condition: service_healthy
environment:
MIGRATOR: 'up'
CLICKHOUSE_MIGRATOR: 'up'
POSTGRES_CONNECTION_STRING: 'postgresql://postgres:postgres@db:5432/registry'
CLICKHOUSE_PROTOCOL: 'http'
CLICKHOUSE_HOST: 'clickhouse'
CLICKHOUSE_PORT: '8123'
CLICKHOUSE_USERNAME: 'test'
CLICKHOUSE_PASSWORD: 'test'
KAFKA_BROKER: 'broker:29092'
volumes:
- './tarballs/storage.tgz:/storage.tgz'
- './run-migrations.sh:/run-migrations.sh'
server:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-server.sh'
networks:
- 'stack'
depends_on:
redis:
condition: service_healthy
clickhouse:
condition: service_healthy
migrations:
condition: service_completed_successfully
tokens:
condition: service_healthy
webhooks:
condition: service_healthy
schema:
condition: service_healthy
usage_estimator:
condition: service_healthy
rate_limit:
condition: service_healthy
stripe_billing:
condition: service_healthy
local_cdn:
condition: service_healthy
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:3001/_readiness']
interval: 5s
timeout: 5s
retries: 6
start_period: 5s
ports:
- '3001:3001'
volumes:
- './tarballs/server.tgz:/server.tgz'
- './run-server.sh:/run-server.sh'
environment:
POSTGRES_HOST: db
POSTGRES_PORT: 5432
POSTGRES_DB: registry
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
CLICKHOUSE_PROTOCOL: 'http'
CLICKHOUSE_HOST: clickhouse
CLICKHOUSE_PORT: 8123
CLICKHOUSE_USERNAME: test
CLICKHOUSE_PASSWORD: test
REDIS_HOST: redis
REDIS_PORT: 6379
REDIS_PASSWORD: test
TOKENS_ENDPOINT: http://tokens:3003
WEBHOOKS_ENDPOINT: http://webhooks:3005
SCHEMA_ENDPOINT: http://schema:3002
USAGE_ESTIMATOR_ENDPOINT: http://usage_estimator:3008
RATE_LIMIT_ENDPOINT: http://rate_limit:3009
BILLING_ENDPOINT: http://stripe_billing:3010
CF_BASE_PATH: http://local_cdn:3004
CF_ACCOUNT_ID: 103df45224310d669213971ce28b5b70
CF_AUTH_TOKEN: 85e20c26c03759603c0f45884824a1c3
CF_NAMESPACE_ID: 33b1e3bbb4a4707d05ea0307cbb55c79
CDN_AUTH_PRIVATE_KEY: 1e1064ef9cda8bf38936b77317e90dc3
CDN_BASE_URL: http://localhost:3004
GITHUB_APP_ID: 123123
GITHUB_APP_PRIVATE_KEY: 5f938d51a065476c4dc1b04aeba13afb
ENCRYPTION_SECRET: 8ebe95cf24c1fbe306e9fa32c8c33148
FEEDBACK_SLACK_TOKEN: ''
FEEDBACK_SLACK_CHANNEL: '#hive'
AUTH0_SECRET: ${AUTH0_SECRET}
AUTH0_DOMAIN: ${AUTH0_DOMAIN}
AUTH0_CLIENT_ID: ${AUTH0_CLIENT_ID}
AUTH0_CLIENT_SECRET: ${AUTH0_CLIENT_SECRET}
AUTH0_SCOPE: 'openid profile offline_access'
AUTH0_AUDIENCE: ${AUTH0_AUDIENCE}
AUTH0_CONNECTION: ${AUTH0_CONNECTION}
PORT: 3001
schema:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-schema.sh'
networks:
- 'stack'
depends_on:
redis:
condition: service_healthy
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:3002/_readiness']
interval: 5s
timeout: 5s
retries: 6
start_period: 5s
ports:
- '3002:3002'
volumes:
- './tarballs/schema.tgz:/schema.tgz'
- './run-schema.sh:/run-schema.sh'
environment:
PORT: 3002
REDIS_HOST: redis
REDIS_PORT: 6379
REDIS_PASSWORD: test
tokens:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-tokens.sh'
networks:
- 'stack'
depends_on:
migrations:
condition: service_completed_successfully
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:3003/_readiness']
interval: 5s
timeout: 5s
retries: 6
start_period: 5s
ports:
- '3003:3003'
volumes:
- './tarballs/tokens.tgz:/tokens.tgz'
- './run-tokens.sh:/run-tokens.sh'
environment:
POSTGRES_HOST: db
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_PORT: 5432
POSTGRES_DB: registry
PORT: 3003
local_cdn:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-local-cdn.sh'
networks:
- 'stack'
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:3004/_readiness']
interval: 5s
timeout: 5s
retries: 6
start_period: 5s
ports:
- '3004:3004'
volumes:
- '../packages/services/cdn-worker/dist/dev.js:/cdn.js'
- './run-local-cdn.sh:/run-local-cdn.sh'
environment:
PORT: 3004
webhooks:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-webhooks.sh'
networks:
- 'stack'
depends_on:
redis:
condition: service_healthy
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:3005/_readiness']
interval: 5s
timeout: 5s
retries: 6
start_period: 5s
ports:
- '3005:3005'
volumes:
- './tarballs/webhooks.tgz:/webhooks.tgz'
- './run-webhooks.sh:/run-webhooks.sh'
environment:
BULLMQ_COMMANDS_FROM_ROOT: 'true'
PORT: 3005
REDIS_HOST: redis
REDIS_PORT: 6379
REDIS_PASSWORD: test
usage:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-usage.sh'
networks:
- 'stack'
ports:
- '3006:3006'
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:3006/_readiness']
interval: 5s
timeout: 5s
retries: 6
start_period: 5s
volumes:
- './tarballs/usage.tgz:/usage.tgz'
- './run-usage.sh:/run-usage.sh'
depends_on:
broker:
condition: service_healthy
rate_limit:
condition: service_healthy
tokens:
condition: service_healthy
environment:
TOKENS_ENDPOINT: http://tokens:3003
RATE_LIMIT_ENDPOINT: http://rate_limit:3009
KAFKA_CONNECTION_MODE: 'docker'
KAFKA_BROKER: broker:29092
KAFKA_BUFFER_SIZE: 350
KAFKA_BUFFER_INTERVAL: 1000
KAFKA_BUFFER_DYNAMIC: 'true'
PORT: 3006
usage_ingestor:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-usage-ingestor.sh'
networks:
- 'stack'
depends_on:
broker:
condition: service_healthy
clickhouse:
condition: service_healthy
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:3007/_readiness']
interval: 5s
timeout: 5s
retries: 6
start_period: 5s
ports:
- '3007:3007'
volumes:
- './tarballs/usage-ingestor.tgz:/usage-ingestor.tgz'
- './run-usage-ingestor.sh:/run-usage-ingestor.sh'
environment:
KAFKA_CONNECTION_MODE: 'docker'
KAFKA_BROKER: broker:29092
KAFKA_CONCURRENCY: 1
CLICKHOUSE_PROTOCOL: 'http'
CLICKHOUSE_HOST: clickhouse
CLICKHOUSE_PORT: 8123
CLICKHOUSE_USERNAME: test
CLICKHOUSE_PASSWORD: test
PORT: 3007
usage_estimator:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-usage-estimator.sh'
networks:
- 'stack'
ports:
- '3008:3008'
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:3008/_readiness']
interval: 5s
timeout: 5s
retries: 6
start_period: 5s
volumes:
- './tarballs/usage-estimator.tgz:/usage-estimator.tgz'
- './run-usage-estimator.sh:/run-usage-estimator.sh'
depends_on:
clickhouse:
condition: service_healthy
migrations:
condition: service_completed_successfully
environment:
POSTGRES_CONNECTION_STRING: 'postgresql://postgres:postgres@db:5432/registry'
CLICKHOUSE_PROTOCOL: 'http'
CLICKHOUSE_HOST: 'clickhouse'
CLICKHOUSE_PORT: '8123'
CLICKHOUSE_USERNAME: 'test'
CLICKHOUSE_PASSWORD: 'test'
PORT: 3008
rate_limit:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-rate-limit.sh'
networks:
- 'stack'
ports:
- '3009:3009'
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:3009/_readiness']
interval: 5s
timeout: 5s
retries: 6
start_period: 5s
volumes:
- './tarballs/rate-limit.tgz:/rate-limit.tgz'
- './run-rate-limit.sh:/run-rate-limit.sh'
depends_on:
clickhouse:
condition: service_healthy
migrations:
condition: service_completed_successfully
usage_estimator:
condition: service_healthy
environment:
POSTGRES_CONNECTION_STRING: 'postgresql://postgres:postgres@db:5432/registry'
USAGE_ESTIMATOR_ENDPOINT: http://usage_estimator:3008
PORT: 3009
stripe_billing:
image: node:16.13.2-alpine3.14
entrypoint:
- '/bin/sh'
- '/run-stripe-billing.sh'
networks:
- 'stack'
ports:
- '3010:3010'
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'localhost:3010/_readiness']
interval: 5s
timeout: 5s
retries: 6
start_period: 5s
volumes:
- './tarballs/stripe-billing.tgz:/stripe-billing.tgz'
- './run-stripe-billing.sh:/run-stripe-billing.sh'
depends_on:
clickhouse:
condition: service_healthy
migrations:
condition: service_completed_successfully
usage_estimator:
condition: service_healthy
environment:
STRIPE_SECRET_KEY: ${STRIPE_SECRET_KEY}
POSTGRES_CONNECTION_STRING: 'postgresql://postgres:postgres@db:5432/registry'
USAGE_ESTIMATOR_ENDPOINT: http://usage_estimator:3008
PORT: 3010
networks:
stack: {}

View file

@ -0,0 +1,33 @@
import { Dockest, logLevel } from 'dockest';
import { cleanDockerContainers, createServices } from './testkit/dockest';
import dotenv from 'dotenv';
// Entry point: wires Dockest to the docker-compose services and runs the
// Jest integration suite once every container reports ready.
async function main() {
  // Load .env so AUTH0_* / STRIPE_* secrets reach the test process.
  dotenv.config();

  const dockest = new Dockest({
    logLevel: logLevel.DEBUG,
    jestOpts: {
      // All tests share one Postgres/ClickHouse/Redis — run serially.
      runInBand: true,
      config: JSON.stringify({
        roots: ['<rootDir>/tests'],
        transform: {
          '^.+\\.ts$': 'ts-jest',
        },
        // Generous: each test may wait on real containers and network calls.
        testTimeout: 45_000,
        maxConcurrency: 1,
        setupFiles: ['dotenv/config'],
        setupFilesAfterEnv: ['./jest-setup.ts'],
      }),
    },
  });

  // Remove leftovers from a previous (possibly aborted) run before starting.
  cleanDockerContainers();

  return dockest.run(createServices());
}

// Top-level await (ESM): fail the process loudly if the run crashes.
await main().catch((err) => {
  console.error(err);
  process.exit(1);
});

View file

@ -0,0 +1,8 @@
type Query {
users: [User!]
}
type User {
id: ID!
name: String!
}

View file

@ -0,0 +1,9 @@
type Query {
users: [User!]
}
type User {
id: ID!
name: String!
email: String!
}

View file

@ -0,0 +1,10 @@
type Query {
users: [User!]
}
type User {
id: ID!
name: String!
email: String!
nickname: String
}

View file

@ -0,0 +1,20 @@
import { createPool } from 'slonik';
import * as utils from 'dockest/test-helper';
import { resetDb } from './testkit/db';
import { resetClickHouse } from './testkit/clickhouse';
import { resetRedis } from './testkit/redis';
// Service addresses are resolved by Dockest at runtime ("host:port" strings).
const dbAddress = utils.getServiceAddress('db', 5432);
const redisAddress = utils.getServiceAddress('redis', 6379);

// One shared Postgres pool for the whole suite.
const pool = createPool(`postgresql://postgres:postgres@${dbAddress}/registry`);

// Reset every datastore before each test so tests cannot leak state.
beforeEach(() => resetDb(pool));
beforeEach(() => resetClickHouse());
beforeEach(() =>
  resetRedis({
    // getServiceAddress returns "host:port"; strip the port for ioredis.
    host: redisAddress.replace(':6379', ''),
    port: 6379,
    password: 'test',
  })
);

View file

@ -0,0 +1,23 @@
{
"name": "integration-tests",
"type": "module",
"private": true,
"version": "0.0.0",
"dependencies": {
"@app/gql": "link:./testkit/gql",
"@graphql-typed-document-node/core": "3.1.1",
"auth0": "2.36.2",
"axios": "0.27.2",
"dotenv": "10.0.0",
"date-fns": "2.25.0",
"dependency-graph": "0.11.0",
"dockest": "npm:@n1ru4l/dockest@2.1.0-rc.6",
"slonik": "23.9.0",
"tsup": "5.12.7",
"yaml": "2.1.0"
},
"scripts": {
"build-and-pack": "(cd ../ && yarn build:services && yarn build:libraries && yarn build:local-cdn) && node ./scripts/pack.mjs",
"dockest": "tsup-node dockest.ts --format esm --target node16 --onSuccess 'node dist/dockest.js'"
}
}

View file

@ -0,0 +1,5 @@
#!/bin/sh
set -e
node cdn.js

View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e
npm install -g file:storage.tgz
storage

View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e
npm install -g file:rate-limit.tgz
rate-limit

View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e
npm install -g file:schema.tgz
schema

View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e
npm install -g file:server.tgz
server

View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e
npm install -g file:stripe-billing.tgz
stripe-billing

View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e
npm install -g file:tokens.tgz
tokens

View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e
npm install -g file:usage-estimator.tgz
usage-estimator

View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e
npm install -g file:usage-ingestor.tgz
usage-ingestor

6
integration-tests/run-usage.sh Executable file
View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e
npm install -g file:usage.tgz
usage

View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e
npm install -g file:webhooks.tgz
webhooks

View file

@ -0,0 +1,103 @@
/**
* !! Node !!
*
* Gets all the packages from the manifest and packs them.
* As a result, we get a tarball for each package in the integration-tests/tarballs directory.
*
* Naming convention:
* @hive/tokens -> tokens.tgz
*/
import { exec } from 'child_process';
import path from 'path';
import fs from 'fs';
import fsExtra from 'fs-extra';
import glob from 'glob';
import rimraf from 'rimraf';
import { fileURLToPath } from 'url';
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const cwd = path.resolve(__dirname, '../..');
const tarballDir = path.resolve(cwd, 'integration-tests/tarballs');
/**
 * Packs every backend service package into integration-tests/tarballs.
 * See the header comment above for the naming convention.
 */
async function main() {
  // Start from a clean tarball directory.
  rimraf.sync(tarballDir, {});
  fsExtra.mkdirSync(tarballDir, { recursive: true });

  /** True when the manifest tags the package as a backend service. */
  function isBackendPackage(manifestPath) {
    const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
    // Optional chaining on `tags` too: a manifest may define `buildOptions`
    // without a `tags` array, which previously crashed the script.
    return manifest.buildOptions?.tags?.includes('backend') ?? false;
  }

  /** Lists backend package locations relative to the repo root. */
  function listBackendPackages() {
    const manifestPathCollection = glob.sync(
      'packages/services/*/package.json',
      {
        cwd,
        absolute: true,
        ignore: ['**/node_modules/**', '**/dist/**'],
      }
    );

    return manifestPathCollection
      .filter(isBackendPackage)
      .map((filepath) => path.relative(cwd, path.dirname(filepath)));
  }

  /**
   * Runs `npm pack` on the package's dist directory and moves the resulting
   * tarball into `tarballDir` under its short name (e.g. tokens.tgz).
   * Throws when the package was not built (version mismatch in dist).
   */
  async function pack(location) {
    const { version, name } = JSON.parse(
      await fsExtra.readFile(path.join(cwd, location, 'package.json'), 'utf-8')
    );

    const stdout = await new Promise((resolve, reject) => {
      exec(
        `npm pack ${path.join(cwd, location, 'dist')}`,
        {
          cwd,
          encoding: 'utf8',
        },
        (err, stdout, stderr) => {
          console.log(stderr);
          if (err) {
            reject(err);
          } else {
            resolve(stdout);
          }
        }
      );
    });

    // `npm pack` prints the created filename on its last non-empty line.
    const lines = stdout.split('\n');
    const org_filename = path.resolve(cwd, lines[lines.length - 2]);
    let filename = org_filename
      .replace(cwd, tarballDir)
      .replace('hive-', '')
      .replace(`-${version}`, '');

    // If a version suffix survived, the dist manifest's version differs from
    // the source manifest's — the package was not (re)built.
    if (/-\d+\.\d+\.\d+\.tgz$/.test(filename)) {
      throw new Error(`Build ${name} package first!`);
    }

    await fsExtra.rename(org_filename, filename);

    return filename;
  }

  const locations = listBackendPackages();

  await Promise.all(
    locations.map(async (loc) => {
      try {
        const filename = await pack(loc);
        console.log('[pack] Done', path.resolve(cwd, filename));
      } catch (error) {
        console.error(`[pack] Failed to pack ${loc}: ${error}`);
        console.error('[pack] Maybe you forgot to build the packages first?');
        process.exit(1);
      }
    })
  );
}

await main();

View file

@ -0,0 +1,35 @@
import { AuthenticationClient, TokenResponse } from 'auth0';
import { ensureEnv } from './env';
const authenticationApi = new AuthenticationClient({
domain: ensureEnv('AUTH0_DOMAIN'),
clientId: ensureEnv('AUTH0_CLIENT_ID'),
clientSecret: ensureEnv('AUTH0_CLIENT_SECRET'),
});
type UserID = 'main' | 'extra';
const password = ensureEnv('AUTH0_USER_PASSWORD');
const userEmails: Record<UserID, string> = {
main: ensureEnv('AUTH0_USER_MAIN_EMAIL'),
extra: ensureEnv('AUTH0_USER_EXTRA_EMAIL'),
};
const tokenResponsePromise: Record<UserID, Promise<TokenResponse> | null> = {
main: null,
extra: null,
};
/**
 * Returns an Auth0 password-grant token response for the given test user.
 *
 * The response promise is cached per user, so the whole run performs at most
 * one successful token exchange per user. A rejected exchange is evicted from
 * the cache, so a transient Auth0 failure does not poison every later call.
 */
export function authenticate(userId: UserID) {
  if (!tokenResponsePromise[userId]) {
    tokenResponsePromise[userId] = authenticationApi
      .passwordGrant({
        username: userEmails[userId],
        password,
        audience: `https://${ensureEnv('AUTH0_DOMAIN')}/api/v2/`,
        scope: 'openid profile email offline_access',
        realm: 'Username-Password-Authentication',
      })
      .catch((error) => {
        // Drop the failed attempt so the next call retries authentication.
        tokenResponsePromise[userId] = null;
        throw error;
      });
  }

  return tokenResponsePromise[userId]!;
}

View file

@ -0,0 +1,22 @@
import * as utils from 'dockest/test-helper';
import { run } from '../../packages/libraries/cli/src/index';
const registryAddress = utils.getServiceAddress('server', 3001);
/**
 * Runs `schema:publish` through the Hive CLI against the dockerized registry.
 */
export async function schemaPublish(args: string[]) {
  const endpoint = `http://${registryAddress}/graphql`;
  return run(['schema:publish', `--registry`, endpoint, ...args]);
}

/**
 * Runs `schema:check` through the Hive CLI against the dockerized registry.
 */
export async function schemaCheck(args: string[]) {
  const endpoint = `http://${registryAddress}/graphql`;
  return run(['schema:check', `--registry`, endpoint, ...args]);
}

View file

@ -0,0 +1,43 @@
import * as utils from 'dockest/test-helper';
import axios from 'axios';
const clickhouseAddress = utils.getServiceAddress('clickhouse', 8123);
const endpoint = `http://${clickhouseAddress}/?default_format=JSON`;
/**
 * Truncates the ClickHouse analytics tables so every test starts from an
 * empty dataset. Tables are truncated sequentially to keep load on the
 * single test ClickHouse instance predictable.
 */
export async function resetClickHouse() {
  const tables = [
    `operations_registry`,
    `operations_new_hourly_mv`,
    `operations_new`,
    `schema_coordinates_daily`,
    `client_names_daily`,
  ];

  // Plain `for...of`: the original `for await` over a non-async iterable ran
  // but was misleading — the awaiting happens on the request below. The
  // redundant `method: 'POST'` config key (already implied by axios.post)
  // was dropped as well.
  for (const table of tables) {
    await axios.post(endpoint, `TRUNCATE TABLE default.${table}`, {
      timeout: 10_000,
      headers: {
        'Accept-Encoding': 'gzip',
        Accept: 'application/json',
        Authorization: `Basic ${Buffer.from('test:test').toString('base64')}`,
      },
    });
  }
}
/**
 * Executes a raw SQL query against the test ClickHouse instance and returns
 * the JSON-formatted result ({ data, rows }).
 */
export async function clickHouseQuery<T>(query: string) {
  const auth = Buffer.from('test:test').toString('base64');

  const response = await axios.post<{
    data: T[];
    rows: number;
  }>(endpoint, query, {
    timeout: 10_000,
    responseType: 'json',
    headers: {
      'Accept-Encoding': 'gzip',
      Authorization: `Basic ${auth}`,
    },
  });

  return response.data;
}

View file

@ -0,0 +1,27 @@
import { sql, DatabasePoolConnectionType } from 'slonik';
/**
 * Truncates every public-schema table except migration bookkeeping, so each
 * test starts with an empty database while the schema stays intact.
 * Identities (sequences) are restarted as well.
 */
export const resetDb = async (conn: DatabasePoolConnectionType) => {
  // Tables that must survive a reset.
  const skip = ['migrations'];

  const rows = await conn.many<{ tablename: string }>(sql`
    SELECT "tablename"
    FROM "pg_tables"
    WHERE "schemaname" = 'public';
  `);

  const tables = rows
    .map((row) => row.tablename)
    .filter((tablename) => skip.includes(tablename) === false);

  if (tables.length === 0) {
    return;
  }

  await conn.query(sql`
    TRUNCATE TABLE
    ${sql.join(
      tables.map((name) => sql.identifier([name])),
      sql`,`
    )}
    RESTART IDENTITY
    ;
  `);
};

View file

@ -0,0 +1,91 @@
import { DockestService, execa } from 'dockest';
import {
containerIsHealthyReadinessCheck,
zeroExitCodeReadinessCheck,
} from 'dockest/dist/readiness-check/index.js';
import { DepGraph } from 'dependency-graph';
import { readFileSync } from 'fs';
import { join } from 'path';
import { parse } from 'yaml';
/**
 * Builds the Dockest service list from docker-compose.yml.
 *
 * Services are topologically sorted by their `depends_on` edges and each
 * returned service carries its direct dependencies. Services with a compose
 * healthcheck use the container-healthy readiness check; the rest wait for a
 * zero exit code.
 */
export function createServices() {
  const dockerComposeFile: {
    services: {
      [key: string]: {
        depends_on?: { [key: string]: unknown };
        healthcheck?: any;
      };
    };
  } = parse(readFileSync(join(process.cwd(), 'docker-compose.yml'), 'utf8'));

  const serviceNameCollection = Object.keys(dockerComposeFile.services);

  const graph = new DepGraph<DockestService>();

  // First, add all services to the graph
  for (const serviceName of serviceNameCollection) {
    const service = dockerComposeFile.services[serviceName];
    graph.addNode(serviceName, {
      serviceName,
      dependsOn: [],
      readinessCheck: service.healthcheck
        ? containerIsHealthyReadinessCheck
        : zeroExitCodeReadinessCheck,
    });
  }

  // Now, create dependencies between them
  for (const serviceName of serviceNameCollection) {
    const dockerService = dockerComposeFile.services[serviceName];

    if (dockerService.depends_on) {
      for (const depName of Object.keys(dockerService.depends_on)) {
        graph.addDependency(serviceName, depName);
      }
    }
  }

  // Topological order: dependencies come before their dependents.
  const allServices = graph.overallOrder();

  // Resolve each service's direct dependencies into Dockest service objects.
  const registry: {
    [key: string]: DockestService;
  } = {};

  for (const serviceName of allServices) {
    const service = graph.getNodeData(serviceName);
    registry[serviceName] = {
      ...service,
      dependsOn: graph
        .directDependenciesOf(serviceName)
        .map((dep) => graph.getNodeData(dep)),
    };
  }

  // Return the resolved services. (Previously this returned the bare graph
  // nodes, silently discarding the dependsOn lists computed above.)
  return allServices.map((serviceName) => registry[serviceName]);
}
// Stops and removes any containers left over from a previous run whose name
// matches "integration-tests", including their anonymous volumes.
export function cleanDockerContainers() {
  // NOTE(review): relies on dockest's `execa` wrapper returning synchronously
  // with a `stdout` string — confirm against the dockest API when upgrading.
  const output = execa(
    `docker ps --all --filter "name=integration-tests" --format={{.ID}}:{{.Status}}`
  );

  if (output.stdout.length) {
    const runningContainers = output.stdout.split('\n');

    for (const line of runningContainers) {
      // Each line is "<containerId>:<status>", e.g. "abc123:Up 2 minutes".
      const [containerId, containerStatus] = line.split(':');
      const containerRunning = containerStatus?.toLowerCase().includes('up');

      if (containerRunning) {
        console.log(`Stopping container ${containerId}`);
        execa(`docker stop ${containerId}`);
      }

      console.log(`Removing container ${containerId} with its volumes`);
      execa(`docker rm -v -f ${containerId}`);
    }

    console.log('Stopped and removed all containers');
  }
}

View file

@ -0,0 +1,52 @@
type ValueType = 'string' | 'number' | 'boolean';

const prefix = 'Invariant failed';

/**
 * Asserts that `condition` is truthy, otherwise throws an Error.
 * `message` may be a string or a lazy factory for messages that are
 * expensive to compute.
 */
export function invariant(
  condition: any,
  message?: string | (() => string)
): asserts condition {
  if (!condition) {
    const detail: string | undefined =
      typeof message === 'function' ? message() : message;
    // With a message: "<prefix>: <detail>"; without: just the prefix.
    throw new Error(detail ? `${prefix}: ${detail}` : prefix);
  }
}

export function ensureEnv(key: string): string;
export function ensureEnv(key: string, valueType: 'string'): string;
export function ensureEnv(key: string, valueType: 'number'): number;
export function ensureEnv(key: string, valueType: 'boolean'): boolean;
/**
 * Reads an environment variable, treating the `<sync>` placeholder as unset,
 * and coerces it to the requested type. Throws when the variable is missing.
 */
export function ensureEnv(key: string, valueType?: ValueType) {
  const raw = process.env[key];
  // `<sync>` is the placeholder value left in the checked-in .env template.
  const value = raw === '<sync>' ? undefined : raw;

  invariant(typeof value === 'string', `Missing "${key}" environment variable`);

  if (valueType === 'number') {
    return parseInt(value, 10);
  }
  if (valueType === 'boolean') {
    return value === 'true';
  }
  return value;
}

View file

@ -0,0 +1,600 @@
import { gql } from '@app/gql';
import axios from 'axios';
import type {
CreateOrganizationInput,
SchemaPublishInput,
CreateProjectInput,
CreateTokenInput,
OrganizationMemberAccessInput,
SchemaCheckInput,
PublishPersistedOperationInput,
SetTargetValidationInput,
UpdateTargetValidationSettingsInput,
OperationsStatsSelectorInput,
UpdateBaseSchemaInput,
SchemaVersionsInput,
CreateTargetInput,
SchemaVersionUpdateInput,
TargetSelectorInput,
SchemaSyncCdnInput,
} from './gql/graphql';
import { execute } from './graphql';
/** Resolves after `ms` milliseconds — a promisified setTimeout. */
export function waitFor(ms: number) {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}
export function createOrganization(
input: CreateOrganizationInput,
authToken: string
) {
return execute({
document: gql(/* GraphQL */ `
mutation createOrganization($input: CreateOrganizationInput!) {
createOrganization(input: $input) {
organization {
id
name
cleanId
inviteCode
owner {
id
organizationAccessScopes
projectAccessScopes
targetAccessScopes
}
}
}
}
`),
authToken,
variables: {
input,
},
});
}
export function joinOrganization(code: string, authToken: string) {
return execute({
document: gql(/* GraphQL */ `
mutation joinOrganization($code: String!) {
joinOrganization(code: $code) {
__typename
... on OrganizationPayload {
organization {
id
name
cleanId
me {
id
organizationAccessScopes
projectAccessScopes
targetAccessScopes
}
}
}
... on OrganizationInvitationError {
message
}
}
}
`),
authToken,
variables: {
code,
},
});
}
export function createProject(input: CreateProjectInput, authToken: string) {
return execute({
document: gql(/* GraphQL */ `
mutation createProject($input: CreateProjectInput!) {
createProject(input: $input) {
createdProject {
id
cleanId
}
createdTarget {
id
cleanId
}
}
}
`),
authToken,
variables: {
input,
},
});
}
export function createTarget(input: CreateTargetInput, authToken: string) {
return execute({
document: gql(/* GraphQL */ `
mutation createTarget($input: CreateTargetInput!) {
createTarget(input: $input) {
createdTarget {
id
cleanId
}
}
}
`),
authToken,
variables: {
input,
},
});
}
export function createToken(input: CreateTokenInput, authToken: string) {
return execute({
document: gql(/* GraphQL */ `
mutation createToken($input: CreateTokenInput!) {
createToken(input: $input) {
secret
}
}
`),
authToken,
variables: {
input,
},
});
}
export function updateMemberAccess(
input: OrganizationMemberAccessInput,
authToken: string
) {
return execute({
document: gql(/* GraphQL */ `
mutation updateOrganizationMemberAccess(
$input: OrganizationMemberAccessInput!
) {
updateOrganizationMemberAccess(input: $input) {
organization {
cleanId
members {
nodes {
id
organizationAccessScopes
projectAccessScopes
targetAccessScopes
}
}
me {
id
}
}
}
}
`),
authToken,
variables: {
input,
},
});
}
export function publishSchema(input: SchemaPublishInput, token: string) {
return execute({
document: gql(/* GraphQL */ `
mutation schemaPublish($input: SchemaPublishInput!) {
schemaPublish(input: $input) {
__typename
... on SchemaPublishSuccess {
initial
valid
message
changes {
nodes {
message
criticality
}
total
}
}
... on SchemaPublishError {
valid
changes {
nodes {
message
criticality
}
total
}
errors {
nodes {
message
}
total
}
}
}
}
`),
token,
variables: {
input,
},
});
}
export function checkSchema(input: SchemaCheckInput, token: string) {
return execute({
document: gql(/* GraphQL */ `
mutation schemaCheck($input: SchemaCheckInput!) {
schemaCheck(input: $input) {
... on SchemaCheckSuccess {
__typename
valid
changes {
nodes {
message
criticality
}
total
}
}
... on SchemaCheckError {
__typename
valid
changes {
nodes {
message
criticality
}
total
}
errors {
nodes {
message
}
total
}
}
}
}
`),
token,
variables: {
input,
},
});
}
export function setTargetValidation(
input: SetTargetValidationInput,
access:
| {
token: string;
}
| {
authToken: string;
}
) {
return execute({
document: gql(/* GraphQL */ `
mutation setTargetValidation($input: SetTargetValidationInput!) {
setTargetValidation(input: $input) {
enabled
period
percentage
}
}
`),
...access,
variables: {
input,
},
});
}
export function updateTargetValidationSettings(
input: UpdateTargetValidationSettingsInput,
access:
| {
token: string;
}
| {
authToken: string;
}
) {
return execute({
document: gql(/* GraphQL */ `
mutation updateTargetValidationSettings(
$input: UpdateTargetValidationSettingsInput!
) {
updateTargetValidationSettings(input: $input) {
enabled
period
percentage
targets {
id
}
}
}
`),
...access,
variables: {
input,
},
});
}
export function updateBaseSchema(input: UpdateBaseSchemaInput, token: string) {
return execute({
document: gql(/* GraphQL */ `
mutation updateBaseSchema($input: UpdateBaseSchemaInput!) {
updateBaseSchema(input: $input) {
__typename
}
}
`),
token,
variables: {
input,
},
});
}
export function readOperationsStats(
input: OperationsStatsSelectorInput,
token: string
) {
return execute({
document: gql(/* GraphQL */ `
query readOperationsStats($input: OperationsStatsSelectorInput!) {
operationsStats(selector: $input) {
totalOperations
operations {
nodes {
id
document
operationHash
kind
name
count
percentage
duration {
p75
p90
p95
p99
}
}
}
}
}
`),
token,
variables: {
input,
},
});
}
export function fetchLatestSchema(token: string) {
return execute({
document: gql(/* GraphQL */ `
query latestVersion {
latestVersion {
baseSchema
schemas {
nodes {
source
commit
}
total
}
}
}
`),
token,
});
}
export function fetchLatestValidSchema(token: string) {
return execute({
document: gql(/* GraphQL */ `
query latestValidVersion {
latestValidVersion {
id
baseSchema
schemas {
nodes {
source
commit
}
total
}
}
}
`),
token,
});
}
export function fetchVersions(
selector: SchemaVersionsInput,
limit: number,
token: string
) {
return execute({
document: gql(/* GraphQL */ `
query schemaVersions($limit: Int!, $selector: SchemaVersionsInput!) {
schemaVersions(selector: $selector, limit: $limit) {
nodes {
id
valid
date
commit {
source
commit
}
baseSchema
schemas {
nodes {
source
commit
}
}
}
}
}
`),
token,
variables: {
selector,
limit,
},
});
}
export function publishPersistedOperations(
input: PublishPersistedOperationInput[],
token: string
) {
return execute({
document: gql(/* GraphQL */ `
mutation publishPersistedOperations(
$input: [PublishPersistedOperationInput!]!
) {
publishPersistedOperations(input: $input) {
summary {
total
unchanged
}
operations {
id
operationHash
content
name
kind
}
}
}
`),
token,
variables: {
input,
},
});
}
export function updateSchemaVersionStatus(
input: SchemaVersionUpdateInput,
token: string
) {
return execute({
document: gql(/* GraphQL */ `
mutation updateSchemaVersionStatus($input: SchemaVersionUpdateInput!) {
updateSchemaVersionStatus(input: $input) {
id
date
valid
commit {
id
commit
}
}
}
`),
token,
variables: {
input,
},
});
}
export function schemaSyncCDN(input: SchemaSyncCdnInput, token: string) {
return execute({
document: gql(/* GraphQL */ `
mutation schemaSyncCDN($input: SchemaSyncCDNInput!) {
schemaSyncCDN(input: $input) {
__typename
... on SchemaSyncCDNSuccess {
message
}
... on SchemaSyncCDNError {
message
}
}
}
`),
token,
variables: {
input,
},
});
}
export function createCdnAccess(selector: TargetSelectorInput, token: string) {
return execute({
document: gql(/* GraphQL */ `
mutation createCdnToken($selector: TargetSelectorInput!) {
createCdnToken(selector: $selector) {
url
token
}
}
`),
token,
variables: {
selector,
},
});
}
/**
 * Creates a CDN access token for the target, then fetches the published SDL
 * from the CDN's /schema endpoint. Throws with the first GraphQL error
 * message if token creation fails.
 */
export async function fetchSchemaFromCDN(
  selector: TargetSelectorInput,
  token: string
) {
  const cdnAccessResult = await createCdnAccess(selector, token);

  if (cdnAccessResult.body.errors) {
    throw new Error(cdnAccessResult.body.errors[0].message);
  }

  // Safe to assert: no errors means `data` is present.
  const cdn = cdnAccessResult.body.data!.createCdnToken;

  const res = await axios.get<{ sdl: string }>(`${cdn.url}/schema`, {
    headers: {
      'Content-Type': 'application/json',
      'X-Hive-CDN-Key': cdn.token,
    },
    responseType: 'json',
  });

  return {
    body: res.data,
    status: res.status,
  };
}
export async function fetchMetadataFromCDN(
selector: TargetSelectorInput,
token: string
) {
const cdnAccessResult = await createCdnAccess(selector, token);
if (cdnAccessResult.body.errors) {
throw new Error(cdnAccessResult.body.errors[0].message);
}
const cdn = cdnAccessResult.body.data!.createCdnToken;
const res = await axios.get(`${cdn.url}/metadata`, {
headers: {
'Content-Type': 'application/json',
'X-Hive-CDN-Key': cdn.token,
},
responseType: 'json',
});
return {
body: res.data,
status: res.status,
};
}

View file

@ -0,0 +1,44 @@
import * as utils from 'dockest/test-helper';
import axios from 'axios';
import type { ExecutionResult } from 'graphql';
import { TypedDocumentNode } from '@graphql-typed-document-node/core';
const registryAddress = utils.getServiceAddress('server', 3001);
/**
 * Sends a GraphQL operation to the API server and returns { body, status }.
 *
 * Authentication: `authToken` is sent as a Bearer token (user session),
 * `token` as X-API-Token (registry access token); both are optional.
 */
export async function execute<R, V>(params: {
  document: TypedDocumentNode<R, V>;
  operationName?: string;
  variables?: V;
  authToken?: string;
  token?: string;
}) {
  const res = await axios.post<ExecutionResult<R>>(
    `http://${registryAddress}/graphql`,
    {
      // NOTE(review): `document` is a TypedDocumentNode, not a string — axios
      // serializes whatever the @app/gql `gql` helper produces at runtime.
      // Confirm the server accepts that representation (or that it is in
      // fact a string at runtime).
      query: params.document,
      operationName: params.operationName,
      variables: params.variables,
    },
    {
      headers: {
        'Content-Type': 'application/json',
        ...(params.authToken
          ? {
              Authorization: `Bearer ${params.authToken}`,
            }
          : {}),
        ...(params.token
          ? {
              'X-API-Token': params.token,
            }
          : {}),
      },
      responseType: 'json',
    }
  );

  return {
    body: res.data,
    status: res.status,
  };
}

View file

@ -0,0 +1,22 @@
/* eslint-disable import/no-extraneous-dependencies */
import Redis from 'ioredis';
/**
 * Deletes every key in database 0 of the given Redis instance.
 *
 * Used between test runs to guarantee a clean slate.
 *
 * Fix: the connection is now always closed afterwards (even when the scan
 * or delete fails). The previous implementation never called `quit()`, so
 * each invocation leaked an open socket that could keep the test process
 * alive after the suite finished.
 */
export const resetRedis = async (conn: {
  host: string;
  port: number;
  password: string;
}) => {
  const redis = new Redis({
    host: conn.host,
    port: conn.port,
    password: conn.password,
    db: 0,
    maxRetriesPerRequest: 5,
    enableReadyCheck: true,
  });

  try {
    const keys = await redis.keys('*');
    if (keys?.length) {
      await redis.del(keys);
    }
  } finally {
    // Gracefully close the connection so no socket is leaked.
    await redis.quit();
  }
};

View file

@ -0,0 +1,38 @@
import * as utils from 'dockest/test-helper';
import axios from 'axios';
// Address of the usage-ingestion service inside the dockest network.
const usageAddress = utils.getServiceAddress('usage', 3006);

/**
 * One operation report in the batch sent to the usage service (see `collect`).
 */
export interface CollectedOperation {
  /** When the operation executed; presumably epoch millis — confirm against the usage service. */
  timestamp?: number;
  /** The GraphQL operation document as a string. */
  operation: string;
  /** Name of the operation within the document, if it has one. */
  operationName?: string;
  /** Schema coordinates touched by the operation. */
  fields: string[];
  /** Outcome of the execution being reported. */
  execution: {
    ok: boolean;
    duration: number;
    errorsTotal: number;
  };
  /** Optional reporting-client identification. */
  metadata?: {
    client?: {
      name?: string;
      version?: string;
    };
  };
}
/**
 * POSTs a batch of collected operations to the usage service,
 * authenticating with the given registry token.
 * Resolves with only the HTTP status code.
 */
export async function collect(params: {
  operations: CollectedOperation[];
  token: string;
}) {
  const response = await axios.post(
    `http://${usageAddress}`,
    params.operations,
    {
      headers: {
        'Content-Type': 'application/json',
        'X-API-Token': params.token,
      },
    }
  );

  return {
    status: response.status,
  };
}

View file

@ -0,0 +1,141 @@
import {
OrganizationAccessScope,
ProjectAccessScope,
TargetAccessScope,
} from '@app/gql/graphql';
import {
createOrganization,
joinOrganization,
updateMemberAccess,
} from '../../../testkit/flow';
import { authenticate } from '../../../testkit/auth';
// The creator of an organization must be granted every scope on every level.
test('owner of an organization should have all scopes', async () => {
  const { access_token } = await authenticate('main');
  const result = await createOrganization({ name: 'foo' }, access_token);

  expect(result.body.errors).not.toBeDefined();

  const owner = result.body.data!.createOrganization.organization.owner;

  for (const scope of Object.values(OrganizationAccessScope)) {
    expect(owner.organizationAccessScopes).toContain(scope);
  }
  for (const scope of Object.values(ProjectAccessScope)) {
    expect(owner.projectAccessScopes).toContain(scope);
  }
  for (const scope of Object.values(TargetAccessScope)) {
    expect(owner.targetAccessScopes).toContain(scope);
  }
});
// A freshly joined member gets only the read-level scopes, nothing more.
test('regular member of an organization should have basic scopes', async () => {
  const { access_token: owner_access_token } = await authenticate('main');
  const orgResult = await createOrganization(
    { name: 'foo' },
    owner_access_token
  );

  // A second user joins via the invite code.
  const { access_token: member_access_token } = await authenticate('extra');
  const inviteCode =
    orgResult.body.data!.createOrganization.organization.inviteCode;
  const joinResult = await joinOrganization(inviteCode, member_access_token);

  expect(joinResult.body.errors).not.toBeDefined();
  expect(joinResult.body.data?.joinOrganization.__typename).toBe(
    'OrganizationPayload'
  );

  // Narrow the union type for the assertions below.
  if (
    joinResult.body.data!.joinOrganization.__typename !== 'OrganizationPayload'
  ) {
    throw new Error('Join failed');
  }

  const member = joinResult.body.data!.joinOrganization.organization.me;

  // Organization level: read access only.
  expect(member.organizationAccessScopes).toContainEqual(
    OrganizationAccessScope.Read
  );
  expect(member.organizationAccessScopes).toHaveLength(1);

  // Project level: read + operations-store read only.
  expect(member.projectAccessScopes).toContainEqual(ProjectAccessScope.Read);
  expect(member.projectAccessScopes).toContainEqual(
    ProjectAccessScope.OperationsStoreRead
  );
  expect(member.projectAccessScopes).toHaveLength(2);

  // Target level: read + registry read only.
  expect(member.targetAccessScopes).toContainEqual(TargetAccessScope.Read);
  expect(member.targetAccessScopes).toContainEqual(
    TargetAccessScope.RegistryRead
  );
  expect(member.targetAccessScopes).toHaveLength(2);
});
// A member must not be able to hand out scopes they do not hold themselves,
// even when they are allowed to manage members.
test('cannot grant an access scope to another user if user has no access to that scope', async () => {
  const { access_token: owner_access_token } = await authenticate('main');
  const orgResult = await createOrganization(
    {
      name: 'foo',
    },
    owner_access_token
  );
  // Join
  const { access_token: member_access_token } = await authenticate('extra');
  const org = orgResult.body.data!.createOrganization.organization;
  const code = org.inviteCode;
  const joinResult = await joinOrganization(code, member_access_token);
  // Narrow the union type; anything but OrganizationPayload means the join failed.
  if (
    joinResult.body.data!.joinOrganization.__typename !== 'OrganizationPayload'
  ) {
    throw new Error(
      `Join failed: ${joinResult.body.data!.joinOrganization.message}`
    );
  }
  const member = joinResult.body.data!.joinOrganization.organization.me;
  // Grant organization:members access
  // (lets the member manage other members, but grants no target scopes).
  await updateMemberAccess(
    {
      organization: org.cleanId,
      organizationScopes: [OrganizationAccessScope.Members],
      projectScopes: [],
      targetScopes: [],
      user: member.id,
    },
    owner_access_token
  );
  // Grant access to target:tokens:write —
  // the member tries to grant a scope they do not hold, which must fail.
  const accessResult = await updateMemberAccess(
    {
      organization: org.cleanId,
      organizationScopes: [],
      projectScopes: [],
      targetScopes: [TargetAccessScope.TokensWrite],
      user: member.id,
    },
    member_access_token
  );
  // The error message must name the missing scope.
  expect(accessResult.body.errors).toHaveLength(1);
  expect(accessResult.body.errors![0].message).toMatch('target:tokens:write');
});

View file

@ -0,0 +1,212 @@
import { ProjectType, ProjectAccessScope } from '@app/gql/graphql';
import {
createOrganization,
publishPersistedOperations,
createProject,
createToken,
} from '../../../testkit/flow';
import { authenticate } from '../../../testkit/auth';
// Persisting operations is gated on project:operations-store:write:
// tokens with no scopes or read-only scopes must be rejected.
test('can publish persisted operations only with project:operations-store:write', async () => {
  const { access_token: owner_access_token } = await authenticate('main');
  const orgResult = await createOrganization(
    {
      name: 'foo',
    },
    owner_access_token
  );
  const org = orgResult.body.data!.createOrganization.organization;
  const projectResult = await createProject(
    {
      organization: org.cleanId,
      type: ProjectType.Single,
      name: 'foo',
    },
    owner_access_token
  );
  const project = projectResult.body.data!.createProject.createdProject;
  const target = projectResult.body.data!.createProject.createdTarget;
  // Create a token with no rights
  const noAccessTokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [],
      targetScopes: [],
    },
    owner_access_token
  );
  expect(noAccessTokenResult.body.errors).not.toBeDefined();
  // Create a token with read rights
  const readTokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [ProjectAccessScope.OperationsStoreRead],
      targetScopes: [],
    },
    owner_access_token
  );
  expect(readTokenResult.body.errors).not.toBeDefined();
  // Create a token with write rights
  const writeTokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [
        ProjectAccessScope.OperationsStoreRead,
        ProjectAccessScope.OperationsStoreWrite,
      ],
      targetScopes: [],
    },
    owner_access_token
  );
  expect(writeTokenResult.body.errors).not.toBeDefined();
  const writeToken = writeTokenResult.body.data!.createToken.secret;
  const readToken = readTokenResult.body.data!.createToken.secret;
  const noAccessToken = noAccessTokenResult.body.data!.createToken.secret;
  // Two operations: one with an explicit hash, one without (hash assigned on publish).
  const operations = [
    {
      content: `query Me { me { id } }`,
      operationHash: 'meme',
    },
    {
      content: `query user($id: ID!) { user(id: $id) { id } }`,
    },
  ];
  // Cannot persist operations with no read and write rights
  let result = await publishPersistedOperations(operations, noAccessToken);
  expect(result.body.errors).toHaveLength(1);
  expect(result.body.errors![0].message).toMatch(
    'project:operations-store:write'
  );
  // Cannot persist operations with read rights
  result = await publishPersistedOperations(operations, readToken);
  expect(result.body.errors).toHaveLength(1);
  expect(result.body.errors![0].message).toMatch(
    'project:operations-store:write'
  );
  // Persist operations with write rights
  result = await publishPersistedOperations(operations, writeToken);
  expect(result.body.errors).not.toBeDefined();
  const persisted = result.body.data!.publishPersistedOperations;
  // Check the result — both operations are new, so nothing is "unchanged".
  expect(persisted.summary.total).toEqual(2);
  expect(persisted.summary.unchanged).toEqual(0);
  expect(persisted.operations).toHaveLength(2);
  expect(persisted.operations[0].operationHash).toEqual(
    operations[0].operationHash
  );
  // The hash-less operation comes back with a hash assigned.
  expect(persisted.operations[1].operationHash).toBeDefined();
});
// Re-publishing an operation with the same hash must be a no-op for that
// operation, reported via `summary.unchanged`.
test('should skip on already persisted operations', async () => {
  const { access_token: owner_access_token } = await authenticate('main');
  const orgResult = await createOrganization(
    {
      name: 'foo',
    },
    owner_access_token
  );
  const org = orgResult.body.data!.createOrganization.organization;
  const projectResult = await createProject(
    {
      organization: org.cleanId,
      type: ProjectType.Single,
      name: 'foo',
    },
    owner_access_token
  );
  const project = projectResult.body.data!.createProject.createdProject;
  const target = projectResult.body.data!.createProject.createdTarget;
  // Create a token with write rights
  const writeTokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [
        ProjectAccessScope.OperationsStoreRead,
        ProjectAccessScope.OperationsStoreWrite,
      ],
      targetScopes: [],
    },
    owner_access_token
  );
  expect(writeTokenResult.body.errors).not.toBeDefined();
  const writeToken = writeTokenResult.body.data!.createToken.secret;
  // One operation with an explicit hash, one without.
  const operations = [
    {
      content: `query Me { me { id } }`,
      operationHash: 'meme',
    },
    {
      content: `query user($id: ID!) { user(id: $id) { id } }`,
    },
  ];
  // Persist operations
  let result = await publishPersistedOperations(operations, writeToken);
  expect(result.body.errors).not.toBeDefined();
  let persisted = result.body.data!.publishPersistedOperations;
  // Check the result — first publish: both operations are new.
  expect(persisted.summary.total).toEqual(2);
  expect(persisted.summary.unchanged).toEqual(0);
  expect(persisted.operations).toHaveLength(2);
  expect(persisted.operations[0].operationHash).toEqual(
    operations[0].operationHash
  );
  expect(persisted.operations[1].operationHash).toBeDefined();
  // Publish again with the same write token: give the second operation a new
  // explicit hash, while the first ('meme') is unchanged and should be skipped.
  operations[1].operationHash = 'useruser';
  result = await publishPersistedOperations(operations, writeToken);
  expect(result.body.errors).not.toBeDefined();
  persisted = result.body.data!.publishPersistedOperations;
  // Check the result — one unchanged (skipped), one newly persisted.
  expect(persisted.summary.total).toEqual(2);
  expect(persisted.summary.unchanged).toEqual(1);
  expect(persisted.operations).toHaveLength(2);
  const meOperation = persisted.operations.find(
    (op) => op.operationHash === operations[0].operationHash
  );
  const userOperation = persisted.operations.find(
    (op) => op.operationHash === operations[1].operationHash
  );
  expect(meOperation?.operationHash).toEqual(operations[0].operationHash);
  expect(userOperation?.operationHash).toEqual(operations[1].operationHash);
});

View file

@ -0,0 +1,242 @@
import { TargetAccessScope, ProjectType } from '@app/gql/graphql';
import {
createOrganization,
joinOrganization,
publishSchema,
checkSchema,
createProject,
createToken,
} from '../../../testkit/flow';
import { authenticate } from '../../../testkit/auth';
// Schema checks require target:registry:read; a token without any scopes must
// be rejected, while a read-only token is sufficient.
test('can check a schema with target:registry:read access', async () => {
  const { access_token: owner_access_token } = await authenticate('main');
  const orgResult = await createOrganization(
    {
      name: 'foo',
    },
    owner_access_token
  );
  const org = orgResult.body.data!.createOrganization.organization;
  const code = org.inviteCode;
  // Join
  const { access_token: member_access_token } = await authenticate('extra');
  await joinOrganization(code, member_access_token);
  const projectResult = await createProject(
    {
      organization: org.cleanId,
      type: ProjectType.Single,
      name: 'foo',
    },
    owner_access_token
  );
  const project = projectResult.body.data!.createProject.createdProject;
  const target = projectResult.body.data!.createProject.createdTarget;
  // Create a token with write rights
  const writeTokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [],
      targetScopes: [
        TargetAccessScope.RegistryRead,
        TargetAccessScope.RegistryWrite,
      ],
    },
    owner_access_token
  );
  expect(writeTokenResult.body.errors).not.toBeDefined();
  const writeToken = writeTokenResult.body.data!.createToken.secret;
  // Publish schema with write rights
  // (a published schema gives the later checks a baseline to diff against).
  const publishResult = await publishSchema(
    {
      author: 'Kamil',
      commit: 'abc123',
      sdl: `type Query { ping: String }`,
    },
    writeToken
  );
  // Schema publish should be successful
  expect(publishResult.body.errors).not.toBeDefined();
  expect(publishResult.body.data!.schemaPublish.__typename).toBe(
    'SchemaPublishSuccess'
  );
  // Create a token with no rights
  const noAccessTokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [],
      targetScopes: [],
    },
    owner_access_token
  );
  expect(noAccessTokenResult.body.errors).not.toBeDefined();
  // Create a token with read rights
  const readTokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [],
      targetScopes: [TargetAccessScope.RegistryRead],
    },
    owner_access_token
  );
  expect(readTokenResult.body.errors).not.toBeDefined();
  const readToken = readTokenResult.body.data!.createToken.secret;
  const noAccessToken = noAccessTokenResult.body.data!.createToken.secret;
  // Check schema with no read and write rights —
  // expect a single error naming the missing scope.
  let checkResult = await checkSchema(
    {
      sdl: `type Query { ping: String foo: String }`,
    },
    noAccessToken
  );
  expect(checkResult.body.errors).toHaveLength(1);
  expect(checkResult.body.errors![0].message).toMatch('target:registry:read');
  // Check schema with read rights — read-only access is enough for a check.
  checkResult = await checkSchema(
    {
      sdl: `type Query { ping: String foo: String }`,
    },
    readToken
  );
  expect(checkResult.body.errors).not.toBeDefined();
  expect(checkResult.body.data!.schemaCheck.__typename).toBe(
    'SchemaCheckSuccess'
  );
});
// Re-quoting / re-padding a description only (no semantic change) must not be
// reported as a schema change: changes.total stays 0.
test('should match indentation of previous description', async () => {
  const { access_token: owner_access_token } = await authenticate('main');
  const orgResult = await createOrganization(
    {
      name: 'foo',
    },
    owner_access_token
  );
  const org = orgResult.body.data!.createOrganization.organization;
  const code = org.inviteCode;
  // Join
  const { access_token: member_access_token } = await authenticate('extra');
  await joinOrganization(code, member_access_token);
  const projectResult = await createProject(
    {
      organization: org.cleanId,
      type: ProjectType.Single,
      name: 'foo',
    },
    owner_access_token
  );
  const project = projectResult.body.data!.createProject.createdProject;
  const target = projectResult.body.data!.createProject.createdTarget;
  // Create a token with write rights
  const writeTokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [],
      targetScopes: [
        TargetAccessScope.RegistryRead,
        TargetAccessScope.RegistryWrite,
      ],
    },
    owner_access_token
  );
  expect(writeTokenResult.body.errors).not.toBeDefined();
  const writeToken = writeTokenResult.body.data!.createToken.secret;
  // Publish schema with write rights —
  // baseline schema uses single-quoted descriptions with padding.
  const publishResult = await publishSchema(
    {
      author: 'Kamil',
      commit: 'abc123',
      sdl: `
        type Query {
          " ping-ping "
          ping: String
          "pong-pong"
          pong: String
        }
      `,
    },
    writeToken
  );
  // Schema publish should be successful
  expect(publishResult.body.errors).not.toBeDefined();
  expect(publishResult.body.data!.schemaPublish.__typename).toBe(
    'SchemaPublishSuccess'
  );
  // Create a token with read rights
  const readTokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [],
      targetScopes: [TargetAccessScope.RegistryRead],
    },
    owner_access_token
  );
  expect(readTokenResult.body.errors).not.toBeDefined();
  const readToken = readTokenResult.body.data!.createToken.secret;
  // Check schema with read rights —
  // same descriptions, but block-quoted / re-padded: semantically identical.
  const checkResult = await checkSchema(
    {
      sdl: `
        type Query {
          """
          ping-ping
          """
          ping: String
          " pong-pong "
          pong: String
        }
      `,
    },
    readToken
  );
  expect(checkResult.body.errors).not.toBeDefined();
  const check = checkResult.body.data!.schemaCheck;
  // Narrow the union before reading `changes`.
  if (check.__typename !== 'SchemaCheckSuccess') {
    throw new Error(`Expected SchemaCheckSuccess, got ${check.__typename}`);
  }
  expect(check.__typename).toBe('SchemaCheckSuccess');
  expect(check.changes!.total).toBe(0);
});

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,103 @@
import { TargetAccessScope, ProjectType } from '@app/gql/graphql';
import {
createOrganization,
joinOrganization,
publishSchema,
createProject,
createToken,
fetchSchemaFromCDN,
schemaSyncCDN,
} from '../../../testkit/flow';
import { authenticate } from '../../../testkit/auth';
// Publishing a schema makes it available on the CDN, and a forced
// schemaSyncCDN re-upload must keep serving the same schema.
test('marking only the most recent version as valid result in an update of CDN', async () => {
  const { access_token: owner_access_token } = await authenticate('main');
  const orgResult = await createOrganization(
    {
      name: 'foo',
    },
    owner_access_token
  );
  // Join
  const { access_token: member_access_token } = await authenticate('extra');
  const org = orgResult.body.data!.createOrganization.organization;
  const code = org.inviteCode;
  await joinOrganization(code, member_access_token);
  const projectResult = await createProject(
    {
      organization: org.cleanId,
      type: ProjectType.Single,
      name: 'foo',
    },
    owner_access_token
  );
  const project = projectResult.body.data!.createProject.createdProject;
  const target = projectResult.body.data!.createProject.createdTarget;
  // Registry read+write token — needed both to publish and to sync the CDN.
  const tokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [],
      targetScopes: [
        TargetAccessScope.RegistryRead,
        TargetAccessScope.RegistryWrite,
      ],
    },
    owner_access_token
  );
  expect(tokenResult.body.errors).not.toBeDefined();
  const token = tokenResult.body.data!.createToken.secret;
  // Initial schema
  const publishResult = await publishSchema(
    {
      author: 'Kamil',
      commit: 'c0',
      sdl: `type Query { ping: String }`,
    },
    token
  );
  expect(publishResult.body.errors).not.toBeDefined();
  expect(publishResult.body.data!.schemaPublish.__typename).toBe(
    'SchemaPublishSuccess'
  );
  const targetSelector = {
    organization: org.cleanId,
    project: project.cleanId,
    target: target.cleanId,
  };
  // the initial version should be available on the CDN
  let cdnResult = await fetchSchemaFromCDN(targetSelector, token);
  expect(cdnResult.body.sdl).toContain('ping');
  // Force a re-upload of the schema to CDN
  const syncResult = await schemaSyncCDN(
    {
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
    },
    token
  );
  expect(syncResult.body.errors).not.toBeDefined();
  expect(syncResult.body.data!.schemaSyncCDN.__typename).toBe(
    'SchemaSyncCDNSuccess'
  );
  // the same schema should still be served from the CDN after the forced sync
  cdnResult = await fetchSchemaFromCDN(targetSelector, token);
  expect(cdnResult.body.sdl).toContain('ping');
});

View file

@ -0,0 +1,62 @@
import { gql } from '@app/gql';
import { execute } from '../../testkit/graphql';
import { authenticate } from '../../testkit/auth';
// The very first authenticated request for a new user must provision
// exactly one personal organization.
test('should auto-create an organization for freshly signed-up user', async () => {
  const { access_token } = await authenticate('main');

  const organizationsQuery = gql(/* GraphQL */ `
      query organizations {
        organizations {
          total
          nodes {
            id
            name
          }
        }
      }
    `);

  const result = await execute({
    document: organizationsQuery,
    authToken: access_token,
  });

  expect(result.body.errors).not.toBeDefined();
  expect(result.body.data?.organizations.total).toBe(1);
});
// Two concurrent first requests must still result in exactly one
// auto-created organization (no race on first sign-in).
test('should auto-create an organization for freshly signed-up user with no race-conditions', async () => {
  const { access_token } = await authenticate('main');

  // Single shared document — both concurrent requests run the same query.
  const organizationsQuery = gql(/* GraphQL */ `
      query organizations {
        organizations {
          total
          nodes {
            id
            name
          }
        }
      }
    `);

  // Fire both requests without awaiting in between so they overlap.
  const query1 = execute({
    document: organizationsQuery,
    authToken: access_token,
  });
  const query2 = execute({
    document: organizationsQuery,
    authToken: access_token,
  });

  const [result1, result2] = await Promise.all([query1, query2]);

  // Both responses must agree: exactly one organization exists.
  expect(result1.body.errors).not.toBeDefined();
  expect(result1.body.data?.organizations.total).toBe(1);
  expect(result2.body.errors).not.toBeDefined();
  expect(result2.body.data?.organizations.total).toBe(1);
});

View file

@ -0,0 +1,80 @@
import { TargetAccessScope, ProjectType } from '@app/gql/graphql';
import {
createOrganization,
joinOrganization,
createProject,
createToken,
updateMemberAccess,
} from '../../../testkit/flow';
import { authenticate } from '../../../testkit/auth';
// Token scopes are capped by the creating member's own scopes: a member
// without target:registry:write cannot mint a token that has it.
test('cannot set a scope on a token if user has no access to that scope', async () => {
  const { access_token: owner_access_token } = await authenticate('main');
  const orgResult = await createOrganization(
    {
      name: 'foo',
    },
    owner_access_token
  );
  // Join
  const { access_token: member_access_token } = await authenticate('extra');
  const org = orgResult.body.data!.createOrganization.organization;
  const code = org.inviteCode;
  const joinResult = await joinOrganization(code, member_access_token);
  const projectResult = await createProject(
    {
      organization: org.cleanId,
      type: ProjectType.Single,
      name: 'foo',
    },
    owner_access_token
  );
  // Narrow the union type; anything but OrganizationPayload means the join failed.
  if (
    joinResult.body.data!.joinOrganization.__typename !== 'OrganizationPayload'
  ) {
    throw new Error(
      `Join failed: ${joinResult.body.data!.joinOrganization.message}`
    );
  }
  const member = joinResult.body.data!.joinOrganization.organization.me;
  const project = projectResult.body.data!.createProject.createdProject;
  const target = projectResult.body.data!.createProject.createdTarget;
  // Give access to tokens
  // (read/registry-read plus token management — but NOT registry:write).
  await updateMemberAccess(
    {
      organization: org.cleanId,
      organizationScopes: [],
      projectScopes: [],
      targetScopes: [
        TargetAccessScope.Read,
        TargetAccessScope.RegistryRead,
        TargetAccessScope.TokensRead,
        TargetAccessScope.TokensWrite,
      ],
      user: member.id,
    },
    owner_access_token
  );
  // member should not have access to target:registry:write,
  // so creating a token carrying that scope must be rejected.
  const tokenResult = await createToken(
    {
      name: 'test',
      organization: org.cleanId,
      project: project.cleanId,
      target: target.cleanId,
      organizationScopes: [],
      projectScopes: [],
      targetScopes: [TargetAccessScope.RegistryWrite],
    },
    member_access_token
  );
  // The error must name the scope the member is missing.
  expect(tokenResult.body.errors).toHaveLength(1);
  expect(tokenResult.body.errors![0].message).toMatch('target:registry:write');
});

Some files were not shown because too many files have changed in this diff Show more