Optional s3_session_token variable and remove setting region to auto (#2674)

Co-authored-by: Saurav Tapader <tapaderster@gmail.com>
This commit is contained in:
Kamil Kisiela 2023-07-27 13:34:29 +02:00 committed by GitHub
parent efc044d498
commit 7c89f7a310
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
15 changed files with 288 additions and 282 deletions

View file

@ -66,7 +66,7 @@ jobs:
recursive: false
files:
packages/services/broker-worker/dist/index.worker.js
packages/services/cdn-worker/dist/index.worker.js
packages/services/cdn-worker/dist/index.worker.mjs
dest: ${{ inputs.imageTag }}.zip
- name: upload artifact

View file

@ -21,14 +21,6 @@ jobs:
- name: lint .env.template files
run: pnpm lint:env-template
- name: eslint cache
uses: actions/cache@v3
with:
path: '.eslintcache'
key: ${{ runner.os }}-eslint-${{ hashFiles('pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-eslint
- name: Cache ESLint and Prettier
uses: actions/cache@v3
with:
@ -41,22 +33,30 @@ jobs:
- name: eslint
id: eslint
run: pnpm lint
run: |
pnpm lint || \
( \
echo "" && \
echo "To fix it, run" && \
echo "$ pnpm run lint:fix" && \
echo "" && \
exit 1 \
)
env:
TIMING: 'true'
DEBUG: 'eslint:cli-engine'
- name: eslint tip
if: ${{ steps.eslint.outcome }} == 'failure'
run: echo "Run pnpm lint --fix"
- name: prettier
id: prettier
run: pnpm lint:prettier
- name: eslint tip
if: ${{ steps.prettier.outcome }} == 'failure'
run: echo "Run pnpm format"
shell: sh
run: |
pnpm lint:prettier || \
( \
echo "" && \
echo "To fix it, run" && \
echo "$ pnpm run prettier" && \
echo "" && \
exit 1 \
)
- name: Generate Types
run: yarn graphql:generate

View file

@ -29,10 +29,11 @@ export class CloudflareCDN {
content: readFileSync(
// eslint-disable-next-line no-process-env
process.env.CDN_WORKER_ARTIFACT_PATH ||
resolve(__dirname, '../../packages/services/cdn-worker/dist/index.worker.js'),
resolve(__dirname, '../../packages/services/cdn-worker/dist/index.worker.mjs'),
'utf-8',
),
name: `hive-storage-cdn-${this.config.envName}`,
module: true,
kvNamespaceBindings: [
{
// HIVE_DATA is in use in cdn-script.js as well, its the name of the global variable

View file

@ -18,7 +18,7 @@ Integration tests are based pre-built Docker images, so you can run it in 2 mode
#### Running from Source Code
**TL;DR**: Use `pnpm integration:prepare` command to setup the complete environment from locally
**TL;DR**: Use `pnpm integration:prepare` command to set up the complete environment for locally
running integration tests. You can ignore the rest of the commands in this section, if this script
worked for you, and just run `pnpm test:integration` to run the actual tests.
@ -27,32 +27,30 @@ image.
To do so, follow these instructions:
2. Install all deps: `pnpm i`
3. Generate types: `pnpm graphql:generate`
4. Build source code: `pnpm build`
5. Set env vars:
```bash
export COMMIT_SHA="local"
export RELEASE="local"
export BRANCH_NAME="local"
export BUILD_TYPE=""
export DOCKER_TAG=":local"
```
6. Compile a local Docker image by running:
1. Install all deps: `pnpm i`
2. Generate types: `pnpm graphql:generate`
3. Build source code: `pnpm build`
4. Set env vars:
```bash
export COMMIT_SHA="local"
export RELEASE="local"
export BRANCH_NAME="local"
export BUILD_TYPE=""
export DOCKER_TAG=":local"
```
5. Compile a local Docker image by running:
`docker buildx bake -f docker/docker.hcl integration-tests --load`
7. Use Docker Compose to run the built containers (based on `community` compose file), along with
6. Use Docker Compose to run the built containers (based on `community` compose file), along with
the extra containers:
```bash
export DOCKER_TAG=":local"
export DOCKER_REGISTRY=""
```bash
export DOCKER_TAG=":local"
export DOCKER_REGISTRY=""
docker compose -f ./docker/docker-compose.community.yml -f ./integration-tests/docker-compose.integration.yaml --env-file ./integration-tests/.env up -d --wait
```
docker compose -f ./docker/docker-compose.community.yml -f ./integration-tests/docker-compose.integration.yaml --env-file ./integration-tests/.env up -d --wait
```
8. Run the tests: `pnpm --filter integration-tests test:integration`
7. Run the tests: `pnpm --filter integration-tests test:integration`
#### Running from Pre-Built Docker Image
@ -66,12 +64,12 @@ To run integration tests locally, from the pre-build Docker image, follow:
is done successfully)
5. Set the needed env vars, and use Docker Compose to run all local services:
```bash
export DOCKER_REGISTRY="ghcr.io/kamilkisiela/graphql-hive/"
export DOCKER_TAG=":IMAGE_TAG_HERE"
docker compose -f ./docker/docker-compose.community.yml -f ./integration-tests/docker-compose.integration.yaml --env-file ./integration-tests/.env up -d --wait
```
```bash
export DOCKER_REGISTRY="ghcr.io/kamilkisiela/graphql-hive/"
export DOCKER_TAG=":IMAGE_TAG_HERE"
docker compose -f ./docker/docker-compose.community.yml -f ./integration-tests/docker-compose.integration.yaml --env-file ./integration-tests/.env up -d --wait
```
6. Run the tests: `pnpm --filter integration-tests test:integration`
@ -90,15 +88,13 @@ To run e2e tests locally, from the local source code, follow:
3. Generate types: `pnpm graphql:generate`
4. Build source code: `pnpm build`
5. Set env vars:
```bash
export COMMIT_SHA="local"
export RELEASE="local"
export BRANCH_NAME="local"
export BUILD_TYPE=""
export DOCKER_TAG=":local"
```
```bash
export COMMIT_SHA="local"
export RELEASE="local"
export BRANCH_NAME="local"
export BUILD_TYPE=""
export DOCKER_TAG=":local"
```
6. Compile a local Docker image by running: `docker buildx bake -f docker/docker.hcl build --load`
7. Run the e2e environment, by running:
`docker compose -f ./docker/docker-compose.community.yml -f ./docker/docker-compose.end2end.yml --env-file ./integration-tests/.env up -d --wait`
@ -114,12 +110,10 @@ To run integration tests locally, from the pre-build Docker image, follow:
3. Generate types: `pnpm graphql:generate`
4. Build source code: `pnpm build`
5. Decide on the commit ID / Docker image tag you would like to use and set it as env var:
```bash
export DOCKER_REGISTRY="ghcr.io/kamilkisiela/graphql-hive/"
export DOCKER_TAG=":IMAGE_TAG_HERE"
```
```bash
export DOCKER_REGISTRY="ghcr.io/kamilkisiela/graphql-hive/"
export DOCKER_TAG=":IMAGE_TAG_HERE"
```
6. Run the e2e environment, by running:
`docker compose -f ./docker/docker-compose.community.yml --env-file ./integration-tests/.env up -d --wait`
7. Run Cypress: `pnpm test:e2e`
@ -142,6 +136,6 @@ Keep in mind that integration tests are running a combination of 2 Docker Compos
If you are having issues with running Docker images, follow these instructions:
1. Make sure you have the latest Docker installed.
1. Make sure no containers are running (`docker ps` and then `docker stop CONTAINER_ID`).
1. Delete the local volume used for testing, it's located under `.hive` directory.
1. Try to run `docker system prune` to clean all the Docker images, containers, networks and caches.
2. Make sure no containers are running (`docker ps` and then `docker stop CONTAINER_ID`).
3. Delete the local volume used for testing, it's located under `.hive` directory.
4. Try to run `docker system prune` to clean all the Docker images, containers, networks and caches.

View file

@ -127,6 +127,7 @@ export function createRegistry({
endpoint: string;
accessKeyId: string;
secretAccessKeyId: string;
sessionToken?: string;
};
encryptionSecret: string;
feedback: {
@ -145,8 +146,8 @@ export function createRegistry({
client: new AwsClient({
accessKeyId: s3.accessKeyId,
secretAccessKey: s3.secretAccessKeyId,
sessionToken: s3.sessionToken,
service: 's3',
region: 'auto',
}),
bucket: s3.bucketName,
endpoint: s3.endpoint,

View file

@ -29,6 +29,7 @@ import { build } from 'esbuild';
bundle: true,
platform: 'browser',
target: 'chrome95',
format: 'esm',
minify: false,
outfile: workerOutputPath,
treeShaking: true,

View file

@ -6,7 +6,7 @@ Hive comes with a CDN worker (deployed to CF Workers), along with KV cache to st
To run Hive CDN locally, you can use the following command: `pnpm dev`.
> Note: during dev, KV is mocked using JS `Map`, so it's ephermal and will be deleted with any
> Note: during dev, KV is mocked using JS `Map`, so it's ephemeral and will be deleted with any
> change in code.
To publish manually a schema, for target id `1`:

View file

@ -3,38 +3,37 @@ import { dirname } from 'path';
import { fileURLToPath } from 'url';
import { build } from 'esbuild';
(async function main() {
console.log('🚀 Building CDN Worker...');
const __dirname = dirname(fileURLToPath(import.meta.url));
const nodeOutputPath = `${__dirname}/dist/index.nodejs.js`;
const workerOutputPath = `${__dirname}/dist/index.worker.js`;
console.log('🚀 Building CDN Worker...');
const __dirname = dirname(fileURLToPath(import.meta.url));
const nodeOutputPath = `${__dirname}/dist/index.nodejs.js`;
const workerOutputPath = `${__dirname}/dist/index.worker.mjs`;
await Promise.all([
// Build for integration tests, and expect it to run on NodeJS
build({
entryPoints: [`${__dirname}/src/dev.ts`],
bundle: true,
platform: 'node',
target: 'node18',
minify: false,
outfile: nodeOutputPath,
treeShaking: true,
}).then(result => {
console.log(`✅ Built for NodeJS: "${nodeOutputPath}"`);
return result;
}),
// Build for CloudFlare Worker environment
build({
entryPoints: [`${__dirname}/src/index.ts`],
bundle: true,
platform: 'browser',
target: 'chrome95',
minify: false,
outfile: workerOutputPath,
treeShaking: true,
}).then(result => {
console.log(`✅ Built for CloudFlare Worker: "${workerOutputPath}"`);
return result;
}),
]);
})();
await Promise.all([
// Build for integration tests, and expect it to run on NodeJS
build({
entryPoints: [`${__dirname}/src/dev.ts`],
bundle: true,
platform: 'node',
target: 'node18',
minify: false,
outfile: nodeOutputPath,
treeShaking: true,
}).then(result => {
console.log(`✅ Built for NodeJS: "${nodeOutputPath}"`);
return result;
}),
// Build for CloudFlare Worker environment
build({
entryPoints: [`${__dirname}/src/index.ts`],
bundle: true,
platform: 'browser',
target: 'chrome95',
format: 'esm',
minify: false,
outfile: workerOutputPath,
treeShaking: true,
}).then(result => {
console.log(`✅ Built for CloudFlare Worker: "${workerOutputPath}"`);
return result;
}),
]);

View file

@ -1,5 +1,6 @@
import 'dotenv/config';
import { crypto, Headers, ReadableStream, Request, Response } from '@whatwg-node/fetch';
import type { Env } from './env';
if (!globalThis.Response) {
globalThis.Response = Response;
@ -19,24 +20,28 @@ if (!globalThis.crypto) {
export const devStorage = new Map<string, string>();
(globalThis as any).HIVE_DATA = devStorage;
// eslint-disable-next-line no-process-env
(globalThis as any).S3_ENDPOINT = process.env.S3_ENDPOINT || '';
// eslint-disable-next-line no-process-env
(globalThis as any).S3_ACCESS_KEY_ID = process.env.S3_ACCESS_KEY_ID || '';
// eslint-disable-next-line no-process-env
(globalThis as any).S3_SECRET_ACCESS_KEY = process.env.S3_SECRET_ACCESS_KEY || '';
// eslint-disable-next-line no-process-env
(globalThis as any).S3_BUCKET_NAME = process.env.S3_BUCKET_NAME || '';
// eslint-disable-next-line no-process-env
(globalThis as any).S3_PUBLIC_URL = process.env.S3_PUBLIC_URL || '';
(globalThis as any).USAGE_ANALYTICS = {
writeDataPoint(_input: any) {},
};
(globalThis as any).ERROR_ANALYTICS = {
writeDataPoint(_input: any) {},
};
(globalThis as any).KEY_VALIDATION_ANALYTICS = {
writeDataPoint(_input: any) {},
export const env: Env = {
HIVE_DATA: devStorage,
// eslint-disable-next-line no-process-env
S3_ENDPOINT: process.env.S3_ENDPOINT || '',
// eslint-disable-next-line no-process-env
S3_ACCESS_KEY_ID: process.env.S3_ACCESS_KEY_ID || '',
// eslint-disable-next-line no-process-env
S3_SECRET_ACCESS_KEY: process.env.S3_SECRET_ACCESS_KEY || '',
// eslint-disable-next-line no-process-env
S3_BUCKET_NAME: process.env.S3_BUCKET_NAME || '',
// eslint-disable-next-line no-process-env
S3_PUBLIC_URL: process.env.S3_PUBLIC_URL || '',
USAGE_ANALYTICS: {
writeDataPoint(_input: any) {},
},
ERROR_ANALYTICS: {
writeDataPoint(_input: any) {},
},
KEY_VALIDATION_ANALYTICS: {
writeDataPoint(_input: any) {},
},
SENTRY_DSN: '',
SENTRY_ENVIRONMENT: '',
SENTRY_RELEASE: '',
};

View file

@ -6,42 +6,29 @@ import { createArtifactRequestHandler } from './artifact-handler';
import { ArtifactStorageReader } from './artifact-storage-reader';
import { AwsClient } from './aws';
import './dev-polyfill';
import { devStorage } from './dev-polyfill';
import { devStorage, env } from './dev-polyfill';
import { createRequestHandler } from './handler';
import { createIsKeyValid } from './key-validation';
declare let S3_ENDPOINT: string;
declare let S3_ACCESS_KEY_ID: string;
declare let S3_SECRET_ACCESS_KEY: string;
declare let S3_BUCKET_NAME: string;
declare let S3_PUBLIC_URL: string;
const s3 = {
client: new AwsClient({
accessKeyId: S3_ACCESS_KEY_ID,
secretAccessKey: S3_SECRET_ACCESS_KEY,
accessKeyId: env.S3_ACCESS_KEY_ID,
secretAccessKey: env.S3_SECRET_ACCESS_KEY,
service: 's3',
}),
bucketName: S3_BUCKET_NAME,
endpoint: S3_ENDPOINT,
bucketName: env.S3_BUCKET_NAME,
endpoint: env.S3_ENDPOINT,
};
// eslint-disable-next-line no-process-env
const PORT = process.env.PORT ? parseInt(process.env.PORT, 10) : 4010;
/**
* KV Storage for the CDN
*/
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
declare let HIVE_DATA: KVNamespace;
const handleRequest = createRequestHandler({
getRawStoreValue: value => HIVE_DATA.get(value),
getRawStoreValue: value => env.HIVE_DATA.get(value),
isKeyValid: createIsKeyValid({ s3, getCache: null, waitUntil: null, analytics: null }),
});
const artifactStorageReader = new ArtifactStorageReader(s3, S3_PUBLIC_URL);
const artifactStorageReader = new ArtifactStorageReader(s3, env.S3_PUBLIC_URL);
const handleArtifactRequest = createArtifactRequestHandler({
isKeyValid: createIsKeyValid({ s3, getCache: null, waitUntil: null, analytics: null }),

View file

@ -0,0 +1,26 @@
import type { AnalyticsEngine } from './analytics';
export type Env = {
S3_ENDPOINT: string;
S3_ACCESS_KEY_ID: string;
S3_SECRET_ACCESS_KEY: string;
S3_BUCKET_NAME: string;
S3_SESSION_TOKEN?: string;
S3_PUBLIC_URL: string;
/**
* KV Storage for the CDN
*/
HIVE_DATA: KVNamespace;
SENTRY_DSN: string;
/**
* Name of the environment, e.g. staging, production
*/
SENTRY_ENVIRONMENT: string;
/**
* Id of the release
*/
SENTRY_RELEASE: string;
USAGE_ANALYTICS: AnalyticsEngine;
ERROR_ANALYTICS: AnalyticsEngine;
KEY_VALIDATION_ANALYTICS: AnalyticsEngine;
};

View file

@ -8,148 +8,136 @@ import { UnexpectedError } from './errors';
import { createRequestHandler } from './handler';
import { createIsKeyValid } from './key-validation';
declare let S3_ENDPOINT: string;
declare let S3_ACCESS_KEY_ID: string;
declare let S3_SECRET_ACCESS_KEY: string;
declare let S3_BUCKET_NAME: string;
const s3 = {
client: new AwsClient({
accessKeyId: S3_ACCESS_KEY_ID,
secretAccessKey: S3_SECRET_ACCESS_KEY,
service: 's3',
}),
bucketName: S3_BUCKET_NAME,
endpoint: S3_ENDPOINT,
type Env = {
S3_ENDPOINT: string;
S3_ACCESS_KEY_ID: string;
S3_SECRET_ACCESS_KEY: string;
S3_BUCKET_NAME: string;
S3_SESSION_TOKEN?: string;
/**
* KV Storage for the CDN
*/
HIVE_DATA: KVNamespace;
SENTRY_DSN: string;
/**
* Name of the environment, e.g. staging, production
*/
SENTRY_ENVIRONMENT: string;
/**
* Id of the release
*/
SENTRY_RELEASE: string;
USAGE_ANALYTICS: AnalyticsEngine;
ERROR_ANALYTICS: AnalyticsEngine;
KEY_VALIDATION_ANALYTICS: AnalyticsEngine;
};
const artifactStorageReader = new ArtifactStorageReader(s3, null);
const handler: ExportedHandler<Env> = {
async fetch(request: Request, env, ctx) {
const s3 = {
client: new AwsClient({
accessKeyId: env.S3_ACCESS_KEY_ID,
secretAccessKey: env.S3_SECRET_ACCESS_KEY,
sessionToken: env.S3_SESSION_TOKEN,
service: 's3',
}),
bucketName: env.S3_BUCKET_NAME,
endpoint: env.S3_ENDPOINT,
};
/**
* KV Storage for the CDN
*/
declare let HIVE_DATA: KVNamespace;
const artifactStorageReader = new ArtifactStorageReader(s3, null);
declare let SENTRY_DSN: string;
/**
* Name of the environment, e.g. staging, production
*/
declare let SENTRY_ENVIRONMENT: string;
/**
* Id of the release
*/
declare let SENTRY_RELEASE: string;
const analytics = createAnalytics({
usage: env.USAGE_ANALYTICS,
error: env.ERROR_ANALYTICS,
keyValidation: env.KEY_VALIDATION_ANALYTICS,
});
/**
* Default cache on Cloudflare
* See https://developers.cloudflare.com/workers/runtime-apis/cache/
*/
declare let caches: {
default: Cache;
open: (namespace: string) => Promise<Cache>;
const isKeyValid = createIsKeyValid({
waitUntil: p => ctx.waitUntil(p),
getCache: () => caches.open('artifacts-auth'),
s3,
analytics,
});
const handleRequest = createRequestHandler({
getRawStoreValue: value => env.HIVE_DATA.get(value),
isKeyValid,
analytics,
});
const handleArtifactRequest = createArtifactRequestHandler({
isKeyValid,
analytics,
async getArtifactAction(targetId, artifactType, eTag) {
return artifactStorageReader.generateArtifactReadUrl(targetId, artifactType, eTag);
},
async fallback(request: Request, params: { targetId: string; artifactType: string }) {
const artifactTypeMap: Record<string, string> = {
metadata: 'metadata',
sdl: 'sdl',
services: 'schema',
supergraph: 'supergraph',
};
const artifactType = artifactTypeMap[params.artifactType];
if (artifactType) {
const url = request.url.replace(
`/artifacts/v1/${params.targetId}/${params.artifactType}`,
`/${params.targetId}/${artifactType}`,
);
return handleRequest(new Request(url, request));
}
return;
},
});
const router = itty
.Router()
.get(
'/_health',
() =>
new Response('OK', {
status: 200,
}),
)
.get('*', handleArtifactRequest)
// Legacy CDN Handlers
.get('*', handleRequest);
const sentry = new Toucan({
dsn: env.SENTRY_DSN,
environment: env.SENTRY_ENVIRONMENT,
release: env.SENTRY_RELEASE,
context: ctx,
requestDataOptions: {
allowedHeaders: [
'user-agent',
'cf-ipcountry',
'accept-encoding',
'accept',
'x-real-ip',
'cf-connecting-ip',
],
allowedSearchParams: /(.*)/,
},
});
try {
return await router.handle(request, sentry.captureException).then(response => {
if (response) {
return response;
}
return new Response('Not found', { status: 404 });
});
} catch (error) {
console.error(error);
sentry.captureException(error);
return new UnexpectedError(analytics);
}
},
};
declare let USAGE_ANALYTICS: AnalyticsEngine;
declare let ERROR_ANALYTICS: AnalyticsEngine;
declare let KEY_VALIDATION_ANALYTICS: AnalyticsEngine;
const analytics = createAnalytics({
usage: USAGE_ANALYTICS,
error: ERROR_ANALYTICS,
keyValidation: KEY_VALIDATION_ANALYTICS,
});
self.addEventListener('fetch', async (event: FetchEvent) => {
const isKeyValid = createIsKeyValid({
waitUntil: p => event.waitUntil(p),
getCache: () => caches.open('artifacts-auth'),
s3,
analytics,
});
const handleRequest = createRequestHandler({
getRawStoreValue: value => HIVE_DATA.get(value),
isKeyValid,
analytics,
});
const handleArtifactRequest = createArtifactRequestHandler({
isKeyValid,
analytics,
async getArtifactAction(targetId, artifactType, eTag) {
return artifactStorageReader.generateArtifactReadUrl(targetId, artifactType, eTag);
},
async fallback(request: Request, params: { targetId: string; artifactType: string }) {
const artifactTypeMap: Record<string, string> = {
metadata: 'metadata',
sdl: 'sdl',
services: 'schema',
supergraph: 'supergraph',
};
const artifactType = artifactTypeMap[params.artifactType];
if (artifactType) {
const url = request.url.replace(
`/artifacts/v1/${params.targetId}/${params.artifactType}`,
`/${params.targetId}/${artifactType}`,
);
return handleRequest(new Request(url, request));
}
return;
},
});
const router = itty
.Router()
.get(
'/_health',
() =>
new Response('OK', {
status: 200,
}),
)
.get('*', handleArtifactRequest)
// Legacy CDN Handlers
.get('*', handleRequest);
const sentry = new Toucan({
dsn: SENTRY_DSN,
environment: SENTRY_ENVIRONMENT,
release: SENTRY_RELEASE,
context: event,
requestDataOptions: {
allowedHeaders: [
'user-agent',
'cf-ipcountry',
'accept-encoding',
'accept',
'x-real-ip',
'cf-connecting-ip',
],
allowedSearchParams: /(.*)/,
},
});
try {
event.respondWith(
router
.handle(event.request, sentry.captureException)
.then(response => {
if (response) {
return response;
}
return new Response('Not found', { status: 404 });
})
.catch(err => {
console.error(err);
sentry.captureException(err);
return new UnexpectedError(analytics);
}),
);
} catch (error) {
sentry.captureException(error);
event.respondWith(new UnexpectedError(analytics));
}
});
export default handler;

View file

@ -33,6 +33,7 @@ The GraphQL API for GraphQL Hive.
| `S3_ACCESS_KEY_ID` | **Yes** | The S3 access key id. | `minioadmin` |
| `S3_SECRET_ACCESS_KEY` | **Yes** | The S3 secret access key. | `minioadmin` |
| `S3_BUCKET_NAME` | **Yes** | The S3 bucket name. | `artifacts` |
| `S3_SESSION_TOKEN` | No | The S3 session token. | `dummytoken` |
| `S3_PUBLIC_URL` | No | The public URL of the S3, in case it differs from the `S3_ENDPOINT`. | `http://localhost:8083` |
| `CDN_API` | No | Whether the CDN exposed via API is enabled. | `1` (enabled) or `0` (disabled) |
| `CDN_API_BASE_URL` | No (Yes if `CDN_API` is set to `1`) | The public base url of the API service. | `http://localhost:8082` |

View file

@ -139,6 +139,7 @@ const S3Model = zod.object({
S3_ENDPOINT: zod.string().url(),
S3_ACCESS_KEY_ID: zod.string(),
S3_SECRET_ACCESS_KEY: zod.string(),
S3_SESSION_TOKEN: emptyString(zod.string().optional()),
S3_BUCKET_NAME: zod.string(),
S3_PUBLIC_URL: emptyString(zod.string().url().optional()),
});
@ -328,6 +329,7 @@ export const env = {
credentials: {
accessKeyId: s3.S3_ACCESS_KEY_ID,
secretAccessKey: s3.S3_SECRET_ACCESS_KEY,
sessionToken: s3.S3_SESSION_TOKEN,
},
},
organizationOIDC: base.AUTH_ORGANIZATION_OIDC === '1',

View file

@ -214,6 +214,7 @@ export async function main() {
s3: {
accessKeyId: env.s3.credentials.accessKeyId,
secretAccessKeyId: env.s3.credentials.secretAccessKey,
sessionToken: env.s3.credentials.sessionToken,
bucketName: env.s3.bucketName,
endpoint: env.s3.endpoint,
},