chore: Use local clickhouse instance for playwright tests (#1711)

TLDR: This PR changes playwright full-stack tests to run against a local clickhouse instance (with seeded data) instead of relying on the clickhouse demo server, which can be unpredictable at times. This workflow allows us to fully control the data to make tests more predictable.

This PR: 
* Adds local CH instance to the e2e dockerfile
* Adds a schema creation script
* Adds a data seeding script
* Updates playwright config 
* Updates various tests to change hardcoded fields, metrics, or areas relying on the public ClickHouse demo data
* Updates github workflow to use the dockerfile instead of separate services
* Runs against a local clickhouse instead of the demo server

Fixes: HDX-3193
This commit is contained in:
Tom Alexander 2026-02-13 10:43:12 -05:00 committed by GitHub
parent cfba838ed3
commit 75ff28dd68
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
27 changed files with 1679 additions and 501 deletions

View file

@ -124,17 +124,6 @@ jobs:
name: E2E Tests - Shard ${{ matrix.shard }}
runs-on: ubuntu-24.04
timeout-minutes: 15
services:
mongodb:
image: mongo:5.0.32-focal
options: >-
--health-cmd "mongosh --quiet --eval 'db.adminCommand({ping: 1});
db.getSiblingDB(\"test\").test.insertOne({_id: \"hc\"});
db.getSiblingDB(\"test\").test.deleteOne({_id: \"hc\"})'"
--health-interval 10s --health-timeout 5s --health-retries 10
--health-start-period 10s
ports:
- 27017:27017
permissions:
contents: read
pull-requests: write
@ -163,15 +152,43 @@ jobs:
- name: Install Playwright browsers
run: cd packages/app && npx playwright install --with-deps chromium
- name: Start E2E Docker Compose
run: |
docker compose -p e2e -f packages/app/tests/e2e/docker-compose.yml up -d
echo "Waiting for MongoDB..."
for i in $(seq 1 30); do
if docker compose -p e2e -f packages/app/tests/e2e/docker-compose.yml exec -T db mongosh --port 29998 --quiet --eval "db.adminCommand({ping:1})" >/dev/null 2>&1; then
echo "MongoDB is ready"
break
fi
if [ "$i" -eq 30 ]; then
echo "MongoDB failed to become ready after 30 seconds"
exit 1
fi
echo "Waiting for MongoDB... ($i/30)"
sleep 1
done
echo "Waiting for ClickHouse..."
for i in $(seq 1 60); do
if curl -sf http://localhost:8123/ping >/dev/null 2>&1; then
echo "ClickHouse is ready"
break
fi
if [ "$i" -eq 60 ]; then
echo "ClickHouse failed to become ready after 60 seconds"
exit 1
fi
echo "Waiting for ClickHouse... ($i/60)"
sleep 1
done
- name: Run Playwright tests (full-stack mode)
# MongoDB service health check ensures it's ready before this step runs
# Note: Tests use ClickHouse demo instance (otel_demo with empty password)
# This is intentionally public - it's ClickHouse's read-only demo instance
# E2E uses local docker-compose (MongoDB on 29998, ClickHouse on 8123)
env:
E2E_FULLSTACK: 'true'
E2E_UNIQUE_USER: 'true'
E2E_API_HEALTH_CHECK_MAX_RETRIES: '60'
MONGO_URI: mongodb://localhost:27017/hyperdx-e2e
MONGO_URI: mongodb://localhost:29998/hyperdx-e2e
run: |
cd packages/app
yarn test:e2e --shard=${{ matrix.shard }}/4
@ -192,6 +209,12 @@ jobs:
path: packages/app/test-results/
retention-days: 30
- name: Stop E2E containers
if: always()
run:
docker compose -p e2e -f packages/app/tests/e2e/docker-compose.yml
down -v
e2e-report:
name: End-to-End Tests
if: always()

View file

@ -67,13 +67,10 @@ ci-unit:
.PHONY: e2e
e2e:
@# Run full-stack by default (MongoDB + API + demo ClickHouse)
@# Use 'make e2e local=true' to skip MongoDB and run local mode only
@# Use 'make e2e ui=true' to run tests with UI
if [ "$(local)" = "true" ]; then set -- "$$@" --local; fi; \
if [ -n "$(tags)" ]; then set -- "$$@" --tags "$(tags)"; fi; \
if [ "$(ui)" = "true" ]; then set -- "$$@" --ui; fi; \
./scripts/test-e2e.sh "$$@"
# Run full-stack by default (MongoDB + API + local Docker ClickHouse)
# For more control (--ui, --last-failed, --headed, etc), call the script directly:
# ./scripts/test-e2e.sh --ui --last-failed
./scripts/test-e2e.sh

View file

@ -0,0 +1,209 @@
#!/bin/bash
# E2E-specific ClickHouse initialization script.
# Creates OTel tables with an e2e_ prefix to avoid collision with local dev data.
# Intended to be idempotent: every CREATE uses IF NOT EXISTS so re-running
# the script against an already-initialized instance is a no-op.
set -e

# We don't have a JSON schema yet, so when the JSON-schema beta is enabled
# let the collector create the tables instead of creating them here.
if [ "$BETA_CH_OTEL_JSON_SCHEMA_ENABLED" = "true" ]; then
  exit 0
fi

# Target database; defaults to "default" when the exporter env var is unset.
DATABASE=${HYPERDX_OTEL_EXPORTER_CLICKHOUSE_DATABASE:-default}

# Heredoc is intentionally unquoted so ${DATABASE} expands; backticks are
# escaped so they reach ClickHouse as identifier quotes. -n enables multiquery.
clickhouse client -n <<EOFSQL
CREATE DATABASE IF NOT EXISTS ${DATABASE};
CREATE TABLE IF NOT EXISTS ${DATABASE}.e2e_otel_logs
(
    \`Timestamp\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
    \`TimestampTime\` DateTime DEFAULT toDateTime(Timestamp),
    \`TraceId\` String CODEC(ZSTD(1)),
    \`SpanId\` String CODEC(ZSTD(1)),
    \`TraceFlags\` UInt8,
    \`SeverityText\` LowCardinality(String) CODEC(ZSTD(1)),
    \`SeverityNumber\` UInt8,
    \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
    \`Body\` String CODEC(ZSTD(1)),
    \`ResourceSchemaUrl\` LowCardinality(String) CODEC(ZSTD(1)),
    \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`ScopeSchemaUrl\` LowCardinality(String) CODEC(ZSTD(1)),
    \`ScopeName\` String CODEC(ZSTD(1)),
    \`ScopeVersion\` LowCardinality(String) CODEC(ZSTD(1)),
    \`ScopeAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`LogAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`__hdx_materialized_k8s.cluster.name\` LowCardinality(String) MATERIALIZED ResourceAttributes['k8s.cluster.name'] CODEC(ZSTD(1)),
    \`__hdx_materialized_k8s.container.name\` LowCardinality(String) MATERIALIZED ResourceAttributes['k8s.container.name'] CODEC(ZSTD(1)),
    \`__hdx_materialized_k8s.deployment.name\` LowCardinality(String) MATERIALIZED ResourceAttributes['k8s.deployment.name'] CODEC(ZSTD(1)),
    \`__hdx_materialized_k8s.namespace.name\` LowCardinality(String) MATERIALIZED ResourceAttributes['k8s.namespace.name'] CODEC(ZSTD(1)),
    \`__hdx_materialized_k8s.node.name\` LowCardinality(String) MATERIALIZED ResourceAttributes['k8s.node.name'] CODEC(ZSTD(1)),
    \`__hdx_materialized_k8s.pod.name\` LowCardinality(String) MATERIALIZED ResourceAttributes['k8s.pod.name'] CODEC(ZSTD(1)),
    \`__hdx_materialized_k8s.pod.uid\` LowCardinality(String) MATERIALIZED ResourceAttributes['k8s.pod.uid'] CODEC(ZSTD(1)),
    \`__hdx_materialized_deployment.environment.name\` LowCardinality(String) MATERIALIZED ResourceAttributes['deployment.environment.name'] CODEC(ZSTD(1)),
    INDEX idx_trace_id TraceId TYPE bloom_filter(0.001) GRANULARITY 1,
    INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_log_attr_key mapKeys(LogAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_log_attr_value mapValues(LogAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_lower_body lower(Body) TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 8
)
ENGINE = MergeTree
PARTITION BY toDate(TimestampTime)
PRIMARY KEY (ServiceName, TimestampTime)
ORDER BY (ServiceName, TimestampTime, Timestamp)
TTL TimestampTime + toIntervalDay(30)
SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1;
CREATE TABLE IF NOT EXISTS ${DATABASE}.e2e_otel_traces
(
    \`Timestamp\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
    \`TraceId\` String CODEC(ZSTD(1)),
    \`SpanId\` String CODEC(ZSTD(1)),
    \`ParentSpanId\` String CODEC(ZSTD(1)),
    \`TraceState\` String CODEC(ZSTD(1)),
    \`SpanName\` LowCardinality(String) CODEC(ZSTD(1)),
    \`SpanKind\` LowCardinality(String) CODEC(ZSTD(1)),
    \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
    \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`ScopeName\` String CODEC(ZSTD(1)),
    \`ScopeVersion\` String CODEC(ZSTD(1)),
    \`SpanAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`Duration\` UInt64 CODEC(ZSTD(1)),
    \`StatusCode\` LowCardinality(String) CODEC(ZSTD(1)),
    \`StatusMessage\` String CODEC(ZSTD(1)),
    \`Events.Timestamp\` Array(DateTime64(9)) CODEC(ZSTD(1)),
    \`Events.Name\` Array(LowCardinality(String)) CODEC(ZSTD(1)),
    \`Events.Attributes\` Array(Map(LowCardinality(String), String)) CODEC(ZSTD(1)),
    \`Links.TraceId\` Array(String) CODEC(ZSTD(1)),
    \`Links.SpanId\` Array(String) CODEC(ZSTD(1)),
    \`Links.TraceState\` Array(String) CODEC(ZSTD(1)),
    \`Links.Attributes\` Array(Map(LowCardinality(String), String)) CODEC(ZSTD(1)),
    \`__hdx_materialized_rum.sessionId\` String MATERIALIZED ResourceAttributes['rum.sessionId'] CODEC(ZSTD(1)),
    INDEX idx_trace_id TraceId TYPE bloom_filter(0.001) GRANULARITY 1,
    INDEX idx_rum_session_id __hdx_materialized_rum.sessionId TYPE bloom_filter(0.001) GRANULARITY 1,
    INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_span_attr_key mapKeys(SpanAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_span_attr_value mapValues(SpanAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_duration Duration TYPE minmax GRANULARITY 1,
    INDEX idx_lower_span_name lower(SpanName) TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 8
)
ENGINE = MergeTree
PARTITION BY toDate(Timestamp)
ORDER BY (ServiceName, SpanName, toDateTime(Timestamp))
TTL toDate(Timestamp) + toIntervalDay(30)
SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1;
CREATE TABLE IF NOT EXISTS ${DATABASE}.e2e_hyperdx_sessions
(
    \`Timestamp\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
    \`TimestampTime\` DateTime DEFAULT toDateTime(Timestamp),
    \`TraceId\` String CODEC(ZSTD(1)),
    \`SpanId\` String CODEC(ZSTD(1)),
    \`TraceFlags\` UInt8,
    \`SeverityText\` LowCardinality(String) CODEC(ZSTD(1)),
    \`SeverityNumber\` UInt8,
    \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
    \`Body\` String CODEC(ZSTD(1)),
    \`ResourceSchemaUrl\` LowCardinality(String) CODEC(ZSTD(1)),
    \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`ScopeSchemaUrl\` LowCardinality(String) CODEC(ZSTD(1)),
    \`ScopeName\` String CODEC(ZSTD(1)),
    \`ScopeVersion\` LowCardinality(String) CODEC(ZSTD(1)),
    \`ScopeAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`LogAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`__hdx_materialized_rum.sessionId\` String MATERIALIZED ResourceAttributes['rum.sessionId'] CODEC(ZSTD(1)),
    \`__hdx_materialized_type\` LowCardinality(String) MATERIALIZED toString(simpleJSONExtractInt(Body, 'type')) CODEC(ZSTD(1)),
    INDEX idx_trace_id TraceId TYPE bloom_filter(0.001) GRANULARITY 1,
    INDEX idx_rum_session_id __hdx_materialized_rum.sessionId TYPE bloom_filter(0.001) GRANULARITY 1,
    INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_log_attr_key mapKeys(LogAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_log_attr_value mapValues(LogAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_body Body TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 8
)
ENGINE = MergeTree
PARTITION BY toDate(TimestampTime)
PRIMARY KEY (ServiceName, TimestampTime)
ORDER BY (ServiceName, TimestampTime, Timestamp)
TTL TimestampTime + toIntervalDay(30)
SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1;
CREATE TABLE IF NOT EXISTS ${DATABASE}.e2e_otel_metrics_gauge
(
    \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`ResourceSchemaUrl\` String CODEC(ZSTD(1)),
    \`ScopeName\` String CODEC(ZSTD(1)),
    \`ScopeVersion\` String CODEC(ZSTD(1)),
    \`ScopeAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`ScopeDroppedAttrCount\` UInt32 CODEC(ZSTD(1)),
    \`ScopeSchemaUrl\` String CODEC(ZSTD(1)),
    \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
    \`MetricName\` String CODEC(ZSTD(1)),
    \`MetricDescription\` String CODEC(ZSTD(1)),
    \`MetricUnit\` String CODEC(ZSTD(1)),
    \`Attributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`StartTimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
    \`TimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
    \`Value\` Float64 CODEC(ZSTD(1)),
    \`Flags\` UInt32 CODEC(ZSTD(1)),
    \`Exemplars.FilteredAttributes\` Array(Map(LowCardinality(String), String)) CODEC(ZSTD(1)),
    \`Exemplars.TimeUnix\` Array(DateTime64(9)) CODEC(ZSTD(1)),
    \`Exemplars.Value\` Array(Float64) CODEC(ZSTD(1)),
    \`Exemplars.SpanId\` Array(String) CODEC(ZSTD(1)),
    \`Exemplars.TraceId\` Array(String) CODEC(ZSTD(1)),
    INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1
)
ENGINE = MergeTree
PARTITION BY toDate(TimeUnix)
ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix))
TTL toDate(TimeUnix) + toIntervalDay(30)
SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1;
CREATE TABLE IF NOT EXISTS ${DATABASE}.e2e_otel_metrics_sum
(
    \`ResourceAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`ResourceSchemaUrl\` String CODEC(ZSTD(1)),
    \`ScopeName\` String CODEC(ZSTD(1)),
    \`ScopeVersion\` String CODEC(ZSTD(1)),
    \`ScopeAttributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`ScopeDroppedAttrCount\` UInt32 CODEC(ZSTD(1)),
    \`ScopeSchemaUrl\` String CODEC(ZSTD(1)),
    \`ServiceName\` LowCardinality(String) CODEC(ZSTD(1)),
    \`MetricName\` String CODEC(ZSTD(1)),
    \`MetricDescription\` String CODEC(ZSTD(1)),
    \`MetricUnit\` String CODEC(ZSTD(1)),
    \`Attributes\` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
    \`StartTimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
    \`TimeUnix\` DateTime64(9) CODEC(Delta(8), ZSTD(1)),
    \`Value\` Float64 CODEC(ZSTD(1)),
    \`Flags\` UInt32 CODEC(ZSTD(1)),
    \`AggregationTemporality\` Int32 CODEC(ZSTD(1)),
    \`IsMonotonic\` Bool CODEC(Delta(1), ZSTD(1)),
    \`Exemplars.FilteredAttributes\` Array(Map(LowCardinality(String), String)) CODEC(ZSTD(1)),
    \`Exemplars.TimeUnix\` Array(DateTime64(9)) CODEC(ZSTD(1)),
    \`Exemplars.Value\` Array(Float64) CODEC(ZSTD(1)),
    \`Exemplars.SpanId\` Array(String) CODEC(ZSTD(1)),
    \`Exemplars.TraceId\` Array(String) CODEC(ZSTD(1)),
    INDEX idx_res_attr_key mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_scope_attr_key mapKeys(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_scope_attr_value mapValues(ScopeAttributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_attr_key mapKeys(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1,
    INDEX idx_attr_value mapValues(Attributes) TYPE bloom_filter(0.01) GRANULARITY 1
)
ENGINE = MergeTree
PARTITION BY toDate(TimeUnix)
ORDER BY (ServiceName, MetricName, Attributes, toUnixTimestamp64Nano(TimeUnix))
TTL toDate(TimeUnix) + toIntervalDay(30)
SETTINGS index_granularity = 8192, ttl_only_drop_parts = 1;
EOFSQL

View file

@ -1,8 +1,8 @@
# ClickHouse connection to public demo instance
# Password is intentionally empty - this is a read-only public demo instance
CLICKHOUSE_HOST=https://sql-clickhouse.clickhouse.com
# ClickHouse connection to local e2e test instance
# Uses local ClickHouse instance from docker-compose.yml
CLICKHOUSE_HOST=http://localhost:8123
CLICKHOUSE_PASSWORD=
CLICKHOUSE_USER=otel_demo
CLICKHOUSE_USER=default
RUN_SCHEDULED_TASKS_EXTERNALLY=true
FRONTEND_URL=http://localhost:28081
# MongoDB connection string
@ -13,11 +13,8 @@ NODE_ENV=test
PORT=29000
OPAMP_PORT=24320
# Auto-create default connections and sources for new teams
# Uses public demo ClickHouse instance with pre-populated data
DEFAULT_CONNECTIONS='[{"name":"local","host":"https://sql-clickhouse.clickhouse.com","username":"otel_demo","password":""}]'
DEFAULT_SOURCES='[{"kind":"log","name":"Demo Logs","connection":"local","from":{"databaseName":"otel_v2","tableName":"otel_logs"},"timestampValueExpression":"TimestampTime","defaultTableSelectExpression":"Timestamp, ServiceName, SeverityText, Body","serviceNameExpression":"ServiceName","severityTextExpression":"SeverityText","eventAttributesExpression":"LogAttributes","resourceAttributesExpression":"ResourceAttributes","traceIdExpression":"TraceId","spanIdExpression":"SpanId","implicitColumnExpression":"Body","displayedTimestampValueExpression":"Timestamp","sessionSourceId":"Demo Sessions","traceSourceId":"Demo Traces","metricSourceId":"Demo Metrics"},{"kind":"trace","name":"Demo Traces","connection":"local","from":{"databaseName":"otel_v2","tableName":"otel_traces"},"timestampValueExpression":"Timestamp","defaultTableSelectExpression":"Timestamp, ServiceName, StatusCode, round(Duration / 1e6), SpanName","serviceNameExpression":"ServiceName","eventAttributesExpression":"SpanAttributes","resourceAttributesExpression":"ResourceAttributes","traceIdExpression":"TraceId","spanIdExpression":"SpanId","implicitColumnExpression":"SpanName","durationExpression":"Duration","durationPrecision":9,"parentSpanIdExpression":"ParentSpanId","spanKindExpression":"SpanKind","spanNameExpression":"SpanName","logSourceId":"Demo Logs","statusCodeExpression":"StatusCode","statusMessageExpression":"StatusMessage","spanEventsValueExpression":"Events","metricSourceId":"Demo Metrics","sessionSourceId":"Demo Sessions","materializedViews": [{"databaseName": "otel_v2","tableName": "otel_traces_1m","dimensionColumns": "ServiceName, StatusCode","minGranularity": "1 minute","timestampColumn": "Timestamp","aggregatedColumns": [{"mvColumn": "count","aggFn": "count","sourceColumn": ""},{"mvColumn": "max__Duration","aggFn": "max","sourceColumn": "Duration"},{"mvColumn": "avg__Duration","aggFn": "avg","sourceColumn": "Duration"}]},{"databaseName": "otel_v2","tableName": 
"otel_traces_1m_v2","dimensionColumns": "ServiceName, SpanName, SpanKind","minGranularity": "1 minute","timestampColumn": "Timestamp","aggregatedColumns": [{"mvColumn": "count","aggFn": "count","sourceColumn": ""},{"mvColumn": "max__Duration","aggFn": "max","sourceColumn": "Duration"},{"mvColumn": "avg__Duration","aggFn": "avg","sourceColumn": "Duration"},{"mvColumn": "quantile__Duration","aggFn": "quantile","sourceColumn": "Duration"}]}]},{"kind":"metric","name":"Demo Metrics","connection":"local","from":{"databaseName":"otel_v2","tableName":""},"timestampValueExpression":"TimeUnix","serviceNameExpression":"ServiceName","metricTables":{"gauge":"otel_metrics_gauge","histogram":"otel_metrics_histogram","sum":"otel_metrics_sum","summary":"otel_metrics_summary","exponential histogram":"otel_metrics_exponential_histogram"},"resourceAttributesExpression":"ResourceAttributes","logSourceId":"Demo Logs"},{"kind":"session","name":"Demo Sessions","connection":"local","from":{"databaseName":"otel_v2","tableName":"hyperdx_sessions"},"timestampValueExpression":"TimestampTime","defaultTableSelectExpression":"Timestamp, ServiceName, Body","serviceNameExpression":"ServiceName","severityTextExpression":"SeverityText","eventAttributesExpression":"LogAttributes","resourceAttributesExpression":"ResourceAttributes","traceSourceId":"Demo Traces","traceIdExpression":"TraceId","spanIdExpression":"SpanId","implicitColumnExpression":"Body"},{"kind":"trace","name":"ClickPy Traces","connection":"local","from":{"databaseName":"otel_clickpy","tableName":"otel_traces"},"timestampValueExpression":"Timestamp","defaultTableSelectExpression":"Timestamp, ServiceName, StatusCode, round(Duration / 1e6), 
SpanName","serviceNameExpression":"ServiceName","eventAttributesExpression":"SpanAttributes","resourceAttributesExpression":"ResourceAttributes","traceIdExpression":"TraceId","spanIdExpression":"SpanId","implicitColumnExpression":"SpanName","durationExpression":"Duration","durationPrecision":9,"parentSpanIdExpression":"ParentSpanId","spanKindExpression":"SpanKind","spanNameExpression":"SpanName","statusCodeExpression":"StatusCode","statusMessageExpression":"StatusMessage","spanEventsValueExpression":"Events","highlightedTraceAttributeExpressions":[{"sqlExpression":"if((SpanAttributes['http.route']) LIKE '%dashboard%', concat('https://clickpy.clickhouse.com', path(SpanAttributes['http.target'])), '')","alias":"clickpy_link"}],"sessionSourceId":"ClickPy Sessions"},{"kind":"session","name":"ClickPy Sessions","connection":"local","from":{"databaseName":"otel_clickpy","tableName":"hyperdx_sessions"},"timestampValueExpression":"TimestampTime","defaultTableSelectExpression":"Timestamp, ServiceName, Body","serviceNameExpression":"ServiceName","severityTextExpression":"SeverityText","eventAttributesExpression":"LogAttributes","resourceAttributesExpression":"ResourceAttributes","traceSourceId":"ClickPy Traces","traceIdExpression":"TraceId","spanIdExpression":"SpanId","implicitColumnExpression":"Body"}]'
# DEFAULT_CONNECTIONS and DEFAULT_SOURCES are injected from packages/app/tests/e2e/fixtures/e2e-fixtures.json
# by the e2e API runner (run-api-with-fixtures.js) and by base-test for local mode.
# Disable usage stats for e2e tests
USAGE_STATS_ENABLED=false

View file

@ -18,7 +18,7 @@ export default defineConfig({
/* Global setup to ensure server is ready */
globalSetup: USE_FULLSTACK
? require.resolve('./tests/e2e/global-setup-fullstack.ts')
: require.resolve('./global-setup.js'),
: require.resolve('./tests/e2e/global-setup-local.ts'),
/* Run tests in files in parallel */
fullyParallel: true,
/* Fail the build on CI if you accidentally left test.only in the source code. */
@ -72,8 +72,7 @@ export default defineConfig({
? [
// Full-stack mode: Start API and App servers (infrastructure started separately)
{
// Loads configuration from .env.e2e (connections, settings)
// Environment variables (MONGO_URI, etc.) can override .env.e2e values
// Connections/sources come from env (injected by run-e2e.js from e2e-fixtures.json)
command: `cd ../api && ${process.env.MONGO_URI ? `MONGO_URI="${process.env.MONGO_URI}"` : ''} DOTENV_CONFIG_PATH=.env.e2e npx ts-node --transpile-only -r tsconfig-paths/register -r dotenv-expand/config -r @hyperdx/node-opentelemetry/build/src/tracing src/index.ts`,
port: 29000,
reuseExistingServer: !process.env.CI,
@ -82,9 +81,8 @@ export default defineConfig({
stderr: 'pipe',
},
{
command: process.env.CI
? 'SERVER_URL=http://localhost:29000 PORT=28081 yarn build && SERVER_URL=http://localhost:29000 PORT=28081 yarn start'
: 'SERVER_URL=http://localhost:29000 PORT=28081 NEXT_TELEMETRY_DISABLED=1 yarn run dev',
command:
'SERVER_URL=http://localhost:29000 PORT=28081 yarn build && SERVER_URL=http://localhost:29000 PORT=28081 yarn start',
port: 28081,
reuseExistingServer: !process.env.CI,
timeout: APP_SERVER_STARTUP_TIMEOUT_MS,
@ -94,9 +92,8 @@ export default defineConfig({
]
: {
// Local mode: Frontend only
command: process.env.CI
? 'NEXT_PUBLIC_IS_LOCAL_MODE=true yarn build && NEXT_PUBLIC_IS_LOCAL_MODE=true PORT=8081 yarn start'
: 'NEXT_PUBLIC_IS_LOCAL_MODE=true NEXT_TELEMETRY_DISABLED=1 PORT=8081 yarn run dev',
command:
'NEXT_PUBLIC_IS_LOCAL_MODE=true yarn build && NEXT_PUBLIC_IS_LOCAL_MODE=true PORT=8081 yarn start',
port: 8081,
reuseExistingServer: !process.env.CI,
timeout: APP_SERVER_STARTUP_TIMEOUT_MS,

View file

@ -13,6 +13,7 @@
*/
import { spawn } from 'child_process';
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
@ -50,12 +51,23 @@ if (useLocal) {
// Add any additional playwright arguments
playwrightCmd.push(...playwrightArgs);
// Set environment variables
// Set environment variables (Playwright and its webServer children inherit these)
const env = {
...process.env,
...(!useLocal && { E2E_FULLSTACK: 'true' }),
};
// Full-stack: inject DEFAULT_CONNECTIONS/DEFAULT_SOURCES from fixture so the API gets them
if (!useLocal) {
const fixturePath = path.join(
__dirname,
'../tests/e2e/fixtures/e2e-fixtures.json',
);
const fixture = JSON.parse(fs.readFileSync(fixturePath, 'utf8'));
env.DEFAULT_CONNECTIONS = JSON.stringify(fixture.connections ?? []);
env.DEFAULT_SOURCES = JSON.stringify(fixture.sources ?? []);
}
// Run playwright
// eslint-disable-next-line no-console
console.info(`Running: ${playwrightCmd.join(' ')}`);

View file

@ -781,6 +781,7 @@ export const FilterGroup = ({
{showShowMoreButton && (
<div className="d-flex m-1">
<TextButton
data-testid={`filter-show-more-${name}`}
label={
shouldShowMore ? (
<>
@ -809,6 +810,7 @@ export const FilterGroup = ({
!loadMoreLoading && (
<div className="d-flex m-1">
<TextButton
data-testid={`filter-load-more-${name}`}
display={hasLoadedMore ? 'none' : undefined}
label={
<>

View file

@ -15,34 +15,29 @@ feature-specific test suites.
### Default: Full-Stack Mode
By default, `make e2e` runs tests in **full-stack mode** with MongoDB + API +
demo ClickHouse for maximum consistency and real backend features:
local Docker ClickHouse for maximum consistency and real backend features:
```bash
# Run all tests (full-stack with MongoDB + API + demo ClickHouse)
# Run all tests (full-stack with MongoDB + API + local Docker ClickHouse)
make e2e
# Run specific tests (full-stack)
make e2e tags="@kubernetes"
make e2e tags="@smoke"
# Run tests with UI
make e2e ui=true
# For UI, specific tests, or other options, use the script from repo root:
./scripts/test-e2e.sh --ui # Run with Playwright UI
./scripts/test-e2e.sh --grep "@kubernetes" # Run specific tests
./scripts/test-e2e.sh --grep "@smoke"
./scripts/test-e2e.sh --ui --last-failed # Re-run only failed tests with UI
```
### Optional: Local Mode (Frontend Only)
For faster iteration during development, use `local=true` to skip MongoDB and
run frontend-only tests:
For faster iteration during development, use the script with `--local` to skip
MongoDB and run frontend-only tests:
```bash
# Run all tests in local mode (no MongoDB, frontend only)
make e2e local=true
# Run tests with UI
make e2e local=true ui=true
# Run specific tests in local mode
make e2e local=true tags="@search"
# From repo root - run local tests (no MongoDB, frontend only)
./scripts/test-e2e.sh --local
./scripts/test-e2e.sh --local --ui
./scripts/test-e2e.sh --local --grep "@search"
# From packages/app - run local tests (frontend only)
cd packages/app
@ -98,7 +93,7 @@ ClickHouse data.
- MongoDB (port 29998) - authentication, teams, users, persistence
- API Server (port 29000) - full backend logic
- App Server (port 28081) - frontend
- **Demo ClickHouse** (remote) - pre-populated logs/traces/metrics/K8s data
- **Local Docker ClickHouse** (localhost:8123) - seeded E2E test data (logs/traces/metrics/K8s). Seeded timestamps span a past+future window (~1h past, ~2h future from seed time) so relative ranges like "last 5 minutes" keep finding data. If you run tests more than ~2 hours after the last seed, re-run the global setup (or full test run) to re-seed.
**Benefits:**
@ -111,30 +106,34 @@ ClickHouse data.
```bash
# Default: full-stack mode
make e2e
make e2e tags="@kubernetes"
./scripts/test-e2e.sh --grep "@kubernetes" # from repo root, for specific tags
```
#### Local Mode (for testing frontend-only mode)
#### Local Mode (for testing frontend-only features)
**Frontend-only mode** - skips MongoDB/API, connects directly to demo ClickHouse
from browser.
**Frontend + ClickHouse mode** - skips MongoDB/API, uses local Docker ClickHouse
with seeded test data.
**Use for:**
- Quick frontend iteration during development
- Testing UI components that don't need auth
- Faster test execution when backend features aren't needed
- Consistent test data (same as full-stack mode)
**Limitations:**
- No authentication (no login/signup)
- No persistence (can't save searches/dashboards)
- No API calls (queries go directly to demo ClickHouse)
- No persistence (can't save searches/dashboards via API)
- No API calls (queries go directly to local ClickHouse)
**Note:** Uses the same Docker ClickHouse and seeded data as full-stack mode,
ensuring consistency between local and full-stack tests.
```bash
# Opt-in to local mode for speed
make e2e local=true
make e2e local=true tags="@search"
# Opt-in to local mode for speed (from repo root)
./scripts/test-e2e.sh --local
./scripts/test-e2e.sh --local --grep "@search"
```
## Writing Tests
@ -150,7 +149,7 @@ test.describe('My Feature', () => {
// User is already authenticated (via global setup in full-stack mode)
await page.goto('/search');
// Query demo ClickHouse data
// Query local Docker ClickHouse seeded data
await page.fill('[data-testid="search-input"]', 'ServiceName:"frontend"');
await page.click('[data-testid="search-submit-button"]');
@ -167,8 +166,8 @@ test.describe('My Feature', () => {
```
**Note:** Tests that need to run in full stack mode should be tagged with
`@full-stack` so that when `make e2e local=true` is run, they are skipped
appropriately.
`@full-stack` so that when running with `./scripts/test-e2e.sh --local`, they
are skipped appropriately.
## Test Organization
@ -367,13 +366,13 @@ multiple servers:
**Sources don't appear in UI:**
- Check API logs for `setupTeamDefaults` errors
- Verify `DEFAULT_SOURCES` in `.env.e2e` points to demo ClickHouse
- Verify `DEFAULT_SOURCES` in `.env.e2e` points to local Docker ClickHouse (localhost:8123)
- Ensure you registered a new user (DEFAULT_SOURCES only applies to new teams)
**Tests can't find demo data:**
- Verify sources use `otel_v2` database (demo ClickHouse)
- Check Network tab - should query `sql-clickhouse.clickhouse.com`
- Verify sources use `default` database with `e2e_` prefixed tables
- Check Network tab - should query `localhost:8123`
- Verify a source is selected in UI dropdown
### Flaky Tests
@ -391,7 +390,7 @@ For intermittent failures:
Tests run in **full-stack mode** on CI (GitHub Actions) with:
- MongoDB service container for authentication and persistence
- Demo ClickHouse for telemetry data
- Local Docker ClickHouse for telemetry data (same as local mode)
- 60-second test timeout (same as local)
- Multiple retry attempts (2 retries on CI vs 1 locally)
- Artifact collection for failed tests

View file

@ -83,6 +83,10 @@ export class ChartEditorComponent {
*/
async runQuery() {
await this.runQueryButton.click();
// need to wait for the recharts graph to render
await this.page
.locator('.recharts-responsive-container')
.waitFor({ state: 'visible', timeout: 10000 });
}
/**

View file

@ -11,6 +11,20 @@ export class FilterComponent {
this.page = page;
}
private async scrollAndClick(locator: Locator, testId: string) {
// Filters live in a side nav with its own ScrollArea. Use native scrollIntoView
// so the browser scrolls within that container; Playwright's scrollIntoViewIfNeeded
// can be unreliable with nested scroll containers.
await locator.evaluate(el =>
el.scrollIntoView({ block: 'nearest', inline: 'nearest' }),
);
await locator.hover();
const button = locator.getByTestId(testId);
await button.waitFor({ state: 'visible' });
await button.click();
}
/**
* Get filter group by name
* @param filterName - e.g., 'SeverityText', 'ServiceName'
@ -62,10 +76,7 @@ export class FilterComponent {
*/
async excludeFilter(valueName: string) {
const filterCheckbox = this.getFilterCheckbox(valueName);
await filterCheckbox.hover();
const excludeButton = this.page.getByTestId(`filter-exclude-${valueName}`);
await excludeButton.first().click();
await this.scrollAndClick(filterCheckbox, `filter-exclude-${valueName}`);
}
/**
@ -73,10 +84,7 @@ export class FilterComponent {
*/
async pinFilter(valueName: string) {
const filterCheckbox = this.getFilterCheckbox(valueName);
await filterCheckbox.hover();
const pinButton = this.page.getByTestId(`filter-pin-${valueName}`);
await pinButton.click();
await this.scrollAndClick(filterCheckbox, `filter-pin-${valueName}`);
}
/**
@ -164,4 +172,74 @@ export class FilterComponent {
getFilterValues(filterGroupName: string) {
return this.page.getByTestId(`filter-checkbox-${filterGroupName}`);
}
/**
* Click "Load more" or "Show more" for a filter group if visible, so that
* all options (or more options) are shown. Use when pickVisibleFilterValues
* might otherwise only see a limited initial set.
*/
async ensureFilterOptionsExpanded(filterGroupName: string): Promise<void> {
const group = this.getFilterGroup(filterGroupName);
const loadMore = this.page.getByTestId(
`filter-load-more-${filterGroupName}`,
);
const showMore = this.page.getByTestId(
`filter-show-more-${filterGroupName}`,
);
if (await loadMore.isVisible()) {
await loadMore.click();
await group
.getByText('Loading more...')
.waitFor({ state: 'hidden', timeout: 15000 })
.catch(() => {});
}
if (await showMore.isVisible()) {
const text = (await showMore.textContent()) ?? '';
if (text.includes('Show more')) {
await showMore.click();
}
}
}
/**
* Open a filter group and return the first N filter values from the candidate
* list that are visible in the UI. Use seed constants (e.g. SEVERITIES) as
* candidates so tests don't rely on a single value that may not be present.
* Expands "Load more" / "Show more" if needed so hidden options are visible.
* @param filterGroupName - e.g. 'SeverityText', 'ServiceName'
* @param candidates - possible values from seed (e.g. SEVERITIES from seed-clickhouse)
* @param count - number of visible values to return (default 2)
* @returns array of up to `count` values that are visible
*/
async pickVisibleFilterValues(
filterGroupName: string,
candidates: readonly string[],
count: number = 2,
): Promise<string[]> {
await this.openFilterGroup(filterGroupName);
// Wait for initial facet options to load
const group = this.getFilterGroup(filterGroupName);
await group
.locator('[data-testid^="filter-checkbox-input-"]')
.first()
.waitFor({ state: 'visible', timeout: 10000 });
await this.ensureFilterOptionsExpanded(filterGroupName);
const visible: string[] = [];
for (const value of candidates) {
if (visible.length >= count) break;
const input = this.getFilterCheckboxInput(value);
if (await input.isVisible()) visible.push(value);
}
if (visible.length < count) {
throw new Error(
`pickVisibleFilterValues: expected at least ${count} visible values in ${filterGroupName} from [${candidates.join(', ')}], got ${visible.length}`,
);
}
return visible;
}
}

View file

@ -3,7 +3,7 @@
* Used for creating and managing saved searches
* Not used until Saved Search functionality is implemented
*/
import { Locator, Page } from '@playwright/test';
import { expect, Locator, Page } from '@playwright/test';
export class SavedSearchModalComponent {
readonly page: Page;
@ -69,6 +69,37 @@ export class SavedSearchModalComponent {
await this.submit();
}
/**
* Save search and wait for URL to change to the saved search page
* This is more reliable than waiting separately for modal close and URL change
*/
async saveSearchAndWaitForNavigation(
name: string,
tags: string[] = [],
): Promise<void> {
await this.fillName(name);
for (const tag of tags) {
await this.addTag(tag);
}
// Wait for submit button to be enabled (form might need validation time)
await expect(this.submitButton).toBeEnabled({ timeout: 5000 });
// Start waiting for URL change BEFORE clicking submit to avoid race condition
const urlPromise = this.page.waitForURL(/\/search\/[a-f0-9]+/, {
timeout: 15000,
});
await this.submit();
// Wait for navigation to complete
await urlPromise;
// Wait for modal to fully close
await expect(this.container).toBeHidden();
}
/**
* Get tag elements
*/

View file

@ -24,8 +24,12 @@ export class TableComponent {
/**
* Wait for at least one row to populate
*/
async waitForRowsToPopulate(timeout: number = 5000) {
await this.firstRow.waitFor({ state: 'visible', timeout });
async waitForRowsToPopulate(allowEmpty: boolean = false) {
if (allowEmpty) {
await this.tableContainer.waitFor({ state: 'visible', timeout: 5000 });
} else {
await this.firstRow.waitFor({ state: 'visible', timeout: 5000 });
}
}
/**

View file

@ -71,6 +71,7 @@ test.describe('Navigation', { tag: ['@core'] }, () => {
await test.step('Navigate between each page', async () => {
for (const { testId, contentTestId } of navLinks) {
const link = page.locator(`[data-testid="${testId}"]`);
await link.scrollIntoViewIfNeeded();
await link.click();
const content = page.locator(`[data-testid="${contentTestId}"]`);

View file

@ -15,6 +15,33 @@ services:
reservations:
memory: 512M
cpus: '0.5'
ch-server:
image: clickhouse/clickhouse-server:25.6-alpine
ports:
- 8123:8123 # http api
- 9000:9000 # native
environment:
CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT: 1
volumes:
- ../../../../docker/clickhouse/local/config.xml:/etc/clickhouse-server/config.xml
- ../../../../docker/clickhouse/local/users.xml:/etc/clickhouse-server/users.xml
- ../../../../docker/clickhouse/local/init-db-e2e.sh:/docker-entrypoint-initdb.d/init-db.sh
networks:
- internal
deploy:
resources:
limits:
memory: 2G
cpus: '2.0'
reservations:
memory: 512M
cpus: '0.5'
healthcheck:
test: ['CMD', 'wget', '--spider', '-q', 'http://127.0.0.1:8123/ping']
interval: 5s
timeout: 3s
retries: 10
start_period: 30s
networks:
internal:
name: 'hyperdx-e2e-internal-network'

View file

@ -1,5 +1,9 @@
import { DashboardPage } from '../page-objects/DashboardPage';
import { expect, test } from '../utils/base-test';
import {
DEFAULT_LOGS_SOURCE_NAME,
DEFAULT_METRICS_SOURCE_NAME,
} from '../utils/constants';
test.describe('Dashboard', { tag: ['@dashboard'] }, () => {
let dashboardPage: DashboardPage;
@ -137,10 +141,10 @@ test.describe('Dashboard', { tag: ['@dashboard'] }, () => {
// Select source and create chart with specific metric
await expect(dashboardPage.chartEditor.source).toBeVisible();
await dashboardPage.chartEditor.createChartWithMetric(
'K8s CPU Chart',
'Demo Metrics',
'k8s.container.cpu_limit',
'k8s.container.cpu_limit:::::::gauge',
'K8s Pod CPU Chart',
DEFAULT_METRICS_SOURCE_NAME,
'k8s.pod.cpu.utilization',
'k8s.pod.cpu.utilization:::::::gauge',
);
});
@ -253,92 +257,102 @@ test.describe('Dashboard', { tag: ['@dashboard'] }, () => {
},
);
test('should create and populate filters', async () => {
test.setTimeout(30000);
test(
'should create and populate filters',
{ tag: '@full-stack' },
async () => {
test.setTimeout(30000);
await test.step('Create new dashboard', async () => {
await expect(dashboardPage.createButton).toBeVisible();
await dashboardPage.createNewDashboard();
});
await test.step('Create a table tile to filter', async () => {
await dashboardPage.addTile();
await dashboardPage.chartEditor.createTable({
chartName: 'Test Table',
sourceName: 'Demo Logs',
groupBy: 'ServiceName',
await test.step('Create new dashboard', async () => {
await expect(dashboardPage.createButton).toBeVisible();
await dashboardPage.createNewDashboard();
});
const accountCell = dashboardPage.page.getByTitle('accounting', {
exact: true,
await test.step('Create a table tile to filter', async () => {
await dashboardPage.addTile();
await dashboardPage.chartEditor.createTable({
chartName: 'Test Table',
sourceName: DEFAULT_LOGS_SOURCE_NAME,
groupBy: 'ServiceName',
});
const accountCell = dashboardPage.page.getByTitle('accounting', {
exact: true,
});
const adCell = dashboardPage.page.getByTitle('ad', { exact: true });
await expect(accountCell).toBeVisible();
await expect(adCell).toBeVisible();
});
const adCell = dashboardPage.page.getByTitle('ad', { exact: true });
await expect(accountCell).toBeVisible();
await expect(adCell).toBeVisible();
});
await test.step('Add ServiceName filter to dashboard', async () => {
await dashboardPage.openEditFiltersModal();
await expect(dashboardPage.emptyFiltersList).toBeVisible();
await test.step('Add ServiceName filter to dashboard', async () => {
await dashboardPage.openEditFiltersModal();
await expect(dashboardPage.emptyFiltersList).toBeVisible();
await dashboardPage.addFilterToDashboard(
'Service',
'Demo Logs',
'ServiceName',
);
await dashboardPage.addFilterToDashboard(
'Service',
DEFAULT_LOGS_SOURCE_NAME,
'ServiceName',
);
await expect(dashboardPage.getFilterItemByName('Service')).toBeVisible();
await expect(
dashboardPage.getFilterItemByName('Service'),
).toBeVisible();
await dashboardPage.closeFiltersModal();
});
await test.step('Add MetricName filter to dashboard', async () => {
await dashboardPage.openEditFiltersModal();
await expect(dashboardPage.filtersList).toBeVisible();
await dashboardPage.addFilterToDashboard(
'Metric',
'Demo Metrics',
'MetricName',
'gauge',
);
await expect(dashboardPage.getFilterItemByName('Metric')).toBeVisible();
await dashboardPage.closeFiltersModal();
});
await test.step('Verify tiles are filtered', async () => {
// Select 'accounting' in Service filter
await dashboardPage.clickFilterOption('Service', 'accounting');
const accountCell = dashboardPage.page.getByTitle('accounting', {
exact: true,
await dashboardPage.closeFiltersModal();
});
await expect(accountCell).toBeVisible();
// 'ad' ServiceName row should be filtered out
const adCell = dashboardPage.page.getByTitle('ad', { exact: true });
await expect(adCell).toHaveCount(0);
});
await test.step('Add MetricName filter to dashboard', async () => {
await dashboardPage.openEditFiltersModal();
await expect(dashboardPage.filtersList).toBeVisible();
await test.step('Verify metric filter is populated', async () => {
await dashboardPage.clickFilterOption(
'Metric',
'container.cpu.utilization',
);
});
await dashboardPage.addFilterToDashboard(
'Metric',
DEFAULT_METRICS_SOURCE_NAME,
'MetricName',
'gauge',
);
await test.step('Delete a filter and verify it is removed', async () => {
await dashboardPage.openEditFiltersModal();
await dashboardPage.deleteFilterFromDashboard('Metric');
await expect(dashboardPage.getFilterItemByName('Metric')).toBeVisible();
// Service filter should still be visible
await expect(dashboardPage.getFilterItemByName('Service')).toBeVisible();
await dashboardPage.closeFiltersModal();
});
// Metric filter should be gone
await expect(dashboardPage.getFilterItemByName('Metric')).toHaveCount(0);
});
});
await test.step('Verify tiles are filtered', async () => {
// Select 'accounting' in Service filter
await dashboardPage.clickFilterOption('Service', 'accounting');
const accountCell = dashboardPage.page.getByTitle('accounting', {
exact: true,
});
await expect(accountCell).toBeVisible();
// 'ad' ServiceName row should be filtered out
const adCell = dashboardPage.page.getByTitle('ad', { exact: true });
await expect(adCell).toHaveCount(0);
});
await test.step('Verify metric filter is populated', async () => {
await dashboardPage.clickFilterOption(
'Metric',
'container.cpu.utilization',
);
});
await test.step('Delete a filter and verify it is removed', async () => {
await dashboardPage.openEditFiltersModal();
await dashboardPage.deleteFilterFromDashboard('Metric');
// Service filter should still be visible
await expect(
dashboardPage.getFilterItemByName('Service'),
).toBeVisible();
// Metric filter should be gone
await expect(dashboardPage.getFilterItemByName('Metric')).toHaveCount(
0,
);
});
},
);
});

View file

@ -1,3 +1,9 @@
import { SERVICES, SEVERITIES } from 'tests/e2e/seed-clickhouse';
import {
DEFAULT_LOGS_SOURCE_NAME,
DEFAULT_TRACES_SOURCE_NAME,
} from 'tests/e2e/utils/constants';
import { SearchPage } from '../../page-objects/SearchPage';
import { expect, test } from '../../utils/base-test';
@ -34,10 +40,9 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
await searchPage.setCustomSELECT(customSelect);
await searchPage.submitEmptySearch();
await searchPage.openSaveSearchModal();
await searchPage.savedSearchModal.saveSearch('Custom Select Search');
await expect(searchPage.savedSearchModal.container).toBeHidden();
await page.waitForURL(/\/search\/[a-f0-9]+/, { timeout: 5000 });
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'Custom Select Search',
);
});
const savedSearchAUrl = page.url().split('?')[0];
@ -48,10 +53,9 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
// Keep default SELECT (don't modify it)
await searchPage.submitEmptySearch();
await searchPage.openSaveSearchModal();
await searchPage.savedSearchModal.saveSearch('Default Select Search');
await expect(searchPage.savedSearchModal.container).toBeHidden();
await page.waitForURL(/\/search\/[a-f0-9]+/, { timeout: 10000 });
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'Default Select Search',
);
});
await test.step('Navigate back to first saved search', async () => {
@ -87,12 +91,9 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
await searchPage.setCustomSELECT(customSelect);
await searchPage.submitEmptySearch();
await searchPage.openSaveSearchModal();
await searchPage.savedSearchModal.saveSearch(
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'Custom Select Source Test',
);
await expect(searchPage.savedSearchModal.container).toBeHidden();
await page.waitForURL(/\/search\/[a-f0-9]+/, { timeout: 5000 });
});
const savedSearchUrl = page.url().split('?')[0];
@ -132,27 +133,22 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
'should use default SELECT when switching sources within a saved search',
{ tag: '@full-stack' },
async ({ page }) => {
let originalSourceName: string | null = null;
await test.step('Create and navigate to saved search', async () => {
const customSelect =
'Timestamp, Body, lower(ServiceName) as service_name';
await searchPage.setCustomSELECT(customSelect);
await searchPage.submitEmptySearch();
await searchPage.openSaveSearchModal();
await searchPage.savedSearchModal.saveSearch('Source Switching Test');
await expect(searchPage.savedSearchModal.container).toBeHidden();
await page.waitForURL(/\/search\/[a-f0-9]+/, { timeout: 5000 });
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'Source Switching Test',
);
});
await test.step('Switch to different source via dropdown', async () => {
originalSourceName = await searchPage.currentSource.inputValue();
await searchPage.sourceDropdown.click();
await searchPage.otherSources.first().click();
await searchPage.selectSource(DEFAULT_TRACES_SOURCE_NAME);
await page.waitForLoadState('networkidle');
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
});
await test.step('Verify SELECT changed to the new source default', async () => {
@ -167,14 +163,9 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
await test.step('Switch back to original source via dropdown', async () => {
await searchPage.sourceDropdown.click();
await page
.getByRole('option', {
name: originalSourceName || '',
exact: true,
})
.click();
await searchPage.selectSource(DEFAULT_LOGS_SOURCE_NAME);
await page.waitForLoadState('networkidle');
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
});
await test.step('Verify SELECT is search custom SELECT', async () => {
@ -214,18 +205,14 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
// Submit the search to ensure configuration is applied
await searchPage.submitButton.click();
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
// Save the search
await searchPage.openSaveSearchModal();
await searchPage.savedSearchModal.saveSearch(
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'Info Logs Navigation Test',
);
// Wait for save to complete and URL to change
await expect(searchPage.savedSearchModal.container).toBeHidden();
await page.waitForURL(/\/search\/[a-f0-9]+/, { timeout: 5000 });
// Capture the saved search URL (without query params)
savedSearchUrl = page.url().split('?')[0];
});
@ -285,13 +272,10 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
await searchPage.setCustomSELECT(customSelect);
await searchPage.performSearch('ServiceName:frontend');
await searchPage.openSaveSearchModal();
await searchPage.savedSearchModal.saveSearch(
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'Custom Select Navigation Test',
);
await expect(searchPage.savedSearchModal.container).toBeHidden();
await page.waitForURL(/\/search\/[a-f0-9]+/, { timeout: 5000 });
savedSearchUrl = page.url().split('?')[0];
});
@ -307,7 +291,7 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
await test.step('Verify custom SELECT is preserved', async () => {
// Wait for results to load
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
// Verify SELECT content
const selectEditor = searchPage.getSELECTEditor();
@ -331,10 +315,9 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
await test.step('Create and save a search', async () => {
await searchPage.performSearch('SeverityText:info');
await searchPage.openSaveSearchModal();
await searchPage.savedSearchModal.saveSearch('Browser Navigation Test');
await expect(searchPage.savedSearchModal.container).toBeHidden();
await page.waitForURL(/\/search\/[a-f0-9]+/, { timeout: 5000 });
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'Browser Navigation Test',
);
});
await test.step('Navigate to sessions page', async () => {
@ -386,19 +369,16 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
// Submit and save the search
await searchPage.submitEmptySearch();
await searchPage.openSaveSearchModal();
await searchPage.savedSearchModal.saveSearch(
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'ORDER BY Multiple Source Switch Test',
);
await expect(searchPage.savedSearchModal.container).toBeHidden();
await page.waitForURL(/\/search\/[a-f0-9]+/, { timeout: 5000 });
});
await test.step('Switch to second source', async () => {
await searchPage.sourceDropdown.click();
await searchPage.otherSources.first().click();
await page.waitForLoadState('networkidle');
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
});
await test.step('Verify ORDER BY changed to second source default', async () => {
@ -423,7 +403,7 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
})
.click();
await page.waitForLoadState('networkidle');
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
});
await test.step('Verify ORDER BY restored to saved search custom value', async () => {
@ -452,7 +432,12 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
let savedSearchUrl: string;
let appliedFilterValue: string;
await test.step('Apply filters in the sidebar', async () => {
appliedFilterValue = 'accounting';
const [picked] = await searchPage.filters.pickVisibleFilterValues(
'SeverityText',
SEVERITIES,
1,
);
appliedFilterValue = picked;
// Apply the filter
await searchPage.filters.applyFilter(appliedFilterValue);
@ -464,25 +449,22 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
// Submit search to apply filters
await searchPage.submitButton.click();
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
});
await test.step('Create and save the search with filters', async () => {
await searchPage.openSaveSearchModal();
await searchPage.savedSearchModal.saveSearch(
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'Search with Filters Test',
);
await expect(searchPage.savedSearchModal.container).toBeHidden();
await page.waitForURL(/\/search\/[a-f0-9]+/, { timeout: 5000 });
// Capture the saved search URL
savedSearchUrl = page.url().split('?')[0];
});
await test.step('Navigate to a fresh search page', async () => {
await searchPage.goto();
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
});
await test.step('Verify filters are cleared on new search page', async () => {
@ -498,7 +480,7 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
await test.step('Navigate back to the saved search', async () => {
await page.goto(savedSearchUrl);
await expect(page.getByTestId('search-page')).toBeVisible();
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
});
await test.step('Verify filters are restored from saved search', async () => {
@ -520,48 +502,59 @@ test.describe('Saved Search Functionality', { tag: '@full-stack' }, () => {
/**
* Verifies that updating a saved search with additional filters
* persists and restores both the original and new filters.
* Uses the same fixed filter values as "should save and restore filters"
* for consistency and reliability.
* Picks visible filter values from seed (SEVERITIES) so tests don't
* rely on a single value that may not appear in the UI.
*/
const firstFilter = 'accounting';
const secondFilter = 'info';
const [firstFilter] = await searchPage.filters.pickVisibleFilterValues(
'ServiceName',
SERVICES,
1,
);
const [secondFilter] = await searchPage.filters.pickVisibleFilterValues(
'SeverityText',
SEVERITIES,
1,
);
const firstFilterGroup = 'ServiceName';
const secondFilterGroup = 'SeverityText';
let savedSearchUrl: string;
await test.step('Create saved search with one filter', async () => {
await searchPage.filters.openFilterGroup('SeverityText');
await searchPage.filters.openFilterGroup(firstFilterGroup);
await searchPage.filters.applyFilter(firstFilter);
await searchPage.submitButton.click();
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
await searchPage.openSaveSearchModal();
await searchPage.savedSearchModal.saveSearch('Updatable Filter Search');
await expect(searchPage.savedSearchModal.container).toBeHidden();
await page.waitForURL(/\/search\/[a-f0-9]+/, { timeout: 5000 });
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'Updatable Filter Search',
);
savedSearchUrl = page.url().split('?')[0];
});
await test.step('Update saved search with second filter', async () => {
await searchPage.filters.openFilterGroup('SeverityText');
await searchPage.filters.openFilterGroup(secondFilterGroup);
await searchPage.filters.applyFilter(secondFilter);
await searchPage.submitButton.click();
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
await searchPage.openSaveSearchModal({ update: true });
await searchPage.savedSearchModal.submit();
await page.waitForLoadState('networkidle');
await searchPage.savedSearchModal.saveSearchAndWaitForNavigation(
'Updatable Filter Search updated',
);
});
await test.step('Navigate away and back', async () => {
await searchPage.goto();
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
await page.goto(savedSearchUrl);
await expect(page.getByTestId('search-page')).toBeVisible();
await searchPage.table.waitForRowsToPopulate();
await searchPage.table.waitForRowsToPopulate(true);
});
await test.step('Verify both filters are restored', async () => {
await searchPage.filters.openFilterGroup('SeverityText');
await searchPage.filters.openFilterGroup(firstFilterGroup);
await searchPage.filters.openFilterGroup(secondFilterGroup);
await expect(
searchPage.filters.getFilterCheckboxInput(firstFilter),
).toBeChecked();

View file

@ -3,40 +3,22 @@ import { expect, test } from '../../utils/base-test';
test.describe('Search Filters', { tag: ['@search'] }, () => {
let searchPage: SearchPage;
let availableFilterValue: string | null = null;
// Using known seeded data - 'info' severity always exists in test data
const TEST_FILTER_VALUE = 'info';
test.beforeEach(async ({ page }) => {
searchPage = new SearchPage(page);
await searchPage.goto();
// Find an available filter value once and reuse across tests
if (!availableFilterValue) {
await searchPage.filters.openFilterGroup('SeverityText');
// Get first visible filter checkbox
const firstCheckbox = searchPage.page
.locator('[data-testid^="filter-checkbox-"]')
.first();
const testId = await firstCheckbox.getAttribute('data-testid');
// Extract the value name from data-testid="filter-checkbox-{value}"
if (testId) {
availableFilterValue = testId.replace('filter-checkbox-', '');
}
}
await searchPage.filters.openFilterGroup('SeverityText');
});
test('Should apply filters', async () => {
// Use filter component to open filter group
await searchPage.filters.openFilterGroup('SeverityText');
// Apply the filter using component method
const filterInput = searchPage.filters.getFilterCheckboxInput(
availableFilterValue!,
);
const filterInput =
searchPage.filters.getFilterCheckboxInput(TEST_FILTER_VALUE);
await expect(filterInput).toBeVisible();
await searchPage.filters.applyFilter(availableFilterValue!);
await searchPage.filters.applyFilter(TEST_FILTER_VALUE);
// Verify filter is checked
await expect(filterInput).toBeChecked();
@ -47,63 +29,47 @@ test.describe('Search Filters', { tag: ['@search'] }, () => {
test('Should exclude filters', async () => {
// Use filter component to exclude the filter
await searchPage.filters.excludeFilter(availableFilterValue!);
await searchPage.filters.excludeFilter(TEST_FILTER_VALUE);
// Verify filter shows as excluded using web-first assertion
const isExcluded = await searchPage.filters.isFilterExcluded(
availableFilterValue!,
);
const isExcluded =
await searchPage.filters.isFilterExcluded(TEST_FILTER_VALUE);
expect(isExcluded).toBe(true);
});
test('Should clear filters', async () => {
await searchPage.filters.clearFilter(availableFilterValue!);
await searchPage.filters.clearFilter(TEST_FILTER_VALUE);
// Verify filter is no longer checked
const filterInput = searchPage.filters.getFilterCheckboxInput(
availableFilterValue!,
);
const filterInput =
searchPage.filters.getFilterCheckboxInput(TEST_FILTER_VALUE);
await expect(filterInput).not.toBeChecked();
});
test('Should search for and apply filters', async () => {
// Use filter component's helper to find a filter with search capability
const skipFilters = ['severity', 'level'];
const filterName =
await searchPage.filters.findFilterWithSearch(skipFilters);
if (filterName) {
// Search input is already visible from findFilterWithSearch
// Test the search functionality
await searchPage.filters.searchFilterValues(filterName, 'test');
// Verify search input has the value
const searchInput = searchPage.filters.getFilterSearchInput(filterName);
await expect(searchInput).toHaveValue('test');
// Clear the search
await searchPage.filters.clearFilterSearch(filterName);
// Verify search input is cleared
await expect(searchInput).toHaveValue('');
}
const filterName = 'SeverityText';
await searchPage.filters.openFilterGroup(filterName);
await searchPage.filters.searchFilterValues(filterName, 'test');
const searchInput = searchPage.filters.getFilterSearchInput(filterName);
await expect(searchInput).toHaveValue('test');
await searchPage.filters.clearFilterSearch(filterName);
await expect(searchInput).toHaveValue('');
});
test('Should pin filter and verify it persists after reload', async () => {
await searchPage.filters.pinFilter(availableFilterValue!);
await searchPage.filters.pinFilter(TEST_FILTER_VALUE);
// Reload page and verify filter persists
await searchPage.page.reload();
// Verify filter checkbox is still visible
const filterCheckbox = searchPage.filters.getFilterCheckbox(
availableFilterValue!,
);
const filterCheckbox =
searchPage.filters.getFilterCheckbox(TEST_FILTER_VALUE);
await expect(filterCheckbox).toBeVisible();
//verify there is a pin icon
const pinIcon = searchPage.page.getByTestId(
`filter-pin-${availableFilterValue!}-pinned`,
`filter-pin-${TEST_FILTER_VALUE}-pinned`,
);
await expect(pinIcon).toBeVisible();
});

View file

@ -73,9 +73,9 @@ test.describe('Search', { tag: '@search' }, () => {
test('Search with Different Query Types - Lucene', async () => {
await test.step('Test multiple search query types', async () => {
const queries = [
'cart',
'ServiceName:"accounting"',
'*info*',
'Order',
'ServiceName:"CartService"',
'*Order*',
'SeverityText:"info"',
];
@ -152,7 +152,7 @@ test.describe('Search', { tag: '@search' }, () => {
await test.step('Perform search with selected time range', async () => {
// Clear and submit using page object methods
await searchPage.clearSearch();
await searchPage.performSearch('test');
await searchPage.performSearch('Order');
});
await test.step('Verify search results', async () => {

View file

@ -1,5 +1,11 @@
import { SearchPage } from '../page-objects/SearchPage';
import { expect, test } from '../utils/base-test';
import {
DEFAULT_LOGS_SOURCE_NAME,
DEFAULT_METRICS_SOURCE_NAME,
DEFAULT_SESSIONS_SOURCE_NAME,
DEFAULT_TRACES_SOURCE_NAME,
} from '../utils/constants';
const COMMON_FIELDS = [
'Name',
@ -59,18 +65,30 @@ const METRIC_FIELDS = [
];
const editableSourcesData = [
{ name: 'Demo Logs', fields: LOG_FIELDS, radioButtonName: 'Log' },
{ name: 'Demo Traces', fields: TRACE_FIELDS, radioButtonName: 'Trace' },
{
name: DEFAULT_LOGS_SOURCE_NAME,
fields: LOG_FIELDS,
radioButtonName: 'Log',
},
{
name: DEFAULT_TRACES_SOURCE_NAME,
fields: TRACE_FIELDS,
radioButtonName: 'Trace',
},
];
const allSourcesData = [
...editableSourcesData,
{
name: 'Demo Metrics',
name: DEFAULT_METRICS_SOURCE_NAME,
fields: METRIC_FIELDS,
radioButtonName: 'OTEL Metrics',
},
{ name: 'Demo Sessions', fields: SESSION_FIELDS, radioButtonName: 'Session' },
{
name: DEFAULT_SESSIONS_SOURCE_NAME,
fields: SESSION_FIELDS,
radioButtonName: 'Session',
},
];
test.describe('Sources Functionality', { tag: ['@sources'] }, () => {

View file

@ -0,0 +1,97 @@
{
"connections": [
{
"id": "local",
"name": "local",
"host": "http://localhost:8123",
"username": "default",
"password": ""
}
],
"sources": [
{
"id": "E2E Logs",
"kind": "log",
"name": "E2E Logs",
"connection": "local",
"from": { "databaseName": "default", "tableName": "e2e_otel_logs" },
"timestampValueExpression": "TimestampTime",
"defaultTableSelectExpression": "Timestamp, ServiceName, SeverityText, Body",
"serviceNameExpression": "ServiceName",
"severityTextExpression": "SeverityText",
"eventAttributesExpression": "LogAttributes",
"resourceAttributesExpression": "ResourceAttributes",
"traceIdExpression": "TraceId",
"spanIdExpression": "SpanId",
"implicitColumnExpression": "Body",
"displayedTimestampValueExpression": "Timestamp",
"sessionSourceId": "E2E Sessions",
"traceSourceId": "E2E Traces",
"metricSourceId": "E2E Metrics"
},
{
"id": "E2E Traces",
"kind": "trace",
"name": "E2E Traces",
"connection": "local",
"from": { "databaseName": "default", "tableName": "e2e_otel_traces" },
"timestampValueExpression": "Timestamp",
"defaultTableSelectExpression": "Timestamp, ServiceName, StatusCode, round(Duration / 1e6), SpanName",
"serviceNameExpression": "ServiceName",
"eventAttributesExpression": "SpanAttributes",
"resourceAttributesExpression": "ResourceAttributes",
"traceIdExpression": "TraceId",
"spanIdExpression": "SpanId",
"implicitColumnExpression": "SpanName",
"durationExpression": "Duration",
"durationPrecision": 9,
"parentSpanIdExpression": "ParentSpanId",
"spanKindExpression": "SpanKind",
"spanNameExpression": "SpanName",
"logSourceId": "E2E Logs",
"statusCodeExpression": "StatusCode",
"statusMessageExpression": "StatusMessage",
"spanEventsValueExpression": "Events",
"metricSourceId": "E2E Metrics",
"sessionSourceId": "E2E Sessions"
},
{
"id": "E2E Metrics",
"kind": "metric",
"name": "E2E Metrics",
"connection": "local",
"from": { "databaseName": "default", "tableName": "" },
"timestampValueExpression": "TimeUnix",
"serviceNameExpression": "ServiceName",
"metricTables": {
"gauge": "e2e_otel_metrics_gauge",
"histogram": "e2e_otel_metrics_histogram",
"sum": "e2e_otel_metrics_sum",
"summary": "e2e_otel_metrics_summary",
"exponential histogram": "e2e_otel_metrics_exponential_histogram"
},
"resourceAttributesExpression": "ResourceAttributes",
"logSourceId": "E2E Logs"
},
{
"id": "E2E Sessions",
"kind": "session",
"name": "E2E Sessions",
"connection": "local",
"from": {
"databaseName": "default",
"tableName": "e2e_hyperdx_sessions"
},
"timestampValueExpression": "TimestampTime",
"defaultTableSelectExpression": "Timestamp, ServiceName, Body",
"serviceNameExpression": "ServiceName",
"severityTextExpression": "SeverityText",
"eventAttributesExpression": "LogAttributes",
"resourceAttributesExpression": "ResourceAttributes",
"traceSourceId": "E2E Traces",
"traceIdExpression": "TraceId",
"spanIdExpression": "SpanId",
"implicitColumnExpression": "Body"
}
]
}

View file

@ -18,6 +18,8 @@ import fs from 'fs';
import path from 'path';
import { chromium, FullConfig } from '@playwright/test';
import { seedClickHouse } from './seed-clickhouse';
// Configuration constants
const API_HEALTH_CHECK_MAX_RETRIES = parseInt(
process.env.E2E_API_HEALTH_CHECK_MAX_RETRIES || '30',
@ -72,11 +74,14 @@ function clearDatabase() {
async function globalSetup(_config: FullConfig) {
console.log('Setting up full-stack E2E environment');
console.log(' MongoDB: local (auth, teams, persistence)');
console.log(' ClickHouse: demo instance (telemetry data)');
console.log(' ClickHouse: local instance (telemetry data)');
// Set timezone
process.env.TZ = 'America/New_York';
// Seed ClickHouse with test data
await seedClickHouse();
// Clean up any existing auth state to ensure fresh setup
if (fs.existsSync(AUTH_FILE)) {
console.log(' Removing existing auth state');
@ -261,7 +266,7 @@ async function globalSetup(_config: FullConfig) {
console.log('Full-stack E2E setup complete');
console.log(
' Using demo ClickHouse data for logs, traces, metrics, and K8s',
' Using local ClickHouse with seeded test data for logs, traces, metrics, and K8s',
);
} catch (error) {
console.error('Setup failed:', error);

View file

@ -0,0 +1,20 @@
/**
* Global setup for local E2E mode
*
* Local mode only needs to seed ClickHouse (no MongoDB, no auth)
*/
import { FullConfig } from '@playwright/test';
import { seedClickHouse } from './seed-clickhouse';
/**
 * Playwright global setup for local (frontend-only) E2E runs.
 *
 * Pins TZ to America/New_York for deterministic time rendering, then seeds
 * the local ClickHouse instance. Unlike the full-stack setup, no MongoDB or
 * auth bootstrap is performed.
 */
async function globalSetupLocal(_config: FullConfig): Promise<void> {
  console.log('Setting up local E2E environment (seeding ClickHouse)');
  process.env.TZ = 'America/New_York';
  await seedClickHouse();
  console.log('Local E2E setup complete');
}
export default globalSetupLocal;

View file

@ -94,7 +94,7 @@ export class DashboardPage {
* Navigate to dashboards list
*/
async goto() {
await this.page.goto('/dashboards');
await this.page.goto('/dashboards', { waitUntil: 'networkidle' });
}
/**

View file

@ -0,0 +1,739 @@
/**
* Seeds local ClickHouse instance with test data for E2E tests
*
* Populates e2e_otel_logs, e2e_otel_traces, and e2e_hyperdx_sessions tables
* with sample data. Timestamps are spread across a window that includes both
* past and future relative to seed time ([seedRef - PAST_MS, seedRef + FUTURE_MS]).
* This keeps "last 5 minutes" and similar relative time ranges finding data for
* a reasonable period after seeding (e.g. ~2h). Optional: E2E_SEED_FUTURE_MS env
* to tune the future buffer.
*/
// Connection settings for the local ClickHouse HTTP interface.
interface ClickHouseConfig {
  host: string; // HTTP endpoint, e.g. http://localhost:8123
  user: string; // ClickHouse user name
  password: string; // empty string means "no password sent"
}
// Defaults are overridable via env vars so CI / docker-compose can point the
// seeder at a differently-addressed instance.
const DEFAULT_CONFIG: ClickHouseConfig = {
  host: process.env.CLICKHOUSE_HOST || 'http://localhost:8123',
  user: process.env.CLICKHOUSE_USER || 'default',
  password: process.env.CLICKHOUSE_PASSWORD || '',
};
/**
 * Builds a minimal ClickHouse HTTP client bound to `config`.
 *
 * Credentials are sent as URL query parameters (`user`, plus `password`
 * only when non-empty). The returned object exposes a single `query`
 * method that POSTs raw SQL as text/plain and resolves with the response
 * body, or rejects with an Error carrying the HTTP status and error text.
 */
function createClickHouseClient(config: ClickHouseConfig = DEFAULT_CONFIG) {
  const endpoint = new URL(config.host);
  endpoint.searchParams.set('user', config.user);
  if (config.password) {
    endpoint.searchParams.set('password', config.password);
  }

  async function query(sql: string): Promise<string> {
    const response = await fetch(endpoint.toString(), {
      method: 'POST',
      headers: { 'Content-Type': 'text/plain' },
      body: sql,
    });
    if (response.ok) {
      return response.text();
    }
    const detail = await response.text();
    throw new Error(`ClickHouse query failed (${response.status}): ${detail}`);
  }

  return { query };
}
// Test data constants (exported for E2E tests to pick visible filter values)
export const SEVERITIES = ['info', 'warn', 'error', 'debug'] as const;
export const SERVICES = [
'api-server',
'frontend',
'CartService',
'worker',
'database',
'accounting',
'ad',
'payment-service',
'notification-service',
'inventory-service',
] as const;
const LOG_MESSAGES = [
'Request processed successfully',
'Database connection established',
'Cache hit for key',
'User authentication successful',
'Background info job completed',
'Health check passed',
'Configuration loaded',
'Metrics exported',
'Order created',
'Order info updated',
'Order deleted',
'Order info fetched',
'Order listed',
'Order searched',
'Order canceled',
'Order completed',
'Order info refunded',
] as const;
const SPAN_NAMES = [
'GET /api/logs',
'POST /api/traces',
'AddItem',
'database.query',
'http.request',
'cache.get',
'auth.verify',
'Order create',
'Order update',
] as const;
const SPAN_KINDS = [
'SPAN_KIND_SERVER',
'SPAN_KIND_CLIENT',
'SPAN_KIND_INTERNAL',
'SPAN_KIND_PRODUCER',
'SPAN_KIND_CONSUMER',
'SPAN_KIND_UNSPECIFIED',
] as const;
// Kubernetes test data constants
const K8S_NAMESPACES = [
'default',
'kube-system',
'production',
'staging',
'development',
'monitoring',
'logging',
] as const;
const K8S_NODES = [
'node-1',
'node-2',
'node-3',
'node-4',
'node-5',
'node-6',
'node-7',
] as const;
const K8S_CLUSTERS = ['test-cluster'] as const;
// KubePhase enum: Pending = 1, Running = 2, Succeeded = 3, Failed = 4, Unknown = 5
const K8S_POD_PHASES = {
PENDING: 1,
RUNNING: 2,
SUCCEEDED: 3,
FAILED: 4,
UNKNOWN: 5,
} as const;
// Time window for seeded data: past + future so "last N minutes" finds data after seed
const PAST_MS = 60 * 60 * 1000; // 1 hour
// NOTE(review): a non-numeric or '0' E2E_SEED_FUTURE_MS silently falls back
// to the 2h default ('' / NaN / 0 are all falsy in the || chain), so the
// future buffer cannot be disabled through this env var — confirm intended.
const FUTURE_MS =
  (process.env.E2E_SEED_FUTURE_MS &&
    parseInt(process.env.E2E_SEED_FUTURE_MS, 10)) ||
  2 * 60 * 60 * 1000; // 2 hours default
/**
 * Builds the VALUES tuples for seeding e2e_otel_logs.
 *
 * Rows are spaced evenly across [startMs, endMs] (a single row sits at
 * startMs). Severity, service and message cycle through the exported
 * constant lists; the first 10 rows carry TraceId `trace-<i>` so they can
 * be correlated with the seeded traces.
 */
function generateLogData(
  count: number,
  startMs: number,
  endMs: number,
): string {
  const windowMs = endMs - startMs;
  const tuples = Array.from({ length: count }, (_, idx) => {
    const atMs =
      count > 1 ? startMs + (idx / (count - 1)) * windowMs : startMs;
    const tsNs = Math.round(atMs) * 1000000;
    const severity = SEVERITIES[idx % SEVERITIES.length];
    const service = SERVICES[idx % SERVICES.length];
    const body = LOG_MESSAGES[idx % LOG_MESSAGES.length];
    const traceId = idx < 10 ? `trace-${idx}` : ''; // Link first 10 logs to traces
    return `('${tsNs}', '${traceId}', '', 0, '${severity}', 0, '${service}', '${body}', '', {'service.name':'${service}','environment':'test'}, '', '', '', {}, {'request.id':'req-${idx}','user.id':'user-${idx % 5}'})`;
  });
  return tuples.join(',\n');
}
/**
 * Builds the VALUES tuples for K8s-flavoured rows in e2e_otel_logs.
 *
 * Rows are spread evenly across [startMs, endMs] and carry k8s.* resource
 * attributes (cluster/namespace/node/pod/container) chosen to line up with
 * the pods produced by the K8s metrics generators, so the Infrastructure
 * tab can correlate a log row with pod metrics. The first 10 rows link to
 * trace-<i>.
 */
function generateK8sLogData(
  count: number,
  startMs: number,
  endMs: number,
): string {
  const rows: string[] = [];
  const span = endMs - startMs;
  // Use a distinct message prefix so (Timestamp, Body, ServiceName, SeverityText) is unique
  // vs regular logs. The table builds row WHERE from those columns only; if they matched a
  // regular log, the side panel would fetch the wrong row and the Infrastructure tab wouldn't show.
  const K8S_LOG_MESSAGE_PREFIX = 'K8s ';
  for (let i = 0; i < count; i++) {
    const t = count > 1 ? startMs + (i / (count - 1)) * span : startMs;
    const timestampNs = Math.round(t) * 1000000;
    const severity = SEVERITIES[i % SEVERITIES.length];
    const message =
      K8S_LOG_MESSAGE_PREFIX + LOG_MESSAGES[i % LOG_MESSAGES.length];
    // Use existing pod/node/namespace data to correlate with metrics
    const podIdx = i % 30; // Match with the pods we generate in K8s metrics
    const namespace = K8S_NAMESPACES[podIdx % K8S_NAMESPACES.length];
    const node = K8S_NODES[podIdx % K8S_NODES.length];
    const cluster = K8S_CLUSTERS[0];
    const podName = `pod-${namespace}-${podIdx}`;
    const podUid = `uid-${podName}`;
    const containerName = `container-${podIdx}`;
    const serviceName = SERVICES[podIdx % SERVICES.length];
    const traceId = i < 10 ? `trace-${i}` : '';
    // NOTE(review): ResourceAttributes sets service.name to the pod name while
    // the ServiceName column uses SERVICES[...] — confirm the mismatch is intended.
    rows.push(
      `('${timestampNs}', '${traceId}', '', 0, '${severity}', 0, '${serviceName}', '${message}', '', {'k8s.cluster.name':'${cluster}','k8s.namespace.name':'${namespace}','k8s.node.name':'${node}','k8s.pod.name':'${podName}','k8s.pod.uid':'${podUid}','k8s.container.name':'${containerName}','service.name':'${podName}','environment':'test'}, '', '', '', {}, {'request.id':'req-${i}','container.id':'${containerName}'})`,
    );
  }
  return rows.join(',\n');
}
/**
 * Builds the VALUES tuples for seeding e2e_otel_traces.
 *
 * Spans are grouped into traces of 4 (trace-0, trace-1, ...) whose anchor
 * times are spread evenly across [startMs, endMs]. Generation stops as
 * soon as `count` spans have been produced, so the last trace may be
 * partial. Roughly 10% of traces get an error root span.
 */
function generateTraceData(
  count: number,
  startMs: number,
  endMs: number,
): string {
  const rows: string[] = [];
  const spansPerTrace = 4; // Each trace will have 4 spans
  const numTraces = Math.ceil(count / spansPerTrace);
  const span = endMs - startMs;
  for (let traceIdx = 0; traceIdx < numTraces; traceIdx++) {
    const traceId = `trace-${traceIdx}`;
    // Anchor each trace in the window; sibling spans stay close (100ms apart)
    const traceAnchor =
      numTraces > 1 ? startMs + (traceIdx / (numTraces - 1)) * span : startMs;
    const traceStartTime = traceAnchor;
    // Create spans within this trace; span N is the child of span N-1
    for (let spanIdx = 0; spanIdx < spansPerTrace; spanIdx++) {
      const spanId = `span-${traceIdx}-${spanIdx}`;
      const parentSpanId =
        spanIdx === 0 ? '' : `span-${traceIdx}-${spanIdx - 1}`;
      // Each child span's Timestamp is 100ms *earlier* than its parent's
      // (traceStartTime is in ms; the whole expression is scaled to ns)
      const timestampNs = (traceStartTime - spanIdx * 100) * 1000000;
      const service = SERVICES[spanIdx % SERVICES.length];
      // Use row count to cycle through all span names, not just first 4
      const spanName = SPAN_NAMES[rows.length % SPAN_NAMES.length];
      const spanKind =
        spanIdx === 0
          ? 'SPAN_KIND_SERVER'
          : SPAN_KINDS[spanIdx % SPAN_KINDS.length];
      // 10% error rate for root spans
      const isError = traceIdx % 10 === 0 && spanIdx === 0;
      const statusCode = isError ? 'STATUS_CODE_ERROR' : 'STATUS_CODE_OK';
      const httpStatusCode = isError ? '500' : '200';
      // Nested spans get a smaller fixed component, so durations *tend* to
      // shrink with depth — but the 0-500ms random term can outweigh the
      // 100ms step. NOTE(review): Math.random() makes seeded durations
      // non-deterministic per run; consider a fixed value if tests ever
      // assert on durations.
      const duration =
        Math.floor(Math.random() * 500000000) +
        (spansPerTrace - spanIdx) * 100000000;
      rows.push(
        `('${timestampNs}', '${traceId}', '${spanId}', '${parentSpanId}', '', '${spanName}', '${spanKind}', '${service}', {'service.name':'${service}','environment':'test'}, '', '', {'http.method':'GET','http.status_code':'${httpStatusCode}'}, ${duration}, '${statusCode}', '', [], [], [], [], [], [], [])`,
      );
      // Stop if we've generated enough spans
      if (rows.length >= count) {
        return rows.join(',\n');
      }
    }
  }
  return rows.join(',\n');
}
/**
 * Builds the VALUES tuples for seeding e2e_hyperdx_sessions.
 *
 * One row per session, spread evenly across [startMs, endMs]. Each row is
 * a browser "page_view" event tagged with rum.sessionId `session-<i>`,
 * TraceId `trace-<i>`, and one of five rotating test users.
 */
function generateSessionData(
  count: number,
  startMs: number,
  endMs: number,
): string {
  const windowMs = endMs - startMs;
  const tuples: string[] = [];
  for (let idx = 0; idx < count; idx++) {
    const atMs =
      count > 1 ? startMs + (idx / (count - 1)) * windowMs : startMs;
    const tsNs = Math.round(atMs) * 1000000;
    const user = idx % 5;
    tuples.push(
      `('${tsNs}', 'trace-${idx}', '', 0, 'INFO', 0, 'browser', '{"type":1,"data":"page_view"}', '', {'rum.sessionId':'session-${idx}','service.name':'browser'}, '', '', '', {}, {'page.url':'https://example.com/dashboard','user.id':'user-${user}','teamId':'test-team','teamName':'Test Team','userEmail':'test${user}@example.com','userName':'Test User ${user}'})`,
    );
  }
  return tuples.join(',\n');
}
/**
 * Builds the VALUES tuples for browser session spans in e2e_otel_traces.
 *
 * For each of `count` sessions (rum.sessionId `session-<i>`) this emits
 * 5-15 spans stepping 10s backwards from the session's anchor time. Half
 * the sessions begin with a 'record init' span (sessions with a replay
 * recording), every third event is tagged as a user interaction, and every
 * fifth session's first event is an error span.
 *
 * NOTE(review): events-per-session uses Math.random(), so the total row
 * count varies per run (roughly 5-15x `count`) — seeding of session spans
 * is not fully deterministic.
 */
function generateSessionTraces(
  count: number,
  startMs: number,
  endMs: number,
): string {
  const rows: string[] = [];
  const span = endMs - startMs;
  for (let i = 0; i < count; i++) {
    const sessionId = `session-${i}`;
    const baseTime = count > 1 ? startMs + (i / (count - 1)) * span : startMs;
    const eventsPerSession = 5 + Math.floor(Math.random() * 10); // 5-15 events per session
    for (let eventIdx = 0; eventIdx < eventsPerSession; eventIdx++) {
      const timestampNs = (baseTime - eventIdx * 10000) * 1000000; // Events 10s apart
      const traceId = `session-trace-${i}-${eventIdx}`;
      const spanId = `session-span-${i}-${eventIdx}`;
      // Some sessions should have user interactions
      const isUserInteraction = eventIdx % 3 === 0;
      const hasRecording = i % 2 === 0; // 50% of sessions have recordings
      const isError = eventIdx === 0 && i % 5 === 0; // 20% of sessions have errors
      const spanName =
        hasRecording && eventIdx === 0 ? 'record init' : 'page_view';
      const component = isUserInteraction ? 'user-interaction' : 'page-view';
      const statusCode = isError ? 'STATUS_CODE_ERROR' : 'STATUS_CODE_OK';
      const userIndex = i % 5;
      const userEmail = `test${userIndex}@example.com`;
      const userName = `Test User ${userIndex}`;
      const teamId = 'test-team-id';
      const teamName = 'Test Team';
      rows.push(
        `('${timestampNs}', '${traceId}', '${spanId}', '', '', '${spanName}', 'SPAN_KIND_INTERNAL', 'browser', {'rum.sessionId':'${sessionId}','service.name':'browser'}, '', '', {'component':'${component}','page.url':'https://example.com/dashboard','teamId':'${teamId}','teamName':'${teamName}','userEmail':'${userEmail}','userName':'${userName}'}, 0, '${statusCode}', '', [], [], [], [], [], [], [])`,
      );
    }
  }
  return rows.join(',\n');
}
/**
 * Builds the VALUES tuples for e2e_otel_metrics_gauge.
 *
 * Emits three families of gauge series across [startMs, endMs]:
 *  - per-pod: phase, restarts, container/pod CPU, memory usage & limit util
 *  - per-node: CPU utilization, memory usage, condition_ready
 *  - per-namespace: phase
 * Each series is `samplesPerPod` points at 60s intervals, with each series'
 * start slot spread across the window so data exists throughout it.
 *
 * NOTE(review): the returned row count is far larger than `podCount`
 * (7 metric rows per pod sample, plus node and namespace rows).
 */
function generateK8sGaugeMetrics(
  podCount: number,
  samplesPerPod: number,
  startMs: number,
  endMs: number,
): string {
  const rows: string[] = [];
  const span = endMs - startMs;
  const seriesDurationMs = samplesPerPod * 60000; // 12 min per series
  const stepMs = 60000; // 60 second intervals
  // Generate metrics for each pod; spread each pod's series in the window
  for (let podIdx = 0; podIdx < podCount; podIdx++) {
    const namespace = K8S_NAMESPACES[podIdx % K8S_NAMESPACES.length];
    const node = K8S_NODES[podIdx % K8S_NODES.length];
    const cluster = K8S_CLUSTERS[0];
    const podName = `pod-${namespace}-${podIdx}`;
    const containerName = `container-${podIdx}`;
    // Mix of phases: every 15th pod Failed, every 7th Pending, rest Running
    let phase: number;
    if (podIdx % 15 === 0) {
      phase = K8S_POD_PHASES.FAILED;
    } else if (podIdx % 7 === 0) {
      phase = K8S_POD_PHASES.PENDING;
    } else {
      phase = K8S_POD_PHASES.RUNNING;
    }
    const restarts = podIdx % 7;
    // Slot the pod's series start so all series fit inside [startMs, endMs]
    const podSlotStart =
      podCount > 1
        ? startMs + (podIdx / (podCount - 1)) * (span - seriesDurationMs)
        : startMs;
    for (let sample = 0; sample < samplesPerPod; sample++) {
      const timestampMs = podSlotStart + sample * stepMs;
      const timestampNs = timestampMs * 1000000;
      const _timeUnix = timestampMs / 1000; // unused; kept for reference
      // CPU metrics (percentage, 0-100)
      const cpuUsage = 10 + (podIdx % 40) + Math.sin(sample / 2) * 5;
      const cpuLimit = 100;
      const cpuLimitUtilization = (cpuUsage / cpuLimit) * 100;
      // Memory metrics (bytes)
      const memoryUsage = (100 + (podIdx % 400)) * 1024 * 1024; // 100-500 MB
      const memoryLimit = 1024 * 1024 * 1024; // 1 GB
      const memoryLimitUtilization = (memoryUsage / memoryLimit) * 100;
      const resourceAttrs = `{'k8s.cluster.name':'${cluster}','k8s.namespace.name':'${namespace}','k8s.node.name':'${node}','k8s.pod.name':'${podName}','k8s.pod.uid':'uid-${podName}','k8s.container.name':'${containerName}'}`;
      // k8s.pod.phase
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.pod.phase', 'Pod phase', '', {}, ${timestampNs}, ${timestampNs}, ${phase}, 0, [], [], [], [], [])`,
      );
      // k8s.container.restarts
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.container.restarts', 'Container restarts', '', {}, ${timestampNs}, ${timestampNs}, ${restarts}, 0, [], [], [], [], [])`,
      );
      // container.cpu.utilization (0-100%)
      const containerCpuUtilization =
        5 + (podIdx % 30) + Math.sin(sample / 3) * 3;
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'container.cpu.utilization', 'Container CPU utilization', '%', {}, ${timestampNs}, ${timestampNs}, ${containerCpuUtilization}, 0, [], [], [], [], [])`,
      );
      // k8s.pod.cpu.utilization
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.pod.cpu.utilization', 'Pod CPU utilization', '', {}, ${timestampNs}, ${timestampNs}, ${cpuUsage}, 0, [], [], [], [], [])`,
      );
      // k8s.pod.cpu_limit_utilization
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.pod.cpu_limit_utilization', 'Pod CPU limit utilization', '%', {}, ${timestampNs}, ${timestampNs}, ${cpuLimitUtilization}, 0, [], [], [], [], [])`,
      );
      // k8s.pod.memory.usage
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.pod.memory.usage', 'Pod memory usage', 'bytes', {}, ${timestampNs}, ${timestampNs}, ${memoryUsage}, 0, [], [], [], [], [])`,
      );
      // k8s.pod.memory_limit_utilization
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.pod.memory_limit_utilization', 'Pod memory limit utilization', '%', {}, ${timestampNs}, ${timestampNs}, ${memoryLimitUtilization}, 0, [], [], [], [], [])`,
      );
    }
  }
  // Generate node metrics (spread across window)
  for (let nodeIdx = 0; nodeIdx < K8S_NODES.length; nodeIdx++) {
    const node = K8S_NODES[nodeIdx];
    const cluster = K8S_CLUSTERS[0];
    const nodeSlotStart =
      K8S_NODES.length > 1
        ? startMs +
          (nodeIdx / (K8S_NODES.length - 1)) * (span - seriesDurationMs)
        : startMs;
    for (let sample = 0; sample < samplesPerPod; sample++) {
      const timestampMs = nodeSlotStart + sample * stepMs;
      const timestampNs = timestampMs * 1000000;
      const nodeCpuUsage = 30 + (nodeIdx % 30) + Math.sin(sample / 2) * 10;
      const nodeMemoryUsage = (2 + nodeIdx) * 1024 * 1024 * 1024; // 2-4 GB
      const nodeConditionReady = 1; // 1 = Ready, 0 = NotReady
      const resourceAttrs = `{'k8s.cluster.name':'${cluster}','k8s.node.name':'${node}'}`;
      // k8s.node.cpu.utilization
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.node.cpu.utilization', 'Node CPU utilization', '', {}, ${timestampNs}, ${timestampNs}, ${nodeCpuUsage}, 0, [], [], [], [], [])`,
      );
      // k8s.node.memory.usage
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.node.memory.usage', 'Node memory usage', 'bytes', {}, ${timestampNs}, ${timestampNs}, ${nodeMemoryUsage}, 0, [], [], [], [], [])`,
      );
      // k8s.node.condition_ready
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.node.condition_ready', 'Node condition ready', '', {}, ${timestampNs}, ${timestampNs}, ${nodeConditionReady}, 0, [], [], [], [], [])`,
      );
    }
  }
  // Generate namespace metrics (spread across window)
  for (let nsIdx = 0; nsIdx < K8S_NAMESPACES.length; nsIdx++) {
    const namespace = K8S_NAMESPACES[nsIdx];
    const cluster = K8S_CLUSTERS[0];
    const namespacePhase = 1;
    const nsSlotStart =
      K8S_NAMESPACES.length > 1
        ? startMs +
          (nsIdx / (K8S_NAMESPACES.length - 1)) * (span - seriesDurationMs)
        : startMs;
    for (let sample = 0; sample < samplesPerPod; sample++) {
      const timestampMs = nsSlotStart + sample * stepMs;
      const timestampNs = timestampMs * 1000000;
      const resourceAttrs = `{'k8s.cluster.name':'${cluster}','k8s.namespace.name':'${namespace}'}`;
      // k8s.namespace.phase
      rows.push(
        `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.namespace.phase', 'Namespace phase', '', {}, ${timestampNs}, ${timestampNs}, ${namespacePhase}, 0, [], [], [], [], [])`,
      );
    }
  }
  return rows.join(',\n');
}
/**
 * Builds the VALUES tuples for e2e_otel_metrics_sum.
 *
 * Emits one cumulative, monotonic `k8s.pod.uptime` data point per pod,
 * with the pods' timestamps spread evenly across [startMs, endMs].
 */
function generateK8sSumMetrics(
  podCount: number,
  startMs: number,
  endMs: number,
): string {
  const rows: string[] = [];
  const span = endMs - startMs;
  for (let podIdx = 0; podIdx < podCount; podIdx++) {
    const namespace = K8S_NAMESPACES[podIdx % K8S_NAMESPACES.length];
    const node = K8S_NODES[podIdx % K8S_NODES.length];
    const cluster = K8S_CLUSTERS[0];
    const podName = `pod-${namespace}-${podIdx}`;
    const containerName = `container-${podIdx}`;
    const timestampMs =
      podCount > 1 ? startMs + (podIdx / (podCount - 1)) * span : startMs;
    const timestampNs = timestampMs * 1000000;
    // Pod uptime in seconds (1-10 hours)
    const uptimeSeconds = (1 + (podIdx % 10)) * 3600;
    const resourceAttrs = `{'k8s.cluster.name':'${cluster}','k8s.namespace.name':'${namespace}','k8s.node.name':'${node}','k8s.pod.name':'${podName}','k8s.pod.uid':'uid-${podName}','k8s.container.name':'${containerName}'}`;
    // k8s.pod.uptime (Sum metric)
    // AggregationTemporality: 1 = Delta, 2 = Cumulative (we emit 2, IsMonotonic=true)
    rows.push(
      `(${resourceAttrs}, '', '', '', {}, 0, '', 'k8s-metrics', 'k8s.pod.uptime', 'Pod uptime', 's', {}, ${timestampNs}, ${timestampNs}, ${uptimeSeconds}, 0, 2, true, [], [], [], [], [])`,
    );
  }
  return rows.join(',\n');
}
/**
 * Builds the VALUES tuples for Kubernetes event rows in e2e_otel_logs.
 *
 * Rows are spread evenly across [startMs, endMs]; every third event is a
 * Warning (back-off restart, attributed to a Node), the rest are Normal
 * container-start events attributed to a Pod. Each row embeds a
 * JSON-serialized event object in LogAttributes['object'].
 */
function generateK8sEventLogs(
  count: number,
  startMs: number,
  endMs: number,
): string {
  const rows: string[] = [];
  const span = endMs - startMs;
  for (let i = 0; i < count; i++) {
    const t = count > 1 ? startMs + (i / (count - 1)) * span : startMs;
    const timestampNs = Math.round(t) * 1000000;
    const namespace = K8S_NAMESPACES[i % K8S_NAMESPACES.length];
    const node = K8S_NODES[i % K8S_NODES.length];
    const cluster = K8S_CLUSTERS[0];
    const podName = `pod-${namespace}-${i % 10}`;
    const podUid = `uid-${podName}`;
    const isWarning = i % 3 === 0; // More warning events (33%)
    const severity = isWarning ? 'Warning' : 'Normal';
    const eventType = isWarning ? 'Warning' : 'Normal';
    const message = isWarning
      ? `Back-off restarting failed container ${podName}`
      : `Started container ${podName}`;
    const regardingKind = isWarning ? 'Node' : 'Pod';
    const regardingName = isWarning ? node : podName;
    // Create the event object JSON
    const eventObject = {
      type: eventType,
      regarding: {
        kind: regardingKind,
        name: regardingName,
      },
      note: message,
    };
    // Backslash-escape quotes so the JSON survives embedding inside the
    // single-quoted ClickHouse string literal below
    const eventObjectJson = JSON.stringify(eventObject)
      .replace(/"/g, '\\"')
      .replace(/'/g, "\\'");
    // Include k8s.pod.uid so the infrastructure tab shows the Pod subpanel when this row is
    // clicked (DBInfraPanel requires rowData.__hdx_resource_attributes['k8s.pod.uid']).
    rows.push(
      `('${timestampNs}', '', '', 0, '${severity}', 0, 'k8s-events', '${message}', '', {'k8s.cluster.name':'${cluster}','k8s.namespace.name':'${namespace}','k8s.node.name':'${node}','k8s.pod.name':'${podName}','k8s.pod.uid':'${podUid}','service.name':'k8s-events'}, '', '', '', {}, {'k8s.resource.name':'events','object':'${eventObjectJson}'})`,
    );
  }
  return rows.join(',\n');
}
// CI can be slower, so use a longer timeout
const CLICKHOUSE_READY_TIMEOUT_SECONDS = parseInt(
process.env.E2E_CLICKHOUSE_READY_TIMEOUT || '60',
10,
);
/**
 * Blocks until ClickHouse answers a trivial query, or throws.
 *
 * Issues `SELECT 1` once per second for up to
 * CLICKHOUSE_READY_TIMEOUT_SECONDS attempts, logging progress every 5
 * attempts. On timeout, throws an Error that includes the host and the
 * message of the last connection failure.
 */
async function waitForClickHouse(
  client: ReturnType<typeof createClickHouseClient>,
): Promise<void> {
  console.log('  Waiting for ClickHouse to be ready...');
  console.log(
    `    Attempting connection to: ${DEFAULT_CONFIG.host} (user: ${DEFAULT_CONFIG.user})`,
  );
  let lastFailure: Error | null = null;
  for (let tries = 0; tries < CLICKHOUSE_READY_TIMEOUT_SECONDS; tries++) {
    try {
      await client.query('SELECT 1');
      console.log('  ClickHouse is ready');
      return;
    } catch (err) {
      lastFailure = err instanceof Error ? err : new Error(String(err));
      // Log every 5 seconds
      if (tries % 5 === 0) {
        console.log(
          `    Still waiting... (${tries}/${CLICKHOUSE_READY_TIMEOUT_SECONDS}s)`,
        );
      }
      await new Promise(resolve => setTimeout(resolve, 1000));
    }
  }
  console.error('  Last connection error:', lastFailure?.message);
  throw new Error(
    `ClickHouse not ready after ${CLICKHOUSE_READY_TIMEOUT_SECONDS} seconds. ` +
      `Host: ${DEFAULT_CONFIG.host}. ` +
      `Last error: ${lastFailure?.message || 'Unknown'}`,
  );
}
/**
 * Empties the seeded E2E tables so each run starts from a clean slate.
 *
 * TRUNCATE ... IF EXISTS keeps this safe on a fresh instance where the
 * tables have not been created yet. Tables are truncated sequentially in
 * the same order as before.
 */
async function clearTestData(
  client: ReturnType<typeof createClickHouseClient>,
): Promise<void> {
  console.log('  Clearing existing test data...');
  const tables = [
    'e2e_otel_logs',
    'e2e_otel_traces',
    'e2e_hyperdx_sessions',
    'e2e_otel_metrics_gauge',
    'e2e_otel_metrics_sum',
  ];
  for (const table of tables) {
    await client.query(`TRUNCATE TABLE IF EXISTS default.${table}`);
  }
  console.log('  Existing data cleared');
}
/**
 * Seeds the local ClickHouse instance with all E2E test data.
 *
 * Waits for ClickHouse to accept queries, truncates previous test data,
 * then inserts: plain logs, K8s-attributed logs, traces, session traces,
 * session rows, K8s gauge metrics, K8s sum metrics, and K8s event logs
 * into the `default.e2e_*` tables. Rows are spread across
 * [now - PAST_MS, now + FUTURE_MS].
 *
 * NOTE(review): the "Inserted ${numDataPoints} ..." logs echo the
 * generator's input count, not the actual row count — session traces and
 * gauge metrics emit many more rows than numDataPoints.
 */
export async function seedClickHouse(): Promise<void> {
  console.log('Seeding ClickHouse with test data...');
  const client = createClickHouseClient();
  await waitForClickHouse(client);
  await clearTestData(client);
  const seedRef = Date.now();
  const startMs = seedRef - PAST_MS;
  const endMs = seedRef + FUTURE_MS;
  const numDataPoints = 500;
  // Insert log data
  console.log('  Inserting log data...');
  await client.query(`
    INSERT INTO default.e2e_otel_logs (
      Timestamp, TraceId, SpanId, TraceFlags, SeverityText, SeverityNumber,
      ServiceName, Body, ResourceSchemaUrl, ResourceAttributes, ScopeSchemaUrl,
      ScopeName, ScopeVersion, ScopeAttributes, LogAttributes
    ) VALUES ${generateLogData(numDataPoints, startMs, endMs)}
  `);
  console.log(`  Inserted ${numDataPoints} log entries`);
  // Insert K8s-aware log data (logs with k8s resource attributes for infrastructure correlation)
  console.log('  Inserting K8s log data...');
  await client.query(`
    INSERT INTO default.e2e_otel_logs (
      Timestamp, TraceId, SpanId, TraceFlags, SeverityText, SeverityNumber,
      ServiceName, Body, ResourceSchemaUrl, ResourceAttributes, ScopeSchemaUrl,
      ScopeName, ScopeVersion, ScopeAttributes, LogAttributes
    ) VALUES ${generateK8sLogData(numDataPoints, startMs, endMs)}
  `);
  console.log(`  Inserted ${numDataPoints} K8s log entries`);
  // Insert trace data
  console.log('  Inserting trace data...');
  await client.query(`
    INSERT INTO default.e2e_otel_traces (
      Timestamp, TraceId, SpanId, ParentSpanId, TraceState, SpanName, SpanKind,
      ServiceName, ResourceAttributes, ScopeName, ScopeVersion, SpanAttributes,
      Duration, StatusCode, StatusMessage, \`Events.Timestamp\`, \`Events.Name\`,
      \`Events.Attributes\`, \`Links.TraceId\`, \`Links.SpanId\`, \`Links.TraceState\`,
      \`Links.Attributes\`
    ) VALUES ${generateTraceData(numDataPoints, startMs, endMs)}
  `);
  console.log(`  Inserted ${numDataPoints} trace spans`);
  // Insert session trace data (spans with rum.sessionId for session tracking)
  // NOTE(review): generateSessionTraces emits 5-15 spans per session, so the
  // actual row count is several times numDataPoints.
  console.log('  Inserting session trace data...');
  await client.query(`
    INSERT INTO default.e2e_otel_traces (
      Timestamp, TraceId, SpanId, ParentSpanId, TraceState, SpanName, SpanKind,
      ServiceName, ResourceAttributes, ScopeName, ScopeVersion, SpanAttributes,
      Duration, StatusCode, StatusMessage, \`Events.Timestamp\`, \`Events.Name\`,
      \`Events.Attributes\`, \`Links.TraceId\`, \`Links.SpanId\`, \`Links.TraceState\`,
      \`Links.Attributes\`
    ) VALUES ${generateSessionTraces(numDataPoints, startMs, endMs)}
  `);
  console.log(`  Inserted ${numDataPoints} session trace data`);
  // Insert session data
  console.log('  Inserting session data...');
  await client.query(`
    INSERT INTO default.e2e_hyperdx_sessions (
      Timestamp, TraceId, SpanId, TraceFlags, SeverityText, SeverityNumber,
      ServiceName, Body, ResourceSchemaUrl, ResourceAttributes, ScopeSchemaUrl,
      ScopeName, ScopeVersion, ScopeAttributes, LogAttributes
    ) VALUES ${generateSessionData(numDataPoints, startMs, endMs)}
  `);
  console.log(`  Inserted ${numDataPoints} session entries`);
  // Insert Kubernetes gauge metrics (pods and nodes)
  // NOTE(review): 12 samples per series across pods/nodes/namespaces — actual
  // row count is much larger than numDataPoints.
  console.log('  Inserting Kubernetes gauge metrics...');
  await client.query(`
    INSERT INTO default.e2e_otel_metrics_gauge (
      ResourceAttributes, ResourceSchemaUrl, ScopeName, ScopeVersion, ScopeAttributes,
      ScopeDroppedAttrCount, ScopeSchemaUrl, ServiceName, MetricName, MetricDescription,
      MetricUnit, Attributes, StartTimeUnix, TimeUnix, Value, Flags,
      \`Exemplars.FilteredAttributes\`, \`Exemplars.TimeUnix\`, \`Exemplars.Value\`,
      \`Exemplars.SpanId\`, \`Exemplars.TraceId\`
    ) VALUES ${generateK8sGaugeMetrics(numDataPoints, 12, startMs, endMs)}
  `);
  console.log(
    `  Inserted ${numDataPoints} Kubernetes gauge metrics (pods and nodes)`,
  );
  // Insert Kubernetes sum metrics (uptime)
  console.log('  Inserting Kubernetes sum metrics...');
  await client.query(`
    INSERT INTO default.e2e_otel_metrics_sum (
      ResourceAttributes, ResourceSchemaUrl, ScopeName, ScopeVersion, ScopeAttributes,
      ScopeDroppedAttrCount, ScopeSchemaUrl, ServiceName, MetricName, MetricDescription,
      MetricUnit, Attributes, StartTimeUnix, TimeUnix, Value, Flags,
      AggregationTemporality, IsMonotonic,
      \`Exemplars.FilteredAttributes\`, \`Exemplars.TimeUnix\`, \`Exemplars.Value\`,
      \`Exemplars.SpanId\`, \`Exemplars.TraceId\`
    ) VALUES ${generateK8sSumMetrics(numDataPoints, startMs, endMs)}
  `);
  console.log(`  Inserted ${numDataPoints} Kubernetes sum metrics (uptime)`);
  // Insert Kubernetes event logs
  console.log('  Inserting Kubernetes event logs...');
  await client.query(`
    INSERT INTO default.e2e_otel_logs (
      Timestamp, TraceId, SpanId, TraceFlags, SeverityText, SeverityNumber,
      ServiceName, Body, ResourceSchemaUrl, ResourceAttributes, ScopeSchemaUrl,
      ScopeName, ScopeVersion, ScopeAttributes, LogAttributes
    ) VALUES ${generateK8sEventLogs(numDataPoints, startMs, endMs)}
  `);
  console.log(`  Inserted ${numDataPoints} Kubernetes event logs`);
  console.log('ClickHouse seeding complete');
}
// CLI entry point: running this file directly (e.g. via ts-node) seeds the
// database and exits with status 0 on success or 1 on failure.
if (require.main === module) {
  (async () => {
    try {
      await seedClickHouse();
      console.log('Seeding completed successfully');
      process.exit(0);
    } catch (error) {
      console.error('Seeding failed:', error);
      process.exit(1);
    }
  })();
}

View file

@ -1,158 +1,46 @@
import fs from 'fs';
import path from 'path';
import { expect, test as base } from '@playwright/test';
const USE_FULLSTACK = process.env.E2E_FULLSTACK === 'true';
// Single source of truth: e2e-fixtures.json (connections/sources). API gets them via run-api-with-fixtures.js.
const E2E_FIXTURES_PATH = path.join(__dirname, '../fixtures/e2e-fixtures.json');
function loadE2EFixtures(): { connections: unknown[]; sources: unknown[] } {
try {
const raw = fs.readFileSync(E2E_FIXTURES_PATH, 'utf8');
const fixture = JSON.parse(raw);
return {
connections: Array.isArray(fixture.connections)
? fixture.connections
: [],
sources: Array.isArray(fixture.sources) ? fixture.sources : [],
};
} catch {
return { connections: [], sources: [] };
}
}
const e2eFixtures = loadE2EFixtures();
// Extend the base test to automatically handle Tanstack devtools
export const test = base.extend({
page: async ({ page }, fn) => {
// Note: page.addInitScript runs in the browser context, which cannot access Node.js
// environment variables directly. We pass USE_FULLSTACK as a parameter so the browser
// script can determine whether to set up demo connections (local mode) or rely on
// API-provided connections (full-stack mode).
await page.addInitScript(isFullstack => {
window.localStorage.setItem('TanstackQueryDevtools.open', 'false');
// Only set up demo connections for local mode
if (!isFullstack) {
// environment variables directly. We pass USE_FULLSTACK and connection/sources from
// e2e-fixtures.json so local mode uses the same data as full-stack.
await page.addInitScript(
(arg: unknown[]) => {
const [connections, sources] = arg;
window.localStorage.setItem('TanstackQueryDevtools.open', 'false');
window.sessionStorage.setItem(
'connections',
'[{"name":"Demo","host":"https://sql-clickhouse.clickhouse.com","username":"otel_demo","password":"","id":"local"}]',
JSON.stringify(connections),
);
window.localStorage.setItem(
'hdx-local-source',
JSON.stringify([
{
kind: 'log',
name: 'Demo Logs',
connection: 'local',
from: { databaseName: 'otel_v2', tableName: 'otel_logs' },
timestampValueExpression: 'TimestampTime',
defaultTableSelectExpression:
'Timestamp, ServiceName, SeverityText, Body',
serviceNameExpression: 'ServiceName',
severityTextExpression: 'SeverityText',
eventAttributesExpression: 'LogAttributes',
resourceAttributesExpression: 'ResourceAttributes',
traceIdExpression: 'TraceId',
spanIdExpression: 'SpanId',
implicitColumnExpression: 'Body',
displayedTimestampValueExpression: 'Timestamp',
id: 'l956912644',
sessionSourceId: 'l1155456738',
traceSourceId: 'l1073165478',
metricSourceId: 'l-517210123',
},
{
kind: 'trace',
name: 'Demo Traces',
connection: 'local',
from: { databaseName: 'otel_v2', tableName: 'otel_traces' },
timestampValueExpression: 'Timestamp',
defaultTableSelectExpression:
'Timestamp, ServiceName, StatusCode, round(Duration / 1e6), SpanName',
serviceNameExpression: 'ServiceName',
eventAttributesExpression: 'SpanAttributes',
resourceAttributesExpression: 'ResourceAttributes',
traceIdExpression: 'TraceId',
spanIdExpression: 'SpanId',
implicitColumnExpression: 'SpanName',
durationExpression: 'Duration',
durationPrecision: 9,
parentSpanIdExpression: 'ParentSpanId',
spanKindExpression: 'SpanKind',
spanNameExpression: 'SpanName',
logSourceId: 'l956912644',
statusCodeExpression: 'StatusCode',
statusMessageExpression: 'StatusMessage',
spanEventsValueExpression: 'Events',
id: 'l1073165478',
metricSourceId: 'l-517210123',
sessionSourceId: 'l1155456738',
},
{
kind: 'metric',
name: 'Demo Metrics',
connection: 'local',
from: { databaseName: 'otel_v2', tableName: '' },
timestampValueExpression: 'TimeUnix',
serviceNameExpression: 'ServiceName',
metricTables: {
gauge: 'otel_metrics_gauge',
histogram: 'otel_metrics_histogram',
sum: 'otel_metrics_sum',
summary: 'otel_metrics_summary',
'exponential histogram': 'otel_metrics_exponential_histogram',
},
resourceAttributesExpression: 'ResourceAttributes',
logSourceId: 'l956912644',
id: 'l-517210123',
},
{
kind: 'session',
name: 'Demo Sessions',
connection: 'local',
from: { databaseName: 'otel_v2', tableName: 'hyperdx_sessions' },
timestampValueExpression: 'TimestampTime',
defaultTableSelectExpression: 'Timestamp, ServiceName, Body',
serviceNameExpression: 'ServiceName',
severityTextExpression: 'SeverityText',
eventAttributesExpression: 'LogAttributes',
resourceAttributesExpression: 'ResourceAttributes',
traceSourceId: 'l1073165478',
traceIdExpression: 'TraceId',
spanIdExpression: 'SpanId',
implicitColumnExpression: 'Body',
id: 'l1155456738',
},
{
kind: 'trace',
name: 'ClickPy Traces',
connection: 'local',
from: { databaseName: 'otel_clickpy', tableName: 'otel_traces' },
timestampValueExpression: 'Timestamp',
defaultTableSelectExpression:
'Timestamp, ServiceName, StatusCode, round(Duration / 1e6), SpanName',
serviceNameExpression: 'ServiceName',
eventAttributesExpression: 'SpanAttributes',
resourceAttributesExpression: 'ResourceAttributes',
traceIdExpression: 'TraceId',
spanIdExpression: 'SpanId',
implicitColumnExpression: 'SpanName',
durationExpression: 'Duration',
durationPrecision: 9,
parentSpanIdExpression: 'ParentSpanId',
spanKindExpression: 'SpanKind',
spanNameExpression: 'SpanName',
statusCodeExpression: 'StatusCode',
statusMessageExpression: 'StatusMessage',
spanEventsValueExpression: 'Events',
id: 'l-1156687249',
sessionSourceId: 'l-1709901146',
},
{
kind: 'session',
name: 'ClickPy Sessions',
connection: 'local',
from: {
databaseName: 'otel_clickpy',
tableName: 'hyperdx_sessions',
},
timestampValueExpression: 'TimestampTime',
defaultTableSelectExpression: 'Timestamp, ServiceName, Body',
serviceNameExpression: 'ServiceName',
severityTextExpression: 'SeverityText',
eventAttributesExpression: 'LogAttributes',
resourceAttributesExpression: 'ResourceAttributes',
traceSourceId: 'l-1156687249',
traceIdExpression: 'TraceId',
spanIdExpression: 'SpanId',
implicitColumnExpression: 'Body',
id: 'l-1709901146',
},
]),
JSON.stringify(sources),
);
}
}, USE_FULLSTACK);
},
[e2eFixtures.connections, e2eFixtures.sources],
);
await fn(page);
},
});

View file

@ -1,3 +1,4 @@
// We may want to add logs/metrics source names here in the future.
// Names must match the sources seeded into the local ClickHouse instance
// by the e2e global setup (see global-setup-fullstack.ts / global-setup-local.ts).
export const DEFAULT_SESSIONS_SOURCE_NAME = 'E2E Sessions';
export const DEFAULT_TRACES_SOURCE_NAME = 'E2E Traces';
export const DEFAULT_METRICS_SOURCE_NAME = 'E2E Metrics';
export const DEFAULT_LOGS_SOURCE_NAME = 'E2E Logs';

View file

@ -1,7 +1,25 @@
#!/bin/bash
# Run E2E tests in full-stack or local mode
# Full-stack mode (default): MongoDB + API + local ClickHouse
# Local mode: Frontend + local ClickHouse (no MongoDB/API)
#
# Usage:
# ./scripts/test-e2e.sh # Run all tests in fullstack mode
# ./scripts/test-e2e.sh --local # Run in local mode (frontend + ClickHouse only)
# ./scripts/test-e2e.sh --keep-running # Keep containers running after tests (fast iteration!)
# ./scripts/test-e2e.sh --ui # Run with Playwright UI
# ./scripts/test-e2e.sh --last-failed # Run only failed tests
# ./scripts/test-e2e.sh --headed # Run with visible browser
# ./scripts/test-e2e.sh --debug # Run in debug mode
# ./scripts/test-e2e.sh --grep "dashboard" # Run tests matching pattern
#
# Development workflow (recommended):
# ./scripts/test-e2e.sh --keep-running --ui # Start containers and open UI
# # Make changes, tests auto-rerun in UI mode
# # When done:
# docker compose -p e2e -f packages/app/tests/e2e/docker-compose.yml down -v
#
# All Playwright flags are passed through automatically
set -e
@ -12,11 +30,13 @@ DOCKER_COMPOSE_FILE="$REPO_ROOT/packages/app/tests/e2e/docker-compose.yml"
# Configuration constants
readonly MAX_MONGODB_WAIT_ATTEMPTS=15
readonly MONGODB_WAIT_DELAY_SECONDS=1
readonly MAX_CLICKHOUSE_WAIT_ATTEMPTS=30
readonly CLICKHOUSE_WAIT_DELAY_SECONDS=1
# Parse arguments.
# --local               : frontend-only mode (no MongoDB/API)
# --keep-running        : leave containers up after tests (alias: --no-cleanup)
# anything else         : forwarded verbatim to Playwright (--ui, --grep, ...)
LOCAL_MODE=false
SKIP_CLEANUP=false
PLAYWRIGHT_FLAGS=()

while [[ $# -gt 0 ]]; do
  case $1 in
    --local)
      LOCAL_MODE=true
      shift
      ;;
    --keep-running|--no-cleanup)
      SKIP_CLEANUP=true
      shift
      ;;
    *)
      # Pass any other flags through to Playwright
      PLAYWRIGHT_FLAGS+=("$1")
      shift
      ;;
  esac
done
# Tear down every e2e container (MongoDB, API, ClickHouse) and remove their
# volumes so the next run starts from a clean, freshly-seeded state.
# Safe to call repeatedly; `docker compose down` is idempotent.
cleanup_services() {
  echo "Stopping E2E services and removing volumes..."
  docker compose -p e2e -f "$DOCKER_COMPOSE_FILE" down -v
}
@ -72,6 +78,35 @@ check_mongodb_health() {
" 2>&1
}
check_clickhouse_health() {
  # Health check from HOST perspective (not inside container)
  # This ensures the port is actually accessible to Playwright.
  # Prefer curl; fall back to wget for environments without curl.
  local ping_url="http://localhost:8123/ping"
  if curl -sf "$ping_url" >/dev/null 2>&1; then
    return 0
  fi
  wget --spider -q "$ping_url" 2>&1
}
# Poll the ClickHouse HTTP ping endpoint until it answers, or give up after
# MAX_CLICKHOUSE_WAIT_ATTEMPTS polls spaced CLICKHOUSE_WAIT_DELAY_SECONDS apart.
# Returns 0 once healthy, 1 on timeout (with a hint for inspecting logs).
wait_for_clickhouse() {
  echo "Waiting for ClickHouse to be ready..."
  local attempt
  for ((attempt = 1; attempt <= MAX_CLICKHOUSE_WAIT_ATTEMPTS; attempt++)); do
    if check_clickhouse_health >/dev/null 2>&1; then
      echo "ClickHouse is ready"
      return 0
    fi
    if ((attempt == MAX_CLICKHOUSE_WAIT_ATTEMPTS)); then
      local total_wait=$((MAX_CLICKHOUSE_WAIT_ATTEMPTS * CLICKHOUSE_WAIT_DELAY_SECONDS))
      echo "ClickHouse failed to become ready after $total_wait seconds"
      echo "Try running: docker compose -p e2e -f $DOCKER_COMPOSE_FILE logs ch-server"
      return 1
    fi
    echo "Waiting for ClickHouse... ($attempt/$MAX_CLICKHOUSE_WAIT_ATTEMPTS)"
    sleep $CLICKHOUSE_WAIT_DELAY_SECONDS
  done
}
wait_for_mongodb() {
echo "Waiting for MongoDB to be ready..."
local attempt=1
@ -106,35 +141,56 @@ wait_for_mongodb() {
done
}
run_local_mode() {
echo "Running E2E tests in local mode (frontend only)..."
cd "$REPO_ROOT/packages/app"
yarn test:e2e --local $ADDITIONAL_FLAGS
# Main execution
# Install the container-teardown trap unless the user asked to keep services
# running (--keep-running / --no-cleanup).
# NOTE: trap on EXIT only. Under `set -e` the EXIT trap already fires on error
# exits; trapping ERR as well would run cleanup_services twice (once for the
# failing command's ERR trap, again at shell exit).
setup_cleanup_trap() {
  if [ "$SKIP_CLEANUP" = false ]; then
    trap cleanup_services EXIT
  else
    echo "⚠️  Skipping cleanup - containers will remain running"
    echo "   Use 'docker compose -p e2e -f $DOCKER_COMPOSE_FILE down -v' to stop them manually"
  fi
}
run_fullstack_mode() {
echo "Running E2E tests in full-stack mode (MongoDB + API + demo ClickHouse)..."
# Start the local ClickHouse container and block until its HTTP endpoint is
# reachable from the host; exits the script on timeout.
# (Removed stale `trap cleanup_mongodb EXIT ERR` residue: cleanup_mongodb no
# longer exists and the stray trap would clobber the one installed by
# setup_cleanup_trap.)
setup_clickhouse() {
  echo "Starting ClickHouse..."
  docker compose -p e2e -f "$DOCKER_COMPOSE_FILE" up -d ch-server

  if ! wait_for_clickhouse; then
    exit 1
  fi

  # Note: ClickHouse seeding is handled by Playwright global setup
  # - Fullstack mode: global-setup-fullstack.ts
  # - Local mode: global-setup-local.ts
}
# Start MongoDB
echo "Starting MongoDB for full-stack tests..."
docker compose -p e2e -f "$DOCKER_COMPOSE_FILE" up -d
# Launch the Playwright suite from packages/app, adding --local when the
# frontend-only mode was requested; user-supplied flags are appended verbatim.
run_tests() {
  cd "$REPO_ROOT/packages/app"
  local mode_args=()
  if [ "$LOCAL_MODE" = true ]; then
    echo "Running tests in local mode (frontend + ClickHouse)..."
    mode_args+=(--local)
  else
    echo "Running tests in full-stack mode (MongoDB + API + ClickHouse)..."
  fi
  yarn test:e2e "${mode_args[@]}" "${PLAYWRIGHT_FLAGS[@]}"
}
# Main execution.
# (Removed diff residue: stray `}` from the deleted run_fullstack_mode, a
# duplicate `yarn test:e2e $ADDITIONAL_FLAGS` invocation, and calls to the
# removed run_local_mode/run_fullstack_mode helpers; restored the missing `fi`
# closing the LOCAL_MODE guard.)

# Set up cleanup trap
setup_cleanup_trap

# Always start and seed ClickHouse (shared by both modes)
setup_clickhouse

# Conditionally start MongoDB for full-stack mode
if [ "$LOCAL_MODE" = false ]; then
  echo "Starting MongoDB for full-stack mode..."
  docker compose -p e2e -f "$DOCKER_COMPOSE_FILE" up -d db

  if ! wait_for_mongodb; then
    exit 1
  fi
fi

# Run tests
run_tests