feat: allow applying session settings to queries (#1609)

Closes HDX-3154

This PR adds a feature that allows the user to add settings to a source. These settings are then added to the end of every query that is rendered through the `renderChartConfig` function, along with any other chart-specific settings.

See: https://clickhouse.com/docs/sql-reference/statements/select#settings-in-select-query

Most of the work was to pass the `source` or `source.querySettings` value through the code to the `renderChartConfig` calls and to update the related tests. There are also some UI changes in the `SourceForm` components.

`SQLParser.Parser` from `node-sql-parser` throws an error when it encounters a SETTINGS clause in a SQL string, so a function was added to remove that clause from any SQL that is passed to the parser. It assumes that the SETTINGS clause is always at the end of the SQL string: it removes everything from the start of the SETTINGS clause to the end of the string.


https://github.com/user-attachments/assets/7ac3b852-2c86-4431-88bc-106f982343bb
This commit is contained in:
Karl Power 2026-01-21 17:07:30 +01:00 committed by GitHub
parent 00854da8e2
commit bc8c4eec9a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
37 changed files with 902 additions and 126 deletions

View file

@ -0,0 +1,7 @@
---
"@hyperdx/common-utils": minor
"@hyperdx/api": minor
"@hyperdx/app": minor
---
feat: allow applying session settings to queries

View file

@ -473,6 +473,17 @@ Array [
]
`;
exports[`renderChartConfig Query settings handles the the query settings 1`] = `
Array [
Object {
"Body": "Oh no! Something went wrong!",
},
Object {
"Body": "This is a test message.",
},
]
`;
exports[`renderChartConfig aggFn numeric agg functions should handle numeric values as strings 1`] = `
Array [
Object {

View file

@ -1,13 +1,12 @@
// TODO: we might want to move this test file to common-utils package
import { ChSql } from '@hyperdx/common-utils/dist/clickhouse';
import { ChSql, chSql } from '@hyperdx/common-utils/dist/clickhouse';
import { ClickhouseClient } from '@hyperdx/common-utils/dist/clickhouse/node';
import { getMetadata } from '@hyperdx/common-utils/dist/core/metadata';
import { renderChartConfig } from '@hyperdx/common-utils/dist/core/renderChartConfig';
import {
AggregateFunctionSchema,
DerivedColumn,
MetricsDataType,
QuerySettings,
} from '@hyperdx/common-utils/dist/types';
import _ from 'lodash';
import ms from 'ms';
@ -36,6 +35,13 @@ const TEST_METRIC_TABLES = {
'exponential histogram': DEFAULT_METRICS_TABLE.EXPONENTIAL_HISTOGRAM,
};
const querySettings: QuerySettings = [
{ setting: 'optimize_read_in_order', value: '0' },
{ setting: 'cast_keep_nullable', value: '1' },
{ setting: 'count_distinct_implementation', value: 'uniqCombined64' },
{ setting: 'async_insert_busy_timeout_min_ms', value: '20000' },
];
describe('renderChartConfig', () => {
const server = getServer();
@ -152,6 +158,7 @@ describe('renderChartConfig', () => {
timestampValueExpression: 'ts',
},
metadata,
querySettings,
);
const res = await queryData(query);
expect(res).toMatchSnapshot();
@ -190,6 +197,7 @@ describe('renderChartConfig', () => {
timestampValueExpression: 'ts',
},
metadata,
querySettings,
);
const res = await queryData(query);
expect(res).toMatchSnapshot();
@ -226,16 +234,10 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const resp = await clickhouseClient
.query<'JSON'>({
query: query.sql,
query_params: query.params,
format: 'JSON',
})
.then(res => res.json() as any);
expect(resp.data).toMatchSnapshot();
expect(await queryData(query)).toMatchSnapshot();
});
it('simple select + group by query logs', async () => {
@ -272,6 +274,7 @@ describe('renderChartConfig', () => {
groupBy: 'ServiceName',
},
metadata,
querySettings,
);
expect(await queryData(query)).toMatchSnapshot();
});
@ -342,6 +345,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(avgQuery)).toMatchSnapshot();
const maxQuery = await renderChartConfig(
@ -363,6 +367,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(maxQuery)).toMatchSnapshot();
const sumQuery = await renderChartConfig(
@ -384,6 +389,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(sumQuery)).toMatchSnapshot();
});
@ -409,6 +415,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(query)).toMatchSnapshot();
});
@ -434,6 +441,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(query)).toMatchSnapshot();
});
@ -459,6 +467,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(query)).toMatchSnapshot();
});
@ -485,6 +494,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(query)).toMatchSnapshot();
});
@ -657,6 +667,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(query)).toMatchSnapshot();
});
@ -681,6 +692,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(query)).toMatchSnapshot();
});
@ -705,6 +717,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(query)).toMatchSnapshot();
});
@ -749,6 +762,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(minQuery)).toMatchSnapshot('minSum');
@ -771,6 +785,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
expect(await queryData(maxQuery)).toMatchSnapshot('maxSum');
});
@ -1151,6 +1166,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const res = await queryData(query);
expect(res).toMatchSnapshot();
@ -1200,6 +1216,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const res = await queryData(query);
expect(res).toMatchSnapshot();
@ -1250,6 +1267,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const res = await queryData(query);
expect(res).toMatchSnapshot();
@ -1294,6 +1312,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const res = await queryData(query);
expect(res).toMatchSnapshot();
@ -1320,6 +1339,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const res = await queryData(query);
expect(res).toMatchSnapshot();
@ -1347,6 +1367,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const res = await queryData(query);
expect(res).toMatchSnapshot();
@ -1374,6 +1395,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const res = await queryData(query);
expect(res).toMatchSnapshot();
@ -1444,6 +1466,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const res = await queryData(query);
@ -1480,6 +1503,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const res = await queryData(query);
@ -1509,6 +1533,7 @@ describe('renderChartConfig', () => {
connection: connection.id,
},
metadata,
querySettings,
);
const res = await queryData(query);
@ -1520,4 +1545,43 @@ describe('renderChartConfig', () => {
expect(query.sql).not.toMatch(/MetricName IN /);
});
});
describe('Query settings', () => {
it('handles the the query settings', async () => {
const now = new Date('2023-11-16T22:12:00.000Z');
await bulkInsertLogs([
{
ServiceName: 'api',
Timestamp: now,
SeverityText: 'error',
Body: 'Oh no! Something went wrong!',
},
{
ServiceName: 'api',
Timestamp: now,
SeverityText: 'info',
Body: 'This is a test message.',
},
]);
const query = await renderChartConfig(
{
select: [{ valueExpression: 'Body' }],
from: logSource.from,
where: '',
timestampValueExpression: 'Timestamp',
connection: connection.id,
settings: chSql`max_result_rows = 1`,
},
metadata,
[...querySettings, { setting: 'result_overflow_mode', value: 'break' }],
);
const res = await queryData(query);
// ensures `result_overflow_mode = break` is applied, otherwise query would error.
expect(res).toHaveLength(2);
expect(res).toMatchSnapshot();
});
});
});

View file

@ -86,6 +86,17 @@ export const Source = mongoose.model<ISource>(
},
default: undefined,
},
querySettings: {
type: [
{
setting: { type: String, required: true },
value: { type: String, required: true },
},
],
default: undefined,
maxlength: 10,
},
},
{
toJSON: { virtuals: true },

View file

@ -274,6 +274,7 @@ router.post(
const keyValues = await metadata.getKeyValues({
chartConfig: cc,
keys: keysToFetch.map(f => f.key),
source,
});
const anthropic = createAnthropic({

View file

@ -588,6 +588,7 @@ router.post(
const result = await clickhouseClient.queryChartConfig({
config: chartConfig,
metadata,
querySettings: source.querySettings,
});
return {

View file

@ -448,6 +448,7 @@ export const processAlert = async (
const checksData = await clickhouseClient.queryChartConfig({
config: optimizedChartConfig,
metadata,
querySettings: source.querySettings,
});
logger.info(

View file

@ -602,7 +602,11 @@ ${targetTemplate}`;
let truncatedResults = '';
try {
const query = await renderChartConfig(chartConfig, metadata);
const query = await renderChartConfig(
chartConfig,
metadata,
source.querySettings,
);
const raw = await clickhouseClient
.query<'CSV'>({
query: query.sql,

View file

@ -4,7 +4,6 @@ import logger from '@/utils/logger';
export default class PingPongTask implements HdxTask<PingTaskArgs> {
constructor(private args: PingTaskArgs) {}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
async execute(): Promise<void> {
logger.info(`
O .

View file

@ -1,9 +1,9 @@
import { useCallback, useState } from 'react';
import Head from 'next/head';
import { CopyToClipboard } from 'react-copy-to-clipboard';
import { SubmitHandler, useForm, useWatch } from 'react-hook-form';
import { SubmitHandler, useForm } from 'react-hook-form';
import { DEFAULT_METADATA_MAX_ROWS_TO_READ } from '@hyperdx/common-utils/dist/core/metadata';
import { TeamClickHouseSettings } from '@hyperdx/common-utils/dist/types';
import { type TeamClickHouseSettings } from '@hyperdx/common-utils/dist/types';
import {
Box,
Button,

View file

@ -8,6 +8,7 @@ import {
formatAttributeClause,
formatNumber,
getMetricTableName,
mapKeyBy,
orderByStringToSortingState,
sortingStateToOrderByString,
stripTrailingSlash,
@ -630,3 +631,25 @@ describe('orderByStringToSortingState', () => {
expect(roundTripSort).toEqual(originalSort);
});
});
describe('mapKeyBy', () => {
it('returns a map', () => {
const result = mapKeyBy([{ id: 'a' }, { id: 'b' }], 'id');
expect(result).toBeInstanceOf(Map);
});
it('adds each item to the map, keyed by the provided `key` param', () => {
const data = [{ id: 'a' }, { id: 'b' }];
const result = mapKeyBy(data, 'id');
expect(result.size).toBe(2);
expect(result.get('a')).toBe(data.at(0));
expect(result.get('b')).toBe(data.at(1));
});
it('overwrites items with the same key', () => {
const data = [{ id: 'a' }, { id: 'a' }];
const result = mapKeyBy(data, 'id');
expect(result.size).toBe(1);
expect(result.get('a')).toBe(data.at(1));
});
});

View file

@ -7,7 +7,7 @@ import type {
PresetDashboardFilter,
} from '@hyperdx/common-utils/dist/types';
import type { UseQueryOptions } from '@tanstack/react-query';
import { useMutation, useQuery } from '@tanstack/react-query';
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query';
import { IS_LOCAL_MODE } from './config';
import { Dashboard } from './dashboard';

View file

@ -99,6 +99,7 @@ export const useV2LogBatch = <T = any,>(
orderBy: `${logSource.timestampValueExpression} ${order}`,
},
metadata,
logSource.querySettings,
);
const json = await clickhouseClient

View file

@ -1,4 +1,10 @@
import React, { useCallback, useEffect, useRef, useState } from 'react';
import React, {
Fragment,
useCallback,
useEffect,
useRef,
useState,
} from 'react';
import {
Control,
Controller,
@ -1582,6 +1588,7 @@ export function TableSourceForm({
databaseName: 'default',
tableName: '',
},
querySettings: source?.querySettings,
},
// TODO: HDX-1768 remove type assertion
values: source as TSourceUnion,
@ -1937,6 +1944,12 @@ export function TableSourceForm({
defaultValue: source?.connection,
});
const {
fields: querySettingFields,
append: appendSetting,
remove: removeSetting,
} = useFieldArray({ control, name: 'querySettings' });
return (
<div
style={
@ -1999,6 +2012,66 @@ export function TableSourceForm({
/>
</FormRow>
)}
<FormRow
label={
<Anchor
href="https://clickhouse.com/docs/operations/settings/settings"
size="sm"
target="_blank"
>
Query Settings
</Anchor>
}
helpText="Query-level Session Settings that will be added to each query for this source."
>
<Grid columns={11}>
{querySettingFields.map((field, index) => (
<Fragment key={field.id}>
<Grid.Col span={5} pe={0}>
<InputControlled
placeholder="Setting"
control={control}
name={`querySettings.${index}.setting`}
/>
</Grid.Col>
<Grid.Col span={5} pe={0}>
<InputControlled
placeholder="Value"
control={control}
name={`querySettings.${index}.value`}
/>
</Grid.Col>
<Grid.Col span={1} ps={0}>
<Flex align="center" justify="center" gap="sm" h="100%">
<ActionIcon
variant="subtle"
color="gray"
title="Remove setting"
onClick={() => removeSetting(index)}
>
<IconTrash size={16} />
</ActionIcon>
</Flex>
</Grid.Col>
</Fragment>
))}
</Grid>
<Button
variant="secondary"
size="sm"
color="gray"
mt="md"
disabled={querySettingFields.length >= 10}
onClick={() => {
if (querySettingFields.length < 10) {
appendSetting({ setting: '', value: '' });
}
}}
>
<IconCirclePlus size={14} className="me-2" />
Add Setting
</Button>
</FormRow>
</Stack>
<TableModelForm control={control} setValue={setValue} kind={kind} />
<Group justify="flex-end" mt="lg">

View file

@ -15,6 +15,7 @@ import {
AggregateFunction,
ChartConfigWithOptDateRange,
DerivedColumn,
QuerySettings,
SQLInterval,
} from '@hyperdx/common-utils/dist/types';
@ -97,6 +98,7 @@ const buildMTViewDDL = (name: string, table: string, query: ChSql) => {
export const buildMTViewSelectQuery = async (
chartConfig: ChartConfigWithOptDateRange,
metadata: Metadata,
querySettings: QuerySettings | undefined,
customGranularity?: SQLInterval,
) => {
const _config = {
@ -116,7 +118,7 @@ export const buildMTViewSelectQuery = async (
orderBy: undefined,
limit: undefined,
};
const mtViewSQL = await renderChartConfig(_config, metadata);
const mtViewSQL = await renderChartConfig(_config, metadata, querySettings);
const mtViewSQLHash = objectHash.sha1(mtViewSQL);
const mtViewName = `${chartConfig.from.tableName}_mv_${mtViewSQLHash}`;
const renderMTViewConfig = {
@ -148,7 +150,11 @@ export const buildMTViewSelectQuery = async (
),
renderMTViewConfig: async () => {
try {
return await renderChartConfig(renderMTViewConfig, metadata);
return await renderChartConfig(
renderMTViewConfig,
metadata,
querySettings,
);
} catch (e) {
console.error('Failed to render MTView config', e);
return null;

View file

@ -446,6 +446,16 @@ describe('useDashboardFilterKeyValues', () => {
limit: 10000,
disableRowLimit: true,
signal: expect.any(AbortSignal),
source: {
connection: 'clickhouse-conn',
from: {
databaseName: 'telemetry',
tableName: 'logs',
},
id: 'logs-source',
name: 'Logs',
timestampValueExpression: 'timestamp',
},
});
});

View file

@ -17,6 +17,7 @@ import { format } from '@hyperdx/common-utils/dist/sqlFormatter';
import {
ChartConfigWithDateRange,
ChartConfigWithOptDateRange,
QuerySettings,
} from '@hyperdx/common-utils/dist/types';
import {
useQuery,
@ -128,6 +129,7 @@ async function* fetchDataInChunks({
enableQueryChunking = false,
enableParallelQueries = false,
metadata,
querySettings,
}: {
config: ChartConfigWithOptDateRange;
clickhouseClient: ClickhouseClient;
@ -135,6 +137,7 @@ async function* fetchDataInChunks({
enableQueryChunking?: boolean;
enableParallelQueries?: boolean;
metadata: Metadata;
querySettings: QuerySettings | undefined;
}) {
const windows =
enableQueryChunking && shouldUseChunking(config)
@ -143,7 +146,7 @@ async function* fetchDataInChunks({
if (IS_MTVIEWS_ENABLED) {
const { dataTableDDL, mtViewDDL, renderMTViewConfig } =
await buildMTViewSelectQuery(config, metadata);
await buildMTViewSelectQuery(config, metadata, querySettings);
// TODO: show the DDLs in the UI so users can run commands manually
// eslint-disable-next-line no-console
console.log('dataTableDDL:', dataTableDDL);
@ -167,6 +170,7 @@ async function* fetchDataInChunks({
opts: {
abort_signal: signal,
},
querySettings,
}),
};
});
@ -208,6 +212,7 @@ async function* fetchDataInChunks({
opts: {
abort_signal: signal,
},
querySettings,
});
yield { chunk: result, isComplete: i === windows.length - 1 };
@ -260,6 +265,10 @@ export function useQueriedChartConfig(
placeholderData: undefined,
});
const { data: source, isLoading: isSourceLoading } = useSource({
id: config.source,
});
const query = useQuery<TQueryFnData, ClickHouseQueryError | Error>({
// Include enableQueryChunking in the query key to ensure that queries with the
// same config but different enableQueryChunking values do not share a query
@ -291,6 +300,7 @@ export function useQueriedChartConfig(
enableQueryChunking: options?.enableQueryChunking,
enableParallelQueries: options?.enableParallelQueries,
metadata,
querySettings: source?.querySettings,
});
let accumulatedChunks: TQueryFnData = emptyValue;
@ -322,7 +332,7 @@ export function useQueriedChartConfig(
retry: 1,
refetchOnWindowFocus: false,
...options,
enabled: enabled && !isLoadingMVOptimization,
enabled: enabled && !isLoadingMVOptimization && !isSourceLoading,
});
if (query.isError && options?.onError) {
@ -348,15 +358,23 @@ export function useRenderedSqlChartConfig(
placeholderData: undefined,
});
const { data: source, isLoading: isSourceLoading } = useSource({
id: config.source,
});
const query = useQuery({
queryKey: ['renderedSql', config],
queryFn: async () => {
const optimizedConfig = mvOptimizationData?.optimizedConfig ?? config;
const query = await renderChartConfig(optimizedConfig, metadata);
const query = await renderChartConfig(
optimizedConfig,
metadata,
source?.querySettings,
);
return format(parameterizedQueryToSql(query));
},
...options,
enabled: enabled && !isLoadingMVOptimization,
enabled: enabled && !isLoadingMVOptimization && !isSourceLoading,
});
return {
@ -379,6 +397,10 @@ export function useAliasMapFromChartConfig(
const metadata = useMetadataWithSettings();
const { data: source, isLoading: isSourceLoading } = useSource({
id: config?.source,
});
return useQuery<Record<string, string>>({
// Only include config properties that affect SELECT structure and aliases.
// When adding new ChartConfig fields, check renderChartConfig.ts to see if they
@ -400,13 +422,17 @@ export function useAliasMapFromChartConfig(
return {};
}
const query = await renderChartConfig(config, metadata);
const query = await renderChartConfig(
config,
metadata,
undefined, // no query settings for creating alias map
);
const aliasMap = chSqlToAliasMap(query);
return aliasMap;
},
enabled: config != null,
enabled: config != null && !isSourceLoading,
...options,
});
}

View file

@ -16,7 +16,7 @@ import {
import { useClickhouseClient } from '@/clickhouse';
import { useSources } from '@/source';
import { getMetricTableName } from '@/utils';
import { getMetricTableName, mapKeyBy } from '@/utils';
import { useMetadataWithSettings } from './useMetadata';
@ -129,6 +129,9 @@ export function useDashboardFilterKeyValues({
dateRange,
});
const { data: sources, isLoading: isSourcesLoading } = useSources();
const sourcesLookup = useMemo(() => mapKeyBy(sources ?? [], 'id'), [sources]);
const queryClient = useQueryClient();
type TQueryData = { key: string; value: string[] }[];
@ -140,6 +143,9 @@ export function useDashboardFilterKeyValues({
chartConfig.from,
keys,
];
const source = sourcesLookup.get(chartConfig.source);
return {
queryKey: [...queryKeyPrefix, chartConfig],
placeholderData: () => {
@ -157,7 +163,7 @@ export function useDashboardFilterKeyValues({
});
return cached[0]?.data;
},
enabled: !isLoadingOptimizedCalls,
enabled: !isLoadingOptimizedCalls && !isSourcesLoading,
staleTime: 1000 * 60 * 5, // Cache every 5 min
queryFn: async ({ signal }) =>
metadata.getKeyValues({
@ -166,6 +172,7 @@ export function useDashboardFilterKeyValues({
limit: 10000,
disableRowLimit: true,
signal,
source,
}),
};
}),

View file

@ -3,6 +3,7 @@ import { ChartConfigWithOptDateRange } from '@hyperdx/common-utils/dist/types';
import { useQuery, UseQueryOptions } from '@tanstack/react-query';
import { useClickhouseClient } from '@/clickhouse';
import { useSource } from '@/source';
import { useMetadataWithSettings } from './useMetadata';
@ -15,12 +16,21 @@ export function useExplainQuery(
with: undefined,
};
const clickhouseClient = useClickhouseClient();
const metadata = useMetadataWithSettings();
const { data: source, isLoading: isSourceLoading } = useSource({
id: config?.source,
});
return useQuery({
queryKey: ['explain', config],
queryFn: async ({ signal }) => {
const query = await renderChartConfig(config, metadata);
const query = await renderChartConfig(
config,
metadata,
source?.querySettings,
);
const response = await clickhouseClient.query<'JSONEachRow'>({
query: `EXPLAIN ESTIMATE ${query.sql}`,
query_params: query.params,
@ -32,6 +42,7 @@ export function useExplainQuery(
},
retry: false,
staleTime: 1000 * 60,
enabled: !isSourceLoading,
...options,
});
}

View file

@ -10,7 +10,10 @@ import {
TableConnection,
TableMetadata,
} from '@hyperdx/common-utils/dist/core/metadata';
import { ChartConfigWithDateRange } from '@hyperdx/common-utils/dist/types';
import {
ChartConfigWithDateRange,
TSource,
} from '@hyperdx/common-utils/dist/types';
import {
keepPreviousData,
useQuery,
@ -22,7 +25,7 @@ import api from '@/api';
import { IS_LOCAL_MODE } from '@/config';
import { LOCAL_STORE_CONNECTIONS_KEY } from '@/connection';
import { getMetadata } from '@/metadata';
import { useSources } from '@/source';
import { useSource, useSources } from '@/source';
import { toArray } from '@/utils';
// Hook to get metadata with proper settings applied
@ -265,6 +268,10 @@ export function useGetValuesDistribution(
options?: Omit<UseQueryOptions<Map<string, number>, Error>, 'queryKey'>,
) {
const metadata = useMetadataWithSettings();
const { data: source, isLoading: isLoadingSource } = useSource({
id: chartConfig.source,
});
return useQuery<Map<string, number>>({
queryKey: ['useMetadata.useGetValuesDistribution', chartConfig, key],
queryFn: async () => {
@ -272,10 +279,11 @@ export function useGetValuesDistribution(
chartConfig,
key,
limit,
source,
});
},
staleTime: Infinity,
enabled: !!key,
enabled: !!key && !isLoadingSource,
placeholderData: keepPreviousData,
retry: false,
...options,

View file

@ -12,7 +12,10 @@ import {
isFirstOrderByAscending,
isTimestampExpressionInFirstOrderBy,
} from '@hyperdx/common-utils/dist/core/utils';
import { ChartConfigWithOptTimestamp } from '@hyperdx/common-utils/dist/types';
import {
ChartConfigWithOptTimestamp,
TSource,
} from '@hyperdx/common-utils/dist/types';
import {
QueryClient,
QueryFunction,
@ -24,6 +27,7 @@ import api from '@/api';
import { getClickhouseClient } from '@/clickhouse';
import { useMetadataWithSettings } from '@/hooks/useMetadata';
import { useMVOptimizationExplanation } from '@/hooks/useMVOptimizationExplanation';
import { useSource } from '@/source';
import { omit } from '@/utils';
import {
generateTimeWindowsAscending,
@ -67,6 +71,7 @@ type QueryMeta = {
hasPreviousQueries: boolean;
metadata: Metadata;
optimizedConfig?: ChartConfigWithOptTimestamp;
source: TSource | undefined;
};
// Get time window from page param
@ -141,7 +146,7 @@ const queryFn: QueryFunction<TQueryFnData, TQueryKey, TPageParam> = async ({
throw new Error('Query missing client meta');
}
const { queryClient, metadata, hasPreviousQueries, optimizedConfig } =
const { queryClient, metadata, hasPreviousQueries, optimizedConfig, source } =
meta as QueryMeta;
// Only stream incrementally if this is a fresh query with no previous
@ -177,7 +182,11 @@ const queryFn: QueryFunction<TQueryFnData, TQueryKey, TPageParam> = async ({
},
};
const query = await renderChartConfig(windowedConfig, metadata);
const query = await renderChartConfig(
windowedConfig,
metadata,
source?.querySettings,
);
// Create abort signal from timeout if provided
const abortController = queryTimeout ? new AbortController() : undefined;
@ -399,6 +408,10 @@ export default function useOffsetPaginatedQuery(
placeholderData: undefined,
});
const { data: source, isLoading: isSourceLoading } = useSource({
id: config?.source,
});
const {
data,
fetchNextPage,
@ -419,7 +432,8 @@ export default function useOffsetPaginatedQuery(
// Only preserve previous query in live mode
return isLive ? prev : undefined;
},
enabled: enabled && !isLoadingMe && !isLoadingMVOptimization,
enabled:
enabled && !isLoadingMe && !isLoadingMVOptimization && !isSourceLoading,
initialPageParam: { windowIndex: 0, offset: 0 } as TPageParam,
getNextPageParam: (lastPage, allPages) => {
return getNextPageParam(lastPage, allPages, config);
@ -430,6 +444,7 @@ export default function useOffsetPaginatedQuery(
hasPreviousQueries,
metadata,
optimizedConfig: mvOptimizationData?.optimizedConfig,
source,
} satisfies QueryMeta,
queryFn,
gcTime: isLive ? ms('30s') : ms('5m'), // more aggressive gc for live data, since it can end up holding lots of data

View file

@ -94,6 +94,7 @@ async function getServiceMapQuery({
where: '',
},
metadata,
source.querySettings,
),
renderChartConfig(
{
@ -108,6 +109,7 @@ async function getServiceMapQuery({
where: '',
},
metadata,
source.querySettings,
),
]);

View file

@ -141,6 +141,7 @@ export function useSessions(
groupBy: 'serviceName, sessionId',
},
metadata,
traceSource.querySettings,
),
renderChartConfig(
{
@ -161,6 +162,7 @@ export function useSessions(
connection: sessionSource.connection,
},
metadata,
sessionSource.querySettings,
),
renderChartConfig(
{
@ -179,6 +181,7 @@ export function useSessions(
connection: traceSource?.connection,
},
metadata,
traceSource.querySettings,
),
]);
@ -372,6 +375,7 @@ export function useRRWebEventStream(
},
},
metadata,
source.querySettings,
);
const format = 'JSONEachRow';

View file

@ -20,7 +20,12 @@ import {
TSource,
TSourceUnion,
} from '@hyperdx/common-utils/dist/types';
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query';
import {
useMutation,
useQuery,
useQueryClient,
UseQueryOptions,
} from '@tanstack/react-query';
import { hdxServer } from '@/api';
import { HDX_LOCAL_DEFAULT_SOURCES } from '@/config';

View file

@ -756,3 +756,13 @@ export const orderByStringToSortingState = (
},
];
};
export const mapKeyBy = <T>(array: T[], key: keyof T) => {
const map = new Map<T[typeof key], T>();
for (const item of array) {
map.set(item[key], item);
}
return map;
};

View file

@ -1,30 +1,36 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`renderChartConfig Aggregate Merge Functions should generate SQL for an aggregate merge function 1`] = `"SELECT avgMerge(Duration),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity"`;
exports[`renderChartConfig Aggregate Merge Functions should generate SQL for an aggregate merge function 1`] = `"SELECT avgMerge(Duration),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig Aggregate Merge Functions should generate SQL for an aggregate merge function with a condition 1`] = `"SELECT avgMergeIf(Duration, ((severity = 'ERROR')) AND toFloat64OrDefault(toString(Duration)) IS NOT NULL),severity FROM default.logs WHERE (((severity = 'ERROR'))) GROUP BY severity"`;
exports[`renderChartConfig Aggregate Merge Functions should generate SQL for an aggregate merge function with a condition 1`] = `"SELECT avgMergeIf(Duration, ((severity = 'ERROR')) AND toFloat64OrDefault(toString(Duration)) IS NOT NULL),severity FROM default.logs WHERE (((severity = 'ERROR'))) GROUP BY severity SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig Aggregate Merge Functions should generate SQL for an histogram merge function 1`] = `"SELECT histogramMerge(20)(Duration),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity"`;
exports[`renderChartConfig Aggregate Merge Functions should generate SQL for an histogram merge function 1`] = `"SELECT histogramMerge(20)(Duration),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig Aggregate Merge Functions should generate SQL for an quantile merge function with a condition 1`] = `"SELECT quantileMergeIf(0.95)(Duration, ((severity = 'ERROR')) AND toFloat64OrDefault(toString(Duration)) IS NOT NULL),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) AND (((severity = 'ERROR'))) GROUP BY severity"`;
exports[`renderChartConfig Aggregate Merge Functions should generate SQL for an quantile merge function with a condition 1`] = `"SELECT quantileMergeIf(0.95)(Duration, ((severity = 'ERROR')) AND toFloat64OrDefault(toString(Duration)) IS NOT NULL),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) AND (((severity = 'ERROR'))) GROUP BY severity SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig HAVING clause should not render HAVING clause when having is empty string 1`] = `"SELECT count(),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity"`;
exports[`renderChartConfig HAVING clause should not render HAVING clause when having is empty string 1`] = `"SELECT count(),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig HAVING clause should not render HAVING clause when not provided 1`] = `"SELECT count(),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity"`;
exports[`renderChartConfig HAVING clause should not render HAVING clause when not provided 1`] = `"SELECT count(),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig HAVING clause should render HAVING clause with SQL language 1`] = `"SELECT count(),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity HAVING count(*) > 100"`;
exports[`renderChartConfig HAVING clause should render HAVING clause with SQL language 1`] = `"SELECT count(),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity HAVING count(*) > 100 SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig HAVING clause should render HAVING clause with granularity and groupBy 1`] = `"SELECT count(),event_type,toStartOfInterval(toDateTime(timestamp), INTERVAL 5 minute) AS \`__hdx_time_bucket\` FROM default.events WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY event_type,toStartOfInterval(toDateTime(timestamp), INTERVAL 5 minute) AS \`__hdx_time_bucket\` HAVING count(*) > 50 ORDER BY toStartOfInterval(toDateTime(timestamp), INTERVAL 5 minute) AS \`__hdx_time_bucket\`"`;
exports[`renderChartConfig HAVING clause should render HAVING clause with granularity and groupBy 1`] = `"SELECT count(),event_type,toStartOfInterval(toDateTime(timestamp), INTERVAL 5 minute) AS \`__hdx_time_bucket\` FROM default.events WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY event_type,toStartOfInterval(toDateTime(timestamp), INTERVAL 5 minute) AS \`__hdx_time_bucket\` HAVING count(*) > 50 ORDER BY toStartOfInterval(toDateTime(timestamp), INTERVAL 5 minute) AS \`__hdx_time_bucket\` SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig HAVING clause should render HAVING clause with multiple conditions 1`] = `
"SELECT avg(
toFloat64OrDefault(toString(response_time))
),count(),endpoint FROM default.metrics WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY endpoint HAVING avg(response_time) > 500 AND count(*) > 10"
),count(),endpoint FROM default.metrics WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY endpoint HAVING avg(response_time) > 500 AND count(*) > 10 SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig containing CTE clauses should render a ChSql CTE configuration correctly 1`] = `"WITH TestCte AS (SELECT TimeUnix, Line FROM otel_logs) SELECT Line FROM TestCte"`;
exports[`renderChartConfig SETTINGS clause should apply the "chart config" settings to the query 1`] = `"SELECT histogramMerge(20)(Duration),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig containing CTE clauses should render a chart config CTE configuration correctly 1`] = `"WITH Parts AS (SELECT _part, _part_offset FROM default.some_table WHERE ((FieldA = 'test')) ORDER BY rand() DESC LIMIT 1000) SELECT * FROM Parts WHERE ((FieldA = 'test') AND (indexHint((_part, _part_offset) IN (SELECT tuple(_part, _part_offset) FROM Parts)))) ORDER BY rand() DESC LIMIT 1000"`;
exports[`renderChartConfig SETTINGS clause should apply the "query settings" settings to the query 1`] = `"SELECT histogramMerge(20)(Duration),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig SETTINGS clause should concat the "chart config" and "query setting" settings and apply them to the query 1`] = `"SELECT histogramMerge(20)(Duration),severity FROM default.logs WHERE (timestamp >= fromUnixTimestamp64Milli(1739318400000) AND timestamp <= fromUnixTimestamp64Milli(1739491200000)) GROUP BY severity SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig containing CTE clauses should render a ChSql CTE configuration correctly 1`] = `"WITH TestCte AS (SELECT TimeUnix, Line FROM otel_logs) SELECT Line FROM TestCte SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig containing CTE clauses should render a chart config CTE configuration correctly 1`] = `"WITH Parts AS (SELECT _part, _part_offset FROM default.some_table WHERE ((FieldA = 'test')) ORDER BY rand() DESC LIMIT 1000 SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000) SELECT * FROM Parts WHERE ((FieldA = 'test') AND (indexHint((_part, _part_offset) IN (SELECT tuple(_part, _part_offset) FROM Parts)))) ORDER BY rand() DESC LIMIT 1000 SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"`;
exports[`renderChartConfig histogram metric queries count should generate a count query with grouping and time bucketing 1`] = `
"WITH source AS (
@ -54,7 +60,7 @@ exports[`renderChartConfig histogram metric queries count should generate a coun
sum(delta) AS \\"Value\\"
FROM source
GROUP BY group, \`__hdx_time_bucket\`
) SELECT \`__hdx_time_bucket\`, group, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
) SELECT \`__hdx_time_bucket\`, group, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig histogram metric queries count should generate a count query without grouping but time bucketing 1`] = `
@ -85,7 +91,7 @@ exports[`renderChartConfig histogram metric queries count should generate a coun
sum(delta) AS \\"Value\\"
FROM source
GROUP BY \`__hdx_time_bucket\`
) SELECT \`__hdx_time_bucket\`, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
) SELECT \`__hdx_time_bucket\`, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig histogram metric queries count should generate a count query without grouping or time bucketing 1`] = `
@ -116,7 +122,7 @@ exports[`renderChartConfig histogram metric queries count should generate a coun
sum(delta) AS \\"Value\\"
FROM source
GROUP BY \`__hdx_time_bucket\`
) SELECT \`__hdx_time_bucket\`, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
) SELECT \`__hdx_time_bucket\`, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig histogram metric queries quantile should generate a query with grouping and time bucketing 1`] = `
@ -200,7 +206,7 @@ exports[`renderChartConfig histogram metric queries quantile should generate a q
END AS \\"Value\\"
FROM points
WHERE length(point) > 1 AND total > 0
) SELECT \`__hdx_time_bucket\`, group, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
) SELECT \`__hdx_time_bucket\`, group, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig histogram metric queries quantile should generate a query without grouping but time bucketing 1`] = `
@ -284,7 +290,7 @@ exports[`renderChartConfig histogram metric queries quantile should generate a q
END AS \\"Value\\"
FROM points
WHERE length(point) > 1 AND total > 0
) SELECT \`__hdx_time_bucket\`, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
) SELECT \`__hdx_time_bucket\`, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig histogram metric queries quantile should generate a query without grouping or time bucketing 1`] = `
@ -368,7 +374,7 @@ exports[`renderChartConfig histogram metric queries quantile should generate a q
END AS \\"Value\\"
FROM points
WHERE length(point) > 1 AND total > 0
) SELECT \`__hdx_time_bucket\`, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
) SELECT \`__hdx_time_bucket\`, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig k8s semantic convention migrations should generate SQL with metricNameSql for container.cpu.utilization histogram metric 1`] = `
@ -452,7 +458,7 @@ exports[`renderChartConfig k8s semantic convention migrations should generate SQ
END AS \\"Value\\"
FROM points
WHERE length(point) > 1 AND total > 0
) SELECT \`__hdx_time_bucket\`, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
) SELECT \`__hdx_time_bucket\`, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig k8s semantic convention migrations should generate SQL with metricNameSql for histogram metric with groupBy 1`] = `
@ -536,7 +542,7 @@ exports[`renderChartConfig k8s semantic convention migrations should generate SQ
END AS \\"Value\\"
FROM points
WHERE length(point) > 1 AND total > 0
) SELECT \`__hdx_time_bucket\`, group, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
) SELECT \`__hdx_time_bucket\`, group, \\"Value\\" FROM metrics WHERE (\`__hdx_time_bucket\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket\` <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig k8s semantic convention migrations should generate SQL with metricNameSql for k8s.node.cpu.utilization sum metric 1`] = `
@ -579,7 +585,7 @@ exports[`renderChartConfig k8s semantic convention migrations should generate SQ
ORDER BY AttributesHash, \`__hdx_time_bucket2\`
) SELECT max(
toFloat64OrDefault(toString(Rate))
) AS \\"Value\\",toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (\`__hdx_time_bucket2\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket2\` <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` LIMIT 10"
) AS \\"Value\\",toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (\`__hdx_time_bucket2\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket2\` <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` LIMIT 10 SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig k8s semantic convention migrations should generate SQL with metricNameSql for k8s.pod.cpu.utilization gauge metric 1`] = `
@ -612,7 +618,7 @@ exports[`renderChartConfig k8s semantic convention migrations should generate SQ
ORDER BY AttributesHash, __hdx_time_bucket2
) SELECT avg(
toFloat64OrDefault(toString(LastValue))
),toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (__hdx_time_bucket2 >= fromUnixTimestamp64Milli(1739318400000) AND __hdx_time_bucket2 <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
),toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (__hdx_time_bucket2 >= fromUnixTimestamp64Milli(1739318400000) AND __hdx_time_bucket2 <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig k8s semantic convention migrations should handle metrics without metricNameSql (backward compatibility) 1`] = `
@ -645,7 +651,7 @@ exports[`renderChartConfig k8s semantic convention migrations should handle metr
ORDER BY AttributesHash, __hdx_time_bucket2
) SELECT avg(
toFloat64OrDefault(toString(LastValue))
),toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (__hdx_time_bucket2 >= fromUnixTimestamp64Milli(1739318400000) AND __hdx_time_bucket2 <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
),toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (__hdx_time_bucket2 >= fromUnixTimestamp64Milli(1739318400000) AND __hdx_time_bucket2 <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig should generate sql for a single gauge metric 1`] = `
@ -676,7 +682,7 @@ exports[`renderChartConfig should generate sql for a single gauge metric 1`] = `
FROM Source
GROUP BY AttributesHash, __hdx_time_bucket2
ORDER BY AttributesHash, __hdx_time_bucket2
) SELECT quantile(0.95)(toFloat64OrDefault(toString(LastValue))),toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (__hdx_time_bucket2 >= fromUnixTimestamp64Milli(1739318400000) AND __hdx_time_bucket2 <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
) SELECT quantile(0.95)(toFloat64OrDefault(toString(LastValue))),toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (__hdx_time_bucket2 >= fromUnixTimestamp64Milli(1739318400000) AND __hdx_time_bucket2 <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig should generate sql for a single gauge metric with a delta() function applied 1`] = `
@ -709,7 +715,7 @@ exports[`renderChartConfig should generate sql for a single gauge metric with a
ORDER BY AttributesHash, __hdx_time_bucket2
) SELECT max(
toFloat64OrDefault(toString(LastValue))
),toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (__hdx_time_bucket2 >= fromUnixTimestamp64Milli(1739318400000) AND __hdx_time_bucket2 <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable'"
),toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (__hdx_time_bucket2 >= fromUnixTimestamp64Milli(1739318400000) AND __hdx_time_bucket2 <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(__hdx_time_bucket2), INTERVAL 1 minute) AS \`__hdx_time_bucket\` LIMIT 10 SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;
exports[`renderChartConfig should generate sql for a single sum metric 1`] = `
@ -752,5 +758,5 @@ exports[`renderChartConfig should generate sql for a single sum metric 1`] = `
ORDER BY AttributesHash, \`__hdx_time_bucket2\`
) SELECT avg(
toFloat64OrDefault(toString(Rate))
) AS \\"Value\\",toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (\`__hdx_time_bucket2\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket2\` <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` LIMIT 10"
) AS \\"Value\\",toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (\`__hdx_time_bucket2\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket2\` <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` LIMIT 10 SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000"
`;

View file

@ -3,12 +3,19 @@ import { ClickHouseClient } from '@clickhouse/client-common';
import { ClickhouseClient as HdxClickhouseClient } from '@/clickhouse/node';
import { Metadata, MetadataCache } from '@/core/metadata';
import { ChartConfigWithDateRange } from '@/types';
import { ChartConfigWithDateRange, TSource } from '@/types';
describe('Metadata Integration Tests', () => {
let client: ClickHouseClient;
let hdxClient: HdxClickhouseClient;
// Minimal source fixture: only `querySettings` is consumed by the queries
// under test, so the remaining TSource fields are omitted and the object is
// cast via `as TSource`. These settings should appear verbatim in the
// rendered SETTINGS clause.
const source = {
querySettings: [
{ setting: 'optimize_read_in_order', value: '0' },
{ setting: 'cast_keep_nullable', value: '0' },
],
} as TSource;
beforeAll(() => {
const host = process.env.CLICKHOUSE_HOST || 'http://localhost:8123';
const username = process.env.CLICKHOUSE_USER || 'default';
@ -85,6 +92,7 @@ describe('Metadata Integration Tests', () => {
chartConfig,
keys: ['SeverityText'],
disableRowLimit,
source,
});
expect(resultSeverityText).toHaveLength(1);
@ -98,6 +106,7 @@ describe('Metadata Integration Tests', () => {
chartConfig,
keys: ['TraceId'],
disableRowLimit,
source,
});
expect(resultTraceId).toHaveLength(1);
@ -115,6 +124,7 @@ describe('Metadata Integration Tests', () => {
chartConfig,
keys: ['TraceId', 'SeverityText'],
disableRowLimit,
source,
});
expect(resultBoth).toEqual([
@ -138,6 +148,7 @@ describe('Metadata Integration Tests', () => {
chartConfig,
keys: ['__hdx_materialized_k8s.pod.name'],
disableRowLimit,
source,
});
expect(resultPodName).toHaveLength(1);
@ -153,6 +164,7 @@ describe('Metadata Integration Tests', () => {
chartConfig,
keys: ['LogAttributes.user'],
disableRowLimit,
source,
});
expect(resultLogAttributes).toHaveLength(1);
@ -167,6 +179,7 @@ describe('Metadata Integration Tests', () => {
const resultEmpty = await metadata.getKeyValues({
chartConfig,
keys: [],
source,
});
expect(resultEmpty).toEqual([]);
@ -177,6 +190,7 @@ describe('Metadata Integration Tests', () => {
chartConfig,
keys: ['SeverityText'],
limit: 2,
source,
});
expect(resultLimited).toHaveLength(1);
@ -380,11 +394,12 @@ describe('Metadata Integration Tests', () => {
);
});
it('should work without source parameter (fall back to base table)', async () => {
it('should work with an undefined source parameter (fall back to base table)', async () => {
const result = await metadata.getKeyValuesWithMVs({
chartConfig,
keys: ['environment', 'service'],
// No source parameter
source: undefined,
});
expect(result).toHaveLength(2);

View file

@ -1,7 +1,7 @@
import { ClickhouseClient } from '../clickhouse/node';
import { Metadata, MetadataCache } from '../core/metadata';
import * as renderChartConfigModule from '../core/renderChartConfig';
import { ChartConfigWithDateRange } from '../types';
import { ChartConfigWithDateRange, TSource } from '../types';
// Mock ClickhouseClient
const mockClickhouseClient = {
@ -20,6 +20,13 @@ jest.mock('../core/renderChartConfig', () => ({
.mockResolvedValue({ sql: 'SELECT 1', params: {} }),
}));
// Shared test fixture: a partial TSource carrying only the querySettings
// forwarded to renderChartConfig by the metadata helpers. Cast with
// `as TSource` because the other TSource fields are irrelevant here.
const source = {
querySettings: [
{ setting: 'optimize_read_in_order', value: '0' },
{ setting: 'cast_keep_nullable', value: '0' },
],
} as TSource;
describe('MetadataCache', () => {
let metadataCache: MetadataCache;
@ -259,6 +266,7 @@ describe('Metadata', () => {
keys: ['column1', 'column2'],
limit: 10,
disableRowLimit: false,
source,
});
expect(mockClickhouseClient.query).toHaveBeenCalledWith(
@ -278,6 +286,7 @@ describe('Metadata', () => {
keys: ['column1', 'column2'],
limit: 10,
disableRowLimit: true,
source,
});
expect(mockClickhouseClient.query).toHaveBeenCalledWith(
@ -292,6 +301,7 @@ describe('Metadata', () => {
chartConfig: mockChartConfig,
keys: ['column1', 'column2'],
limit: 10,
source,
});
expect(mockClickhouseClient.query).toHaveBeenCalledWith(
@ -310,6 +320,7 @@ describe('Metadata', () => {
chartConfig: mockChartConfig,
keys: ['column1', 'column2'],
limit: 10,
source,
});
expect(result).toEqual([
@ -334,6 +345,7 @@ describe('Metadata', () => {
chartConfig: mockChartConfig,
keys: ['column1'],
limit: 10,
source,
});
expect(result).toEqual([{ key: 'column1', value: ['value1', 'value2'] }]);
@ -349,6 +361,7 @@ describe('Metadata', () => {
chartConfig: mockChartConfig,
keys: [],
limit: 10,
source,
});
expect(results).toEqual([]);
@ -400,6 +413,7 @@ describe('Metadata', () => {
const result = await metadata.getValuesDistribution({
chartConfig: mockChartConfig,
key: 'severity',
source,
});
expect(result).toEqual(
@ -442,6 +456,7 @@ describe('Metadata', () => {
await metadata.getValuesDistribution({
chartConfig: configWithAliases,
key: 'severity',
source,
});
const actualConfig = renderChartConfigSpy.mock.calls[0][0];
@ -481,6 +496,7 @@ describe('Metadata', () => {
await metadata.getValuesDistribution({
chartConfig: configWithFilters,
key: 'severity',
source,
});
const actualConfig = renderChartConfigSpy.mock.calls[0][0];

View file

@ -4,9 +4,14 @@ import {
ChartConfigWithOptDateRange,
DisplayType,
MetricsDataType,
QuerySettings,
} from '@/types';
import { renderChartConfig, timeFilterExpr } from '../core/renderChartConfig';
import {
ChartConfigWithOptDateRangeEx,
renderChartConfig,
timeFilterExpr,
} from '../core/renderChartConfig';
describe('renderChartConfig', () => {
let mockMetadata: jest.Mocked<Metadata>;
@ -70,10 +75,19 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const querySettings: QuerySettings = [
{ setting: 'optimize_read_in_order', value: '0' },
{ setting: 'cast_keep_nullable', value: '1' },
{ setting: 'additional_result_filter', value: 'x != 2' },
{ setting: 'count_distinct_implementation', value: 'uniqCombined64' },
{ setting: 'async_insert_busy_timeout_min_ms', value: '20000' },
];
it('should generate sql for a single gauge metric', async () => {
const generatedSql = await renderChartConfig(
gaugeConfiguration,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
@ -94,6 +108,7 @@ describe('renderChartConfig', () => {
],
},
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
@ -133,7 +148,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
});
@ -162,9 +181,9 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
await expect(renderChartConfig(config, mockMetadata)).rejects.toThrow(
'multi select or string select on metrics not supported',
);
await expect(
renderChartConfig(config, mockMetadata, querySettings),
).rejects.toThrow('multi select or string select on metrics not supported');
});
describe('histogram metric queries', () => {
@ -200,7 +219,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
});
@ -237,7 +260,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
});
@ -275,7 +302,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
});
@ -312,7 +343,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
});
@ -348,7 +383,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
});
@ -385,7 +424,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
});
@ -408,7 +451,11 @@ describe('renderChartConfig', () => {
whereLanguage: 'sql',
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
});
@ -458,7 +505,11 @@ describe('renderChartConfig', () => {
limit: { limit: 1000 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toMatchSnapshot();
});
@ -481,7 +532,9 @@ describe('renderChartConfig', () => {
whereLanguage: 'sql',
};
await expect(renderChartConfig(config, mockMetadata)).rejects.toThrow(
await expect(
renderChartConfig(config, mockMetadata, querySettings),
).rejects.toThrow(
"must specify either 'sql' or 'chartConfig' in with clause",
);
});
@ -504,9 +557,9 @@ describe('renderChartConfig', () => {
whereLanguage: 'sql',
};
await expect(renderChartConfig(config, mockMetadata)).rejects.toThrow(
'non-conforming sql object in CTE',
);
await expect(
renderChartConfig(config, mockMetadata, querySettings),
).rejects.toThrow('non-conforming sql object in CTE');
});
it('should throw if the CTE chartConfig param is invalid', async () => {
@ -530,9 +583,9 @@ describe('renderChartConfig', () => {
whereLanguage: 'sql',
};
await expect(renderChartConfig(config, mockMetadata)).rejects.toThrow(
'non-conforming chartConfig object in CTE',
);
await expect(
renderChartConfig(config, mockMetadata, querySettings),
).rejects.toThrow('non-conforming chartConfig object in CTE');
});
});
@ -572,7 +625,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
// Verify the SQL contains the IN-based metric name condition
@ -617,7 +674,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toContain('k8s.node.cpu.utilization');
@ -660,7 +721,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toContain('container.cpu.utilization');
@ -704,7 +769,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toContain('k8s.pod.cpu.utilization');
@ -747,7 +816,11 @@ describe('renderChartConfig', () => {
limit: { limit: 10 },
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
// Should use the simple string comparison for regular metrics (not IN-based)
@ -782,7 +855,11 @@ describe('renderChartConfig', () => {
dateRange: [new Date('2025-02-12'), new Date('2025-02-14')],
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toContain('HAVING');
expect(actual).toContain('count(*) > 100');
@ -818,7 +895,11 @@ describe('renderChartConfig', () => {
dateRange: [new Date('2025-02-12'), new Date('2025-02-14')],
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toContain('HAVING');
expect(actual).toContain('avg(response_time) > 500 AND count(*) > 10');
@ -847,7 +928,11 @@ describe('renderChartConfig', () => {
dateRange: [new Date('2025-02-12'), new Date('2025-02-14')],
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).not.toContain('HAVING');
expect(actual).toMatchSnapshot();
@ -878,7 +963,11 @@ describe('renderChartConfig', () => {
granularity: '5 minute',
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toContain('HAVING');
expect(actual).toContain('count(*) > 50');
@ -910,7 +999,11 @@ describe('renderChartConfig', () => {
dateRange: [new Date('2025-02-12'), new Date('2025-02-14')],
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).not.toContain('HAVING');
expect(actual).toMatchSnapshot();
@ -1151,7 +1244,11 @@ describe('renderChartConfig', () => {
dateRange: [new Date('2025-02-12'), new Date('2025-02-14')],
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toContain('avgMerge(Duration)');
expect(actual).toMatchSnapshot();
@ -1178,7 +1275,11 @@ describe('renderChartConfig', () => {
groupBy: 'severity',
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toContain(
"avgMergeIf(Duration, ((severity = 'ERROR')) AND toFloat64OrDefault(toString(Duration)) IS NOT NULL)",
@ -1210,7 +1311,11 @@ describe('renderChartConfig', () => {
dateRange: [new Date('2025-02-12'), new Date('2025-02-14')],
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toContain(
"quantileMergeIf(0.95)(Duration, ((severity = 'ERROR')) AND toFloat64OrDefault(toString(Duration)) IS NOT NULL)",
@ -1240,10 +1345,85 @@ describe('renderChartConfig', () => {
dateRange: [new Date('2025-02-12'), new Date('2025-02-14')],
};
const generatedSql = await renderChartConfig(config, mockMetadata);
const generatedSql = await renderChartConfig(
config,
mockMetadata,
querySettings,
);
const actual = parameterizedQueryToSql(generatedSql);
expect(actual).toContain('histogramMerge(20)(Duration)');
expect(actual).toMatchSnapshot();
});
});
describe('SETTINGS clause', () => {
  // Shared base config: a histogramMerge table chart over default.logs.
  const config: ChartConfigWithOptDateRangeEx = {
    displayType: DisplayType.Table,
    connection: 'test-connection',
    from: {
      databaseName: 'default',
      tableName: 'logs',
    },
    select: [
      {
        aggFn: 'histogramMerge',
        valueExpression: 'Duration',
        level: 20,
      },
    ],
    where: '',
    whereLanguage: 'sql',
    groupBy: 'severity',
    timestampValueExpression: 'timestamp',
    dateRange: [new Date('2025-02-12'), new Date('2025-02-14')],
  };

  test('should apply the "query settings" settings to the query', async () => {
    const generatedSql = await renderChartConfig(
      config,
      mockMetadata,
      querySettings,
    );
    const actual = parameterizedQueryToSql(generatedSql);
    expect(actual).toContain(
      "SETTINGS optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000",
    );
    expect(actual).toMatchSnapshot();
  });

  test('should apply the "chart config" settings to the query', async () => {
    const generatedSql = await renderChartConfig(
      {
        ...config,
        settings: chSql`short_circuit_function_evaluation = 'force_enable'`,
      },
      mockMetadata,
      // No source-level settings here, so this test isolates the
      // chart-config settings path (the combined path is covered below).
      undefined,
    );
    const actual = parameterizedQueryToSql(generatedSql);
    expect(actual).toContain(
      "SETTINGS short_circuit_function_evaluation = 'force_enable'",
    );
    expect(actual).toMatchSnapshot();
  });

  test('should concat the "chart config" and "query setting" settings and apply them to the query', async () => {
    const generatedSql = await renderChartConfig(
      {
        ...config,
        settings: chSql`short_circuit_function_evaluation = 'force_enable'`,
      },
      mockMetadata,
      querySettings,
    );
    const actual = parameterizedQueryToSql(generatedSql);
    // Chart-level settings are rendered first, then the source-level ones.
    expect(actual).toContain(
      "SETTINGS short_circuit_function_evaluation = 'force_enable', optimize_read_in_order = 0, cast_keep_nullable = 1, additional_result_filter = 'x != 2', count_distinct_implementation = 'uniqCombined64', async_insert_busy_timeout_min_ms = 20000",
    );
    expect(actual).toMatchSnapshot();
  });
});
});

View file

@ -10,6 +10,7 @@ import {
import {
convertToDashboardTemplate,
extractSettingsClauseFromEnd,
findJsonExpressions,
formatDate,
getAlignedDateRange,
@ -17,7 +18,9 @@ import {
isFirstOrderByAscending,
isJsonExpression,
isTimestampExpressionInFirstOrderBy,
joinQuerySettings,
optimizeTimestampValueExpression,
parseToNumber,
parseToStartOfFunction,
replaceJsonExpressions,
splitAndTrimCSV,
@ -1423,4 +1426,135 @@ describe('utils', () => {
expect(alignedEnd.toISOString()).toBe('2025-11-26T13:00:00.000Z');
});
});
describe('extractSettingsClauseFromEnd', () => {
test.each([
{
label: 'no settings clause',
sql: 'SELECT * FROM table',
withoutSettingsClause: 'SELECT * FROM table',
settingsClause: undefined,
},
{
label: 'basic',
sql: 'SELECT * FROM table SETTINGS opt=1, cast=1',
withoutSettingsClause: 'SELECT * FROM table',
settingsClause: 'SETTINGS opt=1, cast=1',
},
{
label: 'basic with semicolon',
sql: 'SELECT * FROM table SETTINGS opt = 1, cast = 1;',
withoutSettingsClause: 'SELECT * FROM table',
settingsClause: 'SETTINGS opt = 1, cast = 1',
},
{
label: 'with WHERE clause',
sql: 'SELECT * FROM table WHERE col=Value SETTINGS opt = 1, cast = 1;',
withoutSettingsClause: 'SELECT * FROM table WHERE col=Value',
settingsClause: 'SETTINGS opt = 1, cast = 1',
},
{
label: 'SETTINGS not at end',
sql: 'SELECT * FROM table WHERE col=Value SETTINGS opt = 1, cast = 1 FORMAT json;',
withoutSettingsClause: 'SELECT * FROM table WHERE col=Value',
// This test case illustrates that subsequent clauses will also be extracted.
settingsClause: 'SETTINGS opt = 1, cast = 1 FORMAT json',
},
])(
'Extracts SETTINGS clause from: "$label" query',
({ sql, settingsClause, withoutSettingsClause }) => {
const [remaining, extractedSettingsClause] =
extractSettingsClauseFromEnd(sql);
expect(remaining).toBe(withoutSettingsClause);
expect(extractedSettingsClause).toBe(settingsClause);
},
);
});
describe('parseToNumber', () => {
it('returns `undefined` for an empty string', () => {
expect(parseToNumber('')).toBe(undefined);
});
it('returns `undefined` for a whitespace string', () => {
expect(parseToNumber(' ')).toBe(undefined);
});
it('returns `undefined` for a non-numeric string', () => {
expect(parseToNumber(' . ? / ')).toBe(undefined);
expect(parseToNumber(' some string value ')).toBe(undefined);
expect(parseToNumber('5678abc')).toBe(undefined);
});
it('returns `undefined` for an infinite number', () => {
expect(parseToNumber('Infinity')).toBe(undefined);
expect(parseToNumber('-Infinity')).toBe(undefined);
});
it('returns the number value for a parseable number', () => {
expect(parseToNumber('123')).toBe(123);
expect(parseToNumber('0.123')).toBe(0.123);
expect(parseToNumber('1.123')).toBe(1.123);
expect(parseToNumber('10000000')).toBe(10000000);
});
});
describe('joinQuerySettings', () => {
test('returns `undefined` if the querySettings are `undefined` or empty', () => {
expect(joinQuerySettings(undefined)).toBe(undefined);
expect(joinQuerySettings([])).toBe(undefined);
});
test('filters out items whose `setting` or `value` field is empty', () => {
expect(
joinQuerySettings([
{ setting: '', value: '1' },
{ setting: 'async_insert', value: '' },
{ setting: 'async_insert_busy_timeout_min_ms', value: '20000' },
]),
).toEqual('async_insert_busy_timeout_min_ms = 20000');
});
test('joins the values into key value pairs', () => {
const result = joinQuerySettings([
{ setting: 'additional_result_filter', value: 'x != 2' },
{ setting: 'async_insert', value: '0' },
{ setting: 'async_insert_busy_timeout_min_ms', value: '20000' },
]);
expect(result).toContain("additional_result_filter = 'x != 2'");
expect(result).toContain('async_insert = 0');
expect(result).toContain('async_insert_busy_timeout_min_ms = 20000');
});
test('joins the result into a comma separated string', () => {
expect(
joinQuerySettings([
{ setting: 'additional_result_filter', value: 'x != 2' },
{ setting: 'async_insert', value: '0' },
{ setting: 'async_insert_busy_timeout_min_ms', value: '20000' },
]),
).toEqual(
"additional_result_filter = 'x != 2', async_insert = 0, async_insert_busy_timeout_min_ms = 20000",
);
});
test('wraps non-numeric and infinite numeric values in quotes', () => {
expect(
joinQuerySettings([{ setting: 'setting_name', value: 'x != 2' }]),
).toEqual("setting_name = 'x != 2'");
expect(
joinQuerySettings([{ setting: 'setting_name', value: 'string value' }]),
).toEqual("setting_name = 'string value'");
expect(
joinQuerySettings([{ setting: 'setting_name', value: '1000' }]),
).toEqual('setting_name = 1000');
expect(
joinQuerySettings([{ setting: 'setting_name', value: 'Infinity' }]),
).toEqual("setting_name = 'Infinity'");
});
});
});

View file

@ -9,6 +9,8 @@ import { Metadata } from '@/core/metadata';
import {
ChartConfigWithOptDateRange,
MaterializedViewConfiguration,
QuerySettings,
TSource,
} from '@/types';
import { ColumnMeta } from '..';
@ -81,7 +83,7 @@ describe('materializedViews', () => {
const SOURCE = {
from: { databaseName: 'default', tableName: 'otel_spans' },
materializedViews: [MV_CONFIG_METRIC_ROLLUP_1M],
};
} as TSource;
describe('tryConvertConfigToMaterializedViewSelect', () => {
it('should return empty object if selecting a string instead of an array of aggregates', async () => {
@ -1092,7 +1094,7 @@ describe('materializedViews', () => {
{} as any,
{
from: { databaseName: 'default', tableName: 'table_without_mv' },
},
} as TSource,
);
expect(actual).toEqual(chartConfig);
@ -1507,7 +1509,7 @@ describe('materializedViews', () => {
{} as any,
{
from: { databaseName: 'default', tableName: 'table_without_mv' },
},
} as TSource,
);
expect(result).toEqual({

View file

@ -18,11 +18,12 @@ import {
splitChartConfigs,
} from '@/core/renderChartConfig';
import {
extractSettingsClauseFromEnd,
hashCode,
replaceJsonExpressions,
splitAndTrimWithBracket,
} from '@/core/utils';
import { ChartConfigWithOptDateRange } from '@/types';
import { ChartConfigWithOptDateRange, QuerySettings } from '@/types';
// export @clickhouse/client-common types
export type {
@ -557,6 +558,7 @@ export abstract class BaseClickhouseClient {
config,
metadata,
opts,
querySettings,
}: {
config: ChartConfigWithOptDateRange;
metadata: Metadata;
@ -564,10 +566,13 @@ export abstract class BaseClickhouseClient {
abort_signal?: AbortSignal;
clickhouse_settings?: Record<string, any>;
};
querySettings: QuerySettings | undefined;
}): Promise<ResponseJSON<Record<string, string | number>>> {
config = setChartSelectsAlias(config);
const queries: ChSql[] = await Promise.all(
splitChartConfigs(config).map(c => renderChartConfig(c, metadata)),
splitChartConfigs(config).map(c =>
renderChartConfig(c, metadata, querySettings),
),
);
const isTimeSeries = config.displayType === 'line';
@ -654,6 +659,7 @@ export abstract class BaseClickhouseClient {
config,
metadata,
opts,
querySettings,
}: {
config: ChartConfigWithOptDateRange;
metadata: Metadata;
@ -661,9 +667,14 @@ export abstract class BaseClickhouseClient {
abort_signal?: AbortSignal;
clickhouse_settings?: Record<string, any>;
};
querySettings: QuerySettings | undefined;
}): Promise<{ isValid: boolean; rowEstimate?: number; error?: string }> {
try {
const renderedConfig = await renderChartConfig(config, metadata);
const renderedConfig = await renderChartConfig(
config,
metadata,
querySettings,
);
const explainedQuery = chSql`EXPLAIN ESTIMATE ${renderedConfig}`;
const result = await this.query<'JSON'>({
@ -736,10 +747,12 @@ export function chSqlToAliasMap(
try {
const sql = parameterizedQueryToSql(chSql);
// Remove the SETTINGS clause because `SQLParser` doesn't understand it.
const [sqlWithoutSettingsClause] = extractSettingsClauseFromEnd(sql);
// Replace JSON expressions with replacement tokens so that node-sql-parser can parse the SQL
const { sqlWithReplacements, replacements: jsonReplacementsToExpressions } =
replaceJsonExpressions(sql);
replaceJsonExpressions(sqlWithoutSettingsClause);
const parser = new SQLParser.Parser();
// eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion -- astify returns union type
const ast = parser.astify(sqlWithReplacements, {

View file

@ -383,7 +383,7 @@ async function tryOptimizeConfig<C extends ChartConfigWithOptDateRange>(
clickhouseClient: BaseClickhouseClient,
signal: AbortSignal | undefined,
mvConfig: MaterializedViewConfiguration,
sourceFrom: TSource['from'],
source: Omit<TSource, 'connection'>, // for overlap with ISource type
) {
const errors: string[] = [];
// Attempt to optimize any CTEs that exist in the config
@ -393,8 +393,8 @@ async function tryOptimizeConfig<C extends ChartConfigWithOptDateRange>(
config.with.map(async cte => {
if (
cte.chartConfig &&
cte.chartConfig.from.databaseName === sourceFrom.databaseName &&
cte.chartConfig.from.tableName === sourceFrom.tableName
cte.chartConfig.from.databaseName === source.from.databaseName &&
cte.chartConfig.from.tableName === source.from.tableName
) {
return tryConvertConfigToMaterializedViewSelect(
cte.chartConfig,
@ -433,8 +433,8 @@ async function tryOptimizeConfig<C extends ChartConfigWithOptDateRange>(
// Attempt to optimize the main (outer) select
if (
config.from.databaseName === sourceFrom.databaseName &&
config.from.tableName === sourceFrom.tableName
config.from.databaseName === source.from.databaseName &&
config.from.tableName === source.from.tableName
) {
const convertedOuterSelect = await tryConvertConfigToMaterializedViewSelect(
optimizedConfig ?? config,
@ -460,6 +460,7 @@ async function tryOptimizeConfig<C extends ChartConfigWithOptDateRange>(
opts: {
abort_signal: signal,
},
querySettings: source.querySettings,
});
if (error) {
@ -486,7 +487,7 @@ export async function tryOptimizeConfigWithMaterializedViewWithExplanations<
metadata: Metadata,
clickhouseClient: BaseClickhouseClient,
signal: AbortSignal | undefined,
source: Pick<TSource, 'from'> & Partial<Pick<TSource, 'materializedViews'>>,
source: Omit<TSource, 'connection'>, // for overlap with ISource type
): Promise<{
optimizedConfig?: C;
explanations: MVOptimizationExplanation[];
@ -500,7 +501,7 @@ export async function tryOptimizeConfigWithMaterializedViewWithExplanations<
clickhouseClient,
signal,
mvConfig,
source.from,
source,
).then(result => ({ ...result, mvConfig })),
),
);
@ -540,7 +541,7 @@ export async function tryOptimizeConfigWithMaterializedView<
metadata: Metadata,
clickhouseClient: BaseClickhouseClient,
signal: AbortSignal | undefined,
source: Pick<TSource, 'from'> & Partial<Pick<TSource, 'materializedViews'>>,
source: Omit<TSource, 'connection'>, // for overlap with ISource type
) {
const { optimizedConfig } =
await tryOptimizeConfigWithMaterializedViewWithExplanations(
@ -653,6 +654,7 @@ export async function optimizeGetKeyValuesCalls<
config,
metadata,
opts: { abort_signal: signal },
querySettings: source?.querySettings,
});
return {
id: toMvId({

View file

@ -12,7 +12,12 @@ import {
tableExpr,
} from '@/clickhouse';
import { renderChartConfig } from '@/core/renderChartConfig';
import type { ChartConfig, ChartConfigWithDateRange, TSource } from '@/types';
import type {
ChartConfig,
ChartConfigWithDateRange,
QuerySettings,
TSource,
} from '@/types';
import { optimizeGetKeyValuesCalls } from './materializedViews';
import { objectHash } from './utils';
@ -701,11 +706,13 @@ export class Metadata {
key,
samples = 100_000,
limit = 100,
source,
}: {
chartConfig: ChartConfigWithDateRange;
key: string;
samples?: number;
limit?: number;
source: TSource | undefined;
}) {
const cacheKeyConfig = pick(chartConfig, [
'connection',
@ -746,7 +753,11 @@ export class Metadata {
limit: { limit },
};
const sql = await renderChartConfig(config, this);
const sql = await renderChartConfig(
config,
this,
source?.querySettings,
);
const json = await this.clickhouseClient
.query<'JSON'>({
@ -785,12 +796,16 @@ export class Metadata {
limit = 20,
disableRowLimit = false,
signal,
source,
}: {
chartConfig: ChartConfigWithDateRange;
keys: string[];
limit?: number;
disableRowLimit?: boolean;
signal?: AbortSignal;
source:
| Omit<TSource, 'connection'> /* for overlap with ISource type */
| undefined;
}): Promise<{ key: string; value: string[] }[]> {
const cacheKeyConfig = {
...pick(chartConfig, [
@ -856,7 +871,11 @@ export class Metadata {
};
})();
const sql = await renderChartConfig(sqlConfig, this);
const sql = await renderChartConfig(
sqlConfig,
this,
source?.querySettings,
);
const json = await this.clickhouseClient
.query<'JSON'>({
@ -898,7 +917,7 @@ export class Metadata {
}: {
chartConfig: ChartConfigWithDateRange;
keys: string[];
source?: TSource;
source: TSource | undefined;
limit?: number;
disableRowLimit?: boolean;
signal?: AbortSignal;
@ -940,6 +959,7 @@ export class Metadata {
limit,
disableRowLimit,
signal,
source,
}),
),
);

View file

@ -8,8 +8,11 @@ import { Metadata } from '@/core/metadata';
import {
convertDateRangeToGranularityString,
convertGranularityToSeconds,
extractSettingsClauseFromEnd,
getFirstTimestampValueExpression,
joinQuerySettings,
optimizeTimestampValueExpression,
parseToNumber,
parseToStartOfFunction,
splitAndTrimWithBracket,
} from '@/core/utils';
@ -24,6 +27,7 @@ import {
ChSqlSchema,
CteChartConfig,
MetricsDataType,
QuerySettings,
SearchCondition,
SearchConditionLanguage,
SelectList,
@ -164,9 +168,12 @@ const fastifySQL = ({
}) => {
// Parse the SQL AST
try {
// Remove the SETTINGS clause because `SQLParser` doesn't understand it.
const [rawSqlWithoutSettingsClause] = extractSettingsClauseFromEnd(rawSQL);
const parser = new SQLParser.Parser();
// eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion -- astify returns union type, we expect Select
const ast = parser.astify(rawSQL, {
const ast = parser.astify(rawSqlWithoutSettingsClause, {
database: 'Postgresql',
}) as SQLParser.Select;
@ -916,9 +923,21 @@ function renderLimit(
return chSql`${{ Int32: chartConfig.limit.limit }}${offset}`;
}
/**
 * Build the body of the SETTINGS clause by combining the per-chart
 * settings (`chartConfig.settings`) with the source-level query
 * settings. Chart-level settings are emitted first; empty pieces are
 * dropped by `concatChSql`.
 */
function renderSettings(
  chartConfig: ChartConfigWithOptDateRangeEx,
  querySettings: QuerySettings | undefined,
) {
  const chartLevelSettings = chartConfig.settings ?? '';
  const sourceLevelSettings = joinQuerySettings(querySettings) ?? '';
  return concatChSql(', ', [
    chSql`${chartLevelSettings}`,
    chSql`${sourceLevelSettings}`,
  ]);
}
// includedDataInterval isn't exported at this time. It's only used internally
// for metric SQL generation.
type ChartConfigWithOptDateRangeEx = ChartConfigWithOptDateRange & {
export type ChartConfigWithOptDateRangeEx = ChartConfigWithOptDateRange & {
includedDataInterval?: string;
settings?: ChSql;
};
@ -926,6 +945,7 @@ type ChartConfigWithOptDateRangeEx = ChartConfigWithOptDateRange & {
async function renderWith(
chartConfig: ChartConfigWithOptDateRangeEx,
metadata: Metadata,
querySettings: QuerySettings | undefined,
): Promise<ChSql | undefined> {
const { with: withClauses } = chartConfig;
if (withClauses) {
@ -972,8 +992,12 @@ async function renderWith(
// results in schema conformance.
const resolvedSql = sql
? sql
: // eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion -- intentional, see comment above
await renderChartConfig(chartConfig as ChartConfig, metadata);
: await renderChartConfig(
// eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion -- intentional, see comment above
chartConfig as ChartConfig,
metadata,
querySettings,
);
if (clause.isSubquery === false) {
return chSql`(${resolvedSql}) AS ${{ Identifier: clause.name }}`;
@ -1339,8 +1363,9 @@ async function translateMetricChartConfig(
}
export async function renderChartConfig(
rawChartConfig: ChartConfigWithOptDateRange,
rawChartConfig: ChartConfigWithOptDateRangeEx,
metadata: Metadata,
querySettings: QuerySettings | undefined,
): Promise<ChSql> {
// metric types require more rewriting since we know more about the schema
// but goes through the same generation process
@ -1348,7 +1373,7 @@ export async function renderChartConfig(
? await translateMetricChartConfig(rawChartConfig, metadata)
: rawChartConfig;
const withClauses = await renderWith(chartConfig, metadata);
const withClauses = await renderWith(chartConfig, metadata, querySettings);
const select = await renderSelect(chartConfig, metadata);
const from = renderFrom(chartConfig);
const where = await renderWhere(chartConfig, metadata);
@ -1357,6 +1382,7 @@ export async function renderChartConfig(
const orderBy = renderOrderBy(chartConfig);
//const fill = renderFill(chartConfig); //TODO: Fill breaks heatmaps and some charts
const limit = renderLimit(chartConfig);
const settings = renderSettings(chartConfig, querySettings);
return concatChSql(' ', [
chSql`${withClauses?.sql ? chSql`WITH ${withClauses}` : ''}`,
@ -1368,8 +1394,9 @@ export async function renderChartConfig(
chSql`${orderBy?.sql ? chSql`ORDER BY ${orderBy}` : ''}`,
//chSql`${fill?.sql ? chSql`WITH FILL ${fill}` : ''}`,
chSql`${limit?.sql ? chSql`LIMIT ${limit}` : ''}`,
// eslint-disable-next-line @typescript-eslint/no-unsafe-type-assertion -- settings type narrowing
chSql`${'settings' in chartConfig ? chSql`SETTINGS ${chartConfig.settings as ChSql}` : []}`,
// SETTINGS must be last - see `extractSettingsClause` in "./utils.ts"
chSql`${settings.sql ? chSql`SETTINGS ${settings}` : []}`,
]);
}

View file

@ -13,6 +13,7 @@ import {
DashboardSchema,
DashboardTemplateSchema,
DashboardWithoutId,
QuerySettings,
SQLInterval,
TileTemplateSchema,
TSourceUnion,
@ -693,3 +694,56 @@ export function isDateRangeEqual(range1: [Date, Date], range2: [Date, Date]) {
range1[1].getTime() === range2[1].getTime()
);
}
/**
 * Splits a SQL string into the query body and its trailing SETTINGS clause.
 *
 * ClickHouse only allows SETTINGS at the end of a SELECT, and
 * `node-sql-parser` cannot parse it, so we strip it before parsing.
 *
 * @param sqlInput raw SQL, optionally ending with a semicolon
 * @returns `[sqlWithoutSettings, settingsClause | undefined]`; the clause
 *          (when present) starts with the SETTINGS keyword itself.
 */
export function extractSettingsClauseFromEnd(
  sqlInput: string,
): [string, string | undefined] {
  // Normalize: drop surrounding whitespace and a single trailing semicolon.
  let sql = sqlInput.trim();
  if (sql.endsWith(';')) {
    sql = sql.slice(0, -1).trimEnd();
  }
  // Find the LAST standalone SETTINGS keyword, case-insensitively. The word
  // boundary prevents false hits on identifiers such as `settings_col`, and
  // taking the last occurrence matches the "clause is at the end" contract.
  const matches = Array.from(sql.matchAll(/\bSETTINGS\b/gi));
  const last = matches[matches.length - 1];
  if (last?.index === undefined) {
    return [sql, undefined] as const;
  }
  const remaining = sql.slice(0, last.index).trim();
  const settingsClause = sql.slice(last.index).trim();
  return [remaining, settingsClause] as const;
}
/**
 * Parses a string into a finite number.
 *
 * @param input candidate numeric text (surrounding whitespace is ignored)
 * @returns the parsed number, or `undefined` for blank input, NaN, or ±Infinity
 */
export function parseToNumber(input: string): number | undefined {
  const candidate = input.trim();
  if (candidate.length === 0) {
    return undefined;
  }
  const parsed = Number(candidate);
  if (!Number.isFinite(parsed)) {
    return undefined;
  }
  return parsed;
}
/**
 * Renders user-configured query settings as the body of a ClickHouse
 * SETTINGS clause, e.g. `max_threads = 4, join_algorithm = 'hash'`.
 *
 * @param querySettings setting/value pairs from the source config, if any
 * @returns the comma-joined clause body, or `undefined` when there is
 *          nothing to render (an all-blank list yields `''`, which callers
 *          treat the same as "no settings")
 */
export function joinQuerySettings(
  querySettings: QuerySettings | undefined,
): string | undefined {
  if (!querySettings?.length) {
    return undefined;
  }
  // Skip half-filled rows (blank name or blank value) so we never emit
  // fragments like `max_threads = `.
  const complete = querySettings.filter(
    ({ setting, value }) => setting.length > 0 && value.length > 0,
  );
  const formattedPairs = complete.map(({ setting, value }) => {
    // Numeric-looking values are emitted bare.
    const numeric = parseToNumber(value);
    if (numeric !== undefined) {
      return `${setting} = ${numeric}`;
    }
    // Everything else becomes a single-quoted ClickHouse string literal.
    // Escape backslashes and quotes so a value containing `'` cannot break
    // out of the literal and corrupt (or inject into) the query.
    const escaped = value.replace(/\\/g, '\\\\').replace(/'/g, "\\'");
    return `${setting} = '${escaped}'`;
  });
  return formattedPairs.join(', ');
}

View file

@ -603,6 +603,12 @@ export enum SourceKind {
// TABLE SOURCE FORM VALIDATION
// --------------------------
// Per-source ClickHouse query settings, stored as ordered name/value pairs.
const QuerySettingsSchema = z
  .object({ setting: z.string(), value: z.string() })
  .array();

export type QuerySettings = z.infer<typeof QuerySettingsSchema>;
// Base schema with fields common to all source types
const SourceBaseSchema = z.object({
id: z.string(),
@ -613,6 +619,7 @@ const SourceBaseSchema = z.object({
databaseName: z.string().min(1, 'Database is required'),
tableName: z.string().min(1, 'Table is required'),
}),
querySettings: QuerySettingsSchema.optional(),
});
const RequiredTimestampColumnSchema = z