diff --git a/.github/workflows/skywalking.yaml b/.github/workflows/skywalking.yaml index 4cc6fc8448f5..e39032a5380d 100644 --- a/.github/workflows/skywalking.yaml +++ b/.github/workflows/skywalking.yaml @@ -1019,12 +1019,12 @@ jobs: found=false for i in {1..60}; do # check if segment files exist - if docker exec $CONTAINER_ID sh -c '[ -n "$(ls /tmp/measure-data/measure/data/day/seg* 2>/dev/null)" ]'; then + if docker exec $CONTAINER_ID sh -c '[ -n "$(ls /tmp/measure-data/measure/data/metricsDay/seg* 2>/dev/null)" ]'; then echo "✅ found segment files" sleep 180 # create and copy files docker cp $CONTAINER_ID:/tmp ${BANYANDB_DATA_GENERATE_ROOT} - docker cp $CONTAINER_ID:/tmp/measure-data/measure/data/index ${BANYANDB_DATA_GENERATE_ROOT} + docker cp $CONTAINER_ID:/tmp/measure-data/measure/data/metadata ${BANYANDB_DATA_GENERATE_ROOT} found=true break else diff --git a/docs/en/changes/changes.md b/docs/en/changes/changes.md index baca7d1b9477..f8665bc3fb88 100644 --- a/docs/en/changes/changes.md +++ b/docs/en/changes/changes.md @@ -22,6 +22,8 @@ * Adapt the mesh metrics if detect the ambient mesh in the eBPF access log receiver. * Add JSON format support for the `/debugging/config/dump` status API. * Enhance status APIs to support multiple `accept` header values, e.g. `Accept: application/json; charset=utf-8`. +* Storage: separate `SpanAttachedEventRecord` for SkyWalking trace and Zipkin trace. +* [Break Change]BanyanDB: Setup new Group policy. #### UI diff --git a/docs/en/setup/backend/configuration-vocabulary.md b/docs/en/setup/backend/configuration-vocabulary.md index 60c715066267..187973b13fa7 100644 --- a/docs/en/setup/backend/configuration-vocabulary.md +++ b/docs/en/setup/backend/configuration-vocabulary.md @@ -380,94 +380,142 @@ If the `cold` stage is enabled and `warm` stage is disabled, the data will be mo If both `warm` and `cold` stages are enabled, the data will be moved to the `warm` stage after the TTL of the `hot` stage, and then to the `cold` stage after the TTL of the `warm` stage. OAP will query the data from the "hot and warm" stage by default if the "warm" stage is enabled. -| Group | Settings | Stage Settings | Value(s) and Explanation | System Environment Variable¹ | Default | -|---------------|-----------------|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------|-----------| -| recordsNormal | - | - | The group for records not specified in `super`. Each dataset will be grouped under a single group named `normal`. | - | - | -| - | shardNum | - | Shards Number for normal records group. | SW_STORAGE_BANYANDB_GR_NORMAL_SHARD_NUM | 1 | -| - | segmentInterval | - | Segment Interval Days for normal records group. | SW_STORAGE_BANYANDB_GR_NORMAL_SI_DAYS | 1 | -| - | ttl | - | TTL Days for normal records group. | SW_STORAGE_BANYANDB_GR_NORMAL_TTL_DAYS | 3 | -| - | enableWarmStage | - | Activate warm stage for normal records group. | SW_STORAGE_BANYANDB_GR_NORMAL_ENABLE_WARM_STAGE | false | -| - | enableColdStage | - | Activate cold stage for normal records group. | SW_STORAGE_BANYANDB_GR_NORMAL_ENABLE_COLD_STAGE | false | -| - | warm | - | The warm stage settings. | - | - | -| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_WARM_SHARD_NUM | 1 | -| - | - | segmentInterval | Segment Interval Days for this stage. 
| SW_STORAGE_BANYANDB_GR_NORMAL_WARM_SI_DAYS | 2 | -| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_WARM_TTL_DAYS | 7 | -| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_WARM_NODE_SELECTOR | type=warm | -| - | cold | - | The cold stage settings. | - | - | -| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_COLD_SHARD_NUM | 1 | -| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_COLD_SI_DAYS | 3 | -| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_COLD_TTL_DAYS | 30 | -| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_COLD_NODE_SELECTOR | type=cold | -| recordsSuper | - | - | `super` is a special dataset designed to store trace or log data that is too large for normal datasets.Each super dataset will be a separate group in BanyanDB, following the settings defined in the "super" section. | - | - | -| - | shardNum | - | Shards Number for super records group. | SW_STORAGE_BANYANDB_GR_SUPER_SHARD_NUM | 2 | -| - | segmentInterval | - | Segment Interval Days for super records group. | SW_STORAGE_BANYANDB_GR_SUPER_SI_DAYS | 1 | -| - | ttl | - | TTL Days for super records group. | SW_STORAGE_BANYANDB_GR_SUPER_TTL_DAYS | 3 | -| - | enableWarmStage | - | Activate warm stage for super records group. | SW_STORAGE_BANYANDB_GR_SUPER_ENABLE_WARM_STAGE | false | -| - | enableColdStage | - | Activate cold stage for super records group. | SW_STORAGE_BANYANDB_GR_SUPER_ENABLE_COLD_STAGE | false | -| - | warm | - | The warm stage settings. | - | - | -| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GR_SUPER_WARM_SHARD_NUM | 2 | -| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_SUPER_WARM_SI_DAYS | 1 | -| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_SUPER_WARM_TTL_DAYS | 7 | -| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_SUPER_WARM_NODE_SELECTOR | type=warm | -| - | cold | - | The cold stage settings. | - | - | -| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GR_SUPER_COLD_SHARD_NUM | 2 | -| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_SUPER_COLD_SI_DAYS | 1 | -| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_SUPER_COLD_TTL_DAYS | 30 | -| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_SUPER_COLD_NODE_SELECTOR | type=cold | -| metricsMin | - | - | The group for minute granularity metrics group. | - | - | -| - | shardNum | - | Shards Number for minute granularity metrics group. | SW_STORAGE_BANYANDB_GM_MINUTE_SHARD_NUM | 2 | -| - | segmentInterval | - | Segment Interval Days for minute granularity metrics group. | SW_STORAGE_BANYANDB_GM_MINUTE_SI_DAYS | 1 | -| - | ttl | - | TTL Days for minute granularity metrics group. | SW_STORAGE_BANYANDB_GM_MINUTE_TTL_DAYS | 7 | -| - | enableWarmStage | - | Activate warm stage for minute granularity metrics group. | SW_STORAGE_BANYANDB_GM_MINUTE_ENABLE_WARM_STAGE | false | -| - | enableColdStage | - | Activate cold stage for minute granularity metrics group. | SW_STORAGE_BANYANDB_GM_MINUTE_ENABLE_COLD_STAGE | false | -| - | warm | - | The warm stage settings. | - | - | -| - | - | shardNum | Shards Number for this stage. 
| SW_STORAGE_BANYANDB_GM_MINUTE_WARM_SHARD_NUM | 2 | -| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_WARM_SI_DAYS | 3 | -| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_WARM_TTL_DAYS | 15 | -| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_WARM_NODE_SELECTOR | type=warm | -| - | cold | - | The cold stage settings. | - | - | -| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_COLD_SHARD_NUM | 2 | -| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_COLD_SI_DAYS | 5 | -| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_COLD_TTL_DAYS | 60 | -| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_COLD_NODE_SELECTOR | type=cold | -| metricsHour | - | - | The group for hour granularity metrics. | - | - | -| - | shardNum | - | Shards Number for hour granularity metrics group. | SW_STORAGE_BANYANDB_GM_HOUR_SHARD_NUM | 1 | -| - | segmentInterval | - | Segment Interval Days for hour granularity metrics group. | SW_STORAGE_BANYANDB_GM_HOUR_SI_DAYS | 5 | -| - | ttl | - | TTL Days for hour granularity metrics group. | SW_STORAGE_BANYANDB_GM_HOUR_TTL_DAYS | 15 | -| - | enableWarmStage | - | Activate warm stage for hour granularity metrics group. | SW_STORAGE_BANYANDB_GM_HOUR_ENABLE_WARM_STAGE | false | -| - | enableColdStage | - | Activate cold stage for hour granularity metrics group. | SW_STORAGE_BANYANDB_GM_HOUR_ENABLE_COLD_STAGE | false | -| - | warm | - | The warm stage settings. | - | - | -| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_WARM_SHARD_NUM | 1 | -| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_WARM_SI_DAYS | 7 | -| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_WARM_TTL_DAYS | 30 | -| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_WARM_NODE_SELECTOR | type=warm | -| - | cold | - | The cold stage settings. | - | - | -| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_COLD_SHARD_NUM | 1 | -| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_COLD_SI_DAYS | 15 | -| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_COLD_TTL_DAYS | 120 | -| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_COLD_NODE_SELECTOR | type=cold | -| metricsDay | - | - | The group for day granularity metrics. | - | - | -| - | shardNum | - | Shards Number for day granularity metrics group. | SW_STORAGE_BANYANDB_GM_DAY_SHARD_NUM | 1 | -| - | segmentInterval | - | Segment Interval Days for day granularity metrics group. | SW_STORAGE_BANYANDB_GM_DAY_SI_DAYS | 15 | -| - | ttl | - | TTL Days for day granularity metrics group. | SW_STORAGE_BANYANDB_GM_DAY_TTL_DAYS | 15 | -| - | enableWarmStage | - | Activate warm stage for day granularity metrics group. | SW_STORAGE_BANYANDB_GM_DAY_ENABLE_WARM_STAGE | false | -| - | enableColdStage | - | Activate cold stage for day granularity metrics group. | SW_STORAGE_BANYANDB_GM_DAY_ENABLE_COLD_STAGE | false | -| - | warm | - | The warm stage settings. | - | - | -| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GM_DAY_WARM_SHARD_NUM | 1 | -| - | - | segmentInterval | Segment Interval Days for this stage. 
| SW_STORAGE_BANYANDB_GM_DAY_WARM_SI_DAYS | 15 | -| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_DAY_WARM_TTL_DAYS | 30 | -| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_DAY_WARM_NODE_SELECTOR | type=warm | -| - | cold | - | The cold stage settings. | - | - | -| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GM_DAY_COLD_SHARD_NUM | 1 | -| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_DAY_COLD_SI_DAYS | 15 | -| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_DAY_COLD_TTL_DAYS | 120 | -| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_DAY_COLD_NODE_SELECTOR | type=cold | -| metadata | - | - | The `index` group is designed to store metrics that are used for indexing without value columns. Such as `service_traffic`, `network_address_alias`, etc. Since BanyanDB *0.8.0*. | - | - | -| - | shardNum | - | Shards Number for metadata `index` group. | SW_STORAGE_BANYANDB_GM_INDEX_SHARD_NUM | 2 | -| - | segmentInterval | - | Segment Interval Days for metadata `index` group. | SW_STORAGE_BANYANDB_GM_INDEX_SI_DAYS | 15 | -| - | ttl | - | TTL Days for metadata `index` group. | SW_STORAGE_BANYANDB_GM_INDEX_TTL_DAYS | 15 | -| property | - | - | The group settings of property, such as UI and profiling. | - | - | -| - | shardNum | - | Shards Number for property group. | SW_STORAGE_BANYANDB_GP_PROPERTY_SHARD_NUM | 1 | +| Group | Settings | Stage Settings | Value(s) and Explanation | System Environment Variable¹ | Default | +|---------------------------|-----------------|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------|-----------| +| recordsNormal | - | - | The group for records not specified in `super`. Each dataset will be grouped under a single group named `normal`. | - | - | +| - | shardNum | - | Shards Number for normal records group. | SW_STORAGE_BANYANDB_GR_NORMAL_SHARD_NUM | 1 | +| - | segmentInterval | - | Segment Interval Days for normal records group. | SW_STORAGE_BANYANDB_GR_NORMAL_SI_DAYS | 1 | +| - | ttl | - | TTL Days for normal records group. | SW_STORAGE_BANYANDB_GR_NORMAL_TTL_DAYS | 3 | +| - | enableWarmStage | - | Activate warm stage for normal records group. | SW_STORAGE_BANYANDB_GR_NORMAL_ENABLE_WARM_STAGE | false | +| - | enableColdStage | - | Activate cold stage for normal records group. | SW_STORAGE_BANYANDB_GR_NORMAL_ENABLE_COLD_STAGE | false | +| - | warm | - | The warm stage settings. | - | - | +| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_WARM_SHARD_NUM | 1 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_WARM_SI_DAYS | 2 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_WARM_TTL_DAYS | 7 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_WARM_NODE_SELECTOR | type=warm | +| - | cold | - | The cold stage settings. | - | - | +| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_COLD_SHARD_NUM | 1 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_COLD_SI_DAYS | 3 | +| - | - | ttl | TTL Days for this stage. 
| SW_STORAGE_BANYANDB_GR_NORMAL_COLD_TTL_DAYS | 30 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_NORMAL_COLD_NODE_SELECTOR | type=cold | +| recordsTrace | - | - | | - | - | +| - | shardNum | - | Shards Number for SkyWalking trace records group. | SW_STORAGE_BANYANDB_GR_TRACE_SHARD_NUM | 2 | +| - | segmentInterval | - | Segment Interval Days. | SW_STORAGE_BANYANDB_GR_TRACE_SI_DAYS | 1 | +| - | ttl | - | TTL Days. | SW_STORAGE_BANYANDB_GR_TRACE_TTL_DAYS | 3 | +| - | enableWarmStage | - | Activate warm stage. | SW_STORAGE_BANYANDB_GR_TRACE_ENABLE_WARM_STAGE | false | +| - | enableColdStage | - | Activate cold stage. | SW_STORAGE_BANYANDB_GR_TRACE_ENABLE_COLD_STAGE | false | +| - | warm | - | The warm stage settings. | - | - | +| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GR_TRACE_WARM_SHARD_NUM | 2 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_TRACE_WARM_SI_DAYS | 1 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_TRACE_WARM_TTL_DAYS | 7 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_TRACE_WARM_NODE_SELECTOR | type=warm | +| - | cold | - | The cold stage settings. | - | - | +| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GR_TRACE_COLD_SHARD_NUM | 2 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_TRACE_COLD_SI_DAYS | 1 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_TRACE_COLD_TTL_DAYS | 30 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_TRACE_COLD_NODE_SELECTOR | type=cold | +| recordsZipkinTrace | - | - | | - | - | +| - | shardNum | - | Shards Number for Zipkin trace records group. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_SHARD_NUM | 2 | +| - | segmentInterval | - | Segment Interval Days. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_SI_DAYS | 1 | +| - | ttl | - | TTL Days. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_TTL_DAYS | 3 | +| - | enableWarmStage | - | Activate warm stage. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_ENABLE_WARM_STAGE | false | +| - | enableColdStage | - | Activate cold stage. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_ENABLE_COLD_STAGE | false | +| - | warm | - | The warm stage settings. | - | - | +| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_SHARD_NUM | 2 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_SI_DAYS | 1 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_TTL_DAYS | 7 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_NODE_SELECTOR | type=warm | +| - | cold | - | The cold stage settings. | - | - | +| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_SHARD_NUM | 2 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_SI_DAYS | 1 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_TTL_DAYS | 30 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_NODE_SELECTOR | type=cold | +| recordsLog | - | - | | - | - | +| - | shardNum | - | Shards Number. | SW_STORAGE_BANYANDB_GR_LOG_SHARD_NUM | 2 | +| - | segmentInterval | - | Segment Interval Days. 
| SW_STORAGE_BANYANDB_GR_LOG_SI_DAYS | 1 | +| - | ttl | - | TTL Days. | SW_STORAGE_BANYANDB_GR_LOG_TTL_DAYS | 3 | +| - | enableWarmStage | - | Activate warm stage. | SW_STORAGE_BANYANDB_GR_LOG_ENABLE_WARM_STAGE | false | +| - | enableColdStage | - | Activate cold stage. | SW_STORAGE_BANYANDB_GR_LOG_ENABLE_COLD_STAGE | false | +| - | warm | - | The warm stage settings. | - | - | +| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GR_LOG_WARM_SHARD_NUM | 2 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_LOG_WARM_SI_DAYS | 1 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_LOG_WARM_TTL_DAYS | 7 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_LOG_WARM_NODE_SELECTOR | type=warm | +| - | cold | - | The cold stage settings. | - | - | +| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GR_LOG_COLD_SHARD_NUM | 2 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_LOG_COLD_SI_DAYS | 1 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_LOG_COLD_TTL_DAYS | 30 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_LOG_COLD_NODE_SELECTOR | type=cold | +| recordsBrowserErrorLog | - | - | | - | - | +| - | shardNum | - | Shards Number. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_SHARD_NUM | 2 | +| - | segmentInterval | - | Segment Interval Days. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_SI_DAYS | 1 | +| - | ttl | - | TTL Days. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_TTL_DAYS | 3 | +| - | enableWarmStage | - | Activate warm stage. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_ENABLE_WARM_STAGE | false | +| - | enableColdStage | - | Activate cold stage. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_ENABLE_COLD_STAGE | false | +| - | warm | - | The warm stage settings. | - | - | +| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_SHARD_NUM | 2 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_SI_DAYS | 1 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_TTL_DAYS | 7 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_NODE_SELECTOR | type=warm | +| - | cold | - | The cold stage settings. | - | - | +| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_SHARD_NUM | 2 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_SI_DAYS | 1 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_TTL_DAYS | 30 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_NODE_SELECTOR | type=cold | +| metricsMin | - | - | The group for minute granularity metrics group. | - | - | +| - | shardNum | - | Shards Number for minute granularity metrics group. | SW_STORAGE_BANYANDB_GM_MINUTE_SHARD_NUM | 2 | +| - | segmentInterval | - | Segment Interval Days for minute granularity metrics group. | SW_STORAGE_BANYANDB_GM_MINUTE_SI_DAYS | 1 | +| - | ttl | - | TTL Days for minute granularity metrics group. | SW_STORAGE_BANYANDB_GM_MINUTE_TTL_DAYS | 7 | +| - | enableWarmStage | - | Activate warm stage for minute granularity metrics group. 
| SW_STORAGE_BANYANDB_GM_MINUTE_ENABLE_WARM_STAGE | false | +| - | enableColdStage | - | Activate cold stage for minute granularity metrics group. | SW_STORAGE_BANYANDB_GM_MINUTE_ENABLE_COLD_STAGE | false | +| - | warm | - | The warm stage settings. | - | - | +| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_WARM_SHARD_NUM | 2 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_WARM_SI_DAYS | 3 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_WARM_TTL_DAYS | 15 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_WARM_NODE_SELECTOR | type=warm | +| - | cold | - | The cold stage settings. | - | - | +| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_COLD_SHARD_NUM | 2 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_COLD_SI_DAYS | 5 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_COLD_TTL_DAYS | 60 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_MINUTE_COLD_NODE_SELECTOR | type=cold | +| metricsHour | - | - | The group for hour granularity metrics. | - | - | +| - | shardNum | - | Shards Number for hour granularity metrics group. | SW_STORAGE_BANYANDB_GM_HOUR_SHARD_NUM | 1 | +| - | segmentInterval | - | Segment Interval Days for hour granularity metrics group. | SW_STORAGE_BANYANDB_GM_HOUR_SI_DAYS | 5 | +| - | ttl | - | TTL Days for hour granularity metrics group. | SW_STORAGE_BANYANDB_GM_HOUR_TTL_DAYS | 15 | +| - | enableWarmStage | - | Activate warm stage for hour granularity metrics group. | SW_STORAGE_BANYANDB_GM_HOUR_ENABLE_WARM_STAGE | false | +| - | enableColdStage | - | Activate cold stage for hour granularity metrics group. | SW_STORAGE_BANYANDB_GM_HOUR_ENABLE_COLD_STAGE | false | +| - | warm | - | The warm stage settings. | - | - | +| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_WARM_SHARD_NUM | 1 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_WARM_SI_DAYS | 7 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_WARM_TTL_DAYS | 30 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_WARM_NODE_SELECTOR | type=warm | +| - | cold | - | The cold stage settings. | - | - | +| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_COLD_SHARD_NUM | 1 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_COLD_SI_DAYS | 15 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_COLD_TTL_DAYS | 120 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_HOUR_COLD_NODE_SELECTOR | type=cold | +| metricsDay | - | - | The group for day granularity metrics. | - | - | +| - | shardNum | - | Shards Number for day granularity metrics group. | SW_STORAGE_BANYANDB_GM_DAY_SHARD_NUM | 1 | +| - | segmentInterval | - | Segment Interval Days for day granularity metrics group. | SW_STORAGE_BANYANDB_GM_DAY_SI_DAYS | 15 | +| - | ttl | - | TTL Days for day granularity metrics group. | SW_STORAGE_BANYANDB_GM_DAY_TTL_DAYS | 15 | +| - | enableWarmStage | - | Activate warm stage for day granularity metrics group. 
| SW_STORAGE_BANYANDB_GM_DAY_ENABLE_WARM_STAGE | false | +| - | enableColdStage | - | Activate cold stage for day granularity metrics group. | SW_STORAGE_BANYANDB_GM_DAY_ENABLE_COLD_STAGE | false | +| - | warm | - | The warm stage settings. | - | - | +| - | - | shardNum | Shards Number for this stage. | SW_STORAGE_BANYANDB_GM_DAY_WARM_SHARD_NUM | 1 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_DAY_WARM_SI_DAYS | 15 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_DAY_WARM_TTL_DAYS | 30 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_DAY_WARM_NODE_SELECTOR | type=warm | +| - | cold | - | The cold stage settings. | - | - | +| - | - | shardNum | Shards Number for for this stage. | SW_STORAGE_BANYANDB_GM_DAY_COLD_SHARD_NUM | 1 | +| - | - | segmentInterval | Segment Interval Days for this stage. | SW_STORAGE_BANYANDB_GM_DAY_COLD_SI_DAYS | 15 | +| - | - | ttl | TTL Days for this stage. | SW_STORAGE_BANYANDB_GM_DAY_COLD_TTL_DAYS | 120 | +| - | - | nodeSelector | Specifying target nodes for this stage. | SW_STORAGE_BANYANDB_GM_DAY_COLD_NODE_SELECTOR | type=cold | +| metadata | - | - | The `metadata` group is designed to store metrics that are used for indexing without value columns. Such as `service_traffic`, `network_address_alias`, etc. Since BanyanDB *0.8.0*. | - | - | +| - | shardNum | - | Shards Number for metadata `index` group. | SW_STORAGE_BANYANDB_GM_METADATA_SHARD_NUM | 2 | +| - | segmentInterval | - | Segment Interval Days for metadata `index` group. | SW_STORAGE_BANYANDB_GM_METADATA_SI_DAYS | 15 | +| - | ttl | - | TTL Days for metadata `index` group. | SW_STORAGE_BANYANDB_GM_METADATA_TTL_DAYS | 15 | +| property | - | - | The group settings of property, such as UI and profiling. | - | - | +| - | shardNum | - | Shards Number for property group. | SW_STORAGE_BANYANDB_GP_PROPERTY_SHARD_NUM | 1 | ## Note diff --git a/docs/en/setup/backend/storages/banyandb.md b/docs/en/setup/backend/storages/banyandb.md index 614a841af1c5..12c492b51b52 100644 --- a/docs/en/setup/backend/storages/banyandb.md +++ b/docs/en/setup/backend/storages/banyandb.md @@ -32,21 +32,6 @@ storage: Since 10.2.0, the banyandb configuration is separated to an independent configuration file: `bydb.yaml`: ```yaml -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - global: # Targets is the list of BanyanDB servers, separated by commas. # Each target is a BanyanDB server in the format of `host:port`. @@ -82,9 +67,15 @@ global: groups: # The group settings of record. + # - "ShardNum": Number of shards in the group. Shards are the basic units of data storage in BanyanDB. Data is distributed across shards based on the hash value of the series ID. 
+ # Refer to the [BanyanDB Shard](https://skywalking.apache.org/docs/skywalking-banyandb/latest/concept/clustering/#52-data-sharding) documentation for more details. + # - "SIDays": Interval in days for creating a new segment. Segments are time-based, allowing efficient data retention and querying. `SI` stands for Segment Interval. + # - "TTLDays": Time-to-live for the data in the group, in days. Data exceeding the TTL will be deleted. # - # The "normal" section defines settings for datasets not specified in "super". - # Each dataset will be grouped under a single group named "normal". + # For more details on setting `segmentIntervalDays` and `ttlDays`, refer to the [BanyanDB TTL](https://skywalking.apache.org/docs/main/latest/en/banyandb/ttl) documentation. + + # The "recordsNormal" section defines settings for datasets not specified in records. + # Each dataset will be grouped under a single group named "recordsNormal". recordsNormal: # The settings for the default "hot" stage. shardNum: ${SW_STORAGE_BANYANDB_GR_NORMAL_SHARD_NUM:1} @@ -108,24 +99,72 @@ groups: segmentInterval: ${SW_STORAGE_BANYANDB_GR_NORMAL_COLD_SI_DAYS:3} ttl: ${SW_STORAGE_BANYANDB_GR_NORMAL_COLD_TTL_DAYS:30} nodeSelector: ${SW_STORAGE_BANYANDB_GR_NORMAL_COLD_NODE_SELECTOR:"type=cold"} - # "super" is a special dataset designed to store trace or log data that is too large for normal datasets. - # Each super dataset will be a separate group in BanyanDB, following the settings defined in the "super" section. - recordsSuper: - shardNum: ${SW_STORAGE_BANYANDB_GR_SUPER_SHARD_NUM:2} - segmentInterval: ${SW_STORAGE_BANYANDB_GR_SUPER_SI_DAYS:1} - ttl: ${SW_STORAGE_BANYANDB_GR_SUPER_TTL_DAYS:3} - enableWarmStage: ${SW_STORAGE_BANYANDB_GR_SUPER_ENABLE_WARM_STAGE:false} - enableColdStage: ${SW_STORAGE_BANYANDB_GR_SUPER_ENABLE_COLD_STAGE:false} + # The group settings of super datasets. + # Super datasets are used to store trace or log data that is too large for normal datasets. 
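+  # Each super dataset below has a dedicated group: recordsTrace (SkyWalking traces), recordsZipkinTrace,
+  # recordsLog, and recordsBrowserErrorLog. Shards, segment interval, and TTL can be tuned per group.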
+ recordsTrace: + shardNum: ${SW_STORAGE_BANYANDB_GR_TRACE_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_TRACE_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_TRACE_TTL_DAYS:3} + enableWarmStage: ${SW_STORAGE_BANYANDB_GR_TRACE_ENABLE_WARM_STAGE:false} + enableColdStage: ${SW_STORAGE_BANYANDB_GR_TRACE_ENABLE_COLD_STAGE:false} + warm: + shardNum: ${SW_STORAGE_BANYANDB_GR_TRACE_WARM_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_TRACE_WARM_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_TRACE_WARM_TTL_DAYS:7} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_TRACE_WARM_NODE_SELECTOR:"type=warm"} + cold: + shardNum: ${SW_STORAGE_BANYANDB_GR_TRACE_COLD_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_TRACE_COLD_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_TRACE_COLD_TTL_DAYS:30} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_TRACE_COLD_NODE_SELECTOR:"type=cold"} + recordsZipkinTrace: + shardNum: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_TTL_DAYS:3} + enableWarmStage: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_ENABLE_WARM_STAGE:false} + enableColdStage: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_ENABLE_COLD_STAGE:false} + warm: + shardNum: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_TTL_DAYS:7} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_NODE_SELECTOR:"type=warm"} + cold: + shardNum: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_TTL_DAYS:30} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_NODE_SELECTOR:"type=cold"} + recordsLog: + shardNum: ${SW_STORAGE_BANYANDB_GR_LOG_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_LOG_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_LOG_TTL_DAYS:3} + enableWarmStage: ${SW_STORAGE_BANYANDB_GR_LOG_ENABLE_WARM_STAGE:false} + enableColdStage: ${SW_STORAGE_BANYANDB_GR_LOG_ENABLE_COLD_STAGE:false} + warm: + shardNum: ${SW_STORAGE_BANYANDB_GR_LOG_WARM_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_LOG_WARM_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_LOG_WARM_TTL_DAYS:7} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_LOG_WARM_NODE_SELECTOR:"type=warm"} + cold: + shardNum: ${SW_STORAGE_BANYANDB_GR_LOG_COLD_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_LOG_COLD_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_LOG_COLD_TTL_DAYS:30} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_LOG_COLD_NODE_SELECTOR:"type=cold"} + recordsBrowserErrorLog: + shardNum: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_TTL_DAYS:3} + enableWarmStage: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_ENABLE_WARM_STAGE:false} + enableColdStage: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_ENABLE_COLD_STAGE:false} warm: - shardNum: ${SW_STORAGE_BANYANDB_GR_SUPER_WARM_SHARD_NUM:2} - segmentInterval: ${SW_STORAGE_BANYANDB_GR_SUPER_WARM_SI_DAYS:1} - ttl: ${SW_STORAGE_BANYANDB_GR_SUPER_WARM_TTL_DAYS:7} - nodeSelector: ${SW_STORAGE_BANYANDB_GR_SUPER_WARM_NODE_SELECTOR:"type=warm"} + shardNum: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_SI_DAYS:1} + ttl: 
${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_TTL_DAYS:7} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_NODE_SELECTOR:"type=warm"} cold: - shardNum: ${SW_STORAGE_BANYANDB_GR_SUPER_COLD_SHARD_NUM:2} - segmentInterval: ${SW_STORAGE_BANYANDB_GR_SUPER_COLD_SI_DAYS:1} - ttl: ${SW_STORAGE_BANYANDB_GR_SUPER_COLD_TTL_DAYS:30} - nodeSelector: ${SW_STORAGE_BANYANDB_GR_SUPER_COLD_NODE_SELECTOR:"type=cold"} + shardNum: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_TTL_DAYS:30} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_NODE_SELECTOR:"type=cold"} # The group settings of metrics. # # OAP stores metrics based its granularity. @@ -180,14 +219,14 @@ groups: segmentInterval: ${SW_STORAGE_BANYANDB_GM_DAY_COLD_SI_DAYS:15} ttl: ${SW_STORAGE_BANYANDB_GM_DAY_COLD_TTL_DAYS:120} nodeSelector: ${SW_STORAGE_BANYANDB_GM_DAY_COLD_NODE_SELECTOR:"type=cold"} - # If the metrics is marked as "index_mode", the metrics will be stored in the "index" group. - # The "index" group is designed to store metrics that are used for indexing without value columns. + # If the metrics is marked as "index_mode", the metrics will be stored in the "metadata" group. + # The "metadata" group is designed to store metrics that are used for indexing without value columns. # Such as `service_traffic`, `network_address_alias`, etc. # "index_mode" requires BanyanDB *0.8.0* or later. metadata: - shardNum: ${SW_STORAGE_BANYANDB_GM_INDEX_SHARD_NUM:2} - segmentInterval: ${SW_STORAGE_BANYANDB_GM_INDEX_SI_DAYS:15} - ttl: ${SW_STORAGE_BANYANDB_GM_INDEX_TTL_DAYS:15} + shardNum: ${SW_STORAGE_BANYANDB_GM_METADATA_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GM_METADATA_SI_DAYS:15} + ttl: ${SW_STORAGE_BANYANDB_GM_METADATA_TTL_DAYS:15} # The group settings of property, such as UI and profiling. property: @@ -232,30 +271,4 @@ docker run -d \ - **Cluster Mode**: Suitable for large-scale deployments. - **Configuration**: `targets` is the IP address/hostname and port of the `liaison` nodes, separated by commas. `Liaison` nodes are the entry points of the BanyanDB cluster. -### Group Settings - -BanyanDB supports **group settings** to configure storage groups, shards, segment intervals, and TTL (Time-To-Live). The group settings file is a YAML file required when using BanyanDB as the storage. - -#### Basic Group Settings - -- `ShardNum`: Number of shards in the group. Shards are the basic units of data storage in BanyanDB. Data is distributed across shards based on the hash value of the series ID. Refer to the [BanyanDB Shard](https://skywalking.apache.org/docs/skywalking-banyandb/latest/concept/clustering/#52-data-sharding) documentation for more details. -- `SIDays`: Interval in days for creating a new segment. Segments are time-based, allowing efficient data retention and querying. `SI` stands for Segment Interval. -- `TTLDays`: Time-to-live for the data in the group, in days. Data exceeding the TTL will be deleted. - -For more details on setting `segmentIntervalDays` and `ttlDays`, refer to the [BanyanDB TTL](../../../banyandb/ttl.md) documentation. - -#### Record Group Settings - -The `gr` prefix is used for record group settings. The `normal` and `super` sections are used to define settings for normal and super datasets, respectively. - -Super datasets are used to store trace or log data that is too large for normal datasets. 
Each super dataset is stored in a separate group in BanyanDB. The settings defined in the `super` section are applied to all super datasets. - -Normal datasets are stored in a single group named `normal`. The settings defined in the `normal` section are applied to all normal datasets. - -#### Metrics Group Settings - -The `gm` prefix is used for metrics group settings. The `minute`, `hour`, and `day` sections are used to define settings for metrics stored based on granularity. - -The `index` group is designed to store metrics used for indexing without value columns. For example, `service_traffic`, `network_address_alias`, etc. - For more details, refer to the documentation of [BanyanDB](https://skywalking.apache.org/docs/skywalking-banyandb/latest/readme/) and the [BanyanDB Java Client](https://github.com/apache/skywalking-banyandb-java-client) subprojects. diff --git a/docs/en/status/query_ttl_setup.md b/docs/en/status/query_ttl_setup.md index f4c4a26d8b7e..f5fadd700229 100644 --- a/docs/en/status/query_ttl_setup.md +++ b/docs/en/status/query_ttl_setup.md @@ -13,6 +13,7 @@ This API is used to get the unified and effective TTL configurations. ```shell > curl -X GET "http://127.0.0.1:12800/status/config/ttl" # Metrics TTL includes the definition of the TTL of the metrics-ish data in the storage, +# Metrics TTL includes the definition of the TTL of the metrics-ish data in the storage, # e.g. # 1. The metadata of the service, instance, endpoint, topology map, etc. # 2. Generated metrics data from OAL and MAL engines. @@ -34,12 +35,17 @@ metrics.day.cold=-1 # Super dataset of records are traces and logs, which volume should be much larger. # # Cover hot and warm data for BanyanDB. -records.default=3 -records.superDataset=3 +records.normal=3 +records.trace=10 +records.zipkinTrace=3 +records.log=3 +records.browserErrorLog=3 # Cold data, '-1' represents no cold stage data. -records.default.cold=-1 -records.superDataset.cold=-1 - +records.normal.cold=-1 +records.trace.cold=30 +records.zipkinTrace.cold=-1 +records.log.cold=-1 +records.browserErrorLog.cold=-1 ``` This API also provides the response in JSON format, which is more friendly for programmatic usage. 
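For instance, a minimal sketch (assuming `jq` is available and the OAP status endpoint shown above is reachable) that reads the per-group record TTLs from the JSON response:

```shell
# Fetch the effective TTL configuration as JSON and extract the SkyWalking trace record TTLs.
# Assumes the OAP status endpoint is at 127.0.0.1:12800 and `jq` is installed.
curl -s -H "Accept: application/json" "http://127.0.0.1:12800/status/config/ttl" \
  | jq '{traceTTL: .records.trace, traceColdTTL: .records.coldTrace}'
```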
@@ -49,19 +55,25 @@ This API also provides the response in JSON format, which is more friendly for p -H "Accept: application/json" { - "metrics": { - "minute": 7, - "hour": 15, - "day": 15, - "coldMinute": -1, - "coldHour": -1, - "coldDay": -1 - }, - "records": { - "default": 3, - "superDataset": 3, - "coldValue": -1, - "coldSuperDataset": -1 - } + "metrics": { + "minute": 7, + "hour": 15, + "day": 15, + "coldMinute": -1, + "coldHour": -1, + "coldDay": -1 + }, + "records": { + "normal": 3, + "trace": 10, + "zipkinTrace": 3, + "log": 3, + "browserErrorLog": 3, + "coldNormal": -1, + "coldTrace": 30, + "coldZipkinTrace": -1, + "coldLog": -1, + "coldBrowserErrorLog": -1 + } } ``` diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/log/LogRecord.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/log/LogRecord.java index bd16c46f5288..890ab5fc7bf8 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/log/LogRecord.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/log/LogRecord.java @@ -36,6 +36,7 @@ @Stream(name = LogRecord.INDEX_NAME, scopeId = DefaultScopeDefine.LOG, builder = LogRecord.Builder.class, processor = RecordStreamProcessor.class) @SQLDatabase.ExtraColumn4AdditionalEntity(additionalTable = AbstractLogRecord.ADDITIONAL_TAG_TABLE, parentColumn = TIME_BUCKET) @BanyanDB.TimestampColumn(AbstractLogRecord.TIMESTAMP) +@BanyanDB.Group(streamGroup = BanyanDB.StreamGroup.RECORDS_LOG) public class LogRecord extends AbstractLogRecord { public static final String INDEX_NAME = "log"; diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/segment/SegmentRecord.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/segment/SegmentRecord.java index 41595a9063c7..8bc3e60c0e64 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/segment/SegmentRecord.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/segment/SegmentRecord.java @@ -43,6 +43,7 @@ @Stream(name = SegmentRecord.INDEX_NAME, scopeId = DefaultScopeDefine.SEGMENT, builder = SegmentRecord.Builder.class, processor = RecordStreamProcessor.class) @SQLDatabase.ExtraColumn4AdditionalEntity(additionalTable = SegmentRecord.ADDITIONAL_TAG_TABLE, parentColumn = TIME_BUCKET) @BanyanDB.TimestampColumn(SegmentRecord.START_TIME) +@BanyanDB.Group(streamGroup = BanyanDB.StreamGroup.RECORDS_TRACE) public class SegmentRecord extends Record { public static final String INDEX_NAME = "segment"; diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/spanattach/SWSpanAttachedEventRecord.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/spanattach/SWSpanAttachedEventRecord.java new file mode 100644 index 000000000000..0593b1b49a49 --- /dev/null +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/spanattach/SWSpanAttachedEventRecord.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.skywalking.oap.server.core.analysis.manual.spanattach; + +import lombok.Getter; +import lombok.Setter; +import org.apache.skywalking.oap.server.core.analysis.Stream; +import org.apache.skywalking.oap.server.core.analysis.record.Record; +import org.apache.skywalking.oap.server.core.analysis.worker.RecordStreamProcessor; +import org.apache.skywalking.oap.server.core.source.ScopeDeclaration; +import org.apache.skywalking.oap.server.core.storage.StorageID; +import org.apache.skywalking.oap.server.core.storage.annotation.BanyanDB; +import org.apache.skywalking.oap.server.core.storage.annotation.Column; +import org.apache.skywalking.oap.server.core.storage.annotation.ElasticSearch; +import org.apache.skywalking.oap.server.core.storage.type.Convert2Entity; +import org.apache.skywalking.oap.server.core.storage.type.Convert2Storage; +import org.apache.skywalking.oap.server.core.storage.type.StorageBuilder; + +import static org.apache.skywalking.oap.server.core.source.DefaultScopeDefine.SW_SPAN_ATTACHED_EVENT; + +@Setter +@Getter +@ScopeDeclaration(id = SW_SPAN_ATTACHED_EVENT, name = "SWSpanAttachedEvent") +@Stream(name = SWSpanAttachedEventRecord.INDEX_NAME, scopeId = SW_SPAN_ATTACHED_EVENT, builder = SWSpanAttachedEventRecord.Builder.class, processor = RecordStreamProcessor.class) +@BanyanDB.TimestampColumn(SWSpanAttachedEventRecord.TIMESTAMP) +@BanyanDB.Group(streamGroup = BanyanDB.StreamGroup.RECORDS_TRACE) +public class SWSpanAttachedEventRecord extends Record { + + public static final String INDEX_NAME = "sw_span_attached_event_record"; + public static final String START_TIME_SECOND = "start_time_second"; + public static final String START_TIME_NANOS = "start_time_nanos"; + public static final String EVENT = "event"; + public static final String END_TIME_SECOND = "end_time_second"; + public static final String END_TIME_NANOS = "end_time_nanos"; + public static final String TRACE_REF_TYPE = "trace_ref_type"; + public static final String RELATED_TRACE_ID = "related_trace_id"; + public static final String TRACE_SEGMENT_ID = "trace_segment_id"; + public static final String TRACE_SPAN_ID = "trace_span_id"; + public static final String DATA_BINARY = "data_binary"; + public static final String TIMESTAMP = "timestamp"; + + @ElasticSearch.EnableDocValues + @Column(name = START_TIME_SECOND) + private long startTimeSecond; + @ElasticSearch.EnableDocValues + @Column(name = START_TIME_NANOS) + private int startTimeNanos; + @Column(name = EVENT) + @BanyanDB.SeriesID(index = 0) + private String event; + @Column(name = END_TIME_SECOND) + private long endTimeSecond; + @Column(name = END_TIME_NANOS) + private int endTimeNanos; + @Column(name = TRACE_REF_TYPE) + private int traceRefType; + @Column(name = RELATED_TRACE_ID) + private String relatedTraceId; + @Column(name = TRACE_SEGMENT_ID) + private String traceSegmentId; + @Column(name = TRACE_SPAN_ID) + private String traceSpanId; + @Column(name = 
DATA_BINARY, storageOnly = true) + private byte[] dataBinary; + @Setter + @Getter + @ElasticSearch.EnableDocValues + @Column(name = TIMESTAMP) + @BanyanDB.NoIndexing + private long timestamp; + + @Override + public StorageID id() { + return new StorageID() + .append(TRACE_SEGMENT_ID, traceSegmentId) + .append(START_TIME_SECOND, startTimeSecond) + .append(START_TIME_NANOS, startTimeNanos) + .append(EVENT, event); + } + + public static class Builder implements StorageBuilder { + @Override + public SWSpanAttachedEventRecord storage2Entity(Convert2Entity converter) { + final SWSpanAttachedEventRecord record = new SWSpanAttachedEventRecord(); + record.setStartTimeSecond(((Number) converter.get(START_TIME_SECOND)).longValue()); + record.setStartTimeNanos(((Number) converter.get(START_TIME_NANOS)).intValue()); + record.setEvent((String) converter.get(EVENT)); + record.setEndTimeSecond(((Number) converter.get(END_TIME_SECOND)).longValue()); + record.setEndTimeNanos(((Number) converter.get(END_TIME_NANOS)).intValue()); + record.setTraceRefType(((Number) converter.get(TRACE_REF_TYPE)).intValue()); + record.setRelatedTraceId((String) converter.get(RELATED_TRACE_ID)); + record.setTraceSegmentId((String) converter.get(TRACE_SEGMENT_ID)); + record.setTraceSpanId((String) converter.get(TRACE_SPAN_ID)); + record.setDataBinary(converter.getBytes(DATA_BINARY)); + record.setTimestamp(((Number) converter.get(TIMESTAMP)).longValue()); + return record; + } + + @Override + public void entity2Storage(SWSpanAttachedEventRecord entity, Convert2Storage converter) { + converter.accept(START_TIME_SECOND, entity.getStartTimeSecond()); + converter.accept(START_TIME_NANOS, entity.getStartTimeNanos()); + converter.accept(EVENT, entity.getEvent()); + converter.accept(END_TIME_SECOND, entity.getEndTimeSecond()); + converter.accept(END_TIME_NANOS, entity.getEndTimeNanos()); + converter.accept(TRACE_REF_TYPE, entity.getTraceRefType()); + converter.accept(RELATED_TRACE_ID, entity.getRelatedTraceId()); + converter.accept(TRACE_SEGMENT_ID, entity.getTraceSegmentId()); + converter.accept(TRACE_SPAN_ID, entity.getTraceSpanId()); + converter.accept(DATA_BINARY, entity.getDataBinary()); + converter.accept(TIMESTAMP, entity.getTimestamp()); + } + } +} diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/spanattach/SpanAttachedEventRecord.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/spanattach/SpanAttachedEventRecord.java index 24211cc5168b..fe15c873e98f 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/spanattach/SpanAttachedEventRecord.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/manual/spanattach/SpanAttachedEventRecord.java @@ -39,6 +39,7 @@ @ScopeDeclaration(id = SPAN_ATTACHED_EVENT, name = "SpanAttachedEvent") @Stream(name = SpanAttachedEventRecord.INDEX_NAME, scopeId = SPAN_ATTACHED_EVENT, builder = SpanAttachedEventRecord.Builder.class, processor = RecordStreamProcessor.class) @BanyanDB.TimestampColumn(SpanAttachedEventRecord.TIMESTAMP) +@BanyanDB.Group(streamGroup = BanyanDB.StreamGroup.RECORDS_ZIPKIN_TRACE) public class SpanAttachedEventRecord extends Record { public static final String INDEX_NAME = "span_attached_event_record"; diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/browser/manual/errorlog/BrowserErrorLogRecord.java 
b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/browser/manual/errorlog/BrowserErrorLogRecord.java index 8a0d00a72664..bfbae9b0ee42 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/browser/manual/errorlog/BrowserErrorLogRecord.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/browser/manual/errorlog/BrowserErrorLogRecord.java @@ -35,6 +35,7 @@ @SuperDataset @Stream(name = BrowserErrorLogRecord.INDEX_NAME, scopeId = DefaultScopeDefine.BROWSER_ERROR_LOG, builder = BrowserErrorLogRecord.Builder.class, processor = RecordStreamProcessor.class) @BanyanDB.TimestampColumn(BrowserErrorLogRecord.TIMESTAMP) +@BanyanDB.Group(streamGroup = BanyanDB.StreamGroup.RECORDS_BROWSER_ERROR_LOG) public class BrowserErrorLogRecord extends Record { public static final String INDEX_NAME = "browser_error_log"; public static final String UNIQUE_ID = "unique_id"; diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/TTLStatusQuery.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/TTLStatusQuery.java index 60ee8995e22e..e444329eea7e 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/TTLStatusQuery.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/TTLStatusQuery.java @@ -52,7 +52,7 @@ public TTLDefinition getTTL() { if (ttlDefinition == null) { ttlDefinition = new TTLDefinition( new MetricsTTL(coreMetricsDataTTL, coreMetricsDataTTL, coreMetricsDataTTL), - new RecordsTTL(coreRecordDataTTL, coreRecordDataTTL) + new RecordsTTL(coreRecordDataTTL, coreRecordDataTTL, coreRecordDataTTL, coreRecordDataTTL, coreRecordDataTTL) ); } return ttlDefinition; diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/TraceQueryService.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/TraceQueryService.java index 0aa466a729c6..0fd76a580ec0 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/TraceQueryService.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/TraceQueryService.java @@ -39,7 +39,7 @@ import org.apache.skywalking.oap.server.core.CoreModule; import org.apache.skywalking.oap.server.core.analysis.manual.searchtag.Tag; import org.apache.skywalking.oap.server.core.analysis.manual.segment.SegmentRecord; -import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventRecord; +import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SWSpanAttachedEventRecord; import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventTraceType; import org.apache.skywalking.oap.server.core.config.IComponentLibraryCatalogService; import org.apache.skywalking.oap.server.core.query.input.Duration; @@ -195,8 +195,8 @@ private Trace invokeQueryTrace(final String traceId, @Nullable final Duration du } if (CollectionUtils.isNotEmpty(sortedSpans)) { - final List spanAttachedEvents = getSpanAttachedEventQueryDAO(). - querySpanAttachedEventsDebuggable(SpanAttachedEventTraceType.SKYWALKING, Arrays.asList(traceId), duration); + final List spanAttachedEvents = getSpanAttachedEventQueryDAO(). 
+ querySWSpanAttachedEventsDebuggable(SpanAttachedEventTraceType.SKYWALKING, Arrays.asList(traceId), duration); appendAttachedEventsToSpanDebuggable(sortedSpans, spanAttachedEvents); } @@ -322,7 +322,7 @@ private void findChildren(List spans, Span parentSpan, List children }); } - private void appendAttachedEventsToSpanDebuggable(List spans, List events) throws InvalidProtocolBufferException { + private void appendAttachedEventsToSpanDebuggable(List spans, List events) throws InvalidProtocolBufferException { DebuggingTraceContext traceContext = DebuggingTraceContext.TRACE_CONTEXT.get(); DebuggingSpan debuggingSpan = null; try { @@ -338,7 +338,7 @@ private void appendAttachedEventsToSpanDebuggable(List spans, List spans, List events) throws InvalidProtocolBufferException { + private void appendAttachedEventsToSpan(List spans, List events) throws InvalidProtocolBufferException { if (CollectionUtils.isEmpty(events)) { return; } @@ -353,7 +353,7 @@ private void appendAttachedEventsToSpan(List spans, List spanMatcher = new HashMap<>(); - for (SpanAttachedEventRecord record : events) { + for (SWSpanAttachedEventRecord record : events) { if (!StringUtils.isNumeric(record.getTraceSpanId())) { continue; } diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/source/DefaultScopeDefine.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/source/DefaultScopeDefine.java index 61ec1edcd60b..13bb43524dc8 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/source/DefaultScopeDefine.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/source/DefaultScopeDefine.java @@ -150,6 +150,7 @@ public class DefaultScopeDefine { public static final int BROWSER_APP_WEB_VITALS_PAGE_PERF = 87; public static final int BROWSER_APP_RESOURCE_PERF = 88; public static final int BROWSER_APP_WEB_INTERACTION_PAGE_PERF = 89; + public static final int SW_SPAN_ATTACHED_EVENT = 90; /** * Catalog of scope, the metrics processor could use this to group all generated metrics by oal rt. diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/annotation/BanyanDB.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/annotation/BanyanDB.java index 6d3f875dc308..b797a55d6185 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/annotation/BanyanDB.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/annotation/BanyanDB.java @@ -24,6 +24,7 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; +import lombok.Getter; import org.apache.skywalking.oap.server.core.analysis.metrics.Metrics; import org.apache.skywalking.oap.server.core.analysis.record.Record; import org.apache.skywalking.oap.server.core.storage.StorageID; @@ -282,4 +283,52 @@ enum AnalyzerType { @Retention(RetentionPolicy.RUNTIME) @interface IndexMode { } + + @Target({ElementType.TYPE}) + @Retention(RetentionPolicy.RUNTIME) + @interface Group { + /** + * Specify the group name for the Stream (Record). The default value is "recordsNormal". 
+ */ + StreamGroup streamGroup() default StreamGroup.RECORDS_NORMAL; + } + + enum StreamGroup { + RECORDS_NORMAL("recordsNormal"), + RECORDS_TRACE("recordsTrace"), + RECORDS_ZIPKIN_TRACE("recordsZipkinTrace"), + RECORDS_LOG("recordsLog"), + RECORDS_BROWSER_ERROR_LOG("recordsBrowserErrorLog"); + + @Getter + private final String name; + + StreamGroup(final String name) { + this.name = name; + } + } + + enum MeasureGroup { + METRICS_MIN("metricsMin"), + METRICS_HOUR("metricsHour"), + METRICS_DAY("metricsDay"), + METADATA("metadata"); + @Getter + private final String name; + + MeasureGroup(final String name) { + this.name = name; + } + } + + enum PropertyGroup { + PROPERTY("property"); + + @Getter + private final String name; + + PropertyGroup(final String name) { + this.name = name; + } + } } diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/model/BanyanDBModelExtension.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/model/BanyanDBModelExtension.java index 198091d8428c..36a0e461f66d 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/model/BanyanDBModelExtension.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/model/BanyanDBModelExtension.java @@ -21,6 +21,7 @@ import lombok.Getter; import lombok.Setter; import org.apache.skywalking.oap.server.core.analysis.record.Record; +import org.apache.skywalking.oap.server.core.storage.annotation.BanyanDB; import java.util.List; @@ -87,4 +88,8 @@ public static class TopN { @Getter private List groupByTagNames; } + + @Setter + @Getter + private BanyanDB.StreamGroup streamGroup = BanyanDB.StreamGroup.RECORDS_NORMAL; } diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/model/StorageModels.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/model/StorageModels.java index e27eb73f4dd3..059f98936791 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/model/StorageModels.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/model/StorageModels.java @@ -107,6 +107,11 @@ public Model add(Class aClass, int scopeId, Storage storage) throws StorageEx banyanDBModelExtension.setIndexMode(true); } + if (aClass.isAnnotationPresent(BanyanDB.Group.class)) { + BanyanDB.StreamGroup streamGroup = aClass.getAnnotation(BanyanDB.Group.class).streamGroup(); + banyanDBModelExtension.setStreamGroup(streamGroup); + } + // Set routing rules for ElasticSearch elasticSearchModelExtension.setRouting(storage.getModelName(), modelColumns); diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/query/ISpanAttachedEventQueryDAO.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/query/ISpanAttachedEventQueryDAO.java index 8514f1780aaf..1de3d386b16d 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/query/ISpanAttachedEventQueryDAO.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/query/ISpanAttachedEventQueryDAO.java @@ -19,6 +19,8 @@ package org.apache.skywalking.oap.server.core.storage.query; import javax.annotation.Nullable; + +import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SWSpanAttachedEventRecord; import 
org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventRecord; import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventTraceType; import org.apache.skywalking.oap.server.core.query.input.Duration; @@ -33,20 +35,44 @@ public interface ISpanAttachedEventQueryDAO extends Service { /** * @param duration nullable unless for BanyanDB query from cold stage */ - default List querySpanAttachedEventsDebuggable(SpanAttachedEventTraceType type, List traceIds, @Nullable Duration duration) throws IOException { + default List querySWSpanAttachedEventsDebuggable(SpanAttachedEventTraceType type, List traceIds, @Nullable Duration duration) throws IOException { DebuggingTraceContext traceContext = DebuggingTraceContext.TRACE_CONTEXT.get(); DebuggingSpan span = null; try { StringBuilder builder = new StringBuilder(); if (traceContext != null) { - span = traceContext.createSpan("Query Dao: querySpanAttachedEvents"); - builder.append("Condition: Type: ") + span = traceContext.createSpan("Query Dao: querySWSpanAttachedEvents"); + builder.append("Condition: Span Type: ") .append(type) .append(", TraceIds: ") .append(traceIds); span.setMsg(builder.toString()); } - return querySpanAttachedEvents(type, traceIds, duration); + return querySWSpanAttachedEvents(traceIds, duration); + } finally { + if (traceContext != null && span != null) { + traceContext.stopSpan(span); + } + } + } + + /** + * @param duration nullable unless for BanyanDB query from cold stage + */ + default List queryZKSpanAttachedEventsDebuggable(SpanAttachedEventTraceType type, List traceIds, @Nullable Duration duration) throws IOException { + DebuggingTraceContext traceContext = DebuggingTraceContext.TRACE_CONTEXT.get(); + DebuggingSpan span = null; + try { + StringBuilder builder = new StringBuilder(); + if (traceContext != null) { + span = traceContext.createSpan("Query Dao: queryZKSpanAttachedEvents"); + builder.append("Condition: Span Type: ") + .append(type) + .append(", TraceIds: ") + .append(traceIds); + span.setMsg(builder.toString()); + } + return queryZKSpanAttachedEvents(traceIds, duration); } finally { if (traceContext != null && span != null) { traceContext.stopSpan(span); @@ -55,7 +81,14 @@ default List querySpanAttachedEventsDebuggable(SpanAtta } /** + * Query SkyWalking span attached events by trace ids. + * @param duration nullable unless for BanyanDB query from cold stage + */ + List querySWSpanAttachedEvents(List traceIds, @Nullable Duration duration) throws IOException; + + /** + * Query Zipkin span attached events by trace ids. 
* @param duration nullable unless for BanyanDB query from cold stage */ - List querySpanAttachedEvents(SpanAttachedEventTraceType type, List traceIds, @Nullable Duration duration) throws IOException; + List queryZKSpanAttachedEvents(List traceIds, @Nullable Duration duration) throws IOException; } diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/ttl/RecordsTTL.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/ttl/RecordsTTL.java index e7dc88646aa6..81e5b9a5cebe 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/ttl/RecordsTTL.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/ttl/RecordsTTL.java @@ -18,7 +18,6 @@ package org.apache.skywalking.oap.server.core.storage.ttl; -import com.google.gson.annotations.SerializedName; import lombok.Data; /** @@ -28,10 +27,15 @@ */ @Data public class RecordsTTL { - @SerializedName("default") - private final int value; - private final int superDataset; - // -1 means no cold stage. - private int coldValue = -1; - private int coldSuperDataset = -1; + private final int normal; + private final int trace; + private final int zipkinTrace; + private final int log; + private final int browserErrorLog; + + private int coldNormal = -1; + private int coldTrace = -1; + private int coldZipkinTrace = -1; + private int coldLog = -1; + private int coldBrowserErrorLog = -1; } diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/ttl/TTLDefinition.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/ttl/TTLDefinition.java index 4730ab0aee96..55c9c5fe8ff0 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/ttl/TTLDefinition.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/storage/ttl/TTLDefinition.java @@ -54,11 +54,17 @@ public String generateTTLDefinition() { ttlDefinition.append("# Super dataset of records are traces and logs, which volume should be much larger.\n"); ttlDefinition.append("#\n"); ttlDefinition.append("# Cover hot and warm data for BanyanDB.\n"); - ttlDefinition.append("records.default=").append(records.getValue()).append("\n"); - ttlDefinition.append("records.superDataset=").append(records.getSuperDataset()).append("\n"); + ttlDefinition.append("records.normal=").append(records.getNormal()).append("\n"); + ttlDefinition.append("records.trace=").append(records.getTrace()).append("\n"); + ttlDefinition.append("records.zipkinTrace=").append(records.getZipkinTrace()).append("\n"); + ttlDefinition.append("records.log=").append(records.getLog()).append("\n"); + ttlDefinition.append("records.browserErrorLog=").append(records.getBrowserErrorLog()).append("\n"); ttlDefinition.append("# Cold data, '-1' represents no cold stage data.\n"); - ttlDefinition.append("records.default.cold=").append(records.getColdValue()).append("\n"); - ttlDefinition.append("records.superDataset.cold=").append(records.getColdSuperDataset()).append("\n"); + ttlDefinition.append("records.normal.cold=").append(records.getColdNormal()).append("\n"); + ttlDefinition.append("records.trace.cold=").append(records.getColdTrace()).append("\n"); + ttlDefinition.append("records.zipkinTrace.cold=").append(records.getColdZipkinTrace()).append("\n"); + ttlDefinition.append("records.log.cold=").append(records.getColdLog()).append("\n"); + 
ttlDefinition.append("records.browserErrorLog.cold=").append(records.getColdBrowserErrorLog()).append("\n"); return ttlDefinition.toString(); } diff --git a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/zipkin/ZipkinSpanRecord.java b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/zipkin/ZipkinSpanRecord.java index b8b45b5b8114..d8760ab455d2 100644 --- a/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/zipkin/ZipkinSpanRecord.java +++ b/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/zipkin/ZipkinSpanRecord.java @@ -51,6 +51,7 @@ @Stream(name = ZipkinSpanRecord.INDEX_NAME, scopeId = DefaultScopeDefine.ZIPKIN_SPAN, builder = ZipkinSpanRecord.Builder.class, processor = RecordStreamProcessor.class) @SQLDatabase.ExtraColumn4AdditionalEntity(additionalTable = ZipkinSpanRecord.ADDITIONAL_QUERY_TABLE, parentColumn = TIME_BUCKET) @BanyanDB.TimestampColumn(ZipkinSpanRecord.TIMESTAMP_MILLIS) +@BanyanDB.Group(streamGroup = BanyanDB.StreamGroup.RECORDS_ZIPKIN_TRACE) public class ZipkinSpanRecord extends Record { private static final Gson GSON = new Gson(); public static final int QUERY_LENGTH = 256; diff --git a/oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol b/oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol index 23baed2234e4..e9d4f81bb2bd 160000 --- a/oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol +++ b/oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol @@ -1 +1 @@ -Subproject commit 23baed2234e4bbc18cd7ec7d47bfe7d4bc8ef363 +Subproject commit e9d4f81bb2bde6eb92bf7595c1257cc8d60470f5 diff --git a/oap-server/server-query-plugin/zipkin-query-plugin/src/main/java/org/apache/skywalking/oap/query/zipkin/handler/ZipkinQueryHandler.java b/oap-server/server-query-plugin/zipkin-query-plugin/src/main/java/org/apache/skywalking/oap/query/zipkin/handler/ZipkinQueryHandler.java index 456da749fd80..13050a14cfb7 100644 --- a/oap-server/server-query-plugin/zipkin-query-plugin/src/main/java/org/apache/skywalking/oap/query/zipkin/handler/ZipkinQueryHandler.java +++ b/oap-server/server-query-plugin/zipkin-query-plugin/src/main/java/org/apache/skywalking/oap/query/zipkin/handler/ZipkinQueryHandler.java @@ -191,7 +191,7 @@ public AggregatedHttpResponse getTraceById(@Param("traceId") String traceId) thr if (CollectionUtils.isEmpty(trace)) { return AggregatedHttpResponse.of(NOT_FOUND, ANY_TEXT_TYPE, traceId + " not found"); } - appendEventsDebuggable(trace, getSpanAttachedEventQueryDAO().querySpanAttachedEventsDebuggable( + appendEventsDebuggable(trace, getSpanAttachedEventQueryDAO().queryZKSpanAttachedEventsDebuggable( SpanAttachedEventTraceType.ZIPKIN, Arrays.asList(traceId), null)); return response(SpanBytesEncoder.JSON_V2.encodeList(trace)); } finally { @@ -364,7 +364,7 @@ private void appendEventsToTraces(List> traces) throws IOException { return; } - final List records = getSpanAttachedEventQueryDAO().querySpanAttachedEventsDebuggable(SpanAttachedEventTraceType.ZIPKIN, + final List records = getSpanAttachedEventQueryDAO().queryZKSpanAttachedEventsDebuggable(SpanAttachedEventTraceType.ZIPKIN, new ArrayList<>(traceIdWithSpans.keySet()), null); final Map> traceEvents = records.stream().collect(Collectors.groupingBy(SpanAttachedEventRecord::getRelatedTraceId)); for (Map.Entry> entry : traceEvents.entrySet()) { diff --git 
a/oap-server/server-receiver-plugin/skywalking-trace-receiver-plugin/src/main/java/org/apache/skywalking/oap/server/receiver/trace/provider/handler/v8/grpc/SpanAttachedEventReportServiceHandler.java b/oap-server/server-receiver-plugin/skywalking-trace-receiver-plugin/src/main/java/org/apache/skywalking/oap/server/receiver/trace/provider/handler/v8/grpc/SpanAttachedEventReportServiceHandler.java index 7d0dc591598f..4fac33ceeba7 100644 --- a/oap-server/server-receiver-plugin/skywalking-trace-receiver-plugin/src/main/java/org/apache/skywalking/oap/server/receiver/trace/provider/handler/v8/grpc/SpanAttachedEventReportServiceHandler.java +++ b/oap-server/server-receiver-plugin/skywalking-trace-receiver-plugin/src/main/java/org/apache/skywalking/oap/server/receiver/trace/provider/handler/v8/grpc/SpanAttachedEventReportServiceHandler.java @@ -25,6 +25,7 @@ import org.apache.skywalking.apm.network.language.agent.v3.SpanAttachedEvent; import org.apache.skywalking.apm.network.language.agent.v3.SpanAttachedEventReportServiceGrpc; import org.apache.skywalking.oap.server.core.analysis.TimeBucket; +import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SWSpanAttachedEventRecord; import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventRecord; import org.apache.skywalking.oap.server.core.analysis.worker.RecordStreamProcessor; import org.apache.skywalking.oap.server.library.module.ModuleManager; @@ -46,22 +47,44 @@ public void onNext(SpanAttachedEvent event) { log.debug("receive span attached event is streaming"); } - final SpanAttachedEventRecord record = new SpanAttachedEventRecord(); - record.setStartTimeSecond(event.getStartTime().getSeconds()); - record.setStartTimeNanos(event.getStartTime().getNanos()); - record.setEvent(event.getEvent()); - record.setEndTimeSecond(event.getEndTime().getSeconds()); - record.setEndTimeNanos(event.getEndTime().getNanos()); - record.setTraceRefType(event.getTraceContext().getTypeValue()); - record.setRelatedTraceId(event.getTraceContext().getTraceId()); - record.setTraceSegmentId(event.getTraceContext().getTraceSegmentId()); - record.setTraceSpanId(event.getTraceContext().getSpanId()); - record.setDataBinary(event.toByteArray()); - long timestamp = TimeUnit.SECONDS.toMillis(record.getStartTimeSecond()) - + TimeUnit.NANOSECONDS.toMillis(record.getStartTimeNanos()); - record.setTimeBucket(TimeBucket.getRecordTimeBucket(timestamp)); - record.setTimestamp(timestamp); - RecordStreamProcessor.getInstance().in(record); + switch (event.getTraceContext().getType()) { + case SKYWALKING: + final SWSpanAttachedEventRecord swRecord = new SWSpanAttachedEventRecord(); + swRecord.setStartTimeSecond(event.getStartTime().getSeconds()); + swRecord.setStartTimeNanos(event.getStartTime().getNanos()); + swRecord.setEvent(event.getEvent()); + swRecord.setEndTimeSecond(event.getEndTime().getSeconds()); + swRecord.setEndTimeNanos(event.getEndTime().getNanos()); + swRecord.setTraceRefType(event.getTraceContext().getTypeValue()); + swRecord.setRelatedTraceId(event.getTraceContext().getTraceId()); + swRecord.setTraceSegmentId(event.getTraceContext().getTraceSegmentId()); + swRecord.setTraceSpanId(event.getTraceContext().getSpanId()); + swRecord.setDataBinary(event.toByteArray()); + long timestamp = TimeUnit.SECONDS.toMillis(swRecord.getStartTimeSecond()) + + TimeUnit.NANOSECONDS.toMillis(swRecord.getStartTimeNanos()); + swRecord.setTimeBucket(TimeBucket.getRecordTimeBucket(timestamp)); + swRecord.setTimestamp(timestamp); + 
RecordStreamProcessor.getInstance().in(swRecord); + break; + case ZIPKIN: + final SpanAttachedEventRecord record = new SpanAttachedEventRecord(); + record.setStartTimeSecond(event.getStartTime().getSeconds()); + record.setStartTimeNanos(event.getStartTime().getNanos()); + record.setEvent(event.getEvent()); + record.setEndTimeSecond(event.getEndTime().getSeconds()); + record.setEndTimeNanos(event.getEndTime().getNanos()); + record.setTraceRefType(event.getTraceContext().getTypeValue()); + record.setRelatedTraceId(event.getTraceContext().getTraceId()); + record.setTraceSegmentId(event.getTraceContext().getTraceSegmentId()); + record.setTraceSpanId(event.getTraceContext().getSpanId()); + record.setDataBinary(event.toByteArray()); + long ts = TimeUnit.SECONDS.toMillis(record.getStartTimeSecond()) + + TimeUnit.NANOSECONDS.toMillis(record.getStartTimeNanos()); + record.setTimeBucket(TimeBucket.getRecordTimeBucket(ts)); + record.setTimestamp(ts); + RecordStreamProcessor.getInstance().in(record); + break; + } } @Override diff --git a/oap-server/server-starter/src/main/resources/bydb.yml b/oap-server/server-starter/src/main/resources/bydb.yml index 2899832266a1..a88116f2270a 100644 --- a/oap-server/server-starter/src/main/resources/bydb.yml +++ b/oap-server/server-starter/src/main/resources/bydb.yml @@ -48,9 +48,15 @@ global: groups: # The group settings of record. + # - "shardNum": Number of shards in the group. Shards are the basic units of data storage in BanyanDB. Data is distributed across shards based on the hash value of the series ID. + # Refer to the [BanyanDB Shard](https://skywalking.apache.org/docs/skywalking-banyandb/latest/concept/clustering/#52-data-sharding) documentation for more details. + # - "segmentInterval": Interval in days for creating a new segment. Segments are time-based, allowing efficient data retention and querying. The `SI` in the related environment variable names stands for Segment Interval. + # - "ttl": Time-to-live for the data in the group, in days. Data exceeding the TTL will be deleted. # - # The "normal" section defines settings for datasets not specified in "super". - # Each dataset will be grouped under a single group named "normal". + # For more details on setting `segmentInterval` and `ttl`, refer to the [BanyanDB TTL](https://skywalking.apache.org/docs/main/latest/en/banyandb/ttl) documentation. + + # The "recordsNormal" section defines settings for record datasets that do not belong to any of the dedicated groups below. + # Each such dataset will be grouped under a single group named "recordsNormal". recordsNormal: # The settings for the default "hot" stage. shardNum: ${SW_STORAGE_BANYANDB_GR_NORMAL_SHARD_NUM:1} @@ -74,24 +80,72 @@ groups: segmentInterval: ${SW_STORAGE_BANYANDB_GR_NORMAL_COLD_SI_DAYS:3} ttl: ${SW_STORAGE_BANYANDB_GR_NORMAL_COLD_TTL_DAYS:30} nodeSelector: ${SW_STORAGE_BANYANDB_GR_NORMAL_COLD_NODE_SELECTOR:"type=cold"} - # "super" is a special dataset designed to store trace or log data that is too large for normal datasets. - # Each super dataset will be a separate group in BanyanDB, following the settings defined in the "super" section. - recordsSuper: - shardNum: ${SW_STORAGE_BANYANDB_GR_SUPER_SHARD_NUM:2} - segmentInterval: ${SW_STORAGE_BANYANDB_GR_SUPER_SI_DAYS:1} - ttl: ${SW_STORAGE_BANYANDB_GR_SUPER_TTL_DAYS:3} - enableWarmStage: ${SW_STORAGE_BANYANDB_GR_SUPER_ENABLE_WARM_STAGE:false} - enableColdStage: ${SW_STORAGE_BANYANDB_GR_SUPER_ENABLE_COLD_STAGE:false} + # The following dedicated groups replace the former "super" dataset. + # Trace, Zipkin trace, log, and browser error log records are too large for the normal dataset, so each type is stored in its own group.
+ recordsTrace: + shardNum: ${SW_STORAGE_BANYANDB_GR_TRACE_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_TRACE_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_TRACE_TTL_DAYS:3} + enableWarmStage: ${SW_STORAGE_BANYANDB_GR_TRACE_ENABLE_WARM_STAGE:false} + enableColdStage: ${SW_STORAGE_BANYANDB_GR_TRACE_ENABLE_COLD_STAGE:false} + warm: + shardNum: ${SW_STORAGE_BANYANDB_GR_TRACE_WARM_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_TRACE_WARM_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_TRACE_WARM_TTL_DAYS:7} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_TRACE_WARM_NODE_SELECTOR:"type=warm"} + cold: + shardNum: ${SW_STORAGE_BANYANDB_GR_TRACE_COLD_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_TRACE_COLD_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_TRACE_COLD_TTL_DAYS:30} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_TRACE_COLD_NODE_SELECTOR:"type=cold"} + recordsZipkinTrace: + shardNum: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_TTL_DAYS:3} + enableWarmStage: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_ENABLE_WARM_STAGE:false} + enableColdStage: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_ENABLE_COLD_STAGE:false} + warm: + shardNum: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_TTL_DAYS:7} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_WARM_NODE_SELECTOR:"type=warm"} + cold: + shardNum: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_TTL_DAYS:30} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_ZIPKIN_TRACE_COLD_NODE_SELECTOR:"type=cold"} + recordsLog: + shardNum: ${SW_STORAGE_BANYANDB_GR_LOG_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_LOG_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_LOG_TTL_DAYS:3} + enableWarmStage: ${SW_STORAGE_BANYANDB_GR_LOG_ENABLE_WARM_STAGE:false} + enableColdStage: ${SW_STORAGE_BANYANDB_GR_LOG_ENABLE_COLD_STAGE:false} + warm: + shardNum: ${SW_STORAGE_BANYANDB_GR_LOG_WARM_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_LOG_WARM_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_LOG_WARM_TTL_DAYS:7} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_LOG_WARM_NODE_SELECTOR:"type=warm"} + cold: + shardNum: ${SW_STORAGE_BANYANDB_GR_LOG_COLD_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_LOG_COLD_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_LOG_COLD_TTL_DAYS:30} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_LOG_COLD_NODE_SELECTOR:"type=cold"} + recordsBrowserErrorLog: + shardNum: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_TTL_DAYS:3} + enableWarmStage: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_ENABLE_WARM_STAGE:false} + enableColdStage: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_ENABLE_COLD_STAGE:false} warm: - shardNum: ${SW_STORAGE_BANYANDB_GR_SUPER_WARM_SHARD_NUM:2} - segmentInterval: ${SW_STORAGE_BANYANDB_GR_SUPER_WARM_SI_DAYS:1} - ttl: ${SW_STORAGE_BANYANDB_GR_SUPER_WARM_TTL_DAYS:7} - nodeSelector: ${SW_STORAGE_BANYANDB_GR_SUPER_WARM_NODE_SELECTOR:"type=warm"} + shardNum: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_SI_DAYS:1} + ttl: 
${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_TTL_DAYS:7} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_WARM_NODE_SELECTOR:"type=warm"} cold: - shardNum: ${SW_STORAGE_BANYANDB_GR_SUPER_COLD_SHARD_NUM:2} - segmentInterval: ${SW_STORAGE_BANYANDB_GR_SUPER_COLD_SI_DAYS:1} - ttl: ${SW_STORAGE_BANYANDB_GR_SUPER_COLD_TTL_DAYS:30} - nodeSelector: ${SW_STORAGE_BANYANDB_GR_SUPER_COLD_NODE_SELECTOR:"type=cold"} + shardNum: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_SI_DAYS:1} + ttl: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_TTL_DAYS:30} + nodeSelector: ${SW_STORAGE_BANYANDB_GR_BROWSER_ERROR_LOG_COLD_NODE_SELECTOR:"type=cold"} # The group settings of metrics. # # OAP stores metrics based its granularity. @@ -146,14 +200,14 @@ groups: segmentInterval: ${SW_STORAGE_BANYANDB_GM_DAY_COLD_SI_DAYS:15} ttl: ${SW_STORAGE_BANYANDB_GM_DAY_COLD_TTL_DAYS:120} nodeSelector: ${SW_STORAGE_BANYANDB_GM_DAY_COLD_NODE_SELECTOR:"type=cold"} - # If the metrics is marked as "index_mode", the metrics will be stored in the "index" group. - # The "index" group is designed to store metrics that are used for indexing without value columns. + # If the metrics is marked as "index_mode", the metrics will be stored in the "metadata" group. + # The "metadata" group is designed to store metrics that are used for indexing without value columns. # Such as `service_traffic`, `network_address_alias`, etc. # "index_mode" requires BanyanDB *0.8.0* or later. metadata: - shardNum: ${SW_STORAGE_BANYANDB_GM_INDEX_SHARD_NUM:2} - segmentInterval: ${SW_STORAGE_BANYANDB_GM_INDEX_SI_DAYS:15} - ttl: ${SW_STORAGE_BANYANDB_GM_INDEX_TTL_DAYS:15} + shardNum: ${SW_STORAGE_BANYANDB_GM_METADATA_SHARD_NUM:2} + segmentInterval: ${SW_STORAGE_BANYANDB_GM_METADATA_SI_DAYS:15} + ttl: ${SW_STORAGE_BANYANDB_GM_METADATA_TTL_DAYS:15} # The group settings of property, such as UI and profiling. 
property: diff --git a/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBConfigLoader.java b/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBConfigLoader.java index 7e323d7ef5af..4bcbaa3322a7 100644 --- a/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBConfigLoader.java +++ b/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBConfigLoader.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.Properties; import lombok.extern.slf4j.Slf4j; +import org.apache.skywalking.oap.server.core.storage.annotation.BanyanDB; import org.apache.skywalking.oap.server.library.module.ModuleProvider; import org.apache.skywalking.oap.server.library.module.ModuleStartException; import org.apache.skywalking.oap.server.library.util.ResourceUtils; @@ -76,28 +77,49 @@ public BanyanDBStorageConfig loadConfig() throws ModuleStartException { ); copyStages(recordsNormal, config.getRecordsNormal()); - Properties recordsSupper = (Properties) groups.get("recordsSuper"); + Properties log = (Properties) groups.get(BanyanDB.StreamGroup.RECORDS_LOG.getName()); copyProperties( - config.getRecordsSuper(), recordsSupper, + config.getRecordsLog(), log, + moduleProvider.getModule().name(), moduleProvider.name() + ); + copyStages(log, config.getRecordsLog()); + + Properties segment = (Properties) groups.get(BanyanDB.StreamGroup.RECORDS_TRACE.getName()); + copyProperties( + config.getRecordsTrace(), segment, + moduleProvider.getModule().name(), moduleProvider.name() + ); + copyStages(segment, config.getRecordsTrace()); + + Properties zipkinSpan = (Properties) groups.get(BanyanDB.StreamGroup.RECORDS_ZIPKIN_TRACE.getName()); + copyProperties( + config.getRecordsZipkinTrace(), zipkinSpan, + moduleProvider.getModule().name(), moduleProvider.name() + ); + copyStages(zipkinSpan, config.getRecordsZipkinTrace()); + + Properties browserErrorLog = (Properties) groups.get(BanyanDB.StreamGroup.RECORDS_BROWSER_ERROR_LOG.getName()); + copyProperties( + config.getRecordsBrowserErrorLog(), browserErrorLog, moduleProvider.getModule().name(), moduleProvider.name() ); - copyStages(recordsSupper, config.getRecordsSuper()); + copyStages(browserErrorLog, config.getRecordsBrowserErrorLog()); - Properties metricsMin = (Properties) groups.get("metricsMin"); + Properties metricsMin = (Properties) groups.get(BanyanDB.MeasureGroup.METRICS_MIN.getName()); copyProperties( config.getMetricsMin(), metricsMin, moduleProvider.getModule().name(), moduleProvider.name() ); copyStages(metricsMin, config.getMetricsMin()); - Properties metricsHour = (Properties) groups.get("metricsHour"); + Properties metricsHour = (Properties) groups.get(BanyanDB.MeasureGroup.METRICS_HOUR.getName()); copyProperties( config.getMetricsHour(), metricsHour, moduleProvider.getModule().name(), moduleProvider.name() ); copyStages(metricsHour, config.getMetricsHour()); - Properties metricsDay = (Properties) groups.get("metricsDay"); + Properties metricsDay = (Properties) groups.get(BanyanDB.MeasureGroup.METRICS_DAY.getName()); copyProperties( config.getMetricsDay(), metricsDay, moduleProvider.getModule().name(), moduleProvider.name() @@ -105,11 +127,11 @@ public BanyanDBStorageConfig loadConfig() throws ModuleStartException { copyStages(metricsDay, 
config.getMetricsDay()); copyProperties( - config.getMetadata(), (Properties) groups.get("metadata"), + config.getMetadata(), (Properties) groups.get(BanyanDB.MeasureGroup.METADATA.getName()), moduleProvider.getModule().name(), moduleProvider.name() ); copyProperties( - config.getProperty(), (Properties) groups.get("property"), + config.getProperty(), (Properties) groups.get(BanyanDB.PropertyGroup.PROPERTY.getName()), moduleProvider.getModule().name(), moduleProvider.name() ); } catch (IllegalAccessException e) { diff --git a/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBStorageConfig.java b/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBStorageConfig.java index 38e9ce556d8f..574f32a17727 100644 --- a/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBStorageConfig.java +++ b/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBStorageConfig.java @@ -34,7 +34,11 @@ public class BanyanDBStorageConfig extends ModuleConfig { private Global global = new Global(); private RecordsNormal recordsNormal = new RecordsNormal(); - private RecordsSuper recordsSuper = new RecordsSuper(); + private RecordsTrace recordsTrace = new RecordsTrace(); + private RecordsZipkinTrace recordsZipkinTrace = new RecordsZipkinTrace(); + private RecordsLog recordsLog = new RecordsLog(); + private RecordsBrowserErrorLog recordsBrowserErrorLog = new RecordsBrowserErrorLog(); + private MetricsMin metricsMin = new MetricsMin(); private MetricsHour metricsHour = new MetricsHour(); private MetricsDay metricsDay = new MetricsDay(); @@ -142,13 +146,27 @@ public GroupResource() { public static class RecordsNormal extends BanyanDBStorageConfig.GroupResource { } - /** - * RecordsSuper is a special dataset designed to store trace or log data that is too large for normal datasets. - * Each super dataset will be a separate group in BanyanDB. - */ + // The former RecordsSuper dataset is split into dedicated groups: trace, Zipkin trace, log, and browser error log records are too large for the normal dataset. + // Each of these record types is stored as a separate group in BanyanDB. + + @Getter + @Setter + public static class RecordsLog extends BanyanDBStorageConfig.GroupResource { + } + + @Getter + @Setter + public static class RecordsTrace extends BanyanDBStorageConfig.GroupResource { + } + + @Getter + @Setter + public static class RecordsZipkinTrace extends BanyanDBStorageConfig.GroupResource { + } + @Getter @Setter - public static class RecordsSuper extends BanyanDBStorageConfig.GroupResource { + public static class RecordsBrowserErrorLog extends BanyanDBStorageConfig.GroupResource { } // The group settings of metrics.
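For reference, resolving the new stream group at runtime is a plain annotation lookup; the snippet below is a minimal sketch (the `StreamGroupResolutionSketch` class name is illustrative and not part of this change), mirroring how `StorageModels.add()` reads `@BanyanDB.Group` and how `MetadataRegistry` then maps the enum value onto the BanyanDB group of the same name.

```java
import org.apache.skywalking.oap.server.core.storage.annotation.BanyanDB;
import org.apache.skywalking.oap.server.core.zipkin.ZipkinSpanRecord;

// Illustrative only: how a stream model's BanyanDB group is resolved.
public class StreamGroupResolutionSketch {
    public static void main(String[] args) {
        Class<?> model = ZipkinSpanRecord.class; // annotated with RECORDS_ZIPKIN_TRACE in this change
        BanyanDB.StreamGroup group = model.isAnnotationPresent(BanyanDB.Group.class)
                ? model.getAnnotation(BanyanDB.Group.class).streamGroup()
                : BanyanDB.StreamGroup.RECORDS_NORMAL; // models without @BanyanDB.Group keep the default
        // MetadataRegistry uses this name to pick the matching group settings from bydb.yml.
        System.out.println(group.getName()); // prints "recordsZipkinTrace"
    }
}
```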
diff --git a/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBTTLStatusQuery.java b/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBTTLStatusQuery.java index 6a79a520dabe..205d10179dd7 100644 --- a/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBTTLStatusQuery.java +++ b/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/BanyanDBTTLStatusQuery.java @@ -25,10 +25,17 @@ public class BanyanDBTTLStatusQuery implements StorageTTLStatusQuery { private int grNormalTTLDays; + private int grTraceTTLDays; + private int grZipkinTraceTTLDays; + private int grLogTTLDays; + private int grBrowserErrorLogTTLDays; private int grSuperTTLDays; // -1 means no cold stage. private int grColdNormalTTLDays = -1; - private int grColdSuperTTLDays = -1; + private int grColdTraceTTLDays = -1; + private int grColdZipkinTraceTTLDays = -1; + private int grColdLogTTLDays = -1; + private int grColdBrowserErrorLogTTLDays = -1; private int gmMinuteTTLDays; private int gmHourTTLDays; private int gmDayTTLDays; @@ -38,7 +45,10 @@ public class BanyanDBTTLStatusQuery implements StorageTTLStatusQuery { public BanyanDBTTLStatusQuery(BanyanDBStorageConfig config) { grNormalTTLDays = config.getRecordsNormal().getTtl(); - grSuperTTLDays = config.getRecordsSuper().getTtl(); + grTraceTTLDays = config.getRecordsTrace().getTtl(); + grZipkinTraceTTLDays = config.getRecordsZipkinTrace().getTtl(); + grLogTTLDays = config.getRecordsLog().getTtl(); + grBrowserErrorLogTTLDays = config.getRecordsBrowserErrorLog().getTtl(); gmMinuteTTLDays = config.getMetricsMin().getTtl(); gmHourTTLDays = config.getMetricsHour().getTtl(); gmDayTTLDays = config.getMetricsDay().getTtl(); @@ -49,11 +59,32 @@ public BanyanDBTTLStatusQuery(BanyanDBStorageConfig config) { grColdNormalTTLDays = stage.getTtl(); } }); - config.getRecordsSuper().getAdditionalLifecycleStages().forEach(stage -> { + config.getRecordsTrace().getAdditionalLifecycleStages().forEach(stage -> { if (stage.getName().equals(BanyanDBStorageConfig.StageName.warm)) { - grSuperTTLDays = grSuperTTLDays + stage.getTtl(); + grTraceTTLDays = grTraceTTLDays + stage.getTtl(); } else if (stage.getName().equals(BanyanDBStorageConfig.StageName.cold)) { - grColdSuperTTLDays = stage.getTtl(); + grColdTraceTTLDays = stage.getTtl(); + } + }); + config.getRecordsZipkinTrace().getAdditionalLifecycleStages().forEach(stage -> { + if (stage.getName().equals(BanyanDBStorageConfig.StageName.warm)) { + grZipkinTraceTTLDays = grZipkinTraceTTLDays + stage.getTtl(); + } else if (stage.getName().equals(BanyanDBStorageConfig.StageName.cold)) { + grColdZipkinTraceTTLDays = stage.getTtl(); + } + }); + config.getRecordsLog().getAdditionalLifecycleStages().forEach(stage -> { + if (stage.getName().equals(BanyanDBStorageConfig.StageName.warm)) { + grLogTTLDays = grLogTTLDays + stage.getTtl(); + } else if (stage.getName().equals(BanyanDBStorageConfig.StageName.cold)) { + grColdLogTTLDays = stage.getTtl(); + } + }); + config.getRecordsBrowserErrorLog().getAdditionalLifecycleStages().forEach(stage -> { + if (stage.getName().equals(BanyanDBStorageConfig.StageName.warm)) { + grBrowserErrorLogTTLDays = grBrowserErrorLogTTLDays + stage.getTtl(); + } else if 
(stage.getName().equals(BanyanDBStorageConfig.StageName.cold)) { + grColdBrowserErrorLogTTLDays = stage.getTtl(); } }); config.getMetricsMin().getAdditionalLifecycleStages().forEach(stage -> { @@ -83,10 +114,13 @@ public BanyanDBTTLStatusQuery(BanyanDBStorageConfig config) { public TTLDefinition getTTL() { TTLDefinition definition = new TTLDefinition( new MetricsTTL(gmMinuteTTLDays, gmHourTTLDays, gmDayTTLDays), - new RecordsTTL(grNormalTTLDays, grSuperTTLDays) + new RecordsTTL(grNormalTTLDays, grTraceTTLDays, grZipkinTraceTTLDays, grLogTTLDays, grBrowserErrorLogTTLDays) ); - definition.getRecords().setColdValue(grColdNormalTTLDays); - definition.getRecords().setColdSuperDataset(grColdSuperTTLDays); + definition.getRecords().setColdNormal(grColdNormalTTLDays); + definition.getRecords().setColdTrace(grColdTraceTTLDays); + definition.getRecords().setColdZipkinTrace(grColdZipkinTraceTTLDays); + definition.getRecords().setColdLog(grColdLogTTLDays); + definition.getRecords().setColdBrowserErrorLog(grColdBrowserErrorLogTTLDays); definition.getMetrics().setColdMinute(gmColdMinuteTTLDays); definition.getMetrics().setColdHour(gmColdHourTTLDays); definition.getMetrics().setColdDay(gmColdDayTTLDays); diff --git a/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/MetadataRegistry.java b/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/MetadataRegistry.java index 4551f045571b..edb4b517a064 100644 --- a/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/MetadataRegistry.java +++ b/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/MetadataRegistry.java @@ -497,22 +497,53 @@ public SchemaMetadata parseMetadata(Model model, BanyanDBStorageConfig config, D return new SchemaMetadata(BanyanDBStorageConfig.PROPERTY_GROUP_NAME, model.getName(), Kind.PROPERTY, DownSampling.None, config.getProperty()); } if (model.isRecord()) { // stream - return new SchemaMetadata(model.isSuperDataset() ? model.getName() : "normal", - model.getName(), - Kind.STREAM, - model.getDownsampling(), - model.isSuperDataset() ? 
config.getRecordsSuper() : config.getRecordsNormal()); + BanyanDB.StreamGroup streamGroup = model.getBanyanDBModelExtension().getStreamGroup(); + switch (streamGroup) { + case RECORDS_LOG: + return new SchemaMetadata(BanyanDB.StreamGroup.RECORDS_LOG.getName(), + model.getName(), + Kind.STREAM, + model.getDownsampling(), + config.getRecordsLog()); + case RECORDS_TRACE: + return new SchemaMetadata(BanyanDB.StreamGroup.RECORDS_TRACE.getName(), + model.getName(), + Kind.STREAM, + model.getDownsampling(), + config.getRecordsTrace()); + case RECORDS_ZIPKIN_TRACE: + return new SchemaMetadata(BanyanDB.StreamGroup.RECORDS_ZIPKIN_TRACE.getName(), + model.getName(), + Kind.STREAM, + model.getDownsampling(), + config.getRecordsZipkinTrace()); + case RECORDS_BROWSER_ERROR_LOG: + return new SchemaMetadata(BanyanDB.StreamGroup.RECORDS_BROWSER_ERROR_LOG.getName(), + model.getName(), + Kind.STREAM, + model.getDownsampling(), + config.getRecordsBrowserErrorLog()); + case RECORDS_NORMAL: + return new SchemaMetadata(BanyanDB.StreamGroup.RECORDS_NORMAL.getName(), + model.getName(), + Kind.STREAM, + model.getDownsampling(), + config.getRecordsNormal()); + default: + throw new IllegalStateException("unknown stream group " + streamGroup); + + } } if (model.getBanyanDBModelExtension().isIndexMode()) { - return new SchemaMetadata("index", model.getName(), Kind.MEASURE, + return new SchemaMetadata(BanyanDB.MeasureGroup.METADATA.getName(), model.getName(), Kind.MEASURE, model.getDownsampling(), config.getMetadata()); } switch (model.getDownsampling()) { case Minute: - return new SchemaMetadata(DownSampling.Minute.getName(), + return new SchemaMetadata(BanyanDB.MeasureGroup.METRICS_MIN.getName(), model.getName(), Kind.MEASURE, model.getDownsampling(), @@ -521,7 +552,7 @@ public SchemaMetadata parseMetadata(Model model, BanyanDBStorageConfig config, D if (!configService.shouldToHour()) { throw new UnsupportedOperationException("downsampling to hour is not supported"); } - return new SchemaMetadata(DownSampling.Hour.getName(), + return new SchemaMetadata(BanyanDB.MeasureGroup.METRICS_HOUR.getName(), model.getName(), Kind.MEASURE, model.getDownsampling(), @@ -530,7 +561,7 @@ public SchemaMetadata parseMetadata(Model model, BanyanDBStorageConfig config, D if (!configService.shouldToDay()) { throw new UnsupportedOperationException("downsampling to day is not supported"); } - return new SchemaMetadata(DownSampling.Day.getName(), + return new SchemaMetadata(BanyanDB.MeasureGroup.METRICS_DAY.getName(), model.getName(), Kind.MEASURE, model.getDownsampling(), diff --git a/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/stream/BanyanDBSpanAttachedEventQueryDAO.java b/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/stream/BanyanDBSpanAttachedEventQueryDAO.java index 74e625870e6e..2a8e4aa671ff 100644 --- a/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/stream/BanyanDBSpanAttachedEventQueryDAO.java +++ b/oap-server/server-storage-plugin/storage-banyandb-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/banyandb/stream/BanyanDBSpanAttachedEventQueryDAO.java @@ -24,8 +24,8 @@ import org.apache.skywalking.banyandb.v1.client.RowEntity; import org.apache.skywalking.banyandb.v1.client.StreamQuery; import org.apache.skywalking.banyandb.v1.client.StreamQueryResponse; +import 
org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SWSpanAttachedEventRecord; import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventRecord; -import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventTraceType; import org.apache.skywalking.oap.server.core.query.input.Duration; import org.apache.skywalking.oap.server.core.storage.query.ISpanAttachedEventQueryDAO; import org.apache.skywalking.oap.server.storage.plugin.banyandb.BanyanDBConverter; @@ -37,7 +37,7 @@ import java.util.stream.Collectors; public class BanyanDBSpanAttachedEventQueryDAO extends AbstractBanyanDBDAO implements ISpanAttachedEventQueryDAO { - private static final Set TAGS = ImmutableSet.of(SpanAttachedEventRecord.START_TIME_SECOND, + private static final Set ZK_TAGS = ImmutableSet.of(SpanAttachedEventRecord.START_TIME_SECOND, SpanAttachedEventRecord.START_TIME_NANOS, SpanAttachedEventRecord.EVENT, SpanAttachedEventRecord.END_TIME_SECOND, @@ -48,6 +48,19 @@ public class BanyanDBSpanAttachedEventQueryDAO extends AbstractBanyanDBDAO imple SpanAttachedEventRecord.TRACE_SPAN_ID, SpanAttachedEventRecord.DATA_BINARY, SpanAttachedEventRecord.TIMESTAMP); + + private static final Set SW_TAGS = ImmutableSet.of(SWSpanAttachedEventRecord.START_TIME_SECOND, + SWSpanAttachedEventRecord.START_TIME_NANOS, + SWSpanAttachedEventRecord.EVENT, + SWSpanAttachedEventRecord.END_TIME_SECOND, + SWSpanAttachedEventRecord.END_TIME_NANOS, + SWSpanAttachedEventRecord.TRACE_REF_TYPE, + SWSpanAttachedEventRecord.RELATED_TRACE_ID, + SWSpanAttachedEventRecord.TRACE_SEGMENT_ID, + SWSpanAttachedEventRecord.TRACE_SPAN_ID, + SWSpanAttachedEventRecord.DATA_BINARY, + SWSpanAttachedEventRecord.TIMESTAMP); + private final int batchSize; public BanyanDBSpanAttachedEventQueryDAO(BanyanDBStorageClient client, int profileDataQueryBatchSize) { @@ -56,25 +69,46 @@ public BanyanDBSpanAttachedEventQueryDAO(BanyanDBStorageClient client, int profi } @Override - public List querySpanAttachedEvents(SpanAttachedEventTraceType type, List traceIds, @Nullable Duration duration) throws IOException { + public List querySWSpanAttachedEvents(List traceIds, @Nullable Duration duration) throws IOException { + final boolean isColdStage = duration != null && duration.isColdStage(); + final StreamQueryResponse resp = queryDebuggable( + isColdStage, SWSpanAttachedEventRecord.INDEX_NAME, SW_TAGS, getTimestampRange(duration), + new QueryBuilder() { + @Override + protected void apply(StreamQuery query) { + query.and(in(SWSpanAttachedEventRecord.RELATED_TRACE_ID, traceIds)); + query.setOrderBy(new StreamQuery.OrderBy(AbstractQuery.Sort.ASC)); + query.setLimit(batchSize); + } + }); + + return resp.getElements().stream().map(this::buildSWRecord).collect(Collectors.toList()); + } + + @Override + public List queryZKSpanAttachedEvents(List traceIds, @Nullable Duration duration) throws IOException { final boolean isColdStage = duration != null && duration.isColdStage(); final StreamQueryResponse resp = queryDebuggable( - isColdStage, SpanAttachedEventRecord.INDEX_NAME, TAGS, getTimestampRange(duration), + isColdStage, SpanAttachedEventRecord.INDEX_NAME, ZK_TAGS, getTimestampRange(duration), new QueryBuilder() { @Override protected void apply(StreamQuery query) { query.and(in(SpanAttachedEventRecord.RELATED_TRACE_ID, traceIds)); - query.and(eq(SpanAttachedEventRecord.TRACE_REF_TYPE, type.value())); query.setOrderBy(new StreamQuery.OrderBy(AbstractQuery.Sort.ASC)); query.setLimit(batchSize); } }); - return 
resp.getElements().stream().map(this::buildRecord).collect(Collectors.toList()); + return resp.getElements().stream().map(this::buildZKRecord).collect(Collectors.toList()); } - private SpanAttachedEventRecord buildRecord(RowEntity row) { + private SpanAttachedEventRecord buildZKRecord(RowEntity row) { final SpanAttachedEventRecord.Builder builder = new SpanAttachedEventRecord.Builder(); return builder.storage2Entity(new BanyanDBConverter.StorageToStream(SpanAttachedEventRecord.INDEX_NAME, row)); } + + private SWSpanAttachedEventRecord buildSWRecord(RowEntity row) { + final SWSpanAttachedEventRecord.Builder builder = new SWSpanAttachedEventRecord.Builder(); + return builder.storage2Entity(new BanyanDBConverter.StorageToStream(SWSpanAttachedEventRecord.INDEX_NAME, row)); + } } diff --git a/oap-server/server-storage-plugin/storage-elasticsearch-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/elasticsearch/query/SpanAttachedEventEsDAO.java b/oap-server/server-storage-plugin/storage-elasticsearch-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/elasticsearch/query/SpanAttachedEventEsDAO.java index 91be71ff8f43..5abd7462ce6e 100644 --- a/oap-server/server-storage-plugin/storage-elasticsearch-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/elasticsearch/query/SpanAttachedEventEsDAO.java +++ b/oap-server/server-storage-plugin/storage-elasticsearch-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/elasticsearch/query/SpanAttachedEventEsDAO.java @@ -26,8 +26,8 @@ import org.apache.skywalking.library.elasticsearch.requests.search.SearchParams; import org.apache.skywalking.library.elasticsearch.requests.search.Sort; import org.apache.skywalking.library.elasticsearch.response.search.SearchHit; +import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SWSpanAttachedEventRecord; import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventRecord; -import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventTraceType; import org.apache.skywalking.oap.server.core.query.input.Duration; import org.apache.skywalking.oap.server.core.storage.query.ISpanAttachedEventQueryDAO; import org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient; @@ -44,19 +44,48 @@ public class SpanAttachedEventEsDAO extends EsDAO implements ISpanAttachedEventQueryDAO { private final int scrollingBatchSize; - protected Function searchHitSpanAttachedEventRecordFunction = hit -> { + protected Function searchHitZKSpanAttachedEventRecordFunction = hit -> { final var sourceAsMap = hit.getSource(); final var builder = new SpanAttachedEventRecord.Builder(); return builder.storage2Entity(new ElasticSearchConverter.ToEntity(SpanAttachedEventRecord.INDEX_NAME, sourceAsMap)); }; + protected Function searchHitSWSpanAttachedEventRecordFunction = hit -> { + final var sourceAsMap = hit.getSource(); + final var builder = new SWSpanAttachedEventRecord.Builder(); + return builder.storage2Entity(new ElasticSearchConverter.ToEntity(SWSpanAttachedEventRecord.INDEX_NAME, sourceAsMap)); + }; + public SpanAttachedEventEsDAO(ElasticSearchClient client, StorageModuleElasticsearchConfig config) { super(client); this.scrollingBatchSize = config.getProfileDataQueryBatchSize(); } @Override - public List querySpanAttachedEvents(SpanAttachedEventTraceType type, List traceIds, @Nullable Duration duration) throws IOException { + public List querySWSpanAttachedEvents(List traceIds, @Nullable 
Duration duration) throws IOException { + final String index = + IndexController.LogicIndicesRegister.getPhysicalTableName(SWSpanAttachedEventRecord.INDEX_NAME); + final BoolQueryBuilder query = Query.bool(); + if (IndexController.LogicIndicesRegister.isMergedTable(SWSpanAttachedEventRecord.INDEX_NAME)) { + query.must(Query.term(IndexController.LogicIndicesRegister.RECORD_TABLE_NAME, SWSpanAttachedEventRecord.INDEX_NAME)); + } + final SearchBuilder search = Search.builder().query(query).size(scrollingBatchSize); + query.must(Query.terms(SWSpanAttachedEventRecord.RELATED_TRACE_ID, traceIds)); + search.sort(SWSpanAttachedEventRecord.START_TIME_SECOND, Sort.Order.ASC); + search.sort(SWSpanAttachedEventRecord.START_TIME_NANOS, Sort.Order.ASC); + + final var scroller = ElasticSearchScroller + .builder() + .client(getClient()) + .search(search.build()) + .index(index) + .resultConverter(searchHitSWSpanAttachedEventRecordFunction) + .build(); + return scrollDebuggable(scroller, index, new SearchParams()); + } + + @Override + public List queryZKSpanAttachedEvents(List traceIds, @Nullable Duration duration) throws IOException { final String index = IndexController.LogicIndicesRegister.getPhysicalTableName(SpanAttachedEventRecord.INDEX_NAME); final BoolQueryBuilder query = Query.bool(); @@ -65,17 +94,16 @@ public List querySpanAttachedEvents(SpanAttachedEventTr } final SearchBuilder search = Search.builder().query(query).size(scrollingBatchSize); query.must(Query.terms(SpanAttachedEventRecord.RELATED_TRACE_ID, traceIds)); - query.must(Query.terms(SpanAttachedEventRecord.TRACE_REF_TYPE, type.value())); search.sort(SpanAttachedEventRecord.START_TIME_SECOND, Sort.Order.ASC); search.sort(SpanAttachedEventRecord.START_TIME_NANOS, Sort.Order.ASC); final var scroller = ElasticSearchScroller - .builder() - .client(getClient()) - .search(search.build()) - .index(index) - .resultConverter(searchHitSpanAttachedEventRecordFunction) - .build(); + .builder() + .client(getClient()) + .search(search.build()) + .index(index) + .resultConverter(searchHitZKSpanAttachedEventRecordFunction) + .build(); return scrollDebuggable(scroller, index, new SearchParams()); } } diff --git a/oap-server/server-storage-plugin/storage-jdbc-hikaricp-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/jdbc/common/dao/JDBCSpanAttachedEventQueryDAO.java b/oap-server/server-storage-plugin/storage-jdbc-hikaricp-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/jdbc/common/dao/JDBCSpanAttachedEventQueryDAO.java index 5c1d2b1337dc..7e5bf8a56e55 100644 --- a/oap-server/server-storage-plugin/storage-jdbc-hikaricp-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/jdbc/common/dao/JDBCSpanAttachedEventQueryDAO.java +++ b/oap-server/server-storage-plugin/storage-jdbc-hikaricp-plugin/src/main/java/org/apache/skywalking/oap/server/storage/plugin/jdbc/common/dao/JDBCSpanAttachedEventQueryDAO.java @@ -21,8 +21,8 @@ import javax.annotation.Nullable; import lombok.RequiredArgsConstructor; import lombok.SneakyThrows; +import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SWSpanAttachedEventRecord; import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventRecord; -import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventTraceType; import org.apache.skywalking.oap.server.core.query.input.Duration; import org.apache.skywalking.oap.server.core.storage.query.ISpanAttachedEventQueryDAO; import 
org.apache.skywalking.oap.server.library.client.jdbc.hikaricp.JDBCClient; @@ -46,12 +46,12 @@ public class JDBCSpanAttachedEventQueryDAO implements ISpanAttachedEventQueryDAO @Override @SneakyThrows - public List querySpanAttachedEvents(SpanAttachedEventTraceType type, List traceIds, @Nullable Duration duration) { + public List queryZKSpanAttachedEvents(List traceIds, @Nullable Duration duration) { final var tables = tableHelper.getTablesWithinTTL(SpanAttachedEventRecord.INDEX_NAME); final var results = new ArrayList(); for (String table : tables) { - final var sqlAndParameters = buildSQL(type, traceIds, table); + final var sqlAndParameters = buildZKSQL(traceIds, table); jdbcClient.executeQuery( sqlAndParameters.sql(), @@ -84,7 +84,47 @@ public List querySpanAttachedEvents(SpanAttachedEventTr .collect(toList()); } - private static SQLAndParameters buildSQL(SpanAttachedEventTraceType type, List traceIds, String table) { + @Override + @SneakyThrows + public List querySWSpanAttachedEvents(List traceIds, @Nullable Duration duration) { + final var tables = tableHelper.getTablesWithinTTL(SWSpanAttachedEventRecord.INDEX_NAME); + final var results = new ArrayList(); + + for (String table : tables) { + final var sqlAndParameters = buildSWSQL(traceIds, table); + + jdbcClient.executeQuery( + sqlAndParameters.sql(), + resultSet -> { + while (resultSet.next()) { + SWSpanAttachedEventRecord record = new SWSpanAttachedEventRecord(); + record.setStartTimeSecond(resultSet.getLong(SWSpanAttachedEventRecord.START_TIME_SECOND)); + record.setStartTimeNanos(resultSet.getInt(SWSpanAttachedEventRecord.START_TIME_NANOS)); + record.setEvent(resultSet.getString(SWSpanAttachedEventRecord.EVENT)); + record.setEndTimeSecond(resultSet.getLong(SWSpanAttachedEventRecord.END_TIME_SECOND)); + record.setEndTimeNanos(resultSet.getInt(SWSpanAttachedEventRecord.END_TIME_NANOS)); + record.setTraceRefType(resultSet.getInt(SWSpanAttachedEventRecord.TRACE_REF_TYPE)); + record.setRelatedTraceId(resultSet.getString(SWSpanAttachedEventRecord.RELATED_TRACE_ID)); + record.setTraceSegmentId(resultSet.getString(SWSpanAttachedEventRecord.TRACE_SEGMENT_ID)); + record.setTraceSpanId(resultSet.getString(SWSpanAttachedEventRecord.TRACE_SPAN_ID)); + String dataBinaryBase64 = resultSet.getString(SWSpanAttachedEventRecord.DATA_BINARY); + if (StringUtil.isNotEmpty(dataBinaryBase64)) { + record.setDataBinary(Base64.getDecoder().decode(dataBinaryBase64)); + } + results.add(record); + } + + return null; + }, + sqlAndParameters.parameters()); + } + return results + .stream() + .sorted(comparing(SWSpanAttachedEventRecord::getStartTimeSecond).thenComparing(SWSpanAttachedEventRecord::getStartTimeNanos)) + .collect(toList()); + } + + private static SQLAndParameters buildZKSQL(List traceIds, String table) { final var sql = new StringBuilder("select * from " + table + " where "); final var parameters = new ArrayList<>(traceIds.size() + 1); @@ -100,12 +140,31 @@ private static SQLAndParameters buildSQL(SpanAttachedEventTraceType type, List traceIds, String table) { + final var sql = new StringBuilder("select * from " + table + " where "); + final var parameters = new ArrayList<>(traceIds.size() + 1); + + sql.append(JDBCTableInstaller.TABLE_COLUMN).append(" = ? 
"); + parameters.add(SWSpanAttachedEventRecord.INDEX_NAME); + + sql.append(" and ").append(SWSpanAttachedEventRecord.RELATED_TRACE_ID).append(" in "); + sql.append( + traceIds + .stream() + .map(it -> "?") + .collect(joining(",", "(", ")")) + ); + parameters.addAll(traceIds); + + sql.append(" order by ").append(SWSpanAttachedEventRecord.START_TIME_SECOND) + .append(",").append(SWSpanAttachedEventRecord.START_TIME_NANOS).append(" ASC "); + + return new SQLAndParameters(sql.toString(), parameters); + } } diff --git a/oap-server/server-tools/profile-exporter/tool-profile-snapshot-bootstrap/src/test/java/org/apache/skywalking/oap/server/tool/profile/exporter/test/SpanAttachedEventQueryDAO.java b/oap-server/server-tools/profile-exporter/tool-profile-snapshot-bootstrap/src/test/java/org/apache/skywalking/oap/server/tool/profile/exporter/test/SpanAttachedEventQueryDAO.java index fa73191cc8ee..5f9d126927e6 100644 --- a/oap-server/server-tools/profile-exporter/tool-profile-snapshot-bootstrap/src/test/java/org/apache/skywalking/oap/server/tool/profile/exporter/test/SpanAttachedEventQueryDAO.java +++ b/oap-server/server-tools/profile-exporter/tool-profile-snapshot-bootstrap/src/test/java/org/apache/skywalking/oap/server/tool/profile/exporter/test/SpanAttachedEventQueryDAO.java @@ -19,8 +19,9 @@ package org.apache.skywalking.oap.server.tool.profile.exporter.test; import javax.annotation.Nullable; + +import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SWSpanAttachedEventRecord; import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventRecord; -import org.apache.skywalking.oap.server.core.analysis.manual.spanattach.SpanAttachedEventTraceType; import org.apache.skywalking.oap.server.core.query.input.Duration; import org.apache.skywalking.oap.server.core.storage.query.ISpanAttachedEventQueryDAO; @@ -29,7 +30,12 @@ public class SpanAttachedEventQueryDAO implements ISpanAttachedEventQueryDAO { @Override - public List querySpanAttachedEvents(SpanAttachedEventTraceType type, List traceIds, @Nullable Duration duration) throws IOException { + public List querySWSpanAttachedEvents(List traceIds, @Nullable Duration duration) throws IOException { + return null; + } + + @Override + public List queryZKSpanAttachedEvents(List traceIds, @Nullable Duration duration) throws IOException { return null; } } diff --git a/test/e2e-v2/cases/storage/banyandb/stages/docker-compose.yml b/test/e2e-v2/cases/storage/banyandb/stages/docker-compose.yml index 2f08e694c2b4..782974131e94 100644 --- a/test/e2e-v2/cases/storage/banyandb/stages/docker-compose.yml +++ b/test/e2e-v2/cases/storage/banyandb/stages/docker-compose.yml @@ -30,7 +30,7 @@ services: hostname: data-hot1 command: data --etcd-endpoints=http://etcd:2379 --node-labels type=hot volumes: - - ../data-generate/index:/tmp/measure/data/index + - ../data-generate/metadata:/tmp/measure/data/metadata networks: - e2e @@ -79,8 +79,8 @@ services: SW_STORAGE_BANYANDB_GM_DAY_ENABLE_COLD_STAGE: "true" SW_STORAGE_BANYANDB_GR_NORMAL_ENABLE_WARM_STAGE: "true" SW_STORAGE_BANYANDB_GR_NORMAL_ENABLE_COLD_STAGE: "true" - SW_STORAGE_BANYANDB_GR_SUPER_ENABLE_WARM_STAGE: "true" - SW_STORAGE_BANYANDB_GR_SUPER_ENABLE_COLD_STAGE: "true" + SW_STORAGE_BANYANDB_GR_TRACE_ENABLE_WARM_STAGE: "true" + SW_STORAGE_BANYANDB_GR_TRACE_ENABLE_COLD_STAGE: "true" ports: - 12800:12800 - 11800:11800