diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..ae7194d --- /dev/null +++ b/.gitattributes @@ -0,0 +1,10 @@ +# Auto detect text files and perform LF normalization +* text=auto +*.txt text +*.sh text eol=lf +*.html text eol=lf diff=html +*.css text eol=lf +*.js text eol=lf +*.jpg -text +*.pdf -text +*.java text diff=java diff --git a/.github/workflows/branch-ci.yml b/.github/workflows/branch-ci.yml new file mode 100644 index 0000000..5787348 --- /dev/null +++ b/.github/workflows/branch-ci.yml @@ -0,0 +1,37 @@ +name: Branch CI + +on: + push: + paths-ignore: + - '.github/workflows/**' + - '*.md' + - '*.txt' + branches-ignore: + - 'release*' + +jobs: + build: + name: Branch CI + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v3 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + - name: Set up JDK + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: zulu + server-id: github + server-username: GITHUB_ACTOR + server-password: GITHUB_TOKEN + - name: Maven Build + run: mvn clean install -DskipTests=true -Dmaven.javadoc.skip=true -B -V + env: + GITHUB_TOKEN: ${{ secrets.ORGANIZATION_TOKEN }} + - name: Maven Verify + run: mvn verify -B diff --git a/.github/workflows/pre-release-ci.yml b/.github/workflows/pre-release-ci.yml new file mode 100644 index 0000000..5221d62 --- /dev/null +++ b/.github/workflows/pre-release-ci.yml @@ -0,0 +1,59 @@ +name: Pre-release CI + +on: + release: + types: [ prereleased ] + +jobs: + build: + name: Pre-release CI + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v3 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + - name: Set up Java for publishing to GitHub Packages + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: zulu + server-id: github + server-username: GITHUB_ACTOR + server-password: GITHUB_TOKEN + - name: Deploy pre-release version to GitHub Packages + run: | + pre_release_version=${{ github.event.release.tag_name }} + echo Pre-release version $pre_release_version + mvn versions:set -DnewVersion=$pre_release_version -DgenerateBackupPoms=false + mvn versions:commit + mvn clean deploy -Pdeploy2Github -B -V + env: + GITHUB_TOKEN: ${{ secrets.ORGANIZATION_TOKEN }} + - name: Set up Java for publishing to Maven Central Repository + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: zulu + server-id: central + server-username: MAVEN_USERNAME + server-password: MAVEN_PASSWORD + gpg-private-key: ${{ secrets.MAVEN_GPG_PRIVATE_KEY }} + gpg-passphrase: MAVEN_GPG_PASSPHRASE + - name: Deploy pre-release version to Maven Central Repository + run: | + pre_release_version=${{ github.event.release.tag_name }} + echo Pre-release version $pre_release_version + mvn versions:set -DnewVersion=$pre_release_version -DgenerateBackupPoms=false + mvn versions:commit + mvn deploy -Pdeploy2Maven -DskipTests -B -V + env: + MAVEN_USERNAME: ${{ secrets.SONATYPE_CENTRAL_USERNAME }} + MAVEN_PASSWORD: ${{ secrets.SONATYPE_CENTRAL_PASSWORD }} + MAVEN_GPG_PASSPHRASE: ${{ secrets.MAVEN_GPG_PASSPHRASE }} + - name: Rollback pre-release (remove tag) + if: failure() + run: git push origin :refs/tags/${{ github.event.release.tag_name }} diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml new file mode 100644 index 0000000..b2216dc 
--- /dev/null +++ b/.github/workflows/release-ci.yml @@ -0,0 +1,82 @@ +name: Release CI + +on: + release: + types: [ released ] + +jobs: + build: + name: Release CI + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - run: git checkout ${{ github.event.release.target_commitish }} + - uses: actions/cache@v3 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven- + - name: Set up Java for publishing to GitHub Packages + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: zulu + server-id: github + server-username: GITHUB_ACTOR + server-password: GITHUB_TOKEN + - name: Maven Build + run: mvn clean install -DskipTests=true -B -V + env: + GITHUB_TOKEN: ${{ secrets.ORGANIZATION_TOKEN }} + - name: Maven Verify + run: mvn verify -B + - name: Configure git + run: | + git config --global user.email "${GITHUB_ACTOR}@users.noreply.github.com" + git config --global user.name "${GITHUB_ACTOR}" + - name: Prepare release + id: prepare_release + run: | + mvn -B build-helper:parse-version release:prepare \ + -DreleaseVersion=\${parsedVersion.majorVersion}.\${parsedVersion.minorVersion}.\${parsedVersion.incrementalVersion} \ + -Darguments="-DskipTests=true" + echo release_tag=$(git describe --tags --abbrev=0) >> $GITHUB_OUTPUT + - name: Perform release to GitHub Packages + run: mvn -B release:perform -Pdeploy2Github -Darguments="-DskipTests=true -Pdeploy2Github" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_REPOSITORY: ${{ github.repository }} + - name: Set up Java for publishing to Maven Central Repository + uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: zulu + server-id: central + server-username: MAVEN_USERNAME + server-password: MAVEN_PASSWORD + gpg-private-key: ${{ secrets.MAVEN_GPG_PRIVATE_KEY }} + gpg-passphrase: MAVEN_GPG_PASSPHRASE + - name: Deploy release version to Maven Central Repository + run: | + release_version=$(echo ${{ steps.prepare_release.outputs.release_tag }} | sed "s/release-//") + echo Release version $release_version + mvn versions:set -DnewVersion=$release_version -DgenerateBackupPoms=false + mvn versions:commit + mvn deploy -Pdeploy2Maven -DskipTests -B -V + env: + MAVEN_USERNAME: ${{ secrets.SONATYPE_CENTRAL_USERNAME }} + MAVEN_PASSWORD: ${{ secrets.SONATYPE_CENTRAL_PASSWORD }} + MAVEN_GPG_PASSPHRASE: ${{ secrets.MAVEN_GPG_PASSPHRASE }} + - name: Rollback release + if: failure() + run: | + mvn release:rollback || echo "nothing to rollback" + git push origin :refs/tags/${{ github.event.release.tag_name }} + if [ !
-z "${{ steps.prepare_release.outputs.release_tag }}" ] + then + git tag -d ${{ steps.prepare_release.outputs.release_tag }} + git push origin :refs/tags/${{ steps.prepare_release.outputs.release_tag }} + fi diff --git a/.gitignore b/.gitignore index 524f096..4be3a6d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,24 +1,14 @@ -# Compiled class file -*.class - -# Log file +.* +!.gitignore +!.gitattributes +!.github +!.editorconfig +!.*.yml +!.env.example +**/target/ +*.iml +**/logs/*.log +*.db +*.csv *.log - -# BlueJ files -*.ctxt - -# Mobile Tools for Java (J2ME) -.mtj.tmp/ - -# Package Files # -*.jar -*.war -*.nar -*.ear -*.zip -*.tar.gz -*.rar - -# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml -hs_err_pid* -replay_pid* +!.helmignore diff --git a/LICENSE b/LICENSE.txt similarity index 99% rename from LICENSE rename to LICENSE.txt index 261eeb9..d645695 100644 --- a/LICENSE +++ b/LICENSE.txt @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/README.md b/README.md index 4a093df..bdd50f7 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,8 @@ # ScaleCube KPI/Telemetry library -High-performance KPI and telemetry library for Java, designed for ultra-low-latency and precise metrics. Built on Agrona Counters and HdrHistograms, it supports both real-time monitoring and historical data captures. The library is ideal for systems that require fine-grained performance insights, high-frequency metrics, and minimal runtime overhead. Lightweight and thread-safe, it can be integrated into performance-critical applications such as trading platforms, messaging systems, or any low-latency environment. +High-performance KPI and telemetry library for Java, designed for ultra-low-latency and precise +metrics. Built on Agrona Counters and HdrHistograms, it supports both real-time monitoring and +historical data captures. The library is ideal for systems that require fine-grained performance +insights, high-frequency metrics, and minimal runtime overhead. Lightweight and thread-safe, it can +be integrated into performance-critical applications such as trading platforms, messaging systems, +or any low-latency environment. 
diff --git a/checkstyle-suppressions.xml b/checkstyle-suppressions.xml new file mode 100644 index 0000000..dbd0fdb --- /dev/null +++ b/checkstyle-suppressions.xml @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<!DOCTYPE suppressions PUBLIC + "-//Checkstyle//DTD SuppressionFilter Configuration 1.2//EN" + "https://checkstyle.org/dtds/suppressions_1_2.dtd"> +<suppressions> + + + + + + + + +</suppressions> diff --git a/metrics-aeron/pom.xml b/metrics-aeron/pom.xml new file mode 100644 index 0000000..9592997 --- /dev/null +++ b/metrics-aeron/pom.xml @@ -0,0 +1,77 @@ +<?xml version="1.0" encoding="UTF-8"?> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + + <parent> + <groupId>io.scalecube</groupId> + <artifactId>scalecube-metrics-parent</artifactId> + <version>0.1.0-SNAPSHOT</version> + </parent> + + <artifactId>scalecube-metrics-aeron</artifactId> + + <dependencies> + <dependency> + <groupId>io.scalecube</groupId> + <artifactId>scalecube-metrics</artifactId> + <version>${project.parent.version}</version> + </dependency> + <dependency> + <groupId>io.aeron</groupId> + <artifactId>aeron-driver</artifactId> + </dependency> + <dependency> + <groupId>io.aeron</groupId> + <artifactId>aeron-archive</artifactId> + </dependency> + <dependency> + <groupId>io.aeron</groupId> + <artifactId>aeron-cluster</artifactId> + </dependency> + + <dependency> + <groupId>org.junit.jupiter</groupId> + <artifactId>junit-jupiter-api</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.junit.jupiter</groupId> + <artifactId>junit-jupiter-engine</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.junit.jupiter</groupId> + <artifactId>junit-jupiter-params</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.mockito</groupId> + <artifactId>mockito-core</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.mockito</groupId> + <artifactId>mockito-junit-jupiter</artifactId> + <scope>test</scope> + <exclusions> + <exclusion> + <groupId>net.bytebuddy</groupId> + <artifactId>*</artifactId> + </exclusion> + </exclusions> + </dependency> + <dependency> + <groupId>org.apache.logging.log4j</groupId> + <artifactId>log4j-slf4j-impl</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.apache.logging.log4j</groupId> + <artifactId>log4j-core</artifactId> + <scope>test</scope> + </dependency> + </dependencies> +</project> diff --git a/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/ArchiveCountersAdapter.java b/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/ArchiveCountersAdapter.java new file mode 100644 index 0000000..5bf3f8c --- /dev/null +++ b/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/ArchiveCountersAdapter.java @@ -0,0 +1,181 @@ +package io.scalecube.metrics.aeron; + +import static io.aeron.AeronCounters.ARCHIVE_CONTROL_SESSIONS_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_ERROR_COUNT_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_MAX_CYCLE_TIME_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_RECORDER_MAX_WRITE_TIME_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_RECORDER_TOTAL_WRITE_BYTES_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_RECORDER_TOTAL_WRITE_TIME_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_RECORDING_POSITION_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_RECORDING_SESSION_COUNT_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_REPLAYER_MAX_READ_TIME_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_REPLAYER_TOTAL_READ_BYTES_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_REPLAYER_TOTAL_READ_TIME_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_REPLAY_SESSION_COUNT_TYPE_ID; +import static org.agrona.BitUtil.SIZE_OF_INT; +import static org.agrona.BitUtil.SIZE_OF_LONG; + +import io.scalecube.metrics.KeyFlyweight; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.agrona.DirectBuffer; +import org.agrona.ExpandableArrayBuffer; +import org.agrona.collections.Int2ObjectHashMap; + +public class ArchiveCountersAdapter { + + private static final Pattern KIND_PATTERN = Pattern.compile("^(archive-[^\\s:]+)"); + + private static final int RECORDING_ID_OFFSET = 0; + private static final int SESSION_ID_OFFSET = RECORDING_ID_OFFSET + SIZE_OF_LONG; + private static final int SOURCE_IDENTITY_LENGTH_OFFSET = SESSION_ID_OFFSET + SIZE_OF_INT; + private static final int SOURCE_IDENTITY_OFFSET = SOURCE_IDENTITY_LENGTH_OFFSET + SIZE_OF_INT; + + private final KeyFlyweight keyFlyweight = new KeyFlyweight(); + + public static void populate(Int2ObjectHashMap<KeyConverter> map) { + final var adapter = new ArchiveCountersAdapter(); + map.put(ARCHIVE_ERROR_COUNT_TYPE_ID, adapter::archiveErrorCount); + map.put(ARCHIVE_CONTROL_SESSIONS_TYPE_ID, adapter::archiveControlSessionCount); +
map.put(ARCHIVE_MAX_CYCLE_TIME_TYPE_ID, adapter::archiveMaxCycleTime); + map.put( + ARCHIVE_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID, + adapter::archiveCycleTimeThresholdExceededCount); + map.put(ARCHIVE_RECORDER_MAX_WRITE_TIME_TYPE_ID, adapter::archiveRecorderMaxWriteTime); + map.put(ARCHIVE_RECORDER_TOTAL_WRITE_BYTES_TYPE_ID, adapter::archiveRecorderTotalWriteBytes); + map.put(ARCHIVE_RECORDER_TOTAL_WRITE_TIME_TYPE_ID, adapter::archiveRecorderTotalWriteTime); + map.put(ARCHIVE_REPLAYER_MAX_READ_TIME_TYPE_ID, adapter::archiveReplayerMaxReadTime); + map.put(ARCHIVE_REPLAYER_TOTAL_READ_BYTES_TYPE_ID, adapter::archiveReplayerTotalReadBytes); + map.put(ARCHIVE_REPLAYER_TOTAL_READ_TIME_TYPE_ID, adapter::archiveReplayerTotalReadTime); + map.put(ARCHIVE_RECORDING_SESSION_COUNT_TYPE_ID, adapter::archiveRecordingSessionCount); + map.put(ARCHIVE_REPLAY_SESSION_COUNT_TYPE_ID, adapter::archiveReplaySessionCount); + map.put(ARCHIVE_RECORDING_POSITION_TYPE_ID, adapter::archiveRecordingPosition); + } + + private DirectBuffer archiveErrorCount(DirectBuffer keyBuffer, String label) { + return newKey(2) + .stringValue("name", "archive_error_count") + .longValue("archiveId", keyBuffer.getLong(0)) + .buffer(); + } + + private DirectBuffer archiveControlSessionCount(DirectBuffer keyBuffer, String label) { + return newKey(2) + .stringValue("name", "archive_control_session_count") + .longValue("archiveId", keyBuffer.getLong(0)) + .buffer(); + } + + private DirectBuffer archiveMaxCycleTime(DirectBuffer keyBuffer, String label) { + return newKey(3) + .stringValue("name", "archive_max_cycle_time_nanos") + .longValue("archiveId", keyBuffer.getLong(0)) + .stringValue("kind", getKind(label)) + .buffer(); + } + + private DirectBuffer archiveCycleTimeThresholdExceededCount( + DirectBuffer keyBuffer, String label) { + return newKey(3) + .stringValue("name", "archive_cycle_time_threshold_exceeded_count") + .longValue("archiveId", keyBuffer.getLong(0)) + .stringValue("kind", getKind(label)) + .buffer(); + } + + private DirectBuffer archiveRecorderMaxWriteTime(DirectBuffer keyBuffer, String label) { + return newKey(2) + .stringValue("name", "archive_recorder_max_write_time_nanos") + .longValue("archiveId", keyBuffer.getLong(0)) + .buffer(); + } + + private DirectBuffer archiveRecorderTotalWriteBytes(DirectBuffer keyBuffer, String label) { + return newKey(2) + .stringValue("name", "archive_recorder_total_write_bytes") + .longValue("archiveId", keyBuffer.getLong(0)) + .buffer(); + } + + private DirectBuffer archiveRecorderTotalWriteTime(DirectBuffer keyBuffer, String label) { + return newKey(2) + .stringValue("name", "archive_recorder_total_write_time_nanos") + .longValue("archiveId", keyBuffer.getLong(0)) + .buffer(); + } + + private DirectBuffer archiveReplayerMaxReadTime(DirectBuffer keyBuffer, String label) { + return newKey(2) + .stringValue("name", "archive_replayer_max_read_time_nanos") + .longValue("archiveId", keyBuffer.getLong(0)) + .buffer(); + } + + private DirectBuffer archiveReplayerTotalReadBytes(DirectBuffer keyBuffer, String label) { + return newKey(2) + .stringValue("name", "archive_replayer_total_read_bytes") + .longValue("archiveId", keyBuffer.getLong(0)) + .buffer(); + } + + private DirectBuffer archiveReplayerTotalReadTime(DirectBuffer keyBuffer, String label) { + return newKey(2) + .stringValue("name", "archive_replayer_total_read_time_nanos") + .longValue("archiveId", keyBuffer.getLong(0)) + .buffer(); + } + + private DirectBuffer archiveRecordingSessionCount(DirectBuffer keyBuffer, String label) { + 
return newKey(2) + .stringValue("name", "archive_recording_session_count") + .longValue("archiveId", keyBuffer.getLong(0)) + .buffer(); + } + + private DirectBuffer archiveReplaySessionCount(DirectBuffer keyBuffer, String label) { + return newKey(2) + .stringValue("name", "archive_replay_session_count") + .longValue("archiveId", keyBuffer.getLong(0)) + .buffer(); + } + + private DirectBuffer archiveRecordingPosition(DirectBuffer keyBuffer, String label) { + final var sourceIdentityLength = keyBuffer.getInt(SOURCE_IDENTITY_LENGTH_OFFSET); + final var archiveIdOffset = SOURCE_IDENTITY_OFFSET + sourceIdentityLength; + final var archiveId = keyBuffer.getLong(archiveIdOffset); + final var streamId = getStreamId(label); + + return newKey(3) + .stringValue("name", "archive_recording_position") + .longValue("archiveId", archiveId) + .intValue("streamId", streamId) + .buffer(); + } + + private KeyFlyweight newKey(int tagsCount) { + return keyFlyweight.wrap(new ExpandableArrayBuffer(), 0).tagsCount(tagsCount); + } + + private static String getKind(String input) { + Matcher matcher = KIND_PATTERN.matcher(input); + if (matcher.find()) { + return matcher.group(1); + } else { + throw new IllegalArgumentException("Wrong input: " + input); + } + } + + private static int getStreamId(String label) { + String[] tokens = label.split(" "); + if (tokens.length >= 4) { + try { + return Integer.parseInt(tokens[3]); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Wrong input: " + label, e); + } + } else { + throw new IllegalArgumentException("Wrong input: " + label); + } + } +} diff --git a/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/ClusterCountersAdapter.java b/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/ClusterCountersAdapter.java new file mode 100644 index 0000000..fbf6458 --- /dev/null +++ b/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/ClusterCountersAdapter.java @@ -0,0 +1,103 @@ +package io.scalecube.metrics.aeron; + +import static io.aeron.AeronCounters.CLUSTER_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_ELECTION_COUNT_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_LEADERSHIP_TERM_ID_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_MAX_CYCLE_TIME_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_TOTAL_MAX_SNAPSHOT_DURATION_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_TOTAL_SNAPSHOT_DURATION_THRESHOLD_EXCEEDED_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.CLUSTER_CLIENT_TIMEOUT_COUNT_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.CLUSTER_NODE_ROLE_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.COMMIT_POSITION_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.CONSENSUS_MODULE_ERROR_COUNT_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.CONSENSUS_MODULE_STATE_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.ELECTION_STATE_TYPE_ID; + +import io.scalecube.metrics.KeyFlyweight; +import org.agrona.DirectBuffer; +import org.agrona.ExpandableArrayBuffer; +import org.agrona.collections.Int2ObjectHashMap; + +public class ClusterCountersAdapter { + + private final KeyFlyweight keyFlyweight = new KeyFlyweight(); + + public static void populate(Int2ObjectHashMap<KeyConverter> map) { + final var adapter = new ClusterCountersAdapter(); + map.put(CONSENSUS_MODULE_STATE_TYPE_ID, adapter::consensusModuleState); + map.put(CONSENSUS_MODULE_ERROR_COUNT_TYPE_ID,
adapter::consensusModuleErrorCount); + map.put(ELECTION_STATE_TYPE_ID, adapter::clusterElectionState); + map.put(CLUSTER_ELECTION_COUNT_TYPE_ID, adapter::clusterElectionCount); + map.put(CLUSTER_LEADERSHIP_TERM_ID_TYPE_ID, adapter::clusterLeadershipTermId); + map.put(CLUSTER_NODE_ROLE_TYPE_ID, adapter::clusterNodeRole); + map.put(COMMIT_POSITION_TYPE_ID, adapter::clusterCommitPosition); + map.put(CLUSTER_CLIENT_TIMEOUT_COUNT_TYPE_ID, adapter::clusterClientTimeoutCount); + map.put(CLUSTER_MAX_CYCLE_TIME_TYPE_ID, adapter::clusterMaxCycleTime); + map.put( + CLUSTER_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID, + adapter::clusterCycleTimeThresholdExceededCount); + map.put(CLUSTER_TOTAL_MAX_SNAPSHOT_DURATION_TYPE_ID, adapter::clusterTotalMaxSnapshotDuration); + map.put( + CLUSTER_TOTAL_SNAPSHOT_DURATION_THRESHOLD_EXCEEDED_TYPE_ID, + adapter::clusterTotalSnapshotDurationThresholdExceededCount); + } + + private DirectBuffer consensusModuleState(DirectBuffer keyBuffer, String label) { + return newKey("consensus_module_state", keyBuffer.getInt(0)); + } + + private DirectBuffer consensusModuleErrorCount(DirectBuffer keyBuffer, String label) { + return newKey("consensus_module_error_count", keyBuffer.getInt(0)); + } + + private DirectBuffer clusterElectionState(DirectBuffer keyBuffer, String label) { + return newKey("cluster_election_state", keyBuffer.getInt(0)); + } + + private DirectBuffer clusterElectionCount(DirectBuffer keyBuffer, String label) { + return newKey("cluster_election_count", keyBuffer.getInt(0)); + } + + private DirectBuffer clusterLeadershipTermId(DirectBuffer keyBuffer, String label) { + return newKey("cluster_leadership_term_id", keyBuffer.getInt(0)); + } + + private DirectBuffer clusterNodeRole(DirectBuffer keyBuffer, String label) { + return newKey("cluster_node_role", keyBuffer.getInt(0)); + } + + private DirectBuffer clusterCommitPosition(DirectBuffer keyBuffer, String label) { + return newKey("cluster_commit_position", keyBuffer.getInt(0)); + } + + private DirectBuffer clusterClientTimeoutCount(DirectBuffer keyBuffer, String label) { + return newKey("cluster_client_timeout_count", keyBuffer.getInt(0)); + } + + private DirectBuffer clusterMaxCycleTime(DirectBuffer keyBuffer, String label) { + return newKey("cluster_max_cycle_time_nanos", keyBuffer.getInt(0)); + } + + private DirectBuffer clusterCycleTimeThresholdExceededCount( + DirectBuffer keyBuffer, String label) { + return newKey("cluster_cycle_time_threshold_exceeded_count", keyBuffer.getInt(0)); + } + + private DirectBuffer clusterTotalMaxSnapshotDuration(DirectBuffer keyBuffer, String label) { + return newKey("cluster_total_max_snapshot_duration_nanos", keyBuffer.getInt(0)); + } + + private DirectBuffer clusterTotalSnapshotDurationThresholdExceededCount( + DirectBuffer keyBuffer, String label) { + return newKey("cluster_total_snapshot_duration_threshold_exceeded_count", keyBuffer.getInt(0)); + } + + private DirectBuffer newKey(String name, int clusterId) { + return keyFlyweight + .wrap(new ExpandableArrayBuffer(), 0) + .tagsCount(2) + .stringValue("name", name) + .intValue("clusterId", clusterId) + .buffer(); + } +} diff --git a/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/ClusteredServiceCountersAdapter.java b/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/ClusteredServiceCountersAdapter.java new file mode 100644 index 0000000..9dd4bc2 --- /dev/null +++ b/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/ClusteredServiceCountersAdapter.java @@ -0,0 +1,77 @@ +package io.scalecube.metrics.aeron; + 
+import static io.aeron.AeronCounters.CLUSTERED_SERVICE_MAX_SNAPSHOT_DURATION_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTERED_SERVICE_SNAPSHOT_DURATION_THRESHOLD_EXCEEDED_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_CLUSTERED_SERVICE_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_CLUSTERED_SERVICE_ERROR_COUNT_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_CLUSTERED_SERVICE_MAX_CYCLE_TIME_TYPE_ID; +import static org.agrona.BitUtil.SIZE_OF_INT; + +import io.scalecube.metrics.KeyFlyweight; +import org.agrona.DirectBuffer; +import org.agrona.ExpandableArrayBuffer; +import org.agrona.collections.Int2ObjectHashMap; + +public class ClusteredServiceCountersAdapter { + + private final KeyFlyweight keyFlyweight = new KeyFlyweight(); + + public static void populate(Int2ObjectHashMap<KeyConverter> map) { + final var adapter = new ClusteredServiceCountersAdapter(); + map.put( + CLUSTER_CLUSTERED_SERVICE_MAX_CYCLE_TIME_TYPE_ID, adapter::clusteredServiceMaxCycleTime); + map.put( + CLUSTER_CLUSTERED_SERVICE_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID, + adapter::clusteredServiceCycleTimeThresholdExceededCount); + map.put( + CLUSTERED_SERVICE_MAX_SNAPSHOT_DURATION_TYPE_ID, + adapter::clusteredServiceMaxSnapshotDuration); + map.put( + CLUSTERED_SERVICE_SNAPSHOT_DURATION_THRESHOLD_EXCEEDED_TYPE_ID, + adapter::clusteredServiceSnapshotDurationThresholdExceededCount); + map.put(CLUSTER_CLUSTERED_SERVICE_ERROR_COUNT_TYPE_ID, adapter::clusteredServiceErrorCount); + } + + private DirectBuffer clusteredServiceMaxCycleTime(DirectBuffer keyBuffer, String label) { + final var clusterId = keyBuffer.getInt(0); + final var serviceId = keyBuffer.getInt(SIZE_OF_INT); + return newKey("clustered_service_max_cycle_time_nanos", clusterId, serviceId); + } + + private DirectBuffer clusteredServiceCycleTimeThresholdExceededCount( + DirectBuffer keyBuffer, String label) { + final var clusterId = keyBuffer.getInt(0); + final var serviceId = keyBuffer.getInt(SIZE_OF_INT); + return newKey("clustered_service_cycle_time_threshold_exceeded_count", clusterId, serviceId); + } + + private DirectBuffer clusteredServiceMaxSnapshotDuration(DirectBuffer keyBuffer, String label) { + final var clusterId = keyBuffer.getInt(0); + final var serviceId = keyBuffer.getInt(SIZE_OF_INT); + return newKey("clustered_service_max_snapshot_duration_nanos", clusterId, serviceId); + } + + private DirectBuffer clusteredServiceSnapshotDurationThresholdExceededCount( + DirectBuffer keyBuffer, String label) { + final var clusterId = keyBuffer.getInt(0); + final var serviceId = keyBuffer.getInt(SIZE_OF_INT); + return newKey( + "clustered_service_snapshot_duration_threshold_exceeded_count", clusterId, serviceId); + } + + private DirectBuffer clusteredServiceErrorCount(DirectBuffer keyBuffer, String label) { + final var clusterId = keyBuffer.getInt(0); + final var serviceId = keyBuffer.getInt(SIZE_OF_INT); + return newKey("clustered_service_error_count", clusterId, serviceId); + } + + private DirectBuffer newKey(String name, int clusterId, int serviceId) { + return keyFlyweight + .wrap(new ExpandableArrayBuffer(), 0) + .tagsCount(3) + .stringValue("name", name) + .intValue("clusterId", clusterId) + .intValue("serviceId", serviceId) + .buffer(); + } +} diff --git a/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/CncCountersReaderAgent.java b/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/CncCountersReaderAgent.java new file mode 100644 index 0000000..e8228e4 --- /dev/null +++
b/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/CncCountersReaderAgent.java @@ -0,0 +1,211 @@ +package io.scalecube.metrics.aeron; + +import static io.aeron.CncFileDescriptor.CNC_FILE; + +import io.aeron.Aeron; +import io.aeron.CncFileDescriptor; +import io.aeron.RethrowingErrorHandler; +import io.aeron.exceptions.DriverTimeoutException; +import io.scalecube.metrics.CounterDescriptor; +import io.scalecube.metrics.CountersHandler; +import io.scalecube.metrics.Delay; +import java.io.File; +import java.time.Duration; +import java.util.ArrayList; +import org.agrona.CloseHelper; +import org.agrona.collections.Int2ObjectHashMap; +import org.agrona.concurrent.Agent; +import org.agrona.concurrent.AgentInvoker; +import org.agrona.concurrent.AgentTerminationException; +import org.agrona.concurrent.EpochClock; +import org.agrona.concurrent.status.CountersReader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Agent that periodically reads counters from mapped counters file {@link + * CncFileDescriptor#CNC_FILE}, and invokes {@link CountersHandler} with the counters values. + */ +public class CncCountersReaderAgent implements Agent { + + private static final Logger LOGGER = LoggerFactory.getLogger(CncCountersReaderAgent.class); + + public enum State { + INIT, + RUNNING, + CLEANUP, + CLOSED + } + + private final String roleName; + private final String aeronDirectoryName; + private final boolean warnIfCncNotExists; + private final EpochClock epochClock; + private final Duration driverTimeout; + private final CountersHandler countersHandler; + + private final Delay readInterval; + private Aeron aeron; + private CountersReader countersReader; + private AgentInvoker conductorAgentInvoker; + private final Int2ObjectHashMap<KeyConverter> keyConverters = new Int2ObjectHashMap<>(); + private State state = State.CLOSED; + + /** + * Constructor.
+ * + * @param roleName agent role name + * @param aeronDirectoryName aeron directory name + * @param warnIfCncNotExists whether to log a warning if the counters file does not exist + * @param epochClock epoch clock + * @param readInterval interval at which to read counters + * @param driverTimeout media-driver timeout (see {@link Aeron.Context#driverTimeoutMs(long)}) + * @param countersHandler callback handler to process counters + */ + public CncCountersReaderAgent( + String roleName, + String aeronDirectoryName, + boolean warnIfCncNotExists, + EpochClock epochClock, + Duration readInterval, + Duration driverTimeout, + CountersHandler countersHandler) { + this.roleName = roleName; + this.aeronDirectoryName = aeronDirectoryName; + this.warnIfCncNotExists = warnIfCncNotExists; + this.epochClock = epochClock; + this.driverTimeout = driverTimeout; + this.countersHandler = countersHandler; + this.readInterval = new Delay(epochClock, readInterval.toMillis()); + ArchiveCountersAdapter.populate(keyConverters); + ClusterCountersAdapter.populate(keyConverters); + ClusteredServiceCountersAdapter.populate(keyConverters); + } + + @Override + public String roleName() { + return roleName; + } + + @Override + public void onStart() { + if (state != State.CLOSED) { + throw new AgentTerminationException("Illegal state: " + state); + } + state(State.INIT); + } + + @Override + public int doWork() { + try { + return switch (state) { + case INIT -> init(); + case RUNNING -> running(); + case CLEANUP -> cleanup(); + default -> throw new AgentTerminationException("Unknown state: " + state); + }; + } catch (AgentTerminationException e) { + throw e; + } catch (Exception e) { + state(State.CLEANUP); + throw e; + } + } + + private int init() { + if (readInterval.isNotOverdue()) { + return 0; + } + + final var cncFile = new File(aeronDirectoryName, CNC_FILE); + if (!cncFile.exists()) { + if (warnIfCncNotExists) { + LOGGER.warn("[{}] {} does not exist", roleName(), cncFile); + } + state(State.CLEANUP); + return 0; + } + + aeron = + Aeron.connect( + new Aeron.Context() + .useConductorAgentInvoker(true) + .aeronDirectoryName(aeronDirectoryName) + .errorHandler(RethrowingErrorHandler.INSTANCE) + .subscriberErrorHandler(RethrowingErrorHandler.INSTANCE) + .driverTimeoutMs(driverTimeout.toMillis())); + conductorAgentInvoker = aeron.conductorAgentInvoker(); + countersReader = aeron.countersReader(); + + state(State.RUNNING); + LOGGER.info("[{}] Initialized, now running", roleName()); + return 1; + } + + private int running() { + try { + conductorAgentInvoker.invoke(); + } catch (AgentTerminationException | DriverTimeoutException e) { + state(State.CLEANUP); + LOGGER.warn( + "[{}] conductorAgentInvoker has thrown exception: {}, proceed to cleanup", + roleName(), + e.toString()); + return 0; + } + + if (readInterval.isNotOverdue()) { + return 0; + } + + readInterval.delay(); + + final var timestamp = epochClock.time(); + final var counterDescriptors = new ArrayList<CounterDescriptor>(); + countersReader.forEach( + (counterId, typeId, keyBuffer, label) -> { + final var keyConverter = keyConverters.get(typeId); + if (keyConverter != null) { + counterDescriptors.add( + new CounterDescriptor( + counterId, + typeId, + countersReader.getCounterValue(counterId), + keyConverter.convert(keyBuffer, label), + null)); + } + }); + countersHandler.accept(timestamp, counterDescriptors); + + return 0; + } + + private int cleanup() { + CloseHelper.quietCloseAll(aeron); + aeron = null; + conductorAgentInvoker = null; + countersReader = null; + + State previous = state; + if
(previous != State.CLOSED) { // re-arm for re-init, unless cleanup() was invoked from onClose() + readInterval.delay(); + state(State.INIT); + } + return 1; + } + + @Override + public void onClose() { + state(State.CLOSED); + cleanup(); + } + + private void state(State state) { + LOGGER.debug("[{}][state] {}->{}", roleName(), this.state, state); + this.state = state; + } + + public State state() { + return state; + } +} diff --git a/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/KeyConverter.java b/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/KeyConverter.java new file mode 100644 index 0000000..7494115 --- /dev/null +++ b/metrics-aeron/src/main/java/io/scalecube/metrics/aeron/KeyConverter.java @@ -0,0 +1,8 @@ +package io.scalecube.metrics.aeron; + +import org.agrona.DirectBuffer; + +public interface KeyConverter { + + DirectBuffer convert(DirectBuffer keyBuffer, String label); +} diff --git a/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/ClusteredServiceImpl.java b/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/ClusteredServiceImpl.java new file mode 100644 index 0000000..d3bac3f --- /dev/null +++ b/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/ClusteredServiceImpl.java @@ -0,0 +1,44 @@ +package io.scalecube.metrics.aeron; + +import io.aeron.ExclusivePublication; +import io.aeron.Image; +import io.aeron.cluster.codecs.CloseReason; +import io.aeron.cluster.service.ClientSession; +import io.aeron.cluster.service.Cluster; +import io.aeron.cluster.service.Cluster.Role; +import io.aeron.cluster.service.ClusteredService; +import io.aeron.logbuffer.Header; +import org.agrona.DirectBuffer; + +public class ClusteredServiceImpl implements ClusteredService { + + @Override + public void onStart(Cluster cluster, Image snapshotImage) {} + + @Override + public void onSessionOpen(ClientSession session, long timestamp) {} + + @Override + public void onSessionClose(ClientSession session, long timestamp, CloseReason closeReason) {} + + @Override + public void onSessionMessage( + ClientSession session, + long timestamp, + DirectBuffer buffer, + int offset, + int length, + Header header) {} + + @Override + public void onTimerEvent(long correlationId, long timestamp) {} + + @Override + public void onTakeSnapshot(ExclusivePublication snapshotPublication) {} + + @Override + public void onRoleChange(Role newRole) {} + + @Override + public void onTerminate(Cluster cluster) {} +} diff --git a/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/CncCountersReaderAgentTest.java b/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/CncCountersReaderAgentTest.java new file mode 100644 index 0000000..57e85ba --- /dev/null +++ b/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/CncCountersReaderAgentTest.java @@ -0,0 +1,104 @@ +package io.scalecube.metrics.aeron; + +import static io.aeron.CommonContext.AERON_DIR_PROP_DEFAULT; +import static org.agrona.IoUtil.delete; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +import io.aeron.driver.MediaDriver; +import io.aeron.driver.MediaDriver.Context; +import io.scalecube.metrics.CountersHandler; +import io.scalecube.metrics.aeron.CncCountersReaderAgent.State; +import java.io.File; +import java.time.Duration; +import org.agrona.concurrent.CachedEpochClock; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import
org.junit.jupiter.api.Test; + +class CncCountersReaderAgentTest { + + private static final Duration READ_INTERVAL = Duration.ofSeconds(3); + private static final Duration DRIVER_TIMEOUT = Duration.ofMillis(500); + + private final CachedEpochClock epochClock = new CachedEpochClock(); + private final CountersHandler countersHandler = mock(CountersHandler.class); + private CncCountersReaderAgent agent; + + @BeforeEach + void beforeEach() { + delete(new File(AERON_DIR_PROP_DEFAULT), true); + agent = + new CncCountersReaderAgent( + "CncCountersReaderAgent", + AERON_DIR_PROP_DEFAULT, + true, + epochClock, + READ_INTERVAL, + DRIVER_TIMEOUT, + countersHandler); + agent.onStart(); + } + + @AfterEach + void afterEach() { + if (agent != null) { + agent.onClose(); + } + } + + @Test + void testWorkWithCncCounters() { + try (final var mediaDriver = MediaDriver.launch()) { + agent.doWork(); + assertEquals(State.RUNNING, agent.state()); + epochClock.advance(READ_INTERVAL.toMillis() + 1); + agent.doWork(); + assertEquals(State.RUNNING, agent.state()); + verify(countersHandler).accept(anyLong(), anyList()); + } + } + + @Test + void testStartWithoutCncCounters() { + agent.doWork(); + assertEquals(State.CLEANUP, agent.state()); + } + + @Test + void testWorkWhenCncCountersShutdown() throws InterruptedException { + try (final var mediaDriver = MediaDriver.launch(new Context().dirDeleteOnShutdown(true))) { + agent.doWork(); + assertEquals(State.RUNNING, agent.state()); + } + epochClock.advance(READ_INTERVAL.toMillis() + 1); + agent.doWork(); + assertEquals(State.RUNNING, agent.state()); + + Thread.sleep(DRIVER_TIMEOUT.toMillis()); + epochClock.advance(READ_INTERVAL.toMillis() + 1); + agent.doWork(); + assertEquals(State.CLEANUP, agent.state()); + } + + @Test + void testWorkWhenCncCountersRestarted() throws InterruptedException { + try (final var mediaDriver = MediaDriver.launch(new Context().dirDeleteOnShutdown(true))) { + agent.doWork(); + assertEquals(State.RUNNING, agent.state()); + } + + Thread.sleep(DRIVER_TIMEOUT.toMillis()); + epochClock.advance(READ_INTERVAL.toMillis() + 1); + agent.doWork(); + assertEquals(State.CLEANUP, agent.state()); + + try (final var mediaDriver = MediaDriver.launch(new Context().dirDeleteOnShutdown(true))) { + agent.doWork(); + assertEquals(State.INIT, agent.state()); + } + } +} diff --git a/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/CncCountersTest.java b/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/CncCountersTest.java new file mode 100644 index 0000000..20ef4a9 --- /dev/null +++ b/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/CncCountersTest.java @@ -0,0 +1,467 @@ +package io.scalecube.metrics.aeron; + +import static io.aeron.AeronCounters.ARCHIVE_CONTROL_SESSIONS_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_ERROR_COUNT_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_MAX_CYCLE_TIME_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_RECORDER_MAX_WRITE_TIME_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_RECORDER_TOTAL_WRITE_BYTES_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_RECORDER_TOTAL_WRITE_TIME_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_RECORDING_POSITION_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_RECORDING_SESSION_COUNT_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_REPLAYER_MAX_READ_TIME_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_REPLAYER_TOTAL_READ_BYTES_TYPE_ID; +import 
static io.aeron.AeronCounters.ARCHIVE_REPLAYER_TOTAL_READ_TIME_TYPE_ID; +import static io.aeron.AeronCounters.ARCHIVE_REPLAY_SESSION_COUNT_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTERED_SERVICE_MAX_SNAPSHOT_DURATION_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTERED_SERVICE_SNAPSHOT_DURATION_THRESHOLD_EXCEEDED_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_CLUSTERED_SERVICE_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_CLUSTERED_SERVICE_ERROR_COUNT_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_CLUSTERED_SERVICE_MAX_CYCLE_TIME_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_ELECTION_COUNT_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_LEADERSHIP_TERM_ID_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_MAX_CYCLE_TIME_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_TOTAL_MAX_SNAPSHOT_DURATION_TYPE_ID; +import static io.aeron.AeronCounters.CLUSTER_TOTAL_SNAPSHOT_DURATION_THRESHOLD_EXCEEDED_TYPE_ID; +import static io.aeron.CommonContext.AERON_DIR_PROP_DEFAULT; +import static io.aeron.cluster.ConsensusModule.Configuration.CLUSTER_CLIENT_TIMEOUT_COUNT_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.CLUSTER_NODE_ROLE_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.COMMIT_POSITION_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.CONSENSUS_MODULE_ERROR_COUNT_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.CONSENSUS_MODULE_STATE_TYPE_ID; +import static io.aeron.cluster.ConsensusModule.Configuration.ELECTION_STATE_TYPE_ID; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +import io.aeron.Aeron; +import io.aeron.archive.Archive; +import io.aeron.cluster.ConsensusModule; +import io.aeron.cluster.client.AeronCluster; +import io.aeron.cluster.client.AeronCluster.Context; +import io.aeron.cluster.service.ClusteredServiceContainer; +import io.aeron.cluster.service.ClusteredServiceContainer.Configuration; +import io.aeron.driver.MediaDriver; +import io.scalecube.metrics.CounterDescriptor; +import io.scalecube.metrics.CountersHandler; +import io.scalecube.metrics.KeyCodec; +import io.scalecube.metrics.KeyFlyweight; +import io.scalecube.metrics.aeron.CncCountersReaderAgent.State; +import java.time.Duration; +import java.util.List; +import java.util.stream.Stream; +import org.agrona.CloseHelper; +import org.agrona.DirectBuffer; +import org.agrona.ExpandableArrayBuffer; +import org.agrona.collections.MutableReference; +import org.agrona.concurrent.CachedEpochClock; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +public class CncCountersTest { + + private static final Duration READ_INTERVAL = Duration.ofSeconds(3); + private static final Duration DRIVER_TIMEOUT = Duration.ofMillis(5000); + + private static final int ARCHIVE_ID = 1; + private static final int CLUSTER_ID = Configuration.clusterId(); + private static final int SERVICE_ID = Configuration.serviceId(); + + 
private static MediaDriver mediaDriver; + private static Aeron aeron; + private static Archive archive; + private static ConsensusModule consensusModule; + private static ClusteredServiceContainer serviceContainer; + private static AeronCluster aeronCluster; + + private final CachedEpochClock epochClock = new CachedEpochClock(); + private final KeyCodec keyCodec = new KeyCodec(); + private CncCountersReaderAgent agent; + + @BeforeAll + static void beforeAll() { + mediaDriver = + MediaDriver.launch( + new MediaDriver.Context().dirDeleteOnStart(true).dirDeleteOnShutdown(true)); + aeron = Aeron.connect(); + archive = + Archive.launch( + new Archive.Context() + .archiveId(ARCHIVE_ID) + .deleteArchiveOnStart(true) + .recordingEventsEnabled(false) + .archiveDirectoryName("target/aeron-archive") + .controlChannel("aeron:udp?endpoint=localhost:8010") + .replicationChannel("aeron:udp?endpoint=localhost:0")); + consensusModule = + ConsensusModule.launch( + new ConsensusModule.Context() + .clusterId(CLUSTER_ID) + .deleteDirOnStart(true) + .clusterDirectoryName("target/aeron-cluster") + .ingressChannel("aeron:udp") + .replicationChannel("aeron:udp?endpoint=localhost:0") + .clusterMemberId(0) + .clusterMembers( + "0," + + "localhost:8005," + + "localhost:8006," + + "localhost:8007," + + "localhost:8008," + + "localhost:8010")); + serviceContainer = + ClusteredServiceContainer.launch( + new ClusteredServiceContainer.Context() + .clusterId(CLUSTER_ID) + .serviceId(SERVICE_ID) + .clusterDirectoryName("target/aeron-cluster") + .clusteredService(new ClusteredServiceImpl())); + + aeronCluster = + AeronCluster.connect( + new Context() + .ingressChannel("aeron:udp") + .ingressEndpoints("0=localhost:8005") + .isIngressExclusive(true) + .egressChannel("aeron:udp?endpoint=localhost:0") + .egressListener(new EgressListenerImpl())); + } + + @AfterAll + static void afterAll() { + CloseHelper.quietCloseAll( + aeronCluster, consensusModule, serviceContainer, archive, aeron, mediaDriver); + } + + @AfterEach + void afterEach() { + if (agent != null) { + agent.onClose(); + } + } + + @Test + void testCncCounters() { + final MutableReference<List<CounterDescriptor>> reference = new MutableReference<>(); + final CountersHandler countersHandler = + new CountersHandler() { + @Override + public void accept(long timestamp, List<CounterDescriptor> counterDescriptors) { + reference.set(counterDescriptors); + } + }; + + agent = + new CncCountersReaderAgent( + "CncCountersReaderAgent", + AERON_DIR_PROP_DEFAULT, + true, + epochClock, + READ_INTERVAL, + DRIVER_TIMEOUT, + countersHandler); + agent.onStart(); + + agent.doWork(); // INIT -> RUNNING + assertEquals(State.RUNNING, agent.state()); + epochClock.advance(READ_INTERVAL.toMillis() + 1); + agent.doWork(); // RUNNING + countersHandler.accept() + assertEquals(State.RUNNING, agent.state()); + + final var counters = reference.get(); + assertNotNull(counters, "counters"); + assertTrue(counters.size() > 0, "counters.size: " + counters.size()); + } + + @MethodSource("testCncCounterKeyConvertersSource") + @ParameterizedTest + void testCncCounterKeyConverters(String test, DirectBuffer expectedKey) { + final MutableReference<List<CounterDescriptor>> reference = new MutableReference<>(); + final CountersHandler countersHandler = + new CountersHandler() { + @Override + public void accept(long timestamp, List<CounterDescriptor> counterDescriptors) { + reference.set(counterDescriptors); + } + }; + + agent = + new CncCountersReaderAgent( + "CncCountersReaderAgent", + AERON_DIR_PROP_DEFAULT, + true, + epochClock, + READ_INTERVAL, + DRIVER_TIMEOUT, + countersHandler); +
agent.onStart(); + + agent.doWork(); // INIT -> RUNNING + assertEquals(State.RUNNING, agent.state()); + epochClock.advance(READ_INTERVAL.toMillis() + 1); + agent.doWork(); // RUNNING + countersHandler.accept() + assertEquals(State.RUNNING, agent.state()); + + final var counters = reference.get(); + assertNotNull(counters, "counters"); + assertTrue(counters.size() > 0, "counters.size: " + counters.size()); + + final var actualKeys = counters.stream().map(CounterDescriptor::keyBuffer).toList(); + assertTrue(actualKeys.contains(expectedKey), errorMessage(actualKeys, expectedKey)); + } + + private static Stream<Arguments> testCncCounterKeyConvertersSource() { + final var builder = Stream.<Arguments>builder(); + + // Archive counters + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_ERROR_COUNT_TYPE_ID, + archiveCounterKey("archive_error_count", ARCHIVE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_CONTROL_SESSIONS_TYPE_ID, + archiveCounterKey("archive_control_session_count", ARCHIVE_ID))); + + List.of("archive-conductor", "archive-recorder", "archive-replayer") + .forEach( + kind -> { + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_MAX_CYCLE_TIME_TYPE_ID, + archiveMaxCycleTime(kind))); + }); + + List.of("archive-conductor", "archive-recorder", "archive-replayer") + .forEach( + kind -> { + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID, + archiveCycleTimeThresholdExceededCount(kind))); + }); + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_RECORDER_MAX_WRITE_TIME_TYPE_ID, + archiveCounterKey("archive_recorder_max_write_time_nanos", ARCHIVE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_RECORDER_TOTAL_WRITE_BYTES_TYPE_ID, + archiveCounterKey("archive_recorder_total_write_bytes", ARCHIVE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_RECORDER_TOTAL_WRITE_TIME_TYPE_ID, + archiveCounterKey("archive_recorder_total_write_time_nanos", ARCHIVE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_REPLAYER_MAX_READ_TIME_TYPE_ID, + archiveCounterKey("archive_replayer_max_read_time_nanos", ARCHIVE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_REPLAYER_TOTAL_READ_BYTES_TYPE_ID, + archiveCounterKey("archive_replayer_total_read_bytes", ARCHIVE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_REPLAYER_TOTAL_READ_TIME_TYPE_ID, + archiveCounterKey("archive_replayer_total_read_time_nanos", ARCHIVE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_RECORDING_SESSION_COUNT_TYPE_ID, + archiveCounterKey("archive_recording_session_count", ARCHIVE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_REPLAY_SESSION_COUNT_TYPE_ID, + archiveCounterKey("archive_replay_session_count", ARCHIVE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + ARCHIVE_RECORDING_POSITION_TYPE_ID, archiveRecordingPosition())); + + // Cluster counters + + builder.add( + arguments( + "Counter TypeId: " + CONSENSUS_MODULE_STATE_TYPE_ID, + clusterCounterKey("consensus_module_state", CLUSTER_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CONSENSUS_MODULE_ERROR_COUNT_TYPE_ID, + clusterCounterKey("consensus_module_error_count", CLUSTER_ID))); + + builder.add( + arguments( + "Counter TypeId: " + ELECTION_STATE_TYPE_ID, + clusterCounterKey("cluster_election_state", CLUSTER_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTER_ELECTION_COUNT_TYPE_ID, + clusterCounterKey("cluster_election_count", CLUSTER_ID))); + +
builder.add( + arguments( + "Counter TypeId: " + CLUSTER_LEADERSHIP_TERM_ID_TYPE_ID, + clusterCounterKey("cluster_leadership_term_id", CLUSTER_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTER_NODE_ROLE_TYPE_ID, + clusterCounterKey("cluster_node_role", CLUSTER_ID))); + + builder.add( + arguments( + "Counter TypeId: " + COMMIT_POSITION_TYPE_ID, + clusterCounterKey("cluster_commit_position", CLUSTER_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTER_CLIENT_TIMEOUT_COUNT_TYPE_ID, + clusterCounterKey("cluster_client_timeout_count", CLUSTER_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTER_MAX_CYCLE_TIME_TYPE_ID, + clusterCounterKey("cluster_max_cycle_time_nanos", CLUSTER_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTER_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID, + clusterCounterKey("cluster_cycle_time_threshold_exceeded_count", CLUSTER_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTER_TOTAL_MAX_SNAPSHOT_DURATION_TYPE_ID, + clusterCounterKey("cluster_total_max_snapshot_duration_nanos", CLUSTER_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTER_TOTAL_SNAPSHOT_DURATION_THRESHOLD_EXCEEDED_TYPE_ID, + clusterCounterKey( + "cluster_total_snapshot_duration_threshold_exceeded_count", CLUSTER_ID))); + + // ClusteredService counters + + builder.add( + arguments( + "Counter TypeId: " + CLUSTER_CLUSTERED_SERVICE_MAX_CYCLE_TIME_TYPE_ID, + clusteredServiceCounterKey( + "clustered_service_max_cycle_time_nanos", CLUSTER_ID, SERVICE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTER_CLUSTERED_SERVICE_CYCLE_TIME_THRESHOLD_EXCEEDED_TYPE_ID, + clusteredServiceCounterKey( + "clustered_service_cycle_time_threshold_exceeded_count", CLUSTER_ID, SERVICE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTERED_SERVICE_MAX_SNAPSHOT_DURATION_TYPE_ID, + clusteredServiceCounterKey( + "clustered_service_max_snapshot_duration_nanos", CLUSTER_ID, SERVICE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTERED_SERVICE_SNAPSHOT_DURATION_THRESHOLD_EXCEEDED_TYPE_ID, + clusteredServiceCounterKey( + "clustered_service_snapshot_duration_threshold_exceeded_count", + CLUSTER_ID, + SERVICE_ID))); + + builder.add( + arguments( + "Counter TypeId: " + CLUSTER_CLUSTERED_SERVICE_ERROR_COUNT_TYPE_ID, + clusteredServiceCounterKey("clustered_service_error_count", CLUSTER_ID, SERVICE_ID))); + + return builder.build(); + } + + // Archive counters + + private static DirectBuffer archiveCounterKey(String name, long archiveId) { + return new KeyFlyweight() + .wrap(new ExpandableArrayBuffer(), 0) + .tagsCount(2) + .stringValue("name", name) + .longValue("archiveId", archiveId) + .buffer(); + } + + private static DirectBuffer archiveMaxCycleTime(String kind) { + return new KeyFlyweight() + .wrap(new ExpandableArrayBuffer(), 0) + .tagsCount(3) + .stringValue("name", "archive_max_cycle_time_nanos") + .longValue("archiveId", ARCHIVE_ID) + .stringValue("kind", kind) + .buffer(); + } + + private static DirectBuffer archiveCycleTimeThresholdExceededCount(String kind) { + return new KeyFlyweight() + .wrap(new ExpandableArrayBuffer(), 0) + .tagsCount(3) + .stringValue("name", "archive_cycle_time_threshold_exceeded_count") + .longValue("archiveId", ARCHIVE_ID) + .stringValue("kind", kind) + .buffer(); + } + + private static DirectBuffer archiveRecordingPosition() { + return new KeyFlyweight() + .wrap(new ExpandableArrayBuffer(), 0) + .tagsCount(3) + .stringValue("name", "archive_recording_position") + 
.longValue("archiveId", ARCHIVE_ID) + .intValue("streamId", ConsensusModule.Configuration.logStreamId()) + .buffer(); + } + + // Cluster counters + + private static DirectBuffer clusterCounterKey(String name, int clusterId) { + return new KeyFlyweight() + .wrap(new ExpandableArrayBuffer(), 0) + .tagsCount(2) + .stringValue("name", name) + .intValue("clusterId", clusterId) + .buffer(); + } + + // ClusteredService counters + + private static DirectBuffer clusteredServiceCounterKey( + String name, int clusterId, int serviceId) { + return new KeyFlyweight() + .wrap(new ExpandableArrayBuffer(), 0) + .tagsCount(3) + .stringValue("name", name) + .intValue("clusterId", clusterId) + .intValue("serviceId", serviceId) + .buffer(); + } + + private String errorMessage(List<DirectBuffer> actualKeys, DirectBuffer expectedKey) { + return "Expected key: " + + keyCodec.decodeKey(expectedKey, 0) + + " -- was not found in the list of actual keys: " + + actualKeys.stream().map(b -> keyCodec.decodeKey(b, 0)).map(Record::toString).toList(); + } +} diff --git a/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/EgressListenerImpl.java b/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/EgressListenerImpl.java new file mode 100644 index 0000000..d114d1a --- /dev/null +++ b/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/EgressListenerImpl.java @@ -0,0 +1,44 @@ +package io.scalecube.metrics.aeron; + +import io.aeron.cluster.client.EgressListener; +import io.aeron.cluster.codecs.AdminRequestType; +import io.aeron.cluster.codecs.AdminResponseCode; +import io.aeron.cluster.codecs.EventCode; +import io.aeron.logbuffer.Header; +import org.agrona.DirectBuffer; + +public class EgressListenerImpl implements EgressListener { + + @Override + public void onMessage( + long clusterSessionId, + long timestamp, + DirectBuffer buffer, + int offset, + int length, + Header header) {} + + @Override + public void onSessionEvent( + long correlationId, + long clusterSessionId, + long leadershipTermId, + int leaderMemberId, + EventCode code, + String detail) {} + + @Override + public void onNewLeader( + long clusterSessionId, long leadershipTermId, int leaderMemberId, String ingressEndpoints) {} + + @Override + public void onAdminResponse( + long clusterSessionId, + long correlationId, + AdminRequestType requestType, + AdminResponseCode responseCode, + String message, + DirectBuffer payload, + int payloadOffset, + int payloadLength) {} +} diff --git a/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/LoggingExtension.java b/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/LoggingExtension.java new file mode 100644 index 0000000..8e55bb1 --- /dev/null +++ b/metrics-aeron/src/test/java/io/scalecube/metrics/aeron/LoggingExtension.java @@ -0,0 +1,51 @@ +package io.scalecube.metrics.aeron; + +import java.lang.reflect.Method; +import org.junit.jupiter.api.extension.AfterAllCallback; +import org.junit.jupiter.api.extension.AfterEachCallback; +import org.junit.jupiter.api.extension.BeforeAllCallback; +import org.junit.jupiter.api.extension.BeforeEachCallback; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A JUnit 5 extension that logs each test's name at start and finish.
Make sure to start the JVM + * with -Djunit.jupiter.extensions.autodetection.enabled=true to activate this + * extension. + */ +public class LoggingExtension + implements AfterEachCallback, BeforeEachCallback, AfterAllCallback, BeforeAllCallback { + + private static final Logger LOGGER = LoggerFactory.getLogger(LoggingExtension.class); + + @Override + public void beforeAll(ExtensionContext context) { + LOGGER.info( + "***** Setup: {} *****", context.getTestClass().map(Class::getSimpleName).orElse("")); + } + + @Override + public void afterEach(ExtensionContext context) { + LOGGER.info( + "***** Test finished: {}.{}.{} *****", + context.getTestClass().map(Class::getSimpleName).orElse(""), + context.getTestMethod().map(Method::getName).orElse(""), + context.getDisplayName()); + } + + @Override + public void beforeEach(ExtensionContext context) { + LOGGER.info( + "***** Test started: {}.{}.{} *****", + context.getTestClass().map(Class::getSimpleName).orElse(""), + context.getTestMethod().map(Method::getName).orElse(""), + context.getDisplayName()); + } + + @Override + public void afterAll(ExtensionContext context) { + LOGGER.info( + "***** TearDown: {} *****", context.getTestClass().map(Class::getSimpleName).orElse("")); + } +} diff --git a/metrics-aeron/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension b/metrics-aeron/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension new file mode 100644 index 0000000..fea3ed8 --- /dev/null +++ b/metrics-aeron/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension @@ -0,0 +1 @@ +io.scalecube.metrics.aeron.LoggingExtension diff --git a/metrics-aeron/src/test/resources/junit-platform.properties b/metrics-aeron/src/test/resources/junit-platform.properties new file mode 100644 index 0000000..6efc0d5 --- /dev/null +++ b/metrics-aeron/src/test/resources/junit-platform.properties @@ -0,0 +1 @@ +junit.jupiter.extensions.autodetection.enabled=true diff --git a/metrics-aeron/src/test/resources/log4j2-test.xml b/metrics-aeron/src/test/resources/log4j2-test.xml new file mode 100644 index 0000000..ed8e666 --- /dev/null +++ b/metrics-aeron/src/test/resources/log4j2-test.xml @@ -0,0 +1,41 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Configuration> + <Properties> + <Property name="pattern">%level{length=1} %d{ISO8601} %c{1.} %m [%t]%n</Property> + </Properties> + <Appenders> + <Console name="Console" target="SYSTEM_OUT"> + <PatternLayout pattern="${pattern}"/> + </Console> + </Appenders> + <Loggers> + + + + + + + + + + + + + + + + + + + + <Root level="INFO"> + <AppenderRef ref="Console"/> + </Root> + </Loggers> +</Configuration> diff --git a/metrics-benchmarks/pom.xml b/metrics-benchmarks/pom.xml new file mode 100644 index 0000000..0ad5c0f --- /dev/null +++ b/metrics-benchmarks/pom.xml @@ -0,0 +1,108 @@ +<?xml version="1.0" encoding="UTF-8"?> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + + <parent> + <groupId>io.scalecube</groupId> + <artifactId>scalecube-metrics-parent</artifactId> + <version>0.1.0-SNAPSHOT</version> + </parent> + + <artifactId>scalecube-metrics-benchmarks</artifactId> + + <dependencies> + <dependency> + <groupId>io.scalecube</groupId> + <artifactId>scalecube-metrics</artifactId> + <version>${project.parent.version}</version> + </dependency> + + <dependency> + <groupId>org.openjdk.jmh</groupId> + <artifactId>jmh-core</artifactId> + </dependency> + <dependency> + <groupId>org.openjdk.jmh</groupId> + <artifactId>jmh-generator-annprocess</artifactId> + </dependency> + </dependencies> + + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-compiler-plugin</artifactId> + <configuration> + <annotationProcessorPaths> + <path> + <groupId>org.openjdk.jmh</groupId> + <artifactId>jmh-generator-annprocess</artifactId> + <version>${jmh.version}</version> + </path> + </annotationProcessorPaths> + </configuration> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-plugin</artifactId> + <configuration> + <argLine> + @{argLine} + -javaagent:${settings.localRepository}/org/mockito/mockito-core/${mockito.version}/mockito-core-${mockito.version}.jar + </argLine> + </configuration> + </plugin> + <plugin> + <artifactId>maven-jar-plugin</artifactId> + </plugin> + <plugin> + <artifactId>maven-dependency-plugin</artifactId> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-shade-plugin</artifactId> + <version>3.4.1</version> + <executions> + <execution> + <phase>package</phase> + <goals> + <goal>shade</goal> + </goals> + <configuration> + <finalName>benchmarks</finalName> + <transformers> + <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer"> + <mainClass>org.openjdk.jmh.Main</mainClass> + </transformer> + </transformers> + <shadedArtifactAttached>true</shadedArtifactAttached> + <filters> + <filter> + <artifact>*:*</artifact> + <excludes> + <exclude>META-INF/*.SF</exclude> + <exclude>META-INF/*.DSA</exclude> + <exclude>META-INF/*.RSA</exclude> + </excludes> + </filter> + </filters> + <createDependencyReducedPom>false</createDependencyReducedPom> + </configuration> + </execution> + </executions> + </plugin> + </plugins> + </build> +</project> diff --git a/metrics-benchmarks/src/main/java/io/scalecube/metrics/HistogramBenchmark.java
b/metrics-benchmarks/src/main/java/io/exberry/metrics/HistogramBenchmark.java new file mode 100644 index 0000000..f0e93bf --- /dev/null +++ b/metrics-benchmarks/src/main/java/io/exberry/metrics/HistogramBenchmark.java @@ -0,0 +1,62 @@ +package io.scalecube.metrics; + +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import org.HdrHistogram.Histogram; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Group; +import org.openjdk.jmh.annotations.GroupThreads; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +@Warmup(iterations = 3, time = 3) +@Measurement(iterations = 3, time = 3) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Group) +public class HistogramBenchmark { + + static final long HIGHEST_TRACKABLE_VALUE = TimeUnit.SECONDS.toNanos(1); + static final int VALUE_COUNT = 1024; + + volatile Histogram current = new Histogram(1, HIGHEST_TRACKABLE_VALUE, 3); + volatile Histogram swap = new Histogram(1, HIGHEST_TRACKABLE_VALUE, 3); + + int counter = 0; + final long[] values = new long[VALUE_COUNT]; + + @Setup(Level.Trial) + public void setup() { + final var random = ThreadLocalRandom.current(); + for (int i = 0; i < VALUE_COUNT; i++) { + values[i] = random.nextLong(1, HIGHEST_TRACKABLE_VALUE + 1); + } + } + + @Benchmark + @Group("readWriteGroup") + @GroupThreads(1) + public void record() { + final long value = values[counter++ & (VALUE_COUNT - 1)]; + final var histogram = current; + histogram.recordValue(Math.min(value, HIGHEST_TRACKABLE_VALUE)); + } + + @Benchmark + @Group("readWriteGroup") + @GroupThreads(value = 1) + public void swapAndUpdate() throws InterruptedException { + Thread.sleep(1000); + final var value = current; + current = swap; + swap = value; + value.reset(); + } +} diff --git a/metrics-benchmarks/src/main/java/io/exberry/metrics/MetricsBenchmark.java b/metrics-benchmarks/src/main/java/io/exberry/metrics/MetricsBenchmark.java new file mode 100644 index 0000000..914005a --- /dev/null +++ b/metrics-benchmarks/src/main/java/io/exberry/metrics/MetricsBenchmark.java @@ -0,0 +1,36 @@ +package io.scalecube.metrics; + +import java.util.concurrent.TimeUnit; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +@Threads(1) +@State(Scope.Thread) +@Warmup(iterations = 3, time = 3) +@Measurement(iterations = 3, time = 3) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +public class MetricsBenchmark { + + @Benchmark + public void nanoTime(Blackhole bh) { + bh.consume(System.nanoTime()); + } + + @Benchmark + public void burnCpuMicros() { + long durationNanos = 20 * 1000; + long start = System.nanoTime(); + while ((System.nanoTime() - start) < durationNanos) { + Thread.onSpinWait(); + } + } +} diff --git 
a/metrics-benchmarks/src/main/java/io/exberry/metrics/TpsBenchmark.java b/metrics-benchmarks/src/main/java/io/exberry/metrics/TpsBenchmark.java new file mode 100644 index 0000000..94e6b74 --- /dev/null +++ b/metrics-benchmarks/src/main/java/io/exberry/metrics/TpsBenchmark.java @@ -0,0 +1,51 @@ +package io.scalecube.metrics; + +import java.util.concurrent.TimeUnit; +import org.agrona.collections.MutableLong; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Group; +import org.openjdk.jmh.annotations.GroupThreads; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +@Warmup(iterations = 3, time = 3) +@Measurement(iterations = 3, time = 3) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Group) +public class TpsBenchmark { + + volatile MutableLong current; + volatile MutableLong swap; + + @Setup(Level.Trial) + public void setup() { + current = new MutableLong(); + swap = new MutableLong(); + } + + @Benchmark + @Group("readWriteGroup") + @GroupThreads(1) + public void record() { + current.increment(); + } + + @Benchmark + @Group("readWriteGroup") + @GroupThreads(value = 1) + public void swapAndUpdate() throws InterruptedException { + Thread.sleep(1000); + final var counter = current; + current = swap; + swap = counter; + counter.set(0); + } +} diff --git a/metrics-benchmarks/src/main/java/io/exberry/metrics/VolatileBenchmark.java b/metrics-benchmarks/src/main/java/io/exberry/metrics/VolatileBenchmark.java new file mode 100644 index 0000000..8ee1cee --- /dev/null +++ b/metrics-benchmarks/src/main/java/io/exberry/metrics/VolatileBenchmark.java @@ -0,0 +1,44 @@ +package io.scalecube.metrics; + +import java.util.concurrent.TimeUnit; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Warmup; + +@Warmup(iterations = 3, time = 3) +@Measurement(iterations = 3, time = 3) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@Threads(1) +@State(Scope.Thread) +public class VolatileBenchmark { + + int plain = 0; + volatile int vol = 0; + + @Benchmark + public int readPlain() { + return plain; + } + + @Benchmark + public int readVolatile() { + return vol; + } + + @Benchmark + public void writePlain() { + plain++; + } + + @Benchmark + public void writeVolatile() { + vol++; + } +} diff --git a/metrics-benchmarks/src/main/java/io/exberry/metrics/VolatileReadMostlyBenchmark.java b/metrics-benchmarks/src/main/java/io/exberry/metrics/VolatileReadMostlyBenchmark.java new file mode 100644 index 0000000..1d2945f --- /dev/null +++ b/metrics-benchmarks/src/main/java/io/exberry/metrics/VolatileReadMostlyBenchmark.java @@ -0,0 +1,40 @@ +package io.scalecube.metrics; + +import java.util.concurrent.TimeUnit; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import 
org.openjdk.jmh.annotations.Group;
import org.openjdk.jmh.annotations.GroupThreads;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;

@Warmup(iterations = 3, time = 3)
@Measurement(iterations = 3, time = 3)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@State(Scope.Group)
public class VolatileReadMostlyBenchmark {

  volatile int vol = 0;

  @Benchmark
  @Group("readWriteGroup")
  @GroupThreads(1)
  public int reader() {
    return vol;
  }

  @Benchmark
  @Group("readWriteGroup")
  @GroupThreads(value = 1)
  public void infrequentWriter() throws InterruptedException {
    Thread.sleep(1000);
    // read-then-write switchover
    int current = vol;
    vol = current + 1;
  }
}
diff --git a/metrics-examples/pom.xml b/metrics-examples/pom.xml
new file mode 100644
index 0000000..9d48176
--- /dev/null
+++ b/metrics-examples/pom.xml
@@ -0,0 +1,78 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <parent>
    <groupId>io.scalecube</groupId>
    <artifactId>scalecube-metrics-parent</artifactId>
    <version>0.1.0-SNAPSHOT</version>
  </parent>

  <artifactId>scalecube-metrics-examples</artifactId>

  <dependencies>
    <dependency>
      <groupId>io.scalecube</groupId>
      <artifactId>scalecube-metrics</artifactId>
      <version>${project.parent.version}</version>
    </dependency>
    <dependency>
      <groupId>io.scalecube</groupId>
      <artifactId>scalecube-metrics-mimir</artifactId>
      <version>${project.parent.version}</version>
    </dependency>
    <dependency>
      <groupId>io.scalecube</groupId>
      <artifactId>scalecube-metrics-aeron</artifactId>
      <version>${project.parent.version}</version>
    </dependency>
    <dependency>
      <groupId>io.scalecube</groupId>
      <artifactId>scalecube-metrics-prometheus</artifactId>
      <version>${project.parent.version}</version>
    </dependency>

    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-api</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.logging.log4j</groupId>
      <artifactId>log4j-slf4j-impl</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.logging.log4j</groupId>
      <artifactId>log4j-core</artifactId>
    </dependency>

    <dependency>
      <groupId>org.testcontainers</groupId>
      <artifactId>testcontainers</artifactId>
    </dependency>

    <dependency>
      <groupId>io.aeron</groupId>
      <artifactId>aeron-all</artifactId>
    </dependency>
    <dependency>
      <groupId>io.aeron</groupId>
      <artifactId>aeron-agent</artifactId>
    </dependency>
    <dependency>
      <groupId>net.bytebuddy</groupId>
      <artifactId>byte-buddy</artifactId>
    </dependency>
    <dependency>
      <groupId>net.bytebuddy</groupId>
      <artifactId>byte-buddy-agent</artifactId>
    </dependency>

    <dependency>
      <groupId>com.fasterxml.jackson.core</groupId>
      <artifactId>jackson-databind</artifactId>
    </dependency>
  </dependencies>
</project>
diff --git a/metrics-examples/src/main/java/io/scalecube/metrics/loki/JvmSafepointExporter.java b/metrics-examples/src/main/java/io/scalecube/metrics/loki/JvmSafepointExporter.java
new file mode 100644
index 0000000..b88a84e
--- /dev/null
+++ b/metrics-examples/src/main/java/io/scalecube/metrics/loki/JvmSafepointExporter.java
@@ -0,0 +1,266 @@
package io.scalecube.metrics.loki;

import static io.scalecube.metrics.MetricNames.sanitizeName;

import io.scalecube.metrics.Delay;
import io.scalecube.metrics.loki.LokiPublisher.WriteProxy;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.time.Instant;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.agrona.CloseHelper;
import org.agrona.concurrent.Agent;
import org.agrona.concurrent.AgentInvoker;
import org.agrona.concurrent.AgentTerminationException;
import org.agrona.concurrent.EpochClock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// TODO: more work is needed: on restart this may re-process a gc.log that was already processed
public class JvmSafepointExporter implements Agent {

  private static final Logger LOGGER = LoggerFactory.getLogger(JvmSafepointExporter.class);
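  // Tails the newest gc.log produced by -Xlog:gc*,safepoint, parses safepoint
  // entries, and ships them to Loki as a log stream via the WriteProxy.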
  private static final Duration READ_INTERVAL = Duration.ofSeconds(1);
  private static final int DEFAULT_CHUNK_SIZE = 64 * 1024;

  private static final DateTimeFormatter GC_LOG_TIMESTAMP_FORMATTER =
      DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ");

  private static final Pattern SAFEPOINT_PATTERN =
      Pattern.compile(
          "\\[(?<timestamp>[^]]+)] Safepoint \"(?<reason>[^\"]+)\", "
              + "Time since last: (?<sinceLast>\\d+) ns, "
              + "Reaching safepoint: (?<reaching>\\d+) ns, "
              + "Cleanup: (?<cleanup>\\d+) ns, "
              + "At safepoint: (?<at>\\d+) ns, "
              + "Total: (?<total>\\d+) ns");

  public enum State {
    INIT,
    RUNNING,
    CLEANUP,
    CLOSED
  }

  private final File gcLogDir;
  private final Map<String, String> labels;
  private final WriteProxy writeProxy;
  private final AgentInvoker publisherInvoker;

  private final Delay retryInterval;
  private final Delay readInterval;
  private FileChannel fileChannel;
  private final ByteBuffer chunkBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE);
  private final StringBuilder lineBuffer = new StringBuilder();
  private State state = State.CLOSED;

  public JvmSafepointExporter(
      File gcLogDir,
      Map<String, String> labels,
      WriteProxy writeProxy,
      AgentInvoker publisherInvoker,
      EpochClock epochClock,
      Duration retryInterval) {
    this.gcLogDir = gcLogDir;
    this.labels = labels;
    this.writeProxy = writeProxy;
    this.publisherInvoker = publisherInvoker;
    this.retryInterval = new Delay(epochClock, retryInterval.toMillis());
    this.readInterval = new Delay(epochClock, READ_INTERVAL.toMillis());
  }

  @Override
  public String roleName() {
    return "JvmSafepointExporter";
  }

  @Override
  public void onStart() {
    if (state != State.CLOSED) {
      throw new AgentTerminationException("Illegal state: " + state);
    }
    state(State.INIT);
  }

  @Override
  public int doWork() throws Exception {
    try {
      if (publisherInvoker != null) {
        publisherInvoker.invoke();
      }
      return switch (state) {
        case INIT -> init();
        case RUNNING -> running();
        case CLEANUP -> cleanup();
        default -> throw new AgentTerminationException("Unknown state: " + state);
      };
    } catch (AgentTerminationException e) {
      throw e;
    } catch (Exception e) {
      state(State.CLEANUP);
      throw e;
    }
  }

  private int init() throws IOException {
    if (retryInterval.isNotOverdue()) {
      return 0;
    }

    // -Xlog:gc*,safepoint:$LOGS_DIR/$TS-$SERVICE_NAME-gc.log \

    final var filePath = findLatestGcLog(gcLogDir.toPath());
    if (!Files.exists(filePath) || Files.isDirectory(filePath)) {
      throw new IllegalArgumentException("Wrong file: " + filePath);
    }

    fileChannel = FileChannel.open(filePath);

    state(State.RUNNING);
    return 1;
  }

  private static Path findLatestGcLog(Path dir) throws IOException {
    try (var files = Files.list(dir)) {
      return files
          .filter(Files::isRegularFile)
          .filter(p -> p.getFileName().toString().contains("gc.log"))
          .max(Comparator.comparingLong(p -> p.toFile().lastModified()))
          .orElseThrow(() -> new FileNotFoundException("No matching gc.log files found in " + dir));
    }
  }

  private int running() throws IOException {
    if (readInterval.isOverdue()) {
      final int read = fileChannel.read(chunkBuffer.clear());
      if (read > 0) {
        final byte[] bytes = new byte[chunkBuffer.flip().remaining()];
        chunkBuffer.get(bytes);
        lineBuffer.append(new String(bytes, StandardCharsets.UTF_8));
      } else {
        readInterval.delay();
      }
    }
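    // Emit only complete, newline-terminated lines; a partial tail stays in
    // lineBuffer until the next chunk is read from the file.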
    int workCount = 0;
    final var events = new ArrayList<SafepointEvent>();
    int lineEnd;
    while ((lineEnd = lineBuffer.indexOf("\n")) >= 0) {
      String line = lineBuffer.substring(0, lineEnd).trim();
      lineBuffer.delete(0, lineEnd + 1);
      final var event = processLine(line);
      if (event != null) {
        workCount++;
        events.add(event);
      }
    }

    if (!events.isEmpty()) {
      writeProxy.push(toWriteRequest(events));
    }

    return workCount;
  }

  private static SafepointEvent processLine(String line) {
    final var matcher = SAFEPOINT_PATTERN.matcher(line);
    if (!matcher.find()) {
      return null;
    }
    return new SafepointEvent(
        ZonedDateTime.parse(matcher.group("timestamp"), GC_LOG_TIMESTAMP_FORMATTER).toInstant(),
        matcher.group("reason"),
        Long.parseLong(matcher.group("sinceLast")),
        Long.parseLong(matcher.group("reaching")),
        Long.parseLong(matcher.group("cleanup")),
        Long.parseLong(matcher.group("at")),
        Long.parseLong(matcher.group("total")));
  }

  private WriteRequest toWriteRequest(List<SafepointEvent> events) {
    final var streamLabels = streamLabels(labels);
    streamLabels.put("metric_name", "jvm_safepoint");

    final var values = events.stream().map(JvmSafepointExporter::toLogEntry).toList();

    return new WriteRequest(List.of(new WriteRequest.Stream(streamLabels, values)));
  }

  private static Map<String, String> streamLabels(Map<String, String> map) {
    final var labels = new HashMap<String, String>();
    if (map != null) {
      map.forEach((key, value) -> labels.put(sanitizeName(key), value));
    }
    return labels;
  }

  private static String[] toLogEntry(SafepointEvent event) {
    final var ts = event.timestamp();
    final var timestamp =
        String.valueOf(TimeUnit.SECONDS.toNanos(ts.getEpochSecond()) + ts.getNano());

    final var logLine =
        String.format(
            "reason=\"%s\" sinceLast=%.3f reaching=%.3f cleanup=%.3f at=%.3f total=%.3f",
            event.reason(),
            toMicros(event.sinceLastNs()),
            toMicros(event.reachingNs()),
            toMicros(event.cleanupNs()),
            toMicros(event.atSafepointNs()),
            toMicros(event.totalNs()));

    return new String[] {timestamp, logLine};
  }

  private static double toMicros(long nanos) {
    return nanos / (double) TimeUnit.MICROSECONDS.toNanos(1);
  }

  private int cleanup() {
    CloseHelper.quietClose(fileChannel);
    lineBuffer.setLength(0);

    State previous = state;
    if (previous != State.CLOSED) { // when it comes from onClose()
      retryInterval.delay();
      state(State.INIT);
    }
    return 1;
  }

  @Override
  public void onClose() {
    state(State.CLOSED);
    cleanup();
  }

  private void state(State state) {
    LOGGER.debug("[{}][state] {}->{}", roleName(), this.state, state);
    this.state = state;
  }

  record SafepointEvent(
      Instant timestamp,
      String reason,
      long sinceLastNs,
      long reachingNs,
      long cleanupNs,
      long atSafepointNs,
      long totalNs) {}
}
diff --git a/metrics-examples/src/main/java/io/scalecube/metrics/loki/LokiPublisher.java b/metrics-examples/src/main/java/io/scalecube/metrics/loki/LokiPublisher.java
new file mode 100644
index 0000000..772379e
--- /dev/null
+++ b/metrics-examples/src/main/java/io/scalecube/metrics/loki/LokiPublisher.java
@@ -0,0 +1,258 @@
package io.scalecube.metrics.loki;

import static java.util.concurrent.atomic.AtomicIntegerFieldUpdater.newUpdater;

import java.time.Duration;
import java.util.ConcurrentModificationException;
import java.util.Objects;
import java.util.StringJoiner;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import org.agrona.CloseHelper;
import org.agrona.ErrorHandler;
import org.agrona.concurrent.AgentInvoker;
import org.agrona.concurrent.AgentRunner;
import org.agrona.concurrent.BackoffIdleStrategy;
import org.agrona.concurrent.EpochClock;
import org.agrona.concurrent.IdleStrategy;
import org.agrona.concurrent.ManyToOneConcurrentArrayQueue;
import org.agrona.concurrent.SystemEpochClock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
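// Front-end for LokiPublisherAgent: producers enqueue WriteRequests through the
// WriteProxy while a single agent thread (or a caller-driven AgentInvoker)
// drains the queue and POSTs the batches to Loki.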
public class LokiPublisher implements AutoCloseable {

  private static final Logger LOGGER = LoggerFactory.getLogger(LokiPublisher.class);

  private final Context context;

  private final AgentInvoker agentInvoker;
  private final AgentRunner agentRunner;

  private LokiPublisher(Context context) {
    context.conclude();
    this.context = context;

    final var agent =
        new LokiPublisherAgent(
            context.url(),
            context.epochClock(),
            context.retryInterval(),
            context.publishInterval(),
            context.writeLimit(),
            context.writeQueue());

    if (context.useAgentInvoker()) {
      agentRunner = null;
      agentInvoker = new AgentInvoker(context.errorHandler(), null, agent);
    } else {
      agentInvoker = null;
      agentRunner = new AgentRunner(context.idleStrategy(), context.errorHandler(), null, agent);
    }
  }

  public static LokiPublisher launch(Context context) {
    final var publisher = new LokiPublisher(context);
    if (publisher.agentInvoker != null) {
      publisher.agentInvoker.start();
    } else {
      AgentRunner.startOnThread(publisher.agentRunner);
    }
    return publisher;
  }

  public Context context() {
    return context;
  }

  public AgentInvoker agentInvoker() {
    return agentInvoker;
  }

  public WriteProxy proxy() {
    return new WriteProxy(context.writeQueue());
  }

  @Override
  public void close() {
    CloseHelper.quietCloseAll(agentInvoker, agentRunner);
  }

  public static class Context {

    private static final AtomicIntegerFieldUpdater<Context> IS_CONCLUDED_UPDATER =
        newUpdater(Context.class, "isConcluded");
    private volatile int isConcluded;

    private Duration retryInterval;
    private Duration publishInterval;
    private EpochClock epochClock;
    private boolean useAgentInvoker;
    private ErrorHandler errorHandler;
    private IdleStrategy idleStrategy;
    private String url;
    private Integer writeLimit;
    private Integer writeQueueCapacity;
    private ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue;

    public Context() {}

    private void conclude() {
      if (0 != IS_CONCLUDED_UPDATER.getAndSet(this, 1)) {
        throw new ConcurrentModificationException();
      }

      if (retryInterval == null) {
        retryInterval = Duration.ofSeconds(3);
      }

      if (publishInterval == null) {
        publishInterval = Duration.ofSeconds(5);
      }

      if (epochClock == null) {
        epochClock = SystemEpochClock.INSTANCE;
      }

      if (errorHandler == null) {
        errorHandler = ex -> LOGGER.error("Exception occurred: ", ex);
      }

      if (idleStrategy == null) {
        idleStrategy = new BackoffIdleStrategy();
      }

      Objects.requireNonNull(url, "url");

      if (writeLimit == null) {
        writeLimit = 100;
      }

      if (writeQueueCapacity == null) {
        writeQueueCapacity = 64 * 1024;
      }

      if (writeQueue == null) {
        writeQueue = new ManyToOneConcurrentArrayQueue<>(writeQueueCapacity);
      }
    }

    public Duration retryInterval() {
      return retryInterval;
    }

    public Context retryInterval(Duration retryInterval) {
      this.retryInterval = retryInterval;
      return this;
    }

    public Duration publishInterval() {
      return publishInterval;
    }

    public Context publishInterval(Duration publishInterval) {
      this.publishInterval = publishInterval;
      return this;
    }

    public EpochClock epochClock() {
      return epochClock;
    }

    public Context epochClock(EpochClock epochClock) {
      this.epochClock = epochClock;
      return this;
    }
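    // When true, the caller drives the agent by invoking agentInvoker() duty
    // cycles; otherwise launch() starts a dedicated runner thread.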
    public boolean useAgentInvoker() {
      return useAgentInvoker;
    }

    public Context useAgentInvoker(boolean useAgentInvoker) {
      this.useAgentInvoker = useAgentInvoker;
      return this;
    }

    public ErrorHandler errorHandler() {
      return errorHandler;
    }

    public Context errorHandler(ErrorHandler errorHandler) {
      this.errorHandler = errorHandler;
      return this;
    }

    public IdleStrategy idleStrategy() {
      return idleStrategy;
    }

    public Context idleStrategy(IdleStrategy idleStrategy) {
      this.idleStrategy = idleStrategy;
      return this;
    }

    public String url() {
      return url;
    }

    public Context url(String url) {
      this.url = url;
      return this;
    }

    public Integer writeLimit() {
      return writeLimit;
    }

    public Context writeLimit(Integer writeLimit) {
      this.writeLimit = writeLimit;
      return this;
    }

    public Integer writeQueueCapacity() {
      return writeQueueCapacity;
    }

    public Context writeQueueCapacity(Integer writeQueueCapacity) {
      this.writeQueueCapacity = writeQueueCapacity;
      return this;
    }

    public ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue() {
      return writeQueue;
    }

    public Context writeQueue(ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue) {
      this.writeQueue = writeQueue;
      return this;
    }

    @Override
    public String toString() {
      return new StringJoiner(", ", Context.class.getSimpleName() + "[", "]")
          .add("retryInterval=" + retryInterval)
          .add("publishInterval=" + publishInterval)
          .add("epochClock=" + epochClock)
          .add("useAgentInvoker=" + useAgentInvoker)
          .add("errorHandler=" + errorHandler)
          .add("idleStrategy=" + idleStrategy)
          .add("url='" + url + "'")
          .add("writeLimit=" + writeLimit)
          .add("writeQueueCapacity=" + writeQueueCapacity)
          .add("writeQueue=" + writeQueue)
          .toString();
    }
  }

  public static class WriteProxy {

    private final ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue;

    public WriteProxy(ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue) {
      this.writeQueue = writeQueue;
    }

    public void push(WriteRequest request) {
      writeQueue.offer(request);
    }
  }
}
diff --git a/metrics-examples/src/main/java/io/scalecube/metrics/loki/LokiPublisherAgent.java b/metrics-examples/src/main/java/io/scalecube/metrics/loki/LokiPublisherAgent.java
new file mode 100644
index 0000000..6157a15
--- /dev/null
+++ b/metrics-examples/src/main/java/io/scalecube/metrics/loki/LokiPublisherAgent.java
@@ -0,0 +1,199 @@
package io.scalecube.metrics.loki;

import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.scalecube.metrics.Delay;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpRequest.BodyPublishers;
import java.net.http.HttpResponse;
import java.time.Duration;
import java.util.ArrayList;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.zip.GZIPOutputStream;
import org.agrona.LangUtil;
import org.agrona.concurrent.Agent;
import org.agrona.concurrent.AgentTerminationException;
import org.agrona.concurrent.EpochClock;
import org.agrona.concurrent.ManyToOneConcurrentArrayQueue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LokiPublisherAgent implements Agent {

  private static final Logger LOGGER = LoggerFactory.getLogger(LokiPublisherAgent.class);
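  // Field-level visibility lets Jackson serialize WriteRequest/Stream directly
  // from their private fields, without requiring bean-style getters.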
  private static final ObjectMapper OBJECT_MAPPER =
      new ObjectMapper().setVisibility(PropertyAccessor.FIELD, Visibility.ANY);

  public enum State {
    INIT,
    RUNNING,
    CLEANUP,
    CLOSED
  }

  private final String url;
  private final int writeLimit;
  private final ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue;

  private final Delay retryInterval;
  private final Delay publishInterval;
  private HttpClient httpClient;
  private ExecutorService executor;
  private CompletableFuture<HttpResponse<String>> future;
  private State state = State.CLOSED;

  public LokiPublisherAgent(
      String url,
      EpochClock epochClock,
      Duration retryInterval,
      Duration publishInterval,
      int writeLimit,
      ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue) {
    this.url = url;
    this.writeLimit = writeLimit;
    this.writeQueue = writeQueue;
    this.retryInterval = new Delay(epochClock, retryInterval.toMillis());
    this.publishInterval = new Delay(epochClock, publishInterval.toMillis());
  }

  @Override
  public String roleName() {
    return "LokiPublisherAgent";
  }

  @Override
  public void onStart() {
    if (state != State.CLOSED) {
      throw new AgentTerminationException("Illegal state: " + state);
    }
    state(State.INIT);
  }

  @Override
  public int doWork() throws Exception {
    try {
      return switch (state) {
        case INIT -> init();
        case RUNNING -> running();
        case CLEANUP -> cleanup();
        default -> throw new AgentTerminationException("Unknown state: " + state);
      };
    } catch (AgentTerminationException e) {
      throw e;
    } catch (Exception e) {
      state(State.CLEANUP);
      throw e;
    }
  }

  private int init() {
    if (retryInterval.isNotOverdue()) {
      return 0;
    }

    executor =
        Executors.newSingleThreadExecutor(
            r -> {
              final var thread = new Thread(r);
              thread.setDaemon(true);
              return thread;
            });
    httpClient = HttpClient.newBuilder().executor(executor).build();
    publishInterval.delay();

    state(State.RUNNING);
    return 1;
  }

  private int running() throws Exception {
    if (publishInterval.isOverdue()) {
      publishInterval.delay();
      if (future != null) {
        future.cancel(true);
        future = null;
      }

      final var streams = new ArrayList<WriteRequest.Stream>();
      writeQueue.drain(request -> streams.addAll(request.streams()), writeLimit);

      if (!streams.isEmpty()) {
        future = send(new WriteRequest(streams));
      }
    }

    if (future != null) {
      if (future.isDone()) {
        final var response = future.get();
        final var statusCode = response.statusCode();
        if (statusCode != 200 && statusCode != 204) {
          LOGGER.warn("Failed to push metrics: HTTP {}, body: {}", statusCode, response.body());
        }
        future = null;
        return 1;
      }
    }

    return 0;
  }

  private CompletableFuture<HttpResponse<String>> send(WriteRequest request) {
    return httpClient.sendAsync(
        HttpRequest.newBuilder()
            .uri(URI.create(url))
            .header("Content-Type", "application/json")
            .header("Content-Encoding", "gzip")
            .POST(BodyPublishers.ofByteArray(gzip(request)))
            .build(),
        HttpResponse.BodyHandlers.ofString());
  }

  public static byte[] gzip(WriteRequest request) {
    final var byteStream = new ByteArrayOutputStream();
    try (final var outputStream = new GZIPOutputStream(byteStream)) {
      OBJECT_MAPPER.writeValue(outputStream, request);
    } catch (IOException e) {
      LangUtil.rethrowUnchecked(e);
    }
    return byteStream.toByteArray();
  }

  private int cleanup() {
    if (executor != null) {
      executor.shutdownNow();
    }
    // CloseHelper.quietClose(httpClient);
    httpClient = null;
    executor = null;

    if (future != null) {
      future.cancel(true);
      future = null;
    }
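    // When cleanup() is entered from doWork() (not from onClose()), schedule a
    // retry delay and fall back to INIT so the agent can re-establish itself.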
    State previous = state;
    if (previous != State.CLOSED) { // when it comes from onClose()
      retryInterval.delay();
      state(State.INIT);
    }
    return 1;
  }

  @Override
  public void onClose() {
    state(State.CLOSED);
    cleanup();
  }

  private void state(State state) {
    LOGGER.debug("[{}][state] {}->{}", roleName(), this.state, state);
    this.state = state;
  }
}
diff --git a/metrics-examples/src/main/java/io/scalecube/metrics/loki/WriteRequest.java b/metrics-examples/src/main/java/io/scalecube/metrics/loki/WriteRequest.java
new file mode 100644
index 0000000..7bf8a14
--- /dev/null
+++ b/metrics-examples/src/main/java/io/scalecube/metrics/loki/WriteRequest.java
@@ -0,0 +1,40 @@
package io.scalecube.metrics.loki;

import java.util.List;
import java.util.Map;

public class WriteRequest {

  private List<Stream> streams;

  public WriteRequest() {}

  public WriteRequest(List<Stream> streams) {
    this.streams = streams;
  }

  public List<Stream> streams() {
    return streams;
  }

  public static class Stream {

    private Map<String, String> stream; // e.g., {job="test", level="info"}
    private List<String[]> values; // each entry: [timestamp, log line]

    public Stream() {}

    public Stream(Map<String, String> stream, List<String[]> values) {
      this.stream = stream;
      this.values = values;
    }

    public Map<String, String> stream() {
      return stream;
    }

    public List<String[]> values() {
      return values;
    }
  }
}
diff --git a/metrics-examples/src/main/java/io/scalecube/metrics/mimir/JvmSafepointExporter.java b/metrics-examples/src/main/java/io/scalecube/metrics/mimir/JvmSafepointExporter.java
new file mode 100644
index 0000000..1258eac
--- /dev/null
+++ b/metrics-examples/src/main/java/io/scalecube/metrics/mimir/JvmSafepointExporter.java
@@ -0,0 +1,272 @@
package io.scalecube.metrics.mimir;

import io.scalecube.metrics.Delay;
import io.scalecube.metrics.mimir.MimirPublisher.WriteProxy;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.time.Instant;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import org.agrona.CloseHelper;
import org.agrona.concurrent.Agent;
import org.agrona.concurrent.AgentInvoker;
import org.agrona.concurrent.AgentTerminationException;
import org.agrona.concurrent.EpochClock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import prometheus.Remote.WriteRequest;
import prometheus.Types.Label;
import prometheus.Types.Sample;
import prometheus.Types.TimeSeries;
import prometheus.Types.TimeSeries.Builder;

// TODO: keep it, but more work is needed; Grafana has trouble rendering the idle periods
public class JvmSafepointExporter implements Agent {

  private static final Logger LOGGER = LoggerFactory.getLogger(JvmSafepointExporter.class);

  private static final Duration READ_INTERVAL = Duration.ofSeconds(1);
  private static final int DEFAULT_CHUNK_SIZE = 64 * 1024;

  private static final DateTimeFormatter GC_LOG_TIMESTAMP_FORMATTER =
      DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ");
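  // Matches JDK unified-logging safepoint lines of the form:
  // [<timestamp>] Safepoint "<reason>", Time since last: N ns, Reaching safepoint: N ns,
  // Cleanup: N ns, At safepoint: N ns, Total: N ns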
  private static final Pattern SAFEPOINT_PATTERN =
      Pattern.compile(
          "\\[(?<timestamp>[^]]+)] Safepoint \"(?<reason>[^\"]+)\", "
              + "Time since last: (?<sinceLast>\\d+) ns, "
              + "Reaching safepoint: (?<reaching>\\d+) ns, "
              + "Cleanup: (?<cleanup>\\d+) ns, "
              + "At safepoint: (?<at>\\d+) ns, "
              + "Total: (?<total>\\d+) ns");

  public enum State {
    INIT,
    RUNNING,
    CLEANUP,
    CLOSED
  }

  private final File gcLogDir;
  private final Map<String, String> labels;
  private final WriteProxy writeProxy;
  private final AgentInvoker publisherInvoker;

  private final Delay retryInterval;
  private final Delay readInterval;
  private final Delay idleInterval;
  private FileChannel fileChannel;
  private final ByteBuffer chunkBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE);
  private final StringBuilder lineBuffer = new StringBuilder();
  private State state = State.CLOSED;

  public JvmSafepointExporter(
      File gcLogDir,
      Map<String, String> labels,
      WriteProxy writeProxy,
      AgentInvoker publisherInvoker,
      EpochClock epochClock,
      Duration retryInterval) {
    this.gcLogDir = gcLogDir;
    this.labels = labels;
    this.writeProxy = writeProxy;
    this.publisherInvoker = publisherInvoker;
    this.retryInterval = new Delay(epochClock, retryInterval.toMillis());
    this.readInterval = new Delay(epochClock, READ_INTERVAL.toMillis());
    this.idleInterval = new Delay(epochClock, READ_INTERVAL.toMillis());
  }

  @Override
  public String roleName() {
    return "JvmSafepointExporter";
  }

  @Override
  public void onStart() {
    if (state != State.CLOSED) {
      throw new AgentTerminationException("Illegal state: " + state);
    }
    state(State.INIT);
  }

  @Override
  public int doWork() throws Exception {
    try {
      if (publisherInvoker != null) {
        publisherInvoker.invoke();
      }
      return switch (state) {
        case INIT -> init();
        case RUNNING -> running();
        case CLEANUP -> cleanup();
        default -> throw new AgentTerminationException("Unknown state: " + state);
      };
    } catch (AgentTerminationException e) {
      throw e;
    } catch (Exception e) {
      state(State.CLEANUP);
      throw e;
    }
  }

  private int init() throws IOException {
    if (retryInterval.isNotOverdue()) {
      return 0;
    }

    // -Xlog:gc*,safepoint:$LOGS_DIR/$TS-$SERVICE_NAME-gc.log \

    final var filePath = findLatestGcLog(gcLogDir.toPath());
    if (!Files.exists(filePath) || Files.isDirectory(filePath)) {
      throw new IllegalArgumentException("Wrong file: " + filePath);
    }

    fileChannel = FileChannel.open(filePath);

    state(State.RUNNING);
    return 1;
  }

  private static Path findLatestGcLog(Path dir) throws IOException {
    try (var files = Files.list(dir)) {
      return files
          .filter(Files::isRegularFile)
          .filter(p -> p.getFileName().toString().contains("gc.log"))
          .max(Comparator.comparingLong(p -> p.toFile().lastModified()))
          .orElseThrow(() -> new FileNotFoundException("No matching gc.log files found in " + dir));
    }
  }

  private int running() throws IOException {
    if (readInterval.isOverdue()) {
      final int read = fileChannel.read(chunkBuffer.clear());
      if (read > 0) {
        final byte[] bytes = new byte[chunkBuffer.flip().remaining()];
        chunkBuffer.get(bytes);
        lineBuffer.append(new String(bytes, StandardCharsets.UTF_8));
      } else {
        readInterval.delay();
      }
    }

    int workCount = 0;
    final var tsList = new ArrayList<TimeSeries>();
    int lineEnd;
    while ((lineEnd = lineBuffer.indexOf("\n")) >= 0) {
      String line = lineBuffer.substring(0, lineEnd).trim();
      lineBuffer.delete(0, lineEnd + 1);
      final var event = processLine(line);
      if (event != null) {
        workCount++;
        tsList.addAll(toTimeSeriesList(event));
      }
    }

    if (!tsList.isEmpty()) {
      idleInterval.delay();
      writeProxy.push(WriteRequest.newBuilder().addAllTimeseries(tsList).build());
    }
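    // Keep-alive for quiet JVMs: when no safepoint was seen for a while, push a
    // zero-valued "Null" sample so the series does not go stale in Grafana.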
    if (idleInterval.isOverdue()) {
      writeProxy.push(
          WriteRequest.newBuilder()
              .addAllTimeseries(
                  toTimeSeriesList(new SafepointEvent(Instant.now(), "Null", 0, 0, 0, 0, 0)))
              .build());
    }

    return workCount;
  }

  private static SafepointEvent processLine(String line) {
    final var matcher = SAFEPOINT_PATTERN.matcher(line);
    if (!matcher.find()) {
      return null;
    }
    return new SafepointEvent(
        ZonedDateTime.parse(matcher.group("timestamp"), GC_LOG_TIMESTAMP_FORMATTER).toInstant(),
        matcher.group("reason"),
        Long.parseLong(matcher.group("sinceLast")),
        Long.parseLong(matcher.group("reaching")),
        Long.parseLong(matcher.group("cleanup")),
        Long.parseLong(matcher.group("at")),
        Long.parseLong(matcher.group("total")));
  }

  private List<TimeSeries> toTimeSeriesList(SafepointEvent event) {
    final var tsList = new ArrayList<TimeSeries>();
    final var timestamp = event.timestamp().toEpochMilli();
    final var reason = event.reason();

    tsList.add(
        toTimeSeries("jvm_safepoint_reaching_micros", reason, event.reachingNs(), timestamp));
    tsList.add(toTimeSeries("jvm_safepoint_cleanup_micros", reason, event.cleanupNs(), timestamp));
    tsList.add(toTimeSeries("jvm_safepoint_at_micros", reason, event.atSafepointNs(), timestamp));
    tsList.add(toTimeSeries("jvm_safepoint_total_micros", reason, event.totalNs(), timestamp));

    return tsList;
  }

  private TimeSeries toTimeSeries(String metric, String reason, long value, long timestamp) {
    var builder =
        TimeSeries.newBuilder()
            .addLabels(Label.newBuilder().setName("__name__").setValue(metric).build())
            .addLabels(Label.newBuilder().setName("reason").setValue(reason).build())
            .addSamples(
                Sample.newBuilder().setValue(value / 1000.0).setTimestamp(timestamp).build());
    addLabels(builder);
    return builder.build();
  }

  private void addLabels(Builder builder) {
    if (labels != null) {
      labels.forEach(
          (name, value) ->
              builder.addLabels(Label.newBuilder().setName(name).setValue(value).build()));
    }
  }

  private int cleanup() {
    CloseHelper.quietClose(fileChannel);
    lineBuffer.setLength(0);

    State previous = state;
    if (previous != State.CLOSED) { // when it comes from onClose()
      retryInterval.delay();
      state(State.INIT);
    }
    return 1;
  }

  @Override
  public void onClose() {
    state(State.CLOSED);
    cleanup();
  }

  private void state(State state) {
    LOGGER.debug("[{}][state] {}->{}", roleName(), this.state, state);
    this.state = state;
  }

  record SafepointEvent(
      Instant timestamp,
      String reason,
      long sinceLastNs,
      long reachingNs,
      long cleanupNs,
      long atSafepointNs,
      long totalNs) {}
}
diff --git a/metrics-examples/src/main/java/io/scalecube/metrics/mimir/MimirAllInOneAeron.java b/metrics-examples/src/main/java/io/scalecube/metrics/mimir/MimirAllInOneAeron.java
new file mode 100644
index 0000000..2e49b10
--- /dev/null
+++ b/metrics-examples/src/main/java/io/scalecube/metrics/mimir/MimirAllInOneAeron.java
@@ -0,0 +1,395 @@
package io.scalecube.metrics.mimir;

import static io.aeron.Publication.MAX_POSITION_EXCEEDED;
import static io.aeron.cluster.client.AeronCluster.SESSION_HEADER_LENGTH;
import static org.testcontainers.utility.MountableFile.forClasspathResource;

import io.aeron.Aeron;
import io.aeron.ExclusivePublication;
import io.aeron.Image;
import io.aeron.Publication;
import io.aeron.archive.Archive;
import io.aeron.cluster.ConsensusModule;
import io.aeron.cluster.client.AeronCluster;
import io.aeron.cluster.client.AeronCluster.Context;
import io.aeron.cluster.client.EgressListener;
import
io.aeron.cluster.codecs.AdminRequestType; +import io.aeron.cluster.codecs.AdminResponseCode; +import io.aeron.cluster.codecs.CloseReason; +import io.aeron.cluster.codecs.EventCode; +import io.aeron.cluster.service.ClientSession; +import io.aeron.cluster.service.Cluster; +import io.aeron.cluster.service.Cluster.Role; +import io.aeron.cluster.service.ClusteredService; +import io.aeron.cluster.service.ClusteredServiceContainer; +import io.aeron.driver.MediaDriver; +import io.aeron.logbuffer.BufferClaim; +import io.aeron.logbuffer.Header; +import io.scalecube.metrics.CountersReaderAgent; +import io.scalecube.metrics.CountersRegistry; +import io.scalecube.metrics.HistogramMetric; +import io.scalecube.metrics.MetricsReaderAgent; +import io.scalecube.metrics.MetricsRecorder; +import io.scalecube.metrics.MetricsTransmitter; +import io.scalecube.metrics.TpsMetric; +import io.scalecube.metrics.aeron.CncCountersReaderAgent; +import java.time.Duration; +import org.agrona.BitUtil; +import org.agrona.DirectBuffer; +import org.agrona.concurrent.Agent; +import org.agrona.concurrent.AgentRunner; +import org.agrona.concurrent.BackoffIdleStrategy; +import org.agrona.concurrent.CompositeAgent; +import org.agrona.concurrent.SystemEpochClock; +import org.agrona.concurrent.status.AtomicCounter; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; + +public class MimirAllInOneAeron { + + public static void main(String[] args) throws Exception { + Network network = Network.newNetwork(); + + final var mimir = + new GenericContainer<>("grafana/mimir") + .withExposedPorts(9009) + .withNetwork(network) + .withNetworkAliases("mimir") + .withCopyFileToContainer(forClasspathResource("mimir.yml"), "/etc/mimir.yml") + .withCommand("-config.file=/etc/mimir.yml", "-target=all", "-log.level=debug") + .withLogConsumer( + outputFrame -> System.err.print("[mimir] " + outputFrame.getUtf8String())); + mimir.start(); + + // Start Grafana container + GenericContainer grafana = + new GenericContainer<>("grafana/grafana") + .withExposedPorts(3000) + .withNetwork(network) + .withNetworkAliases("grafana") + .withEnv("GF_SECURITY_ADMIN_USER", "user") + .withEnv("GF_SECURITY_ADMIN_PASSWORD", "password") + .withCopyFileToContainer( + forClasspathResource("mimir.datasource.yml"), + "/etc/grafana/provisioning/datasources/datasource.yml"); + grafana.start(); + + final var mimirPort = mimir.getMappedPort(9009); + final var pushUrl = "http://" + mimir.getHost() + ":" + mimirPort + "/api/v1/push"; + + String grafanaUrl = "http://" + grafana.getHost() + ":" + grafana.getMappedPort(3000); + System.out.println("Started Mimir on: " + mimirPort + " | pushUrl: " + pushUrl); + System.out.println("Grafana is available at: " + grafanaUrl); + + final var metricsRecorder = MetricsRecorder.launch(); + final var metricsTransmitter = MetricsTransmitter.launch(); + + final var countersRegistry = CountersRegistry.create(); + final var countersManager = countersRegistry.countersManager(); + final var sessionCounter = countersManager.newCounter("session_count"); + + final var mediaDriver = + MediaDriver.launch( + new MediaDriver.Context().dirDeleteOnStart(true).dirDeleteOnShutdown(true)); + final var aeron = Aeron.connect(); + final var archive = + Archive.launch( + new Archive.Context() + .recordingEventsEnabled(false) + .archiveDirectoryName("target/aeron-archive") + .controlChannel("aeron:udp?endpoint=localhost:8010") + .replicationChannel("aeron:udp?endpoint=localhost:0")); + + final var consensusModule = + 
ConsensusModule.launch( + new ConsensusModule.Context() + .ingressChannel("aeron:udp") + .replicationChannel("aeron:udp?endpoint=localhost:0") + .clusterDirectoryName("target/aeron-cluster") + .clusterMemberId(0) + .clusterMembers( + "0," + + "localhost:8005," + + "localhost:8006," + + "localhost:8007," + + "localhost:8008," + + "localhost:8010")); + final var serviceContainer = + ClusteredServiceContainer.launch( + new ClusteredServiceContainer.Context() + .clusterDirectoryName("target/aeron-cluster") + .clusteredService(new ClusteredServiceImpl(metricsRecorder, sessionCounter))); + + final var aeronCluster = + AeronCluster.connect( + new Context() + .ingressChannel("aeron:udp") + .ingressEndpoints("0=localhost:8005") + .isIngressExclusive(true) + .egressChannel("aeron:udp?endpoint=localhost:0") + .egressListener(new EgressListenerImpl(metricsRecorder))); + + System.out.println("Started mediaDriver: " + mediaDriver); + System.out.println("Started aeron: " + aeron); + System.out.println("Started archive: " + archive); + System.out.println("Started consensusModule: " + consensusModule); + System.out.println("Started serviceContainer: " + serviceContainer); + System.out.println("Connected aeronCluster: " + aeronCluster); + + // Publisher + + final var mimirPublisher = MimirPublisher.launch(new MimirPublisher.Context().url(pushUrl)); + + final var compositeAgent = + new CompositeAgent( + new CountersReaderAgent( + "CountersReaderAgent", + countersRegistry.context().countersDir(), + true, + SystemEpochClock.INSTANCE, + Duration.ofSeconds(1), + new CountersMimirHandler(null, mimirPublisher.proxy())), + new CncCountersReaderAgent( + "CncCountersReaderAgent", + mediaDriver.aeronDirectoryName(), + true, + SystemEpochClock.INSTANCE, + Duration.ofSeconds(3), + Duration.ofSeconds(5), + new CountersMimirHandler(null, mimirPublisher.proxy())), + new MetricsReaderAgent( + "MetricsReaderAgent", + metricsTransmitter.context().broadcastBuffer(), + SystemEpochClock.INSTANCE, + Duration.ofSeconds(3), + new MetricsMimirHandler(null, mimirPublisher.proxy()))); + + AgentRunner.startOnThread( + new AgentRunner( + new BackoffIdleStrategy(), Throwable::printStackTrace, null, compositeAgent)); + + // Receive data + + AgentRunner.startOnThread( + new AgentRunner( + new BackoffIdleStrategy(), + Throwable::printStackTrace, + null, + new Agent() { + @Override + public int doWork() { + return aeronCluster.pollEgress(); + } + + @Override + public String roleName() { + return ""; + } + })); + + // Send data + + final var bufferClaim = new BufferClaim(); + + while (true) { + final var claim = aeronCluster.tryClaim(BitUtil.SIZE_OF_LONG, bufferClaim); + + if (claim == Publication.CLOSED + || claim == MAX_POSITION_EXCEEDED + || Thread.currentThread().isInterrupted()) { + throw new RuntimeException("Good bye"); + } + + if (claim > 0) { + final var buffer = bufferClaim.buffer(); + final var offset = bufferClaim.offset(); + final var i = offset + SESSION_HEADER_LENGTH; + final var now = System.nanoTime(); + buffer.putLong(i, now); + bufferClaim.commit(); + } + + Thread.sleep(1); + } + } + + private static class ClusteredServiceImpl implements ClusteredService { + + private static final long HIGHEST_TRACKABLE_VALUE = (long) 1e9; + private static final double CONVERSION_FACTOR = 1e-3; + private static final long RESOLUTION_MS = 3000; + private static final int LENGTH = 2 * BitUtil.SIZE_OF_LONG; + + private final MetricsRecorder metricsRecorder; + private final AtomicCounter sessionCounter; + + private final BufferClaim bufferClaim 
= new BufferClaim(); + private TpsMetric tpsMetric; + private HistogramMetric pingMetric; + + private ClusteredServiceImpl(MetricsRecorder metricsRecorder, AtomicCounter sessionCounter) { + this.metricsRecorder = metricsRecorder; + this.sessionCounter = sessionCounter; + } + + @Override + public void onStart(Cluster cluster, Image snapshotImage) { + tpsMetric = + metricsRecorder.newTps( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", "tps")); + + pingMetric = + metricsRecorder.newHistogram( + keyFlyweight -> + keyFlyweight + .tagsCount(2) + .stringValue("name", "hft_latency") + .stringValue("kind", "ping"), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION_MS); + } + + @Override + public void onSessionOpen(ClientSession session, long timestamp) { + System.out.println("onSessionOpen: " + session); + sessionCounter.increment(); + } + + @Override + public void onSessionClose(ClientSession session, long timestamp, CloseReason closeReason) { + System.out.println("onSessionClose: " + session + ", closeReason=" + closeReason); + sessionCounter.decrement(); + } + + @Override + public void onSessionMessage( + ClientSession session, + long timestamp, + DirectBuffer buffer, + int offset, + int length, + Header header) { + tpsMetric.record(); + + final var ping = buffer.getLong(offset); + final var pong = System.nanoTime(); + final var delta = pong - ping; + + pingMetric.record(delta); + + if (session.tryClaim(LENGTH, bufferClaim) > 0) { + final var buf = bufferClaim.buffer(); + final var index = bufferClaim.offset() + SESSION_HEADER_LENGTH; + buf.putLong(index, ping); + buf.putLong(index + BitUtil.SIZE_OF_LONG, pong); + bufferClaim.commit(); + } + } + + @Override + public void onTimerEvent(long correlationId, long timestamp) {} + + @Override + public void onTakeSnapshot(ExclusivePublication snapshotPublication) {} + + @Override + public void onRoleChange(Role newRole) {} + + @Override + public void onTerminate(Cluster cluster) {} + } + + private static class EgressListenerImpl implements EgressListener { + + private static final long HIGHEST_TRACKABLE_VALUE = (long) 1e9; + private static final double CONVERSION_FACTOR = 1e-3; + private static final long RESOLUTION_MS = 3000; + + private final HistogramMetric pongMetric; + private final HistogramMetric rttMetric; + + private EgressListenerImpl(MetricsRecorder metricsRecorder) { + pongMetric = + metricsRecorder.newHistogram( + keyFlyweight -> + keyFlyweight + .tagsCount(2) + .stringValue("name", "hft_latency") + .stringValue("kind", "pong"), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION_MS); + rttMetric = + metricsRecorder.newHistogram( + keyFlyweight -> + keyFlyweight + .tagsCount(2) + .stringValue("name", "hft_latency") + .stringValue("kind", "rtt"), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION_MS); + } + + @Override + public void onMessage( + long clusterSessionId, + long timestamp, + DirectBuffer buffer, + int offset, + int length, + Header header) { + final var now = System.nanoTime(); + final var ping = buffer.getLong(offset); + final var pong = buffer.getLong(offset + BitUtil.SIZE_OF_LONG); + pongMetric.record(now - pong); + rttMetric.record(now - ping); + } + + @Override + public void onSessionEvent( + long correlationId, + long clusterSessionId, + long leadershipTermId, + int leaderMemberId, + EventCode code, + String detail) { + System.out.println( + "onSessionEvent: clusterSessionId=" + + clusterSessionId + + ", leadershipTermId=" + + leadershipTermId + + ", leaderMemberId=" + + 
leaderMemberId + + ", code=" + + code + + ", detail=" + + detail); + } + + @Override + public void onNewLeader( + long clusterSessionId, long leadershipTermId, int leaderMemberId, String ingressEndpoints) { + System.out.println( + "onNewLeader: leadershipTermId=" + + leadershipTermId + + ", leaderMemberId=" + + leaderMemberId); + } + + @Override + public void onAdminResponse( + long clusterSessionId, + long correlationId, + AdminRequestType requestType, + AdminResponseCode responseCode, + String message, + DirectBuffer payload, + int payloadOffset, + int payloadLength) { + System.out.println( + "onAdminResponse: requestType=" + requestType + ", responseCode=" + responseCode); + } + } +} diff --git a/metrics-examples/src/main/java/io/scalecube/metrics/mimir/MimirAllInOneHistogram.java b/metrics-examples/src/main/java/io/scalecube/metrics/mimir/MimirAllInOneHistogram.java new file mode 100644 index 0000000..3062abd --- /dev/null +++ b/metrics-examples/src/main/java/io/scalecube/metrics/mimir/MimirAllInOneHistogram.java @@ -0,0 +1,273 @@ +package io.scalecube.metrics.mimir; + +import static org.testcontainers.utility.MountableFile.forClasspathResource; + +import io.scalecube.metrics.KeyCodec; +import io.scalecube.metrics.MetricsHandler; +import io.scalecube.metrics.MetricsReaderAgent; +import io.scalecube.metrics.MetricsRecorder; +import io.scalecube.metrics.MetricsTransmitter; +import java.net.URI; +import java.net.http.HttpClient; +import java.net.http.HttpRequest; +import java.net.http.HttpRequest.BodyPublishers; +import java.net.http.HttpResponse; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import org.HdrHistogram.Histogram; +import org.HdrHistogram.HistogramIterationValue; +import org.agrona.DirectBuffer; +import org.agrona.concurrent.AgentRunner; +import org.agrona.concurrent.BackoffIdleStrategy; +import org.agrona.concurrent.SystemEpochClock; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.xerial.snappy.Snappy; +import prometheus.Remote.WriteRequest; +import prometheus.Types; +import prometheus.Types.BucketSpan; +import prometheus.Types.Label; +import prometheus.Types.TimeSeries; + +public class MimirAllInOneHistogram { + + public static void main(String[] args) { + Network network = Network.newNetwork(); + + final var mimir = + new GenericContainer<>("grafana/mimir") + .withExposedPorts(9009) + .withNetwork(network) + .withNetworkAliases("mimir") + .withCopyFileToContainer(forClasspathResource("mimir.yml"), "/etc/mimir.yml") + .withCommand("-config.file=/etc/mimir.yml", "-target=all", "-log.level=debug") + .withLogConsumer( + outputFrame -> System.err.print("[mimir] " + outputFrame.getUtf8String())); + mimir.start(); + + // Start Grafana container + GenericContainer grafana = + new GenericContainer<>("grafana/grafana") + .withExposedPorts(3000) + .withNetwork(network) + .withNetworkAliases("grafana") + .withEnv("GF_SECURITY_ADMIN_USER", "user") + .withEnv("GF_SECURITY_ADMIN_PASSWORD", "password") + .withCopyFileToContainer( + forClasspathResource("mimir.datasource.yml"), + "/etc/grafana/provisioning/datasources/datasource.yml"); + grafana.start(); + + final var mimirPort = mimir.getMappedPort(9009); + final var pushUrl = "http://" + mimir.getHost() + ":" + mimirPort + "/api/v1/push"; + + String grafanaUrl = "http://" + grafana.getHost() + ":" + grafana.getMappedPort(3000); + System.out.println("Started Mimir on: " + mimirPort + " | pushUrl: " + pushUrl); 
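    // pushUrl is Mimir's Prometheus remote-write ingestion endpoint; the handler
    // below posts snappy-compressed protobuf WriteRequests to it.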
    System.out.println("Grafana is available at: " + grafanaUrl);

    final var metricsRecorder = MetricsRecorder.launch();
    final var metricsTransmitter = MetricsTransmitter.launch();

    AgentRunner.startOnThread(
        new AgentRunner(
            new BackoffIdleStrategy(),
            Throwable::printStackTrace,
            null,
            new MetricsReaderAgent(
                "MetricsReaderAgent",
                metricsTransmitter.context().broadcastBuffer(),
                SystemEpochClock.INSTANCE,
                Duration.ofSeconds(3),
                new MimirHistogramHandler(pushUrl))));

    // Start measurements and burning cpu

    final var highestTrackableValue = (long) 1e9;
    final var conversionFactor = 1e-3;
    final var resolutionMs = 3000;
    final var latencyMetric =
        metricsRecorder.newHistogram(
            keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", "hft_latency"),
            highestTrackableValue,
            conversionFactor,
            resolutionMs);

    for (; ; ) {
      final var now = System.nanoTime();
      burnCpuMicros(20);
      latencyMetric.record(System.nanoTime() - now);
      Thread.onSpinWait();
    }
  }

  private static void burnCpuMicros(long micros) {
    long durationNanos = micros * 1000;
    long start = System.nanoTime();
    while ((System.nanoTime() - start) < durationNanos) {
      Thread.onSpinWait();
    }
  }

  private static class MimirHistogramHandler implements MetricsHandler {

    private final String pushUrl;
    private final HttpClient httpClient = HttpClient.newHttpClient();
    private final KeyCodec keyCodec = new KeyCodec();

    private MimirHistogramHandler(String pushUrl) {
      this.pushUrl = pushUrl;
    }

    @Override
    public void onHistogram(
        long timestamp,
        DirectBuffer keyBuffer,
        int keyOffset,
        int keyLength,
        Histogram accumulated,
        Histogram distinct,
        long highestTrackableValue,
        double conversionFactor) {
      final var key = keyCodec.decodeKey(keyBuffer, keyOffset);
      final var name = key.stringValue("name");

      final var histogram = buildPromNativeHistogram(accumulated, 8, timestamp);
      final var timeSeries = wrapIntoTimeSeries(name, histogram);
      try {
        push(timeSeries);
        System.out.println(Instant.now() + " | push(timeSeries)");
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }

    private static List<Double> computePromNativeBucketBoundaries(
        double minNs, double maxNs, int schema) {
      double min = minNs / 1000.0; // ns -> µs
      double max = maxNs / 1000.0; // ns -> µs
      double factor = Math.pow(2, Math.pow(2, -schema));
      List<Double> boundaries = new ArrayList<>();
      double current = min;
      while (current < max) {
        boundaries.add(current);
        current *= factor;
      }
      return boundaries;
    }

    private static List<Long> convertHdrToPromBuckets(Histogram hdr, List<Double> boundaries) {
      List<Long> bucketCounts = new ArrayList<>(boundaries.size());
      long cumulative = 0;
      for (double boundaryUs : boundaries) {
        long boundaryNs = (long) Math.ceil(boundaryUs * 1000); // µs -> ns for query
        long count = hdr.getCountBetweenValues(0, boundaryNs);
        bucketCounts.add(count - cumulative);
        cumulative = count;
      }
      return bucketCounts;
    }

    private static Types.Histogram buildPromNativeHistogram(
        Histogram hdr, int schema, long timestamp) {
      double minNs = hdr.getMinNonZeroValue();
      double maxNs = hdr.getMaxValue();

      List<Double> boundaries = computePromNativeBucketBoundaries(minNs, maxNs, schema);
      List<Long> bucketCounts = convertHdrToPromBuckets(hdr, boundaries);

      double totalSumNs = 0;
      for (HistogramIterationValue value : hdr.recordedValues()) {
        totalSumNs += value.getValueIteratedTo() * value.getCountAtValueIteratedTo();
      }
      double totalSumUs = totalSumNs / 1000.0;
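      // Native-histogram bucket boundaries grow by factor 2^(2^-schema);
      // schema 8 gives ~0.27% per-bucket growth (2^(1/256) ≈ 1.0027).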
      Types.Histogram.Builder builder =
          Types.Histogram.newBuilder()
              .setSchema(schema)
              .setCountInt(hdr.getTotalCount())
              .setZeroCountInt(0)
              .setSum(totalSumUs)
              .setTimestamp(timestamp);

      // Delta-encoded bucket counts: each entry is the difference from the
      // previous populated bucket, as the remote-write protocol expects
      List<Long> deltas = new ArrayList<>();
      List<BucketSpan> spans = new ArrayList<>();

      int baseBucket = 0;
      int spanStart = -1;
      int spanLength = 0;
      boolean inSpan = false;
      long previousCount = 0;

      for (int i = 0; i < bucketCounts.size(); i++) {
        long count = bucketCounts.get(i);
        if (count > 0) {
          if (!inSpan) {
            spanStart = i;
            spanLength = 1;
            inSpan = true;
          } else {
            spanLength++;
          }
          deltas.add(count - previousCount);
          previousCount = count;
        } else {
          if (inSpan) {
            // Close the span
            spans.add(
                BucketSpan.newBuilder()
                    .setOffset(spanStart - baseBucket)
                    .setLength(spanLength)
                    .build());
            baseBucket = spanStart + spanLength;
            inSpan = false;
          }
        }
      }

      // If ended with a span
      if (inSpan) {
        spans.add(
            BucketSpan.newBuilder()
                .setOffset(spanStart - baseBucket)
                .setLength(spanLength)
                .build());
      }

      builder.addAllPositiveSpans(spans);
      builder.addAllPositiveDeltas(deltas);

      return builder.build();
    }

    private static TimeSeries wrapIntoTimeSeries(String name, Types.Histogram histogram) {
      TimeSeries.Builder builder = TimeSeries.newBuilder();
      builder.addLabels(Label.newBuilder().setName("__name__").setValue(name));
      builder.addLabels(Label.newBuilder().setName("service").setValue("hft_app"));
      builder.addHistograms(histogram);
      return builder.build();
    }

    private void push(TimeSeries timeSeries) throws Exception {
      byte[] payload = WriteRequest.newBuilder().addTimeseries(timeSeries).build().toByteArray();
      byte[] compressedPayload = Snappy.compress(payload);

      HttpRequest request =
          HttpRequest.newBuilder()
              .uri(URI.create(pushUrl))
              .header("Content-Type", "application/x-protobuf")
              .header("Content-Encoding", "snappy")
              .header("X-Prometheus-Remote-Write-Version", "0.1.0")
              .POST(BodyPublishers.ofByteArray(compressedPayload))
              .build();

      HttpResponse<String> response =
          httpClient.send(request, HttpResponse.BodyHandlers.ofString());

      if (response.statusCode() != 200) {
        System.err.println(
            "Failed to push metrics: HTTP " + response.statusCode() + ", body: " + response.body());
      }
    }
  }
}
diff --git a/metrics-examples/src/main/java/io/scalecube/metrics/mimir/MimirAllInOnePercentiles.java b/metrics-examples/src/main/java/io/scalecube/metrics/mimir/MimirAllInOnePercentiles.java
new file mode 100644
index 0000000..c330648
--- /dev/null
+++ b/metrics-examples/src/main/java/io/scalecube/metrics/mimir/MimirAllInOnePercentiles.java
@@ -0,0 +1,96 @@
package io.scalecube.metrics.mimir;

import static org.testcontainers.utility.MountableFile.forClasspathResource;

import io.scalecube.metrics.MetricsReaderAgent;
import io.scalecube.metrics.MetricsRecorder;
import io.scalecube.metrics.MetricsTransmitter;
import java.time.Duration;
import org.agrona.concurrent.AgentRunner;
import org.agrona.concurrent.BackoffIdleStrategy;
import org.agrona.concurrent.SystemEpochClock;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;

public class MimirAllInOnePercentiles {

  public static void main(String[] args) {
    Network network = Network.newNetwork();

    final var mimir =
        new GenericContainer<>("grafana/mimir")
            .withExposedPorts(9009)
            .withNetwork(network)
            .withNetworkAliases("mimir")
            .withCopyFileToContainer(forClasspathResource("mimir.yml"), "/etc/mimir.yml")
.withCommand("-config.file=/etc/mimir.yml", "-target=all", "-log.level=debug") + .withLogConsumer( + outputFrame -> System.err.print("[mimir] " + outputFrame.getUtf8String())); + mimir.start(); + + // Start Grafana container + GenericContainer grafana = + new GenericContainer<>("grafana/grafana") + .withExposedPorts(3000) + .withNetwork(network) + .withNetworkAliases("grafana") + .withEnv("GF_SECURITY_ADMIN_USER", "user") + .withEnv("GF_SECURITY_ADMIN_PASSWORD", "password") + .withCopyFileToContainer( + forClasspathResource("mimir.datasource.yml"), + "/etc/grafana/provisioning/datasources/datasource.yml"); + grafana.start(); + + final var mimirPort = mimir.getMappedPort(9009); + final var pushUrl = "http://" + mimir.getHost() + ":" + mimirPort + "/api/v1/push"; + + String grafanaUrl = "http://" + grafana.getHost() + ":" + grafana.getMappedPort(3000); + System.out.println("Started Mimir on: " + mimirPort + " | pushUrl: " + pushUrl); + System.out.println("Grafana is available at: " + grafanaUrl); + + final var metricsRecorder = MetricsRecorder.launch(); + final var metricsTransmitter = MetricsTransmitter.launch(); + final var mimirPublisher = MimirPublisher.launch(new MimirPublisher.Context().url(pushUrl)); + + // Metrics + + AgentRunner.startOnThread( + new AgentRunner( + new BackoffIdleStrategy(), + Throwable::printStackTrace, + null, + new MetricsReaderAgent( + "MetricsReaderAgent", + metricsTransmitter.context().broadcastBuffer(), + SystemEpochClock.INSTANCE, + Duration.ofSeconds(3), + new MetricsMimirHandler(null, mimirPublisher.proxy())))); + + // Start measurements and burning cpu + + final var highestTrackableValue = (long) 1e9; + final var conversionFactor = 1e-3; + final var resolutionMs = 10000; + final var latencyMetric = + metricsRecorder.newHistogram( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", "hft_latency"), + highestTrackableValue, + conversionFactor, + resolutionMs); + + for (; ; ) { + final var now = System.nanoTime(); + burnCpuMicros(20); + latencyMetric.record(System.nanoTime() - now); + Thread.onSpinWait(); + } + } + + private static void burnCpuMicros(long micros) { + long durationNanos = micros * 1000; + long start = System.nanoTime(); + while ((System.nanoTime() - start) < durationNanos) { + Thread.onSpinWait(); + } + } +} diff --git a/metrics-examples/src/main/java/io/scalecube/metrics/prometheus/PrometheusAllInOneAeron.java b/metrics-examples/src/main/java/io/scalecube/metrics/prometheus/PrometheusAllInOneAeron.java new file mode 100644 index 0000000..14ef40c --- /dev/null +++ b/metrics-examples/src/main/java/io/scalecube/metrics/prometheus/PrometheusAllInOneAeron.java @@ -0,0 +1,408 @@ +package io.scalecube.metrics.prometheus; + +import static io.aeron.Publication.MAX_POSITION_EXCEEDED; +import static io.aeron.cluster.client.AeronCluster.SESSION_HEADER_LENGTH; +import static org.testcontainers.utility.MountableFile.forClasspathResource; + +import io.aeron.Aeron; +import io.aeron.ExclusivePublication; +import io.aeron.Image; +import io.aeron.Publication; +import io.aeron.archive.Archive; +import io.aeron.cluster.ConsensusModule; +import io.aeron.cluster.client.AeronCluster; +import io.aeron.cluster.client.AeronCluster.Context; +import io.aeron.cluster.client.EgressListener; +import io.aeron.cluster.codecs.AdminRequestType; +import io.aeron.cluster.codecs.AdminResponseCode; +import io.aeron.cluster.codecs.CloseReason; +import io.aeron.cluster.codecs.EventCode; +import io.aeron.cluster.service.ClientSession; +import 
io.aeron.cluster.service.Cluster; +import io.aeron.cluster.service.Cluster.Role; +import io.aeron.cluster.service.ClusteredService; +import io.aeron.cluster.service.ClusteredServiceContainer; +import io.aeron.driver.MediaDriver; +import io.aeron.logbuffer.BufferClaim; +import io.aeron.logbuffer.Header; +import io.scalecube.metrics.CountersReaderAgent; +import io.scalecube.metrics.CountersRegistry; +import io.scalecube.metrics.HistogramMetric; +import io.scalecube.metrics.MetricsReaderAgent; +import io.scalecube.metrics.MetricsRecorder; +import io.scalecube.metrics.MetricsTransmitter; +import io.scalecube.metrics.TpsMetric; +import io.scalecube.metrics.aeron.CncCountersReaderAgent; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.time.Duration; +import java.util.Map; +import org.agrona.BitUtil; +import org.agrona.DirectBuffer; +import org.agrona.concurrent.Agent; +import org.agrona.concurrent.AgentRunner; +import org.agrona.concurrent.BackoffIdleStrategy; +import org.agrona.concurrent.CompositeAgent; +import org.agrona.concurrent.SystemEpochClock; +import org.agrona.concurrent.status.AtomicCounter; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; + +public class PrometheusAllInOneAeron { + + public static void main(String[] args) throws IOException, InterruptedException { + Network network = Network.newNetwork(); + + GenericContainer prometheus = + new GenericContainer<>("prom/prometheus") + .withNetwork(network) + .withNetworkAliases("prometheus") + .withExposedPorts(9090) + .withCopyFileToContainer( + forClasspathResource("prometheus.yml"), "/etc/prometheus/prometheus.yml") + .withCommand("--config.file=/etc/prometheus/prometheus.yml", "--log.level=debug"); + prometheus.start(); + + GenericContainer loki = + new GenericContainer<>("grafana/loki") + .withNetwork(network) + .withNetworkAliases("loki") + .withExposedPorts(3100) + .withCommand("-config.file=/etc/loki/local-config.yaml"); + loki.start(); + + // Start Grafana container + GenericContainer grafana = + new GenericContainer<>("grafana/grafana") + .withNetwork(network) + .withExposedPorts(3000) + .withEnv("GF_SECURITY_ADMIN_USER", "user") + .withEnv("GF_SECURITY_ADMIN_PASSWORD", "password") + .withCopyFileToContainer( + forClasspathResource("prometheus.datasource.yml"), + "/etc/grafana/provisioning/datasources/datasource.yml") + .withCopyFileToContainer( + forClasspathResource("loki.datasource.yml"), + "/etc/grafana/provisioning/datasources/loki.yml"); + grafana.start(); + + // final var lokiPort = loki.getMappedPort(3100); + // final var pushUrl = "http://" + loki.getHost() + ":" + lokiPort + "/loki/api/v1/push"; + + String grafanaUrl = "http://" + grafana.getHost() + ":" + grafana.getMappedPort(3000); + System.out.println("Prometheus: " + prometheus.getMappedPort(9090)); + System.out.println("Loki: " + loki.getMappedPort(3100)); + System.out.println("Grafana: " + grafanaUrl); + + final var metricsRecorder = MetricsRecorder.launch(); + final var metricsTransmitter = MetricsTransmitter.launch(); + + final var highestTrackableValue = (long) 1e9; + final var conversionFactor = 1e-3; + final var resolutionMs = 1000; + + final var tps = + metricsRecorder.newTps( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", "tps")); + final var pingLatency = + metricsRecorder.newHistogram( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", "ping_latency"), + highestTrackableValue, + conversionFactor, + resolutionMs); + final var pongLatency 
= + metricsRecorder.newHistogram( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", "pong_latency"), + highestTrackableValue, + conversionFactor, + resolutionMs); + final var rttLatency = + metricsRecorder.newHistogram( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", "rtt_latency"), + highestTrackableValue, + conversionFactor, + resolutionMs); + + final var countersRegistry = CountersRegistry.create(); + final var countersManager = countersRegistry.countersManager(); + final var sessionCounter = countersManager.newCounter("session_count"); + final var servicePosition = countersManager.newCounter("service_position"); + + final var mediaDriver = + MediaDriver.launch( + new MediaDriver.Context().dirDeleteOnStart(true).dirDeleteOnShutdown(true)); + final var aeron = Aeron.connect(); + final var archive = + Archive.launch( + new Archive.Context() + .recordingEventsEnabled(false) + .controlChannel("aeron:udp?endpoint=localhost:8010") + .replicationChannel("aeron:udp?endpoint=localhost:0")); + + final var consensusModule = + ConsensusModule.launch( + new ConsensusModule.Context() + .ingressChannel("aeron:udp") + .replicationChannel("aeron:udp?endpoint=localhost:0") + .clusterMemberId(0) + .clusterMembers( + "0," + + "localhost:8005," + + "localhost:8006," + + "localhost:8007," + + "localhost:8008," + + "localhost:8010")); + final var serviceContainer = + ClusteredServiceContainer.launch( + new ClusteredServiceContainer.Context() + .clusteredService( + new ClusteredServiceImpl(tps, pingLatency, sessionCounter, servicePosition))); + + final var aeronCluster = + AeronCluster.connect( + new Context() + .ingressChannel("aeron:udp") + .ingressEndpoints("0=localhost:8005") + .isIngressExclusive(true) + .egressChannel("aeron:udp?endpoint=localhost:0") + .egressListener(new EgressListenerImpl(pongLatency, rttLatency))); + + System.out.println("Started mediaDriver: " + mediaDriver); + System.out.println("Started aeron: " + aeron); + System.out.println("Started archive: " + archive); + System.out.println("Started consensusModule: " + consensusModule); + System.out.println("Started serviceContainer: " + serviceContainer); + System.out.println("Connected aeronCluster: " + aeronCluster); + + // Start scrape target + + final var labels = Map.of("app", "hft_app"); + final var countersAdapter = new CountersPrometheusAdapter(labels); + final var cncCountersAdapter = new CountersPrometheusAdapter(labels); + final var metricsAdapter = new MetricsPrometheusAdapter(labels); + + final var compositeAgent = + new CompositeAgent( + new CountersReaderAgent( + "CountersReaderAgent", + countersRegistry.context().countersDir(), + true, + SystemEpochClock.INSTANCE, + Duration.ofSeconds(1), + countersAdapter), + new CncCountersReaderAgent( + "CncCountersReaderAgent", + mediaDriver.aeronDirectoryName(), + true, + SystemEpochClock.INSTANCE, + Duration.ofSeconds(3), + Duration.ofSeconds(5), + cncCountersAdapter), + new MetricsReaderAgent( + "MetricsReaderAgent", + metricsTransmitter.context().broadcastBuffer(), + SystemEpochClock.INSTANCE, + Duration.ofSeconds(3), + metricsAdapter)); + + AgentRunner.startOnThread( + new AgentRunner( + new BackoffIdleStrategy(), Throwable::printStackTrace, null, compositeAgent)); + + PrometheusMetricsServer.launch( + new InetSocketAddress(8080), + new PrometheusMetricsHandler(countersAdapter, cncCountersAdapter, metricsAdapter)); + + // Receive data + + AgentRunner.startOnThread( + new AgentRunner( + new BackoffIdleStrategy(), + Throwable::printStackTrace, + null, + 
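+            // Anonymous agent that only drains cluster egress; a non-zero doWork() count
+            // (fragments polled) keeps the BackoffIdleStrategy from backing off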
new Agent() { + @Override + public int doWork() { + return aeronCluster.pollEgress(); + } + + @Override + public String roleName() { + return ""; + } + })); + + // Send data + + final var bufferClaim = new BufferClaim(); + + while (true) { + final var claim = aeronCluster.tryClaim(BitUtil.SIZE_OF_LONG, bufferClaim); + + if (claim == Publication.CLOSED + || claim == MAX_POSITION_EXCEEDED + || Thread.currentThread().isInterrupted()) { + throw new RuntimeException("Good bye"); + } + + if (claim > 0) { + final var buffer = bufferClaim.buffer(); + final var offset = bufferClaim.offset(); + final var i = offset + SESSION_HEADER_LENGTH; + final var now = System.nanoTime(); + buffer.putLong(i, now); + bufferClaim.commit(); + } + + Thread.sleep(1); + } + } + + private static class ClusteredServiceImpl implements ClusteredService { + + private static final int LENGTH = 2 * BitUtil.SIZE_OF_LONG; + + private final TpsMetric tps; + private final HistogramMetric pingLatency; + private final AtomicCounter sessionCounter; + private final AtomicCounter servicePosition; + + private final BufferClaim bufferClaim = new BufferClaim(); + + private ClusteredServiceImpl( + TpsMetric tps, + HistogramMetric pingLatency, + AtomicCounter sessionCounter, + AtomicCounter servicePosition) { + this.tps = tps; + this.pingLatency = pingLatency; + this.sessionCounter = sessionCounter; + this.servicePosition = servicePosition; + } + + @Override + public void onStart(Cluster cluster, Image snapshotImage) {} + + @Override + public void onSessionOpen(ClientSession session, long timestamp) { + System.out.println("onSessionOpen: " + session); + sessionCounter.increment(); + } + + @Override + public void onSessionClose(ClientSession session, long timestamp, CloseReason closeReason) { + System.out.println("onSessionClose: " + session + ", closeReason=" + closeReason); + sessionCounter.decrement(); + } + + @Override + public void onSessionMessage( + ClientSession session, + long timestamp, + DirectBuffer buffer, + int offset, + int length, + Header header) { + tps.record(); + + servicePosition.set(header.position()); + + final var ping = buffer.getLong(offset); + final var pong = System.nanoTime(); + final var delta = pong - ping; + + pingLatency.record(delta); + + if (session.tryClaim(LENGTH, bufferClaim) > 0) { + final var buf = bufferClaim.buffer(); + final var index = bufferClaim.offset() + SESSION_HEADER_LENGTH; + buf.putLong(index, ping); + buf.putLong(index + BitUtil.SIZE_OF_LONG, pong); + bufferClaim.commit(); + } + } + + @Override + public void onTimerEvent(long correlationId, long timestamp) {} + + @Override + public void onTakeSnapshot(ExclusivePublication snapshotPublication) {} + + @Override + public void onRoleChange(Role newRole) {} + + @Override + public void onTerminate(Cluster cluster) {} + } + + private static class EgressListenerImpl implements EgressListener { + + private final HistogramMetric pongLatency; + private final HistogramMetric rttLatency; + + private EgressListenerImpl(HistogramMetric pongLatency, HistogramMetric rttLatency) { + this.pongLatency = pongLatency; + this.rttLatency = rttLatency; + } + + @Override + public void onMessage( + long clusterSessionId, + long timestamp, + DirectBuffer buffer, + int offset, + int length, + Header header) { + final var now = System.nanoTime(); + final var ping = buffer.getLong(offset); + final var pong = buffer.getLong(offset + BitUtil.SIZE_OF_LONG); + pongLatency.record(now - pong); + rttLatency.record(now - ping); + } + + @Override + public void onSessionEvent( 
+ long correlationId, + long clusterSessionId, + long leadershipTermId, + int leaderMemberId, + EventCode code, + String detail) { + System.out.println( + "onSessionEvent: clusterSessionId=" + + clusterSessionId + + ", leadershipTermId=" + + leadershipTermId + + ", leaderMemberId=" + + leaderMemberId + + ", code=" + + code + + ", detail=" + + detail); + } + + @Override + public void onNewLeader( + long clusterSessionId, long leadershipTermId, int leaderMemberId, String ingressEndpoints) { + System.out.println( + "onNewLeader: leadershipTermId=" + + leadershipTermId + + ", leaderMemberId=" + + leaderMemberId); + } + + @Override + public void onAdminResponse( + long clusterSessionId, + long correlationId, + AdminRequestType requestType, + AdminResponseCode responseCode, + String message, + DirectBuffer payload, + int payloadOffset, + int payloadLength) { + System.out.println( + "onAdminResponse: requestType=" + requestType + ", responseCode=" + responseCode); + } + } +} diff --git a/metrics-examples/src/main/java/io/scalecube/metrics/prometheus/PrometheusNativeHistogram.java b/metrics-examples/src/main/java/io/scalecube/metrics/prometheus/PrometheusNativeHistogram.java new file mode 100644 index 0000000..67e1e7a --- /dev/null +++ b/metrics-examples/src/main/java/io/scalecube/metrics/prometheus/PrometheusNativeHistogram.java @@ -0,0 +1,213 @@ +package io.scalecube.metrics.prometheus; + +import static org.testcontainers.utility.MountableFile.forClasspathResource; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import io.scalecube.metrics.MetricsHandler; +import io.scalecube.metrics.MetricsReaderAgent; +import io.scalecube.metrics.MetricsRecorder; +import io.scalecube.metrics.MetricsTransmitter; +import java.io.IOException; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.atomic.AtomicReference; +import org.HdrHistogram.Histogram; +import org.agrona.DirectBuffer; +import org.agrona.concurrent.AgentRunner; +import org.agrona.concurrent.BackoffIdleStrategy; +import org.agrona.concurrent.SystemEpochClock; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; + +public class PrometheusNativeHistogram { + + public static void main(String[] args) throws IOException { + Network network = Network.newNetwork(); + + GenericContainer prometheus = + new GenericContainer<>("prom/prometheus") + .withNetwork(network) + .withNetworkAliases("prometheus") + .withExposedPorts(9090) + .withCopyFileToContainer( + forClasspathResource("prometheus.yml"), "/etc/prometheus/prometheus.yml") + .withCommand( + "--config.file=/etc/prometheus/prometheus.yml", + "--enable-feature=native-histograms", + "--log.level=debug"); + prometheus.start(); + + // Start Grafana container + GenericContainer grafana = + new GenericContainer<>("grafana/grafana") + .withNetwork(network) + .withExposedPorts(3000) + .withEnv("GF_SECURITY_ADMIN_USER", "user") + .withEnv("GF_SECURITY_ADMIN_PASSWORD", "password") + .withCopyFileToContainer( + forClasspathResource("prometheus.datasource.yml"), + "/etc/grafana/provisioning/datasources/datasource.yml"); + grafana.start(); + + String grafanaUrl = "http://" + grafana.getHost() + ":" + grafana.getMappedPort(3000); + System.out.println("Started prometheus on: " + prometheus.getMappedPort(9090)); + System.out.println("Grafana is available at: " + grafanaUrl); + + // Start 
scrape target
+
+    final var socketAddress = new InetSocketAddress(8080);
+    final var server = HttpServer.create(socketAddress, 0);
+    final var metricsHandlerAdapter = new MetricsHandlerAdapter("hft_latency", "HFT latency");
+
+    server.createContext("/metrics", metricsHandlerAdapter);
+    server.setExecutor(null); // Use default executor
+    server.start();
+    System.out.println(Instant.now() + " | Server started on " + socketAddress);
+
+    final var metricsRecorder = MetricsRecorder.launch();
+    final var metricsTransmitter = MetricsTransmitter.launch();
+
+    AgentRunner.startOnThread(
+        new AgentRunner(
+            new BackoffIdleStrategy(),
+            Throwable::printStackTrace,
+            null,
+            new MetricsReaderAgent(
+                "MetricsReaderAgent",
+                metricsTransmitter.context().broadcastBuffer(),
+                SystemEpochClock.INSTANCE,
+                Duration.ofSeconds(3),
+                metricsHandlerAdapter)));
+
+    // Start measurements and burn CPU
+
+    final var highestTrackableValue = (long) 1e9;
+    final var conversionFactor = 1e-3;
+    final var resolutionMs = 1000;
+    final var latencyMetric =
+        metricsRecorder.newHistogram(
+            keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", "hft_latency"),
+            highestTrackableValue,
+            conversionFactor,
+            resolutionMs);
+
+    for (; ; ) {
+      final var now = System.nanoTime();
+      burnCpuMicros(20);
+      final var delta = System.nanoTime() - now;
+      latencyMetric.record(delta);
+      Thread.onSpinWait();
+    }
+  }
+
+  private static void burnCpuMicros(long micros) {
+    long durationNanos = micros * 1000;
+    long start = System.nanoTime();
+    while ((System.nanoTime() - start) < durationNanos) {
+      Thread.onSpinWait();
+    }
+  }
+
+  private static class MetricsHandlerAdapter implements MetricsHandler, HttpHandler {
+
+    private final String metricName;
+    private final String help;
+
+    private final AtomicReference<Histogram> latestHistogram = new AtomicReference<>();
+
+    public MetricsHandlerAdapter(String metricName, String help) {
+      this.metricName = metricName;
+      this.help = help;
+    }
+
+    @Override
+    public void onHistogram(
+        long timestamp,
+        DirectBuffer keyBuffer,
+        int keyOffset,
+        int keyLength,
+        Histogram accumulated,
+        Histogram distinct,
+        long highestTrackableValue,
+        double conversionFactor) {
+      final var latest = latestHistogram.get();
+      if (latest != null) {
+        // Merge into the histogram awaiting the next scrape; same object reference,
+        // so no re-set of the AtomicReference is needed
+        latest.add(distinct);
+      } else {
+        latestHistogram.set(distinct);
+      }
+    }
+
+    @Override
+    public void handle(HttpExchange exchange) {
+      try {
+        Histogram histogram = latestHistogram.getAndSet(null);
+        StringBuilder sb = new StringBuilder();
+
+        sb.append("# HELP ").append(metricName).append(" ").append(help).append("\n");
+        sb.append("# TYPE ").append(metricName).append(" histogram\n");
+
+        if (histogram != null && histogram.getTotalCount() > 0) {
+          System.out.println(Instant.now() + " | Scrape histogram");
+
+          final long totalCount = histogram.getTotalCount();
+          double totalSum = 0.0;
+          long cumulativeCount = 0;
+
+          for (var v : histogram.recordedValues()) {
+            long rawValue = v.getValueIteratedTo();
+            long count = v.getCountAtValueIteratedTo();
+            double upperBound = rawValue / 1000.0; // nanos to micros
+
+            cumulativeCount += count;
+            totalSum += upperBound * count; // sum in micros, same unit as the buckets
+
+            sb.append(metricName)
+                .append("{native=\"true\",scale=\"3\",le=\"")
+                .append(formatDouble(upperBound))
+                .append("\"} ")
+                .append(cumulativeCount)
+                .append("\n");
+          }
+
+          // +Inf bucket
+          sb.append(metricName)
+              .append("{native=\"true\",scale=\"3\",le=\"+Inf\"} ")
+              .append(totalCount)
+              .append("\n");
+
+          sb.append(metricName).append("_sum 
").append(formatDouble(totalSum)).append("\n"); + sb.append(metricName).append("_count ").append(totalCount).append("\n"); + } + + sb.append("# EOF\n"); + + byte[] response = sb.toString().getBytes(); + exchange + .getResponseHeaders() + .set("Content-Type", "application/openmetrics-text; version=1.0.0; charset=utf-8"); + exchange.sendResponseHeaders(200, response.length); + + try (OutputStream os = exchange.getResponseBody()) { + os.write(response); + } + } catch (Exception e) { + e.printStackTrace(System.err); + try { + exchange.sendResponseHeaders(500, -1); + } catch (Exception ex) { + // ignore + } + } + } + + private static String formatDouble(double upperBound) { + return String.format("%.3f", upperBound); + } + } +} diff --git a/metrics-examples/src/main/resources/log4j2.xml b/metrics-examples/src/main/resources/log4j2.xml new file mode 100644 index 0000000..ed8e666 --- /dev/null +++ b/metrics-examples/src/main/resources/log4j2.xml @@ -0,0 +1,41 @@ + + + + + %level{length=1} %d{ISO8601} %c{1.} %m [%t]%n + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/metrics-examples/src/main/resources/loki.datasource.yml b/metrics-examples/src/main/resources/loki.datasource.yml new file mode 100644 index 0000000..2d16f5c --- /dev/null +++ b/metrics-examples/src/main/resources/loki.datasource.yml @@ -0,0 +1,8 @@ +apiVersion: 1 +datasources: + - name: Loki + type: loki + access: proxy + url: http://loki:3100 + jsonData: + maxLines: 1000 diff --git a/metrics-examples/src/main/resources/mimir.datasource.yml b/metrics-examples/src/main/resources/mimir.datasource.yml new file mode 100644 index 0000000..def9723 --- /dev/null +++ b/metrics-examples/src/main/resources/mimir.datasource.yml @@ -0,0 +1,7 @@ +apiVersion: 1 +datasources: + - name: Mimir + type: prometheus + access: proxy + url: http://mimir:9009/prometheus + isDefault: true diff --git a/metrics-examples/src/main/resources/mimir.yml b/metrics-examples/src/main/resources/mimir.yml new file mode 100644 index 0000000..825eaa6 --- /dev/null +++ b/metrics-examples/src/main/resources/mimir.yml @@ -0,0 +1,8 @@ +multitenancy_enabled: false + +server: + http_listen_port: 9009 + +ingester: + ring: + replication_factor: 1 diff --git a/metrics-examples/src/main/resources/prometheus.datasource.yml b/metrics-examples/src/main/resources/prometheus.datasource.yml new file mode 100644 index 0000000..0eddf26 --- /dev/null +++ b/metrics-examples/src/main/resources/prometheus.datasource.yml @@ -0,0 +1,7 @@ +apiVersion: 1 +datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + isDefault: true diff --git a/metrics-examples/src/main/resources/prometheus.yml b/metrics-examples/src/main/resources/prometheus.yml new file mode 100644 index 0000000..739a404 --- /dev/null +++ b/metrics-examples/src/main/resources/prometheus.yml @@ -0,0 +1,7 @@ +global: + scrape_interval: 5s + +scrape_configs: + - job_name: 'bare-java' + static_configs: + - targets: [ '172.17.0.1:8080' ] diff --git a/metrics-examples/src/main/resources/vm.datasource.yml b/metrics-examples/src/main/resources/vm.datasource.yml new file mode 100644 index 0000000..e586240 --- /dev/null +++ b/metrics-examples/src/main/resources/vm.datasource.yml @@ -0,0 +1,7 @@ +apiVersion: 1 +datasources: + - name: VictoriaMetrics + type: prometheus + access: proxy + url: http://victoria-metrics:8428 + isDefault: true diff --git a/metrics-mimir/pom.xml b/metrics-mimir/pom.xml new file mode 100644 index 0000000..a70c462 --- /dev/null +++ 
b/metrics-mimir/pom.xml @@ -0,0 +1,81 @@ + + + 4.0.0 + + + io.scalecube + scalecube-metrics-parent + 0.1.0-SNAPSHOT + + + scalecube-metrics-mimir + + + 4.31.1 + 1.1.10.7 + + + + + io.scalecube + scalecube-metrics + ${project.parent.version} + + + com.google.protobuf + protobuf-java + ${protobuf-java.version} + + + org.xerial.snappy + snappy-java + ${snappy-java.version} + + + + + + + + kr.motd.maven + os-maven-plugin + 1.7.1 + + + + detect + + + + + + + + + kr.motd.maven + os-maven-plugin + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + ${project.basedir}/src/main/proto + + com.google.protobuf:protoc:${protobuf-java.version}:exe:${os.detected.classifier} + + + + + + compile + + + + + + + + diff --git a/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/CountersMimirHandler.java b/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/CountersMimirHandler.java new file mode 100644 index 0000000..3feeb95 --- /dev/null +++ b/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/CountersMimirHandler.java @@ -0,0 +1,102 @@ +package io.scalecube.metrics.mimir; + +import static io.scalecube.metrics.MetricNames.sanitizeName; + +import io.scalecube.metrics.CounterDescriptor; +import io.scalecube.metrics.CountersHandler; +import io.scalecube.metrics.Key; +import io.scalecube.metrics.KeyCodec; +import io.scalecube.metrics.mimir.MimirPublisher.WriteProxy; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import prometheus.Remote.WriteRequest; +import prometheus.Types.Label; +import prometheus.Types.Sample; +import prometheus.Types.TimeSeries; +import prometheus.Types.TimeSeries.Builder; + +/** + * Handles counter updates and pushes them to Mimir via {@link WriteProxy}. Converts {@link + * CounterDescriptor} data into Mimir {@link TimeSeries} with labels and values. + */ +public class CountersMimirHandler implements CountersHandler { + + private final Map tags; + private final WriteProxy writeProxy; + + private final KeyCodec keyCodec = new KeyCodec(); + + /** + * Constructor. + * + * @param tags tags (optional) + * @param writeProxy writeProxy + */ + public CountersMimirHandler(Map tags, WriteProxy writeProxy) { + this.tags = tags; + this.writeProxy = writeProxy; + } + + @Override + public void accept(long timestamp, List counterDescriptors) { + final var builder = WriteRequest.newBuilder(); + + for (var descriptor : counterDescriptors) { + final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0); + final var visibility = key.stringValue("visibility"); + if (!"private".equals(visibility)) { + final var name = descriptor.label() != null ? 
descriptor.label() : key.stringValue("name");
+        if (name != null) {
+          final var tags = toTags(key);
+          final var value = descriptor.value();
+          builder.addTimeseries(toTimeSeries(timestamp, name, tags, value));
+        }
+      }
+    }
+
+    if (builder.getTimeseriesCount() > 0) {
+      writeProxy.push(builder.build());
+    }
+  }
+
+  private Map<String, String> toTags(Key key) {
+    final var allTags = new HashMap<String, String>();
+    if (tags != null) {
+      allTags.putAll(tags);
+    }
+
+    for (var entry : key.tags().entrySet()) {
+      final var tagId = entry.getKey();
+      final var value = entry.getValue();
+      allTags.put(tagId, String.valueOf(value));
+    }
+
+    return allTags;
+  }
+
+  private static TimeSeries toTimeSeries(
+      long timestamp, String name, Map<String, String> tags, long value) {
+    final var builder =
+        TimeSeries.newBuilder()
+            .addLabels(Label.newBuilder().setName("__name__").setValue(sanitizeName(name)).build())
+            .addSamples(Sample.newBuilder().setValue(value).setTimestamp(timestamp).build());
+    addLabels(builder, tags);
+    return builder.build();
+  }
+
+  private static void addLabels(Builder builder, Map<String, String> tags) {
+    if (tags != null) {
+      tags.forEach(
+          (name, value) -> {
+            if (!"name".equals(name)) {
+              builder.addLabels(
+                  Label.newBuilder()
+                      .setName(sanitizeName(name))
+                      .setValue(String.valueOf(value))
+                      .build());
+            }
+          });
+    }
+  }
+}
diff --git a/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/MetricsMimirHandler.java b/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/MetricsMimirHandler.java
new file mode 100644
index 0000000..2faec8b
--- /dev/null
+++ b/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/MetricsMimirHandler.java
@@ -0,0 +1,191 @@
+package io.scalecube.metrics.mimir;
+
+import static io.scalecube.metrics.MetricNames.sanitizeName;
+
+import io.scalecube.metrics.Key;
+import io.scalecube.metrics.KeyCodec;
+import io.scalecube.metrics.MetricsHandler;
+import io.scalecube.metrics.mimir.MimirPublisher.WriteProxy;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.HdrHistogram.Histogram;
+import org.agrona.DirectBuffer;
+import prometheus.Remote.WriteRequest;
+import prometheus.Types.Label;
+import prometheus.Types.Sample;
+import prometheus.Types.TimeSeries;
+import prometheus.Types.TimeSeries.Builder;
+
+/**
+ * Handles metrics (histograms, tps values) and pushes them to Mimir via {@link WriteProxy}.
+ * Exports percentiles, max, and count for histograms, and value snapshots for tps.
+ */
+public class MetricsMimirHandler implements MetricsHandler {
+
+  private static final double[] PERCENTILES =
+      new double[] {
+        10.0, // lower quantile – distribution shape
+        50.0, // median
+        90.0, // upper user experience
+        95.0, // soft SLA
+        99.0, // hard SLA
+        99.5, // early jitter detection
+        99.9, // jitter tail
+        99.99, // rare stall detection
+        99.999 // ghost-stall detection
+      };
+
+  private final Map<String, String> tags;
+  private final WriteProxy writeProxy;
+
+  private final KeyCodec keyCodec = new KeyCodec();
+
+  /**
+   * Constructor.
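+   * Tags supplied here are attached as extra labels to every exported time series.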
+   *
+   * @param tags tags (optional)
+   * @param writeProxy writeProxy
+   */
+  public MetricsMimirHandler(Map<String, String> tags, WriteProxy writeProxy) {
+    this.tags = tags;
+    this.writeProxy = writeProxy;
+  }
+
+  @Override
+  public void onHistogram(
+      long timestamp,
+      DirectBuffer keyBuffer,
+      int keyOffset,
+      int keyLength,
+      Histogram accumulated,
+      Histogram distinct,
+      long highestTrackableValue,
+      double conversionFactor) {
+    final var key = keyCodec.decodeKey(keyBuffer, keyOffset);
+    final var name = key.stringValue("name");
+    final var tags = toTags(key);
+
+    if (name != null) {
+      writeProxy.push(
+          WriteRequest.newBuilder()
+              .addAllTimeseries(toTimeSeriesList(timestamp, name, tags, conversionFactor, distinct))
+              .build());
+    }
+  }
+
+  @Override
+  public void onTps(
+      long timestamp, DirectBuffer keyBuffer, int keyOffset, int keyLength, long value) {
+    final var key = keyCodec.decodeKey(keyBuffer, keyOffset);
+    final var name = key.stringValue("name");
+    final var tags = toTags(key);
+
+    if (name != null) {
+      writeProxy.push(
+          WriteRequest.newBuilder()
+              .addAllTimeseries(toTimeSeriesList(timestamp, name, tags, value))
+              .build());
+    }
+  }
+
+  private Map<String, String> toTags(Key key) {
+    final var allTags = new HashMap<String, String>();
+    if (tags != null) {
+      allTags.putAll(tags);
+    }
+
+    for (var entry : key.tags().entrySet()) {
+      final var tagId = entry.getKey();
+      final var value = entry.getValue();
+      allTags.put(tagId, String.valueOf(value));
+    }
+
+    return allTags;
+  }
+
+  private static List<TimeSeries> toTimeSeriesList(
+      long timestamp,
+      String name,
+      Map<String, String> tags,
+      double conversionFactor,
+      Histogram histogram) {
+
+    final var tsList = new ArrayList<TimeSeries>();
+
+    // Percentile
+    for (double percentile : PERCENTILES) {
+      final var value = histogram.getValueAtPercentile(percentile) * conversionFactor;
+      final var percentileBuilder =
+          TimeSeries.newBuilder()
+              .addLabels(Label.newBuilder().setName("__name__").setValue(name).build())
+              .addLabels(
+                  Label.newBuilder().setName("p").setValue(formatPercentile(percentile)).build())
+              .addSamples(Sample.newBuilder().setValue(value).setTimestamp(timestamp).build());
+      addLabels(percentileBuilder, tags);
+      tsList.add(percentileBuilder.build());
+    }
+
+    // Max
+    final var value = histogram.getMaxValue() * conversionFactor;
+    final var maxBuilder =
+        TimeSeries.newBuilder()
+            .addLabels(Label.newBuilder().setName("__name__").setValue(name).build())
+            .addLabels(Label.newBuilder().setName("p").setValue("max").build())
+            .addSamples(Sample.newBuilder().setValue(value).setTimestamp(timestamp).build());
+    addLabels(maxBuilder, tags);
+    tsList.add(maxBuilder.build());
+
+    // Count
+    final var countBuilder =
+        TimeSeries.newBuilder()
+            .addLabels(Label.newBuilder().setName("__name__").setValue(name + "_count").build())
+            .addSamples(
+                Sample.newBuilder()
+                    .setValue(histogram.getTotalCount())
+                    .setTimestamp(timestamp)
+                    .build());
+    addLabels(countBuilder, tags);
+    tsList.add(countBuilder.build());
+
+    return tsList;
+  }
+
+  private static List<TimeSeries> toTimeSeriesList(
+      long timestamp, String name, Map<String, String> tags, long value) {
+    final var tsList = new ArrayList<TimeSeries>();
+
+    final var tpsBuilder =
+        TimeSeries.newBuilder()
+            .addLabels(Label.newBuilder().setName("__name__").setValue(name).build())
+            .addSamples(Sample.newBuilder().setValue(value).setTimestamp(timestamp).build());
+    addLabels(tpsBuilder, tags);
+    tsList.add(tpsBuilder.build());
+
+    return tsList;
+  }
+
+  private static String formatPercentile(double value) {
+    if (value == Math.floor(value)) {
+      return String.valueOf((int) value);
+    } else {
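+      // Non-integral percentile, e.g. 99.9 -> "99.9" (integral ones render as "50", not "50.0")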
return String.valueOf(value);
+    }
+  }
+
+  private static void addLabels(Builder builder, Map<String, String> tags) {
+    if (tags != null) {
+      tags.forEach(
+          (name, value) -> {
+            if (!"name".equals(name)) {
+              builder.addLabels(
+                  Label.newBuilder()
+                      .setName(sanitizeName(name))
+                      .setValue(String.valueOf(value))
+                      .build());
+            }
+          });
+    }
+  }
+}
diff --git a/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/MimirPublisher.java b/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/MimirPublisher.java
new file mode 100644
index 0000000..ac08b47
--- /dev/null
+++ b/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/MimirPublisher.java
@@ -0,0 +1,303 @@
+package io.scalecube.metrics.mimir;
+
+import static java.util.concurrent.atomic.AtomicIntegerFieldUpdater.newUpdater;
+
+import java.time.Duration;
+import java.util.ConcurrentModificationException;
+import java.util.Objects;
+import java.util.StringJoiner;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import org.agrona.CloseHelper;
+import org.agrona.ErrorHandler;
+import org.agrona.concurrent.AgentInvoker;
+import org.agrona.concurrent.AgentRunner;
+import org.agrona.concurrent.BackoffIdleStrategy;
+import org.agrona.concurrent.EpochClock;
+import org.agrona.concurrent.IdleStrategy;
+import org.agrona.concurrent.ManyToOneConcurrentArrayQueue;
+import org.agrona.concurrent.SystemEpochClock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import prometheus.Remote.WriteRequest;
+
+/**
+ * Component that publishes {@link WriteRequest} objects to Mimir. Requests accumulate in a queue
+ * and are published periodically, on a configurable time interval.
+ */
+public class MimirPublisher implements AutoCloseable {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(MimirPublisher.class);
+
+  private final Context context;
+
+  private final AgentInvoker agentInvoker;
+  private final AgentRunner agentRunner;
+
+  private MimirPublisher(Context context) {
+    context.conclude();
+    this.context = context;
+
+    final var agent =
+        new MimirPublisherAgent(
+            context.url(),
+            context.epochClock(),
+            context.retryInterval(),
+            context.publishInterval(),
+            context.writeLimit(),
+            context.writeQueue());
+
+    if (context.useAgentInvoker()) {
+      agentRunner = null;
+      agentInvoker = new AgentInvoker(context.errorHandler(), null, agent);
+    } else {
+      agentInvoker = null;
+      agentRunner = new AgentRunner(context.idleStrategy(), context.errorHandler(), null, agent);
+    }
+  }
+
+  /**
+   * Launch {@link MimirPublisher} with default {@link Context}.
+   *
+   * @return newly started {@link MimirPublisher}
+   */
+  public static MimirPublisher launch() {
+    return launch(new Context());
+  }
+
+  /**
+   * Launch {@link MimirPublisher} with provided {@link Context}.
+   *
+   * @param context context
+   * @return newly started {@link MimirPublisher}
+   */
+  public static MimirPublisher launch(Context context) {
+    final var publisher = new MimirPublisher(context);
+    if (publisher.agentInvoker != null) {
+      publisher.agentInvoker.start();
+    } else {
+      AgentRunner.startOnThread(publisher.agentRunner);
+    }
+    return publisher;
+  }
+
+  /**
+   * Returns {@link Context} instance.
+   *
+   * @return {@link Context} instance
+   */
+  public Context context() {
+    return context;
+  }
+
+  /**
+   * Returns {@link AgentInvoker} instance when running without threads, or null if running with
+   * {@link AgentRunner}.
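+   * <p>In invoker mode the caller is expected to drive the duty cycle itself by calling
+   * {@link AgentInvoker#invoke()} from its own thread.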
+ * + * @return {@link AgentInvoker} instance, or null + */ + public AgentInvoker agentInvoker() { + return agentInvoker; + } + + /** + * Returns {@link WriteProxy} instance. + * + * @return {@link WriteProxy} instance + */ + public WriteProxy proxy() { + return new WriteProxy(context.writeQueue()); + } + + @Override + public void close() { + CloseHelper.quietCloseAll(agentInvoker, agentRunner); + } + + public static class Context { + + private static final AtomicIntegerFieldUpdater IS_CONCLUDED_UPDATER = + newUpdater(MimirPublisher.Context.class, "isConcluded"); + private volatile int isConcluded; + + private Duration retryInterval; + private Duration publishInterval; + private EpochClock epochClock; + private boolean useAgentInvoker; + private ErrorHandler errorHandler; + private IdleStrategy idleStrategy; + private String url; + private Integer writeLimit; + private Integer writeQueueCapacity; + private ManyToOneConcurrentArrayQueue writeQueue; + + public Context() {} + + private void conclude() { + if (0 != IS_CONCLUDED_UPDATER.getAndSet(this, 1)) { + throw new ConcurrentModificationException(); + } + + if (retryInterval == null) { + retryInterval = Duration.ofSeconds(3); + } + + if (publishInterval == null) { + publishInterval = Duration.ofSeconds(5); + } + + if (epochClock == null) { + epochClock = SystemEpochClock.INSTANCE; + } + + if (errorHandler == null) { + errorHandler = ex -> LOGGER.error("Exception occurred: ", ex); + } + + if (idleStrategy == null) { + idleStrategy = new BackoffIdleStrategy(); + } + + Objects.requireNonNull(url, "url"); + + if (writeLimit == null) { + writeLimit = 100; + } + + if (writeQueueCapacity == null) { + writeQueueCapacity = 64 * 1024; + } + + if (writeQueue == null) { + writeQueue = new ManyToOneConcurrentArrayQueue<>(writeQueueCapacity); + } + } + + public Duration retryInterval() { + return retryInterval; + } + + public Context retryInterval(Duration retryInterval) { + this.retryInterval = retryInterval; + return this; + } + + public Duration publishInterval() { + return publishInterval; + } + + public Context publishInterval(Duration publishInterval) { + this.publishInterval = publishInterval; + return this; + } + + public EpochClock epochClock() { + return epochClock; + } + + public Context epochClock(EpochClock epochClock) { + this.epochClock = epochClock; + return this; + } + + public boolean useAgentInvoker() { + return useAgentInvoker; + } + + public Context useAgentInvoker(boolean useAgentInvoker) { + this.useAgentInvoker = useAgentInvoker; + return this; + } + + public ErrorHandler errorHandler() { + return errorHandler; + } + + public Context errorHandler(ErrorHandler errorHandler) { + this.errorHandler = errorHandler; + return this; + } + + public IdleStrategy idleStrategy() { + return idleStrategy; + } + + public Context idleStrategy(IdleStrategy idleStrategy) { + this.idleStrategy = idleStrategy; + return this; + } + + public String url() { + return url; + } + + public Context url(String url) { + this.url = url; + return this; + } + + public Integer writeLimit() { + return writeLimit; + } + + public Context writeLimit(Integer writeLimit) { + this.writeLimit = writeLimit; + return this; + } + + public Integer writeQueueCapacity() { + return writeQueueCapacity; + } + + public Context writeQueueCapacity(Integer writeQueueCapacity) { + this.writeQueueCapacity = writeQueueCapacity; + return this; + } + + public ManyToOneConcurrentArrayQueue writeQueue() { + return writeQueue; + } + + public Context writeQueue(ManyToOneConcurrentArrayQueue 
writeQueue) {
+      this.writeQueue = writeQueue;
+      return this;
+    }
+
+    @Override
+    public String toString() {
+      return new StringJoiner(", ", Context.class.getSimpleName() + "[", "]")
+          .add("retryInterval=" + retryInterval)
+          .add("publishInterval=" + publishInterval)
+          .add("epochClock=" + epochClock)
+          .add("useAgentInvoker=" + useAgentInvoker)
+          .add("errorHandler=" + errorHandler)
+          .add("idleStrategy=" + idleStrategy)
+          .add("url='" + url + "'")
+          .add("writeLimit=" + writeLimit)
+          .add("writeQueueCapacity=" + writeQueueCapacity)
+          .add("writeQueue=" + writeQueue)
+          .toString();
+    }
+  }
+
+  /**
+   * Wrapper around the {@code write-queue} of {@link WriteRequest} objects. Used to push {@link
+   * WriteRequest}s that will later be published to Mimir.
+   */
+  public static class WriteProxy {
+
+    private final ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue;
+
+    private WriteProxy(ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue) {
+      this.writeQueue = writeQueue;
+    }
+
+    /**
+     * Pushes {@link WriteRequest} to the queue (if it cannot fit, the request is dropped).
+     *
+     * @param request {@code prometheus.WriteRequest}
+     */
+    public void push(WriteRequest request) {
+      writeQueue.offer(request);
+    }
+  }
+}
diff --git a/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/MimirPublisherAgent.java b/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/MimirPublisherAgent.java
new file mode 100644
index 0000000..a03f9ef
--- /dev/null
+++ b/metrics-mimir/src/main/java/io/scalecube/metrics/mimir/MimirPublisherAgent.java
@@ -0,0 +1,196 @@
+package io.scalecube.metrics.mimir;
+
+import io.scalecube.metrics.Delay;
+import java.io.IOException;
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpRequest.BodyPublishers;
+import java.net.http.HttpResponse;
+import java.time.Duration;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import org.agrona.LangUtil;
+import org.agrona.concurrent.Agent;
+import org.agrona.concurrent.AgentTerminationException;
+import org.agrona.concurrent.EpochClock;
+import org.agrona.concurrent.ManyToOneConcurrentArrayQueue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.xerial.snappy.Snappy;
+import prometheus.Remote.WriteRequest;
+
+class MimirPublisherAgent implements Agent {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(MimirPublisherAgent.class);
+
+  public enum State {
+    INIT,
+    RUNNING,
+    CLEANUP,
+    CLOSED
+  }
+
+  private final String url;
+  private final int writeLimit;
+  private final ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue;
+
+  private final Delay retryInterval;
+  private final Delay publishInterval;
+  private ExecutorService executor;
+  private HttpClient httpClient;
+  private CompletableFuture<HttpResponse<String>> future;
+  private State state = State.CLOSED;
+
+  MimirPublisherAgent(
+      String url,
+      EpochClock epochClock,
+      Duration retryInterval,
+      Duration publishInterval,
+      int writeLimit,
+      ManyToOneConcurrentArrayQueue<WriteRequest> writeQueue) {
+    this.url = url;
+    this.writeLimit = writeLimit;
+    this.writeQueue = writeQueue;
+    this.retryInterval = new Delay(epochClock, retryInterval.toMillis());
+    this.publishInterval = new Delay(epochClock, publishInterval.toMillis());
+  }
+
+  @Override
+  public String roleName() {
+    return "MimirPublisherAgent";
+  }
+
+  @Override
+  public void onStart() {
+    if (state != State.CLOSED) {
+      throw new AgentTerminationException("Illegal state: " + state);
+    }
+    state(State.INIT);
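+    // State machine: INIT -> RUNNING; failures route through CLEANUP, which schedules a
+    // retry back to INIT after retryInterval; onClose() moves the agent to CLOSED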
+  }
+
+  @Override
+  public int doWork() throws Exception {
+    try {
+      return switch (state) {
+        case INIT -> init();
+        case RUNNING -> running();
+        case CLEANUP -> cleanup();
+        default -> throw new AgentTerminationException("Unknown state: " + state);
+      };
+    } catch (AgentTerminationException e) {
+      throw e;
+    } catch (Exception e) {
+      state(State.CLEANUP);
+      throw e;
+    }
+  }
+
+  private int init() {
+    if (retryInterval.isNotOverdue()) {
+      return 0;
+    }
+
+    executor =
+        Executors.newSingleThreadExecutor(
+            r -> {
+              final var thread = new Thread(r);
+              thread.setDaemon(true);
+              return thread;
+            });
+    httpClient = HttpClient.newBuilder().executor(executor).build();
+    publishInterval.delay();
+
+    state(State.RUNNING);
+    return 1;
+  }
+
+  private int running() throws Exception {
+    final var fillRate = (double) writeQueue.size() / writeQueue.capacity();
+
+    // Publish on the interval, or early once the queue is more than half full
+    if (publishInterval.isOverdue() || fillRate > 0.5) {
+      publishInterval.delay();
+      if (future != null) {
+        future.cancel(true);
+        future = null;
+      }
+
+      final var builder = WriteRequest.newBuilder();
+      writeQueue.drain(
+          request -> builder.addAllTimeseries(request.getTimeseriesList()), writeLimit);
+      final var writeRequest = builder.build();
+
+      if (writeRequest.getTimeseriesCount() > 0) {
+        future = send(writeRequest);
+      }
+    }
+
+    if (future != null) {
+      if (future.isDone()) {
+        final var response = future.get();
+        if (response.statusCode() != 200) {
+          LOGGER.warn(
+              "Failed to push metrics: HTTP {}, body: {}", response.statusCode(), response.body());
+        }
+        future = null;
+      }
+    }
+
+    return 0;
+  }
+
+  private CompletableFuture<HttpResponse<String>> send(WriteRequest request) {
+    final var payload = request.toByteArray();
+    final byte[] compressedPayload;
+    try {
+      compressedPayload = Snappy.compress(payload);
+    } catch (IOException e) {
+      LangUtil.rethrowUnchecked(e);
+      return null;
+    }
+
+    final var httpRequest =
+        HttpRequest.newBuilder()
+            .uri(URI.create(url))
+            .header("Content-Type", "application/x-protobuf")
+            .header("Content-Encoding", "snappy")
+            .header("X-Prometheus-Remote-Write-Version", "0.1.0")
+            .POST(BodyPublishers.ofByteArray(compressedPayload))
+            .build();
+
+    return httpClient.sendAsync(httpRequest, HttpResponse.BodyHandlers.ofString());
+  }
+
+  private int cleanup() {
+    if (executor != null) {
+      executor.shutdownNow();
+    }
+    // CloseHelper.quietClose(httpClient);
+    httpClient = null;
+    executor = null;
+
+    if (future != null) {
+      future.cancel(true);
+      future = null;
+    }
+
+    State previous = state;
+    if (previous != State.CLOSED) { // when it comes from onClose()
+      retryInterval.delay();
+      state(State.INIT);
+    }
+    return 1;
+  }
+
+  @Override
+  public void onClose() {
+    state(State.CLOSED);
+    cleanup();
+  }
+
+  private void state(State state) {
+    LOGGER.debug("[{}][state] {}->{}", roleName(), this.state, state);
+    this.state = state;
+  }
+}
diff --git a/metrics-mimir/src/main/proto/gogoproto/gogo.proto b/metrics-mimir/src/main/proto/gogoproto/gogo.proto
new file mode 100644
index 0000000..b80c856
--- /dev/null
+++ b/metrics-mimir/src/main/proto/gogoproto/gogo.proto
@@ -0,0 +1,144 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + 
optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git a/metrics-mimir/src/main/proto/remote.proto b/metrics-mimir/src/main/proto/remote.proto new file mode 100644 index 0000000..b4f82f5 --- /dev/null +++ b/metrics-mimir/src/main/proto/remote.proto @@ -0,0 +1,88 @@ +// Copyright 2016 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package prometheus; + +option go_package = "prompb"; + +import "types.proto"; +import "gogoproto/gogo.proto"; + +message WriteRequest { + repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; + // Cortex uses this field to determine the source of the write request. + // We reserve it to avoid any compatibility issues. + reserved 2; + repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false]; +} + +// ReadRequest represents a remote read request. +message ReadRequest { + repeated Query queries = 1; + + enum ResponseType { + // Server will return a single ReadResponse message with matched series that includes list of raw samples. + // It's recommended to use streamed response types instead. + // + // Response headers: + // Content-Type: "application/x-protobuf" + // Content-Encoding: "snappy" + SAMPLES = 0; + // Server will stream a delimited ChunkedReadResponse message that + // contains XOR or HISTOGRAM(!) encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian + // uint32 for CRC32 Castagnoli checksum. + // + // Response headers: + // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" + // Content-Encoding: "" + STREAMED_XOR_CHUNKS = 1; + } + + // accepted_response_types allows negotiating the content type of the response. + // + // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is + // implemented by server, error is returned. + // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used. 
+ repeated ResponseType accepted_response_types = 2; +} + +// ReadResponse is a response when response_type equals SAMPLES. +message ReadResponse { + // In same order as the request's queries. + repeated QueryResult results = 1; +} + +message Query { + int64 start_timestamp_ms = 1; + int64 end_timestamp_ms = 2; + repeated prometheus.LabelMatcher matchers = 3; + prometheus.ReadHints hints = 4; +} + +message QueryResult { + // Samples within a time series must be ordered by time. + repeated prometheus.TimeSeries timeseries = 1; +} + +// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS. +// We strictly stream full series after series, optionally split by time. This means that a single frame can contain +// partition of the single series, but once a new series is started to be streamed it means that no more chunks will +// be sent for previous one. Series are returned sorted in the same way TSDB block are internally. +message ChunkedReadResponse { + repeated prometheus.ChunkedSeries chunked_series = 1; + + // query_index represents an index of the query from ReadRequest.queries these chunks relates to. + int64 query_index = 2; +} diff --git a/metrics-mimir/src/main/proto/types.proto b/metrics-mimir/src/main/proto/types.proto new file mode 100644 index 0000000..8bc69d5 --- /dev/null +++ b/metrics-mimir/src/main/proto/types.proto @@ -0,0 +1,191 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package prometheus; + +option go_package = "prompb"; + +import "gogoproto/gogo.proto"; + +message MetricMetadata { + enum MetricType { + UNKNOWN = 0; + COUNTER = 1; + GAUGE = 2; + HISTOGRAM = 3; + GAUGEHISTOGRAM = 4; + SUMMARY = 5; + INFO = 6; + STATESET = 7; + } + + // Represents the metric type, these match the set from Prometheus. + // Refer to github.com/prometheus/common/model/metadata.go for details. + MetricType type = 1; + string metric_family_name = 2; + string help = 4; + string unit = 5; +} + +message Sample { + double value = 1; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 2; +} + +message Exemplar { + // Optional, can be empty. + repeated Label labels = 1 [(gogoproto.nullable) = false]; + double value = 2; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 3; +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +message Histogram { + enum ResetHint { + UNKNOWN = 0; // Need to test for a counter reset explicitly. + YES = 1; // This is the 1st histogram after a counter reset. 
+ NO = 2; // There was no counter reset between this and the previous Histogram. + GAUGE = 3; // This is a gauge histogram where counter resets don't happen. + } + + oneof count { // Count of observations in the histogram. + uint64 count_int = 1; + double count_float = 2; + } + double sum = 3; // Sum of observations in the histogram. + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + sint32 schema = 4; + double zero_threshold = 5; // Breadth of the zero bucket. + oneof zero_count { // Count in zero bucket. + uint64 zero_count_int = 6; + double zero_count_float = 7; + } + + // Negative Buckets. + repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false]; + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_counts = 10; // Absolute count of each bucket. + + // Positive Buckets. + repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false]; + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_counts = 13; // Absolute count of each bucket. + + ResetHint reset_hint = 14; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 15; + + // custom_values are not part of the specification, DO NOT use in remote write clients. + // Used only for converting from OpenTelemetry to Prometheus internally. + repeated double custom_values = 16; +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} + +// TimeSeries represents samples and labels for a single time series. +message TimeSeries { + // For a timeseries to be valid, and for the samples and exemplars + // to be ingested by the remote system properly, the labels field is required. + repeated Label labels = 1 [(gogoproto.nullable) = false]; + repeated Sample samples = 2 [(gogoproto.nullable) = false]; + repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; + repeated Histogram histograms = 4 [(gogoproto.nullable) = false]; +} + +message Label { + string name = 1; + string value = 2; +} + +message Labels { + repeated Label labels = 1 [(gogoproto.nullable) = false]; +} + +// Matcher specifies a rule, which can match or set of labels or not. 
+message LabelMatcher {
+  enum Type {
+    EQ  = 0;
+    NEQ = 1;
+    RE  = 2;
+    NRE = 3;
+  }
+  Type type = 1;
+  string name = 2;
+  string value = 3;
+}
+
+message ReadHints {
+  int64 step_ms = 1;  // Query step size in milliseconds.
+  string func = 2;    // String representation of surrounding function or aggregation.
+  int64 start_ms = 3; // Start time in milliseconds.
+  int64 end_ms = 4;   // End time in milliseconds.
+  repeated string grouping = 5; // List of label names used in aggregation.
+  bool by = 6;        // Indicate whether it is without or by.
+  int64 range_ms = 7; // Range vector selector range in milliseconds.
+}
+
+// Chunk represents a TSDB chunk.
+// Time range [min, max] is inclusive.
+message Chunk {
+  int64 min_time_ms = 1;
+  int64 max_time_ms = 2;
+
+  // We require this to match chunkenc.Encoding.
+  enum Encoding {
+    UNKNOWN = 0;
+    XOR = 1;
+    HISTOGRAM = 2;
+    FLOAT_HISTOGRAM = 3;
+  }
+  Encoding type = 3;
+  bytes data = 4;
+}
+
+// ChunkedSeries represents single, encoded time series.
+message ChunkedSeries {
+  // Labels should be sorted.
+  repeated Label labels = 1 [(gogoproto.nullable) = false];
+  // Chunks will be in start time order and may overlap.
+  repeated Chunk chunks = 2 [(gogoproto.nullable) = false];
+}
diff --git a/metrics-prometheus/pom.xml b/metrics-prometheus/pom.xml
new file mode 100644
index 0000000..d31c145
--- /dev/null
+++ b/metrics-prometheus/pom.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>io.scalecube</groupId>
+    <artifactId>scalecube-metrics-parent</artifactId>
+    <version>0.1.0-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>scalecube-metrics-prometheus</artifactId>
+
+  <dependencies>
+    <dependency>
+      <groupId>io.scalecube</groupId>
+      <artifactId>scalecube-metrics</artifactId>
+      <version>${project.parent.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+  </dependencies>
+</project>
diff --git a/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/CountersPrometheusAdapter.java b/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/CountersPrometheusAdapter.java
new file mode 100644
index 0000000..fcedd15
--- /dev/null
+++ b/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/CountersPrometheusAdapter.java
@@ -0,0 +1,101 @@
+package io.scalecube.metrics.prometheus;
+
+import static io.scalecube.metrics.MetricNames.sanitizeName;
+
+import io.scalecube.metrics.CounterDescriptor;
+import io.scalecube.metrics.CountersHandler;
+import io.scalecube.metrics.Key;
+import io.scalecube.metrics.KeyCodec;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+
+/**
+ * Adapter that translates counters (backed by {@link org.agrona.concurrent.status.AtomicCounter})
+ * into the Prometheus text exposition format. Counters are updated via the {@link
+ * CountersHandler} interface and exposed to Prometheus by implementing the {@link PrometheusWriter}
+ * contract.
+ */
+public class CountersPrometheusAdapter implements CountersHandler, PrometheusWriter {
+
+  private final Map<String, String> tags;
+
+  private final KeyCodec keyCodec = new KeyCodec();
+  private final AtomicReference<List<CounterDescriptor>> counterDescriptorsReference =
+      new AtomicReference<>();
+
+  /**
+   * Constructor.
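+   *
+   * <p>The given tags are merged into every exported counter line. A minimal, illustrative wiring
+   * (the directory, clock, and interval below are assumptions of this example):
+   *
+   * <pre>{@code
+   * var adapter = new CountersPrometheusAdapter(Map.of("host", "node-1"));
+   * var agent = new CountersReaderAgent(
+   *     "counters-reader", countersDir, true, epochClock, Duration.ofSeconds(1), adapter);
+   * var handler = new PrometheusMetricsHandler(adapter);
+   * }</pre>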
+   *
+   * @param tags (optional)
+   */
+  public CountersPrometheusAdapter(Map<String, String> tags) {
+    this.tags = tags;
+  }
+
+  @Override
+  public void accept(long timestamp, List<CounterDescriptor> counterDescriptors) {
+    counterDescriptorsReference.set(List.copyOf(counterDescriptors));
+  }
+
+  @Override
+  public void write(OutputStreamWriter writer) throws IOException {
+    final var counterDescriptors = counterDescriptorsReference.getAndSet(null);
+    if (counterDescriptors == null) {
+      return;
+    }
+
+    for (var descriptor : counterDescriptors) {
+      final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0);
+      final var visibility = key.stringValue("visibility");
+      if (!"private".equals(visibility)) {
+        final var name = descriptor.label() != null ? descriptor.label() : key.stringValue("name");
+        if (name != null) {
+          writer
+              .append(sanitizeName(name))
+              .append(formatLabels(toTags(key)))
+              .append(" ")
+              .append(String.valueOf(descriptor.value()))
+              .append("\n");
+        }
+      }
+    }
+  }
+
+  private static String formatLabels(Map<String, String> labels) {
+    if (labels == null || labels.isEmpty()) {
+      return "";
+    }
+
+    return labels.entrySet().stream()
+        .filter(entry -> !"name".equals(entry.getKey()))
+        .map(
+            entry ->
+                sanitizeName(entry.getKey()) + "=\"" + escapeLabelValue(entry.getValue()) + "\"")
+        .collect(Collectors.joining(",", "{", "}"));
+  }
+
+  private static String escapeLabelValue(String value) {
+    // Escape backslashes and quotes as per Prometheus spec
+    return value.replace("\\", "\\\\").replace("\"", "\\\"");
+  }
+
+  private Map<String, String> toTags(Key key) {
+    final var allTags = new HashMap<String, String>();
+    if (tags != null) {
+      allTags.putAll(tags);
+    }
+
+    for (var entry : key.tags().entrySet()) {
+      final var tagId = entry.getKey();
+      final var value = entry.getValue();
+      allTags.put(tagId, String.valueOf(value));
+    }
+
+    return allTags;
+  }
+}
diff --git a/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/MetricsPrometheusAdapter.java b/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/MetricsPrometheusAdapter.java
new file mode 100644
index 0000000..650c081
--- /dev/null
+++ b/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/MetricsPrometheusAdapter.java
@@ -0,0 +1,227 @@
+package io.scalecube.metrics.prometheus;
+
+import static io.scalecube.metrics.MetricNames.sanitizeName;
+
+import io.scalecube.metrics.Key;
+import io.scalecube.metrics.KeyCodec;
+import io.scalecube.metrics.MetricsHandler;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+import org.HdrHistogram.Histogram;
+import org.agrona.DirectBuffer;
+
+/**
+ * Adapter that translates metrics (histograms, tps values) into the Prometheus text exposition
+ * format. Metrics are updated via the {@link MetricsHandler} interface and exposed to
+ * Prometheus by implementing the {@link PrometheusWriter} contract.
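+ *
+ * <p>For a histogram named {@code latency}, the exported lines look roughly like (values are
+ * illustrative):
+ *
+ * <pre>{@code
+ * latency{p="50"} 0.102
+ * latency{p="99"} 1.250
+ * latency{p="max"} 2.300
+ * latency_count 1024
+ * }</pre>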
+ */
+public class MetricsPrometheusAdapter implements MetricsHandler, PrometheusWriter {
+
+  private static final double[] PERCENTILES =
+      new double[] {
+        10.0, // lower quantile – distribution shape
+        50.0, // median
+        90.0, // upper user experience
+        95.0, // soft SLA
+        99.0, // hard SLA
+        99.5, // early jitter detection
+        99.9, // jitter tail
+        99.99, // rare stall detection
+        99.999 // ghost-stall detection
+      };
+
+  private final Map<String, String> tags;
+
+  private final KeyCodec keyCodec = new KeyCodec();
+  private final AtomicReference<Map<Key, HistogramItem>> histogramMapReference =
+      new AtomicReference<>();
+  private final AtomicReference<Map<Key, TpsItem>> tpsMapReference = new AtomicReference<>();
+
+  /**
+   * Constructor.
+   *
+   * @param tags (optional)
+   */
+  public MetricsPrometheusAdapter(Map<String, String> tags) {
+    this.tags = tags;
+  }
+
+  @Override
+  public void onHistogram(
+      long timestamp,
+      DirectBuffer keyBuffer,
+      int keyOffset,
+      int keyLength,
+      Histogram accumulated,
+      Histogram distinct,
+      long highestTrackableValue,
+      double conversionFactor) {
+    final var key = keyCodec.decodeKey(keyBuffer, keyOffset);
+    final var name = key.stringValue("name");
+    final var tags = toTags(key);
+
+    if (name != null) {
+      histogramMapReference.updateAndGet(
+          map -> {
+            if (map == null) {
+              map = new LinkedHashMap<>();
+            }
+            map.put(
+                key,
+                new HistogramItem(
+                    name, tags, accumulated, distinct, highestTrackableValue, conversionFactor));
+            return map;
+          });
+    }
+  }
+
+  @Override
+  public void onTps(
+      long timestamp, DirectBuffer keyBuffer, int keyOffset, int keyLength, long value) {
+    final var key = keyCodec.decodeKey(keyBuffer, keyOffset);
+    final var name = key.stringValue("name");
+    final var tags = toTags(key);
+
+    if (name != null) {
+      tpsMapReference.updateAndGet(
+          map -> {
+            if (map == null) {
+              map = new LinkedHashMap<>();
+            }
+            map.put(key, new TpsItem(name, tags, value));
+            return map;
+          });
+    }
+  }
+
+  @Override
+  public void write(OutputStreamWriter writer) throws IOException {
+    final var histogramMap = histogramMapReference.getAndSet(null);
+    if (histogramMap != null) {
+      for (var histogramItem : histogramMap.values()) {
+        writeHistogram(writer, histogramItem);
+      }
+    }
+
+    final var tpsMap = tpsMapReference.getAndSet(null);
+    if (tpsMap != null) {
+      for (var tpsItem : tpsMap.values()) {
+        writeTps(writer, tpsItem);
+      }
+    }
+  }
+
+  private static void writeHistogram(OutputStreamWriter writer, HistogramItem histogramItem)
+      throws IOException {
+    final var name = histogramItem.name();
+    final var conversionFactor = histogramItem.conversionFactor();
+    final var histogram = histogramItem.distinct();
+    final var tags = histogramItem.tags();
+
+    // Percentile
+    for (double percentile : PERCENTILES) {
+      writer
+          .append(sanitizeName(name))
+          .append(formatLabels(addTag(tags, "p", formatPercentile(percentile))))
+          .append(" ")
+          .append(formatDouble(histogram.getValueAtPercentile(percentile) * conversionFactor))
+          .append("\n");
+    }
+
+    // Max
+    writer
+        .append(sanitizeName(name))
+        .append(formatLabels(addTag(tags, "p", "max")))
+        .append(" ")
+        .append(formatDouble(histogram.getMaxValue() * conversionFactor))
+        .append("\n");
+
+    // Count
+    writer
+        .append(sanitizeName(name + "_count"))
+        .append(formatLabels(tags))
+        .append(" ")
+        .append(String.valueOf(histogram.getTotalCount()))
+        .append("\n");
+  }
+
+  private static void writeTps(OutputStreamWriter writer, TpsItem tpsItem) throws IOException {
+    final var name = tpsItem.name();
+    final var tags = tpsItem.tags();
+    final var value = tpsItem.value();
+
+    writer
+        .append(sanitizeName(name))
+        .append(formatLabels(tags))
+        .append(" ")
+        .append(String.valueOf(value))
+        .append("\n");
+  }
+
+  private static LinkedHashMap<String, String> addTag(
+      Map<String, String> tags, String name, String value) {
+    final var map = new LinkedHashMap<>(tags);
+    map.put(name, value);
+    return map;
+  }
+
+  private static String formatLabels(Map<String, String> labels) {
+    if (labels == null || labels.isEmpty()) {
+      return "";
+    }
+
+    return labels.entrySet().stream()
+        .filter(entry -> !"name".equals(entry.getKey()))
+        .map(
+            entry ->
+                sanitizeName(entry.getKey()) + "=\"" + escapeLabelValue(entry.getValue()) + "\"")
+        .collect(Collectors.joining(",", "{", "}"));
+  }
+
+  private static String escapeLabelValue(String value) {
+    // Escape backslashes and quotes as per Prometheus spec
+    return value.replace("\\", "\\\\").replace("\"", "\\\"");
+  }
+
+  private static String formatPercentile(double value) {
+    if (value == Math.floor(value)) {
+      return String.valueOf((int) value);
+    } else {
+      return String.valueOf(value);
+    }
+  }
+
+  private static String formatDouble(double value) {
+    return String.format("%.3f", value);
+  }
+
+  private Map<String, String> toTags(Key key) {
+    final var allTags = new HashMap<String, String>();
+    if (tags != null) {
+      allTags.putAll(tags);
+    }
+
+    for (var entry : key.tags().entrySet()) {
+      final var tagId = entry.getKey();
+      final var value = entry.getValue();
+      allTags.put(tagId, String.valueOf(value));
+    }
+
+    return allTags;
+  }
+
+  private record HistogramItem(
+      String name,
+      Map<String, String> tags,
+      Histogram accumulated,
+      Histogram distinct,
+      long highestTrackableValue,
+      double conversionFactor) {}
+
+  private record TpsItem(String name, Map<String, String> tags, long value) {}
+}
diff --git a/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/PrometheusMetricsHandler.java b/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/PrometheusMetricsHandler.java
new file mode 100644
index 0000000..0b6e743
--- /dev/null
+++ b/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/PrometheusMetricsHandler.java
@@ -0,0 +1,69 @@
+package io.scalecube.metrics.prometheus;
+
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import java.io.BufferedOutputStream;
+import java.io.OutputStreamWriter;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.List;
+import java.util.zip.GZIPOutputStream;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * HTTP handler that serializes registered {@link PrometheusWriter} instances into the Prometheus
+ * text exposition format. The handler responds to {@code GET /metrics} requests with the current
+ * snapshot of metrics, compressed with GZIP, and encoded in UTF-8. It delegates the actual metric
+ * serialization to the provided {@link PrometheusWriter} instances. On error, the handler responds
+ * with HTTP 500.
+ */
+public class PrometheusMetricsHandler implements HttpHandler {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(PrometheusMetricsHandler.class);
+
+  private final List<PrometheusWriter> prometheusWriters;
+
+  /**
+   * Constructor.
+   *
+   * @param prometheusWriters prometheusWriters
+   */
+  public PrometheusMetricsHandler(PrometheusWriter... prometheusWriters) {
+    this(Arrays.asList(prometheusWriters));
+  }
+
+  /**
+   * Constructor.
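+   *
+   * <p>Writers are serialized in list order into a single response body, so related writers can
+   * be grouped deliberately, e.g. (illustrative):
+   *
+   * <pre>{@code
+   * var handler = new PrometheusMetricsHandler(List.of(countersAdapter, metricsAdapter));
+   * }</pre>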
+   *
+   * @param prometheusWriters prometheusWriters
+   */
+  public PrometheusMetricsHandler(List<PrometheusWriter> prometheusWriters) {
+    this.prometheusWriters = prometheusWriters;
+  }
+
+  @Override
+  public void handle(HttpExchange exchange) {
+    final var responseHeaders = exchange.getResponseHeaders();
+    responseHeaders.set("Content-Type", "text/plain; version=0.0.4; charset=utf-8");
+    responseHeaders.set("Content-Encoding", "gzip");
+
+    try (var writer =
+        new OutputStreamWriter(
+            new GZIPOutputStream(new BufferedOutputStream(exchange.getResponseBody())),
+            StandardCharsets.UTF_8)) {
+      exchange.sendResponseHeaders(200, 0);
+      for (var prometheusWriter : prometheusWriters) {
+        prometheusWriter.write(writer);
+      }
+      writer.flush();
+    } catch (Exception e) {
+      LOGGER.warn("Exception occurred", e);
+      try {
+        exchange.sendResponseHeaders(500, -1);
+      } catch (Exception ex) {
+        // ignore
+      }
+    }
+  }
+}
diff --git a/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/PrometheusMetricsServer.java b/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/PrometheusMetricsServer.java
new file mode 100644
index 0000000..a2e9d6c
--- /dev/null
+++ b/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/PrometheusMetricsServer.java
@@ -0,0 +1,64 @@
+package io.scalecube.metrics.prometheus;
+
+import com.sun.net.httpserver.HttpHandler;
+import com.sun.net.httpserver.HttpServer;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.concurrent.Executors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Embedded HTTP server that exposes Prometheus metrics on a configurable address. This server uses
+ * the built-in {@link com.sun.net.httpserver.HttpServer} to bind a {@code /metrics} endpoint, which
+ * can be scraped by Prometheus. The endpoint is backed by a user-provided {@link
+ * com.sun.net.httpserver.HttpHandler}, typically an instance of {@link PrometheusMetricsHandler}.
+ */
+public class PrometheusMetricsServer implements AutoCloseable {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(PrometheusMetricsServer.class);
+
+  private final HttpServer server;
+
+  private PrometheusMetricsServer(HttpServer server) {
+    this.server = server;
+  }
+
+  /**
+   * Launch {@link PrometheusMetricsServer}.
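+   *
+   * <p>A minimal, illustrative launch (the port and {@code adapter} are examples):
+   *
+   * <pre>{@code
+   * try (var server = PrometheusMetricsServer.launch(
+   *     new InetSocketAddress(9090), new PrometheusMetricsHandler(adapter))) {
+   *   // Prometheus can now scrape http://host:9090/metrics
+   * }
+   * }</pre>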
+ * + * @param address address + * @param metricsHandler metricsHandler + * @return started {@link PrometheusMetricsServer} instance + */ + public static PrometheusMetricsServer launch( + InetSocketAddress address, HttpHandler metricsHandler) { + HttpServer server = null; + try { + server = HttpServer.create(address, 0); + server.createContext("/metrics", metricsHandler); + server.setExecutor( + Executors.newSingleThreadExecutor( + r -> { + final var thread = new Thread(r); + thread.setDaemon(true); + thread.setName("PrometheusMetricsServer"); + return thread; + })); + server.start(); + LOGGER.info("Started prometheus metrics server on {}", address); + return new PrometheusMetricsServer(server); + } catch (IOException e) { + if (server != null) { + server.stop(0); + } + throw new RuntimeException(e); + } + } + + @Override + public void close() { + server.stop(0); + LOGGER.info("Stopped server"); + } +} diff --git a/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/PrometheusWriter.java b/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/PrometheusWriter.java new file mode 100644 index 0000000..9d2e805 --- /dev/null +++ b/metrics-prometheus/src/main/java/io/scalecube/metrics/prometheus/PrometheusWriter.java @@ -0,0 +1,20 @@ +package io.scalecube.metrics.prometheus; + +import java.io.IOException; +import java.io.OutputStreamWriter; + +/** + * Serializer of the state into the Prometheus text exposition format. Implementations of this + * interface are expected to take their current snapshot of metrics and write them into the provided + * {@link java.io.OutputStreamWriter}, following Prometheus text format conventions. + */ +public interface PrometheusWriter { + + /** + * Writes current snapshot of metrics to the Prometheus stream. 
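+   *
+   * <p>The interface has a single abstract method, so trivial writers can be lambdas. A sketch
+   * (the {@code uptimeSeconds()} helper is hypothetical):
+   *
+   * <pre>{@code
+   * PrometheusWriter uptime =
+   *     writer -> writer.append("uptime_seconds ").append(String.valueOf(uptimeSeconds())).append("\n");
+   * }</pre>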
+   *
+   * @param writer Prometheus output stream
+   * @throws IOException in case of errors
+   */
+  void write(OutputStreamWriter writer) throws IOException;
+}
diff --git a/metrics/pom.xml b/metrics/pom.xml
new file mode 100644
index 0000000..e202a0b
--- /dev/null
+++ b/metrics/pom.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>io.scalecube</groupId>
+    <artifactId>scalecube-metrics-parent</artifactId>
+    <version>0.1.0-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>scalecube-metrics</artifactId>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.agrona</groupId>
+      <artifactId>agrona</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.hdrhistogram</groupId>
+      <artifactId>HdrHistogram</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-api</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-engine</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-params</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-junit-jupiter</artifactId>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>net.bytebuddy</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.6.0</version>
+        <executions>
+          <execution>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>java</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <includeProjectDependencies>false</includeProjectDependencies>
+          <includePluginDependencies>true</includePluginDependencies>
+          <mainClass>uk.co.real_logic.sbe.SbeTool</mainClass>
+          <systemProperties>
+            <systemProperty>
+              <key>sbe.output.dir</key>
+              <value>${project.build.directory}/generated-sources/java</value>
+            </systemProperty>
+            <systemProperty>
+              <key>sbe.xinclude.aware</key>
+              <value>true</value>
+            </systemProperty>
+          </systemProperties>
+          <arguments>
+            <argument>${project.build.resources[0].directory}/metrics-schema.xml</argument>
+          </arguments>
+          <workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
+        </configuration>
+        <dependencies>
+          <dependency>
+            <groupId>uk.co.real-logic</groupId>
+            <artifactId>sbe-tool</artifactId>
+            <version>${sbe.version}</version>
+          </dependency>
+        </dependencies>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>3.0.0</version>
+        <executions>
+          <execution>
+            <id>add-source</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>${project.build.directory}/generated-sources/java/</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
</project>
diff --git a/metrics/src/main/java/io/scalecube/metrics/ConcurrentCounters.java b/metrics/src/main/java/io/scalecube/metrics/ConcurrentCounters.java
new file mode 100644
index 0000000..7744830
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/ConcurrentCounters.java
@@ -0,0 +1,90 @@
+package io.scalecube.metrics;
+
+import static org.agrona.BitUtil.SIZE_OF_INT;
+import static org.agrona.concurrent.status.CountersReader.DEFAULT_TYPE_ID;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Consumer;
+import org.agrona.DirectBuffer;
+import org.agrona.ExpandableArrayBuffer;
+import org.agrona.concurrent.status.AtomicCounter;
+import org.agrona.concurrent.status.CountersManager;
+
+/**
+ * Manages {@link AtomicCounter} instances with unique keys composed of {@code typeId}, {@code
+ * name}, and optional attributes. Can be used safely from multiple threads.
+ */
+public class ConcurrentCounters {
+
+  private final CountersManager countersManager;
+
+  private final ThreadLocal<ExpandableArrayBuffer> bufferHolder =
+      ThreadLocal.withInitial(ExpandableArrayBuffer::new);
+  private final ThreadLocal<ExpandableArrayBuffer> nameBufferHolder =
+      ThreadLocal.withInitial(ExpandableArrayBuffer::new);
+  private final ThreadLocal<KeyFlyweight> keyFlyweightHolder =
+      ThreadLocal.withInitial(KeyFlyweight::new);
+  private final Map<DirectBuffer, AtomicCounter> counters = new ConcurrentHashMap<>();
+
+  public ConcurrentCounters(CountersManager countersManager) {
+    this.countersManager = countersManager;
+  }
+
+  /**
+   * Creates new, or returns existing, {@link AtomicCounter} instance.
+   *
+   * @param name name
+   * @param consumer consumer (optional)
+   * @return {@link AtomicCounter} instance
+   */
+  public AtomicCounter counter(String name, Consumer<KeyFlyweight> consumer) {
+    return counter(DEFAULT_TYPE_ID, name, consumer);
+  }
+
+  /**
+   * Creates new, or returns existing, {@link AtomicCounter} instance.
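+   *
+   * <p>Illustrative usage (the typeId and tag name/value are examples):
+   *
+   * <pre>{@code
+   * AtomicCounter requests = concurrentCounters.counter(
+   *     100, "requests_total", key -> key.tagsCount(1).stringValue("service", "api"));
+   * requests.increment();
+   * }</pre>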
+   *
+   * @param typeId typeId
+   * @param name name
+   * @param consumer consumer (optional)
+   * @return {@link AtomicCounter} instance
+   */
+  public AtomicCounter counter(int typeId, String name, Consumer<KeyFlyweight> consumer) {
+    int offset = 0;
+    final var buffer = bufferHolder.get();
+
+    buffer.putInt(offset, typeId);
+    offset += SIZE_OF_INT;
+    offset += buffer.putStringWithoutLengthAscii(offset, name);
+
+    final var keyFlyweight = keyFlyweightHolder.get();
+    if (consumer != null) {
+      consumer.accept(keyFlyweight.wrap(buffer, offset));
+    }
+
+    var counter = counters.get(buffer);
+    if (counter != null) {
+      return counter;
+    }
+
+    final var nameBuffer = nameBufferHolder.get();
+    final var nameLength = nameBuffer.putStringWithoutLengthAscii(0, name);
+
+    counter =
+        countersManager.newCounter(
+            typeId,
+            keyFlyweight.buffer(),
+            keyFlyweight.offset(),
+            keyFlyweight.length(),
+            nameBuffer,
+            0,
+            nameLength);
+
+    final var keyBuffer = new ExpandableArrayBuffer();
+    keyBuffer.putBytes(0, buffer, 0, offset + keyFlyweight.length());
+    counters.put(keyBuffer, counter);
+
+    return counter;
+  }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/CounterAllocator.java b/metrics/src/main/java/io/scalecube/metrics/CounterAllocator.java
new file mode 100644
index 0000000..9cb8f85
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/CounterAllocator.java
@@ -0,0 +1,62 @@
+package io.scalecube.metrics;
+
+import static org.agrona.concurrent.status.CountersReader.DEFAULT_TYPE_ID;
+
+import java.util.function.Consumer;
+import org.agrona.DirectBuffer;
+import org.agrona.ExpandableArrayBuffer;
+import org.agrona.concurrent.status.AtomicCounter;
+import org.agrona.concurrent.status.CountersManager;
+
+/**
+ * Allocates {@link AtomicCounter} objects with custom metadata and name using {@link
+ * CountersManager}.
+ */
+public class CounterAllocator {
+
+  private final CountersManager countersManager;
+
+  private final ExpandableArrayBuffer nameBuffer = new ExpandableArrayBuffer();
+  private final ExpandableArrayBuffer keyBuffer = new ExpandableArrayBuffer();
+  private final KeyFlyweight keyFlyweight = new KeyFlyweight();
+
+  public CounterAllocator(CountersManager countersManager) {
+    this.countersManager = countersManager;
+  }
+
+  /**
+   * Allocates new {@link AtomicCounter} by calling {@link CountersManager#newCounter(int,
+   * DirectBuffer, int, int, DirectBuffer, int, int)}.
+   *
+   * @param name name
+   * @param consumer consumer for {@link KeyFlyweight} (optional)
+   * @return newly allocated {@link AtomicCounter}
+   */
+  public AtomicCounter newCounter(String name, Consumer<KeyFlyweight> consumer) {
+    return newCounter(DEFAULT_TYPE_ID, name, consumer);
+  }
+
+  /**
+   * Allocates new {@link AtomicCounter} by calling {@link CountersManager#newCounter(int,
+   * DirectBuffer, int, int, DirectBuffer, int, int)}.
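+   *
+   * <p>For example, mirroring how {@code JvmMetricsAgent} allocates its counters:
+   *
+   * <pre>{@code
+   * AtomicCounter memFree = allocator.newCounter(
+   *     JvmMetricsAgent.JVM_COUNTER_TYPE_ID, "jvm_memory_free_bytes", key -> key.tagsCount(0));
+   * }</pre>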
+   *
+   * @param typeId typeId
+   * @param name name
+   * @param consumer consumer for {@link KeyFlyweight} (optional)
+   * @return newly allocated {@link AtomicCounter}
+   */
+  public AtomicCounter newCounter(int typeId, String name, Consumer<KeyFlyweight> consumer) {
+    final var nameLength = nameBuffer.putStringWithoutLengthAscii(0, name);
+    if (consumer != null) {
+      consumer.accept(keyFlyweight.wrap(keyBuffer, 0));
+    }
+    return countersManager.newCounter(
+        typeId,
+        keyFlyweight.buffer(),
+        keyFlyweight.offset(),
+        keyFlyweight.length(),
+        nameBuffer,
+        0,
+        nameLength);
+  }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/CounterDescriptor.java b/metrics/src/main/java/io/scalecube/metrics/CounterDescriptor.java
new file mode 100644
index 0000000..3286561
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/CounterDescriptor.java
@@ -0,0 +1,160 @@
+package io.scalecube.metrics;
+
+import static org.agrona.concurrent.status.CountersReader.KEY_OFFSET;
+import static org.agrona.concurrent.status.CountersReader.MAX_KEY_LENGTH;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import org.agrona.DirectBuffer;
+import org.agrona.concurrent.UnsafeBuffer;
+import org.agrona.concurrent.status.CountersReader;
+
+/**
+ * Descriptor for a counter managed by {@link CountersReader}. This record provides metadata and
+ * runtime information about a counter, including its id, type, current value, associated
+ * attributes (key buffer), and human-readable label.
+ */
+public record CounterDescriptor(
+    int counterId, int typeId, long value, DirectBuffer keyBuffer, String label) {
+
+  /**
+   * Retrieves {@code CounterDescriptor} by counter id.
+   *
+   * @param countersReader countersReader
+   * @param counterId counterId
+   * @return {@code CounterDescriptor} instance
+   */
+  public static CounterDescriptor getCounter(CountersReader countersReader, int counterId) {
+    final var metaDataBuffer = countersReader.metaDataBuffer();
+    final var metaDataOffset = CountersReader.metaDataOffset(counterId);
+
+    final int keyOffset = metaDataOffset + KEY_OFFSET;
+    byte[] keyBytes = new byte[MAX_KEY_LENGTH];
+    metaDataBuffer.getBytes(keyOffset, keyBytes);
+    final var keyBuffer = new UnsafeBuffer(keyBytes);
+
+    final int typeId = countersReader.getCounterTypeId(counterId);
+    final var value = countersReader.getCounterValue(counterId);
+    final var label = countersReader.getCounterLabel(counterId);
+
+    return new CounterDescriptor(counterId, typeId, value, keyBuffer, label);
+  }
+
+  /**
+   * Finds first counter by predicate.
+   *
+   * @param countersReader countersReader
+   * @param predicate predicate
+   * @return {@code CounterDescriptor}, or null
+   */
+  public static CounterDescriptor findFirstCounter(
+      CountersReader countersReader, Predicate<CounterDescriptor> predicate) {
+    final var counters = findAllCounters(countersReader, predicate);
+    return counters.isEmpty() ? null : counters.get(0);
+  }
+
+  /**
+   * Finds last counter by predicate.
+   *
+   * @param countersReader countersReader
+   * @param predicate predicate
+   * @return {@code CounterDescriptor}, or null
+   */
+  public static CounterDescriptor findLastCounter(
+      CountersReader countersReader, Predicate<CounterDescriptor> predicate) {
+    final var counters = findAllCounters(countersReader, predicate);
+    return counters.isEmpty() ? null : counters.get(counters.size() - 1);
+  }
+
+  /**
+   * Finds all counters by predicate.
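+   *
+   * <p>Predicates compose via {@link Predicate#and(Predicate)}, e.g. (tag values are
+   * illustrative):
+   *
+   * <pre>{@code
+   * List<CounterDescriptor> gcCounters =
+   *     findAllCounters(countersReader, byType(1).and(byTag("gcName", "G1_Young_Generation")));
+   * }</pre>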
+   *
+   * @param countersReader countersReader
+   * @param predicate predicate
+   * @return list of {@code CounterDescriptor} objects
+   */
+  public static List<CounterDescriptor> findAllCounters(
+      CountersReader countersReader, Predicate<CounterDescriptor> predicate) {
+    final var list = new ArrayList<CounterDescriptor>();
+    countersReader.forEach(
+        (value, counterId, label) -> {
+          final var descriptor = CounterDescriptor.getCounter(countersReader, counterId);
+          if (predicate.test(descriptor)) {
+            list.add(descriptor);
+          }
+        });
+    return list;
+  }
+
+  public static Predicate<CounterDescriptor> byName(String name) {
+    return descriptor -> name.equals(descriptor.label());
+  }
+
+  public static Predicate<CounterDescriptor> byValue(long value) {
+    return descriptor -> value == descriptor.value();
+  }
+
+  public static Predicate<CounterDescriptor> byType(int typeId) {
+    return descriptor -> typeId == descriptor.typeId();
+  }
+
+  public static Predicate<CounterDescriptor> byTag(String tag, byte value) {
+    return descriptor -> {
+      final var keyCodec = new KeyCodec();
+      final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0);
+      return Objects.equals(value, key.byteValue(tag));
+    };
+  }
+
+  public static Predicate<CounterDescriptor> byTag(String tag, short value) {
+    return descriptor -> {
+      final var keyCodec = new KeyCodec();
+      final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0);
+      return Objects.equals(value, key.shortValue(tag));
+    };
+  }
+
+  public static Predicate<CounterDescriptor> byTag(String tag, int value) {
+    return descriptor -> {
+      final var keyCodec = new KeyCodec();
+      final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0);
+      return Objects.equals(value, key.intValue(tag));
+    };
+  }
+
+  public static Predicate<CounterDescriptor> byTag(String tag, long value) {
+    return descriptor -> {
+      final var keyCodec = new KeyCodec();
+      final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0);
+      return Objects.equals(value, key.longValue(tag));
+    };
+  }
+
+  public static Predicate<CounterDescriptor> byTag(String tag, double value) {
+    return descriptor -> {
+      final var keyCodec = new KeyCodec();
+      final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0);
+      return Objects.equals(value, key.doubleValue(tag));
+    };
+  }
+
+  public static Predicate<CounterDescriptor> byTag(String tag, String value) {
+    return descriptor -> {
+      final var keyCodec = new KeyCodec();
+      final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0);
+      return Objects.equals(value, key.stringValue(tag));
+    };
+  }
+
+  public static <T extends Enum<T>> Predicate<CounterDescriptor> byTag(
+      String tag, T value, Function<String, T> enumFunc) {
+    return descriptor -> {
+      final var keyCodec = new KeyCodec();
+      final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0);
+      return Objects.equals(value, key.enumValue(tag, enumFunc));
+    };
+  }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/CountersHandler.java b/metrics/src/main/java/io/scalecube/metrics/CountersHandler.java
new file mode 100644
index 0000000..cce5a76
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/CountersHandler.java
@@ -0,0 +1,22 @@
+package io.scalecube.metrics;
+
+import java.util.List;
+
+/**
+ * Callback interface for handling counter descriptors. Used as part of counter processing
+ * functionality.
+ *
+ * @see CountersReaderAgent
+ */
+public interface CountersHandler {
+
+  /**
+   * Callback for handling counter descriptors.
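+   *
+   * <p>A minimal implementation sketch (illustrative):
+   *
+   * <pre>{@code
+   * class LoggingHandler implements CountersHandler {
+   *   public void accept(long timestamp, List<CounterDescriptor> counterDescriptors) {
+   *     counterDescriptors.forEach(d -> System.out.println(d.label() + "=" + d.value()));
+   *   }
+   * }
+   * }</pre>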
+   *
+   * @param timestamp timestamp
+   * @param counterDescriptors counterDescriptors
+   */
+  default void accept(long timestamp, List<CounterDescriptor> counterDescriptors) {
+    // no-op
+  }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/CountersReaderAgent.java b/metrics/src/main/java/io/scalecube/metrics/CountersReaderAgent.java
new file mode 100644
index 0000000..8e7b623
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/CountersReaderAgent.java
@@ -0,0 +1,219 @@
+package io.scalecube.metrics;
+
+import static io.scalecube.metrics.CountersRegistry.Context.COUNTERS_FILE;
+import static org.agrona.IoUtil.mapExistingFile;
+
+import io.scalecube.metrics.CountersRegistry.Context;
+import io.scalecube.metrics.CountersRegistry.LayoutDescriptor;
+import java.io.File;
+import java.nio.MappedByteBuffer;
+import java.time.Duration;
+import java.util.ArrayList;
+import org.agrona.BufferUtil;
+import org.agrona.concurrent.Agent;
+import org.agrona.concurrent.AgentTerminationException;
+import org.agrona.concurrent.EpochClock;
+import org.agrona.concurrent.UnsafeBuffer;
+import org.agrona.concurrent.status.CountersReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Agent that periodically reads counters from the mapped counters file {@link
+ * Context#COUNTERS_FILE}, and invokes {@link CountersHandler} with the counter values.
+ */
+public class CountersReaderAgent implements Agent {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(CountersReaderAgent.class);
+
+  public enum State {
+    INIT,
+    RUNNING,
+    CLEANUP,
+    CLOSED
+  }
+
+  private final String roleName;
+  private final File countersDir;
+  private final boolean warnIfCountersNotExists;
+  private final EpochClock epochClock;
+  private final CountersHandler countersHandler;
+
+  private final Delay readInterval;
+  private File countersFile;
+  private MappedByteBuffer countersByteBuffer;
+  private final UnsafeBuffer headerBuffer = new UnsafeBuffer();
+  private long countersStartTimestamp = -1;
+  private long countersPid = -1;
+  private CountersReader countersReader;
+  private State state = State.CLOSED;
+
+  /**
+   * Constructor.
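+   *
+   * <p>Typically run on a dedicated thread via an agrona {@code AgentRunner}; the idle strategy
+   * and error handling below are illustrative:
+   *
+   * <pre>{@code
+   * var agent = new CountersReaderAgent(
+   *     "counters-reader", countersDir, true, epochClock, Duration.ofSeconds(1), handler);
+   * AgentRunner.startOnThread(
+   *     new AgentRunner(new SleepingMillisIdleStrategy(100), Throwable::printStackTrace, null, agent));
+   * }</pre>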
+   *
+   * @param roleName roleName
+   * @param countersDir counters directory with {@link Context#COUNTERS_FILE}
+   * @param warnIfCountersNotExists whether to log a warning if the counters file does not exist
+   * @param epochClock epochClock
+   * @param readInterval interval at which to read counters
+   * @param countersHandler callback handler to process counters
+   */
+  public CountersReaderAgent(
+      String roleName,
+      File countersDir,
+      boolean warnIfCountersNotExists,
+      EpochClock epochClock,
+      Duration readInterval,
+      CountersHandler countersHandler) {
+    this.roleName = roleName;
+    this.countersDir = countersDir;
+    this.warnIfCountersNotExists = warnIfCountersNotExists;
+    this.epochClock = epochClock;
+    this.countersHandler = countersHandler;
+    this.readInterval = new Delay(epochClock, readInterval.toMillis());
+  }
+
+  @Override
+  public String roleName() {
+    return roleName;
+  }
+
+  @Override
+  public void onStart() {
+    if (state != State.CLOSED) {
+      throw new AgentTerminationException("Illegal state: " + state);
+    }
+    state(State.INIT);
+  }
+
+  @Override
+  public int doWork() {
+    try {
+      return switch (state) {
+        case INIT -> init();
+        case RUNNING -> running();
+        case CLEANUP -> cleanup();
+        default -> throw new AgentTerminationException("Unknown state: " + state);
+      };
+    } catch (AgentTerminationException e) {
+      throw e;
+    } catch (Exception e) {
+      state(State.CLEANUP);
+      throw e;
+    }
+  }
+
+  private int init() {
+    if (readInterval.isNotOverdue()) {
+      return 0;
+    }
+
+    countersFile = new File(countersDir, COUNTERS_FILE);
+
+    if (!isActive(countersFile)) {
+      state(State.CLEANUP);
+      return 0;
+    }
+
+    countersByteBuffer = mapExistingFile(countersFile, COUNTERS_FILE);
+    headerBuffer.wrap(countersByteBuffer, 0, LayoutDescriptor.HEADER_LENGTH);
+    countersStartTimestamp = LayoutDescriptor.startTimestamp(headerBuffer);
+    countersPid = LayoutDescriptor.pid(headerBuffer);
+
+    countersReader =
+        new CountersReader(
+            LayoutDescriptor.createCountersMetaDataBuffer(countersByteBuffer, headerBuffer),
+            LayoutDescriptor.createCountersValuesBuffer(countersByteBuffer, headerBuffer));
+
+    state(State.RUNNING);
+    LOGGER.info("[{}] Initialized, now running", roleName());
+    return 1;
+  }
+
+  private int running() {
+    if (readInterval.isNotOverdue()) {
+      return 0;
+    }
+
+    readInterval.delay();
+    if (!isActive(countersFile)) {
+      state(State.CLEANUP);
+      LOGGER.warn("[{}] {} is not active, proceed to cleanup", roleName(), countersFile);
+      return 0;
+    }
+
+    final var timestamp = epochClock.time();
+    final var counterDescriptors = new ArrayList<CounterDescriptor>();
+    countersReader.forEach(
+        (counterId, typeId, keyBuffer, label) -> {
+          final var counterValue = countersReader.getCounterValue(counterId);
+          counterDescriptors.add(
+              new CounterDescriptor(counterId, typeId, counterValue, keyBuffer, label));
+        });
+    countersHandler.accept(timestamp, counterDescriptors);
+
+    return 0;
+  }
+
+  private boolean isActive(File countersFile) {
+    if (!countersFile.exists()) {
+      if (warnIfCountersNotExists) {
+        LOGGER.warn("[{}] {} does not exist", roleName(), countersFile);
+      }
+      return false;
+    }
+
+    final var buffer = mapExistingFile(countersFile, COUNTERS_FILE);
+    try {
+      if (!LayoutDescriptor.isCountersHeaderLengthSufficient(buffer.capacity())) {
+        LOGGER.warn("[{}] {} has insufficient length", roleName(), countersFile);
+        return false;
+      }
+      headerBuffer.wrap(buffer, 0, LayoutDescriptor.HEADER_LENGTH);
+      if (!LayoutDescriptor.isCountersFileLengthSufficient(headerBuffer, buffer.capacity())) {
+        LOGGER.warn("[{}] {} has insufficient length",
roleName(), countersFile); + return false; + } + if (countersStartTimestamp != -1 + && !LayoutDescriptor.isCountersActive( + headerBuffer, countersStartTimestamp, countersPid)) { + LOGGER.warn("[{}] {} is not active", roleName(), countersFile); + return false; + } + } finally { + BufferUtil.free(buffer); + } + + return true; + } + + private int cleanup() { + BufferUtil.free(countersByteBuffer); + countersByteBuffer = null; + countersFile = null; + countersStartTimestamp = -1; + countersPid = -1; + + State previous = state; + if (previous != State.CLOSED) { // when it comes from onClose() + readInterval.delay(); + state(State.INIT); + } + return 1; + } + + @Override + public void onClose() { + state(State.CLOSED); + cleanup(); + } + + private void state(State state) { + LOGGER.debug("[{}][state] {}->{}", roleName(), this.state, state); + this.state = state; + } + + public State state() { + return state; + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/CountersRegistry.java b/metrics/src/main/java/io/scalecube/metrics/CountersRegistry.java new file mode 100644 index 0000000..8455c66 --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/CountersRegistry.java @@ -0,0 +1,338 @@ +package io.scalecube.metrics; + +import static java.util.concurrent.atomic.AtomicIntegerFieldUpdater.newUpdater; +import static org.agrona.BitUtil.CACHE_LINE_LENGTH; +import static org.agrona.IoUtil.delete; +import static org.agrona.IoUtil.ensureDirectoryExists; +import static org.agrona.IoUtil.mapNewFile; +import static org.agrona.concurrent.status.CountersReader.COUNTER_LENGTH; +import static org.agrona.concurrent.status.CountersReader.METADATA_LENGTH; + +import java.io.File; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.util.ConcurrentModificationException; +import java.util.Properties; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import org.agrona.BitUtil; +import org.agrona.BufferUtil; +import org.agrona.CloseHelper; +import org.agrona.DirectBuffer; +import org.agrona.LangUtil; +import org.agrona.SystemUtil; +import org.agrona.concurrent.UnsafeBuffer; +import org.agrona.concurrent.status.ConcurrentCountersManager; +import org.agrona.concurrent.status.CountersManager; + +/** + * Manages {@link CountersManager} with buffers backed by memory-mapped file. Provides centralized + * mechanism to initialize and manage {@link CountersManager}. 
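+ *
+ * <p>Illustrative usage:
+ *
+ * <pre>{@code
+ * try (var registry = CountersRegistry.create()) {
+ *   var allocator = new CounterAllocator(registry.countersManager());
+ *   var counter = allocator.newCounter("my_counter", key -> key.tagsCount(0));
+ *   counter.increment();
+ * }
+ * }</pre>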
+ */
+public class CountersRegistry implements AutoCloseable {
+
+  private final Context context;
+  private final CountersManager countersManager;
+
+  private CountersRegistry(Context context) {
+    this.context = context;
+    try {
+      context.conclude();
+      countersManager =
+          new ConcurrentCountersManager(
+              new UnsafeBuffer(context.countersMetaDataBuffer()),
+              new UnsafeBuffer(context.countersValuesBuffer()));
+    } catch (ConcurrentModificationException ex) {
+      throw ex;
+    } catch (Exception ex) {
+      CloseHelper.quietClose(context::close);
+      throw ex;
+    }
+  }
+
+  public static CountersRegistry create() {
+    return create(new Context());
+  }
+
+  public static CountersRegistry create(Context context) {
+    return new CountersRegistry(context);
+  }
+
+  public Context context() {
+    return context;
+  }
+
+  public CountersManager countersManager() {
+    return countersManager;
+  }
+
+  @Override
+  public void close() {
+    CloseHelper.quietClose(context::close);
+  }
+
+  public static class Context {
+
+    public static final String COUNTERS_VALUES_BUFFER_LENGTH_PROP_NAME =
+        "io.scalecube.metrics.counters.countersValuesBufferLength";
+    public static final String COUNTERS_DIR_NAME_PROP_NAME =
+        "io.scalecube.metrics.counters.countersDirectoryName";
+    public static final String DIR_DELETE_ON_SHUTDOWN_PROP_NAME =
+        "io.scalecube.metrics.counters.dirDeleteOnShutdown";
+
+    public static final String COUNTERS_FILE = "counters.dat";
+    public static final String DEFAULT_COUNTERS_DIR_NAME;
+    public static final int DEFAULT_COUNTERS_VALUES_BUFFER_LENGTH = 2 * 1024 * 1024;
+
+    static {
+      String baseDirName = null;
+
+      if (SystemUtil.isLinux()) {
+        final File devShmDir = new File("/dev/shm");
+        if (devShmDir.exists()) {
+          baseDirName = "/dev/shm/counters";
+        }
+      }
+
+      if (baseDirName == null) {
+        baseDirName = SystemUtil.tmpDirName() + "counters";
+      }
+
+      DEFAULT_COUNTERS_DIR_NAME = baseDirName + '-' + System.getProperty("user.name", "default");
+    }
+
+    private static final AtomicIntegerFieldUpdater<Context> IS_CONCLUDED_UPDATER =
+        newUpdater(Context.class, "isConcluded");
+    private static final AtomicIntegerFieldUpdater<Context> IS_CLOSED_UPDATER =
+        newUpdater(Context.class, "isClosed");
+
+    private volatile int isConcluded;
+    private volatile int isClosed;
+
+    private int countersValuesBufferLength =
+        Integer.getInteger(COUNTERS_VALUES_BUFFER_LENGTH_PROP_NAME, 0);
+    private String countersDirectoryName = System.getProperty(COUNTERS_DIR_NAME_PROP_NAME);
+    private File countersDir;
+    private boolean dirDeleteOnShutdown = Boolean.getBoolean(DIR_DELETE_ON_SHUTDOWN_PROP_NAME);
+    private MappedByteBuffer mappedByteBuffer;
+    private UnsafeBuffer countersMetaDataBuffer;
+    private UnsafeBuffer countersValuesBuffer;
+
+    public Context() {}
+
+    public Context(Properties props) {
+      countersDirectoryName(props.getProperty(COUNTERS_DIR_NAME_PROP_NAME));
+      countersValuesBufferLength(props.getProperty(COUNTERS_VALUES_BUFFER_LENGTH_PROP_NAME));
+      dirDeleteOnShutdown(props.getProperty(DIR_DELETE_ON_SHUTDOWN_PROP_NAME));
+    }
+
+    private void conclude() {
+      if (0 != IS_CONCLUDED_UPDATER.getAndSet(this, 1)) {
+        throw new ConcurrentModificationException();
+      }
+
+      concludeCountersDirectory();
+      concludeCountersBuffers();
+    }
+
+    private void concludeCountersDirectory() {
+      if (countersDirectoryName == null) {
+        countersDirectoryName = DEFAULT_COUNTERS_DIR_NAME;
+      }
+
+      if (countersDir == null) {
+        try {
+          countersDir = new File(countersDirectoryName).getCanonicalFile();
+        } catch (IOException e) {
+          LangUtil.rethrowUnchecked(e);
+        }
+      }
+
+      if
(countersDir.isDirectory()) { + delete(countersDir, false); + } + + ensureDirectoryExists(countersDir, "counters"); + } + + private void concludeCountersBuffers() { + if (countersValuesBufferLength == 0) { + countersValuesBufferLength = DEFAULT_COUNTERS_VALUES_BUFFER_LENGTH; + } + + final var min = DEFAULT_COUNTERS_VALUES_BUFFER_LENGTH; + if (countersValuesBufferLength < min) { + throw new IllegalArgumentException("countersValuesBufferLength must be at least " + min); + } + if (!BitUtil.isPowerOfTwo(countersValuesBufferLength)) { + throw new IllegalArgumentException("countersValuesBufferLength must be power of 2"); + } + + final var countersMetaDataBufferLength = + LayoutDescriptor.countersMetaDataBufferLength(countersValuesBufferLength); + final var countersFileLength = + LayoutDescriptor.HEADER_LENGTH + + countersMetaDataBufferLength + + countersValuesBufferLength; + + mappedByteBuffer = mapNewFile(new File(countersDir, COUNTERS_FILE), countersFileLength); + + final var headerBuffer = LayoutDescriptor.createHeaderBuffer(mappedByteBuffer); + final var startTimestamp = ManagementFactory.getRuntimeMXBean().getStartTime(); + final var pid = ManagementFactory.getRuntimeMXBean().getPid(); + LayoutDescriptor.fillHeaderBuffer( + headerBuffer, startTimestamp, pid, countersValuesBufferLength); + + countersMetaDataBuffer = + LayoutDescriptor.createCountersMetaDataBuffer(mappedByteBuffer, headerBuffer); + countersValuesBuffer = + LayoutDescriptor.createCountersValuesBuffer(mappedByteBuffer, headerBuffer); + } + + public int countersValuesBufferLength() { + return countersValuesBufferLength; + } + + public Context countersValuesBufferLength(int countersValuesBufferLength) { + this.countersValuesBufferLength = countersValuesBufferLength; + return this; + } + + public Context countersValuesBufferLength(String countersValuesBufferLength) { + if (countersValuesBufferLength != null) { + this.countersValuesBufferLength = Integer.parseInt(countersValuesBufferLength); + } + return this; + } + + public String countersDirectoryName() { + return countersDirectoryName; + } + + public Context countersDirectoryName(String countersDirectoryName) { + this.countersDirectoryName = countersDirectoryName; + return this; + } + + public File countersDir() { + return countersDir; + } + + public Context countersDir(File countersDir) { + this.countersDir = countersDir; + return this; + } + + public boolean dirDeleteOnShutdown() { + return dirDeleteOnShutdown; + } + + public Context dirDeleteOnShutdown(boolean dirDeleteOnShutdown) { + this.dirDeleteOnShutdown = dirDeleteOnShutdown; + return this; + } + + public Context dirDeleteOnShutdown(String dirDeleteOnShutdown) { + if (dirDeleteOnShutdown != null) { + this.dirDeleteOnShutdown = Boolean.parseBoolean(dirDeleteOnShutdown); + } + return this; + } + + UnsafeBuffer countersMetaDataBuffer() { + return countersMetaDataBuffer; + } + + UnsafeBuffer countersValuesBuffer() { + return countersValuesBuffer; + } + + public static String generateCountersDirectoryName() { + return DEFAULT_COUNTERS_DIR_NAME + "-" + UUID.randomUUID(); + } + + private void close() { + if (IS_CLOSED_UPDATER.compareAndSet(this, 0, 1)) { + BufferUtil.free(mappedByteBuffer); + mappedByteBuffer = null; + if (dirDeleteOnShutdown && countersDir != null) { + delete(countersDir, false); + } + } + } + } + + public static class LayoutDescriptor { + + public static final int HEADER_LENGTH = CACHE_LINE_LENGTH * 2; + public static final int START_TIMESTAMP_OFFSET = 0; + public static final int PID_OFFSET = 8; + public 
static final int COUNTERS_VALUES_BUFFER_LENGTH_OFFSET = 16;
+
+    private LayoutDescriptor() {}
+
+    public static int countersMetaDataBufferLength(int countersValuesBufferLength) {
+      return countersValuesBufferLength * (METADATA_LENGTH / COUNTER_LENGTH);
+    }
+
+    public static UnsafeBuffer createHeaderBuffer(ByteBuffer buffer) {
+      return new UnsafeBuffer(buffer, 0, HEADER_LENGTH);
+    }
+
+    public static long startTimestamp(DirectBuffer headerBuffer) {
+      return headerBuffer.getLong(START_TIMESTAMP_OFFSET);
+    }
+
+    public static long pid(DirectBuffer headerBuffer) {
+      return headerBuffer.getLong(PID_OFFSET);
+    }
+
+    public static int countersValuesBufferLength(DirectBuffer headerBuffer) {
+      return headerBuffer.getInt(COUNTERS_VALUES_BUFFER_LENGTH_OFFSET);
+    }
+
+    public static UnsafeBuffer createCountersMetaDataBuffer(
+        ByteBuffer buffer, DirectBuffer headerBuffer) {
+      final var offset = HEADER_LENGTH;
+      final var countersValuesBufferLength = countersValuesBufferLength(headerBuffer);
+      final var length = countersMetaDataBufferLength(countersValuesBufferLength);
+      return new UnsafeBuffer(buffer, offset, length);
+    }
+
+    public static UnsafeBuffer createCountersValuesBuffer(
+        ByteBuffer buffer, DirectBuffer headerBuffer) {
+      final var countersValuesBufferLength = countersValuesBufferLength(headerBuffer);
+      final int offset = HEADER_LENGTH + countersMetaDataBufferLength(countersValuesBufferLength);
+      final var length = countersValuesBufferLength;
+      return new UnsafeBuffer(buffer, offset, length);
+    }
+
+    public static boolean isCountersHeaderLengthSufficient(int length) {
+      return length >= HEADER_LENGTH;
+    }
+
+    public static boolean isCountersFileLengthSufficient(
+        DirectBuffer headerBuffer, int fileLength) {
+      final var countersValuesBufferLength = countersValuesBufferLength(headerBuffer);
+      final var countersMetaDataBufferLength =
+          countersMetaDataBufferLength(countersValuesBufferLength);
+      final var totalLength =
+          HEADER_LENGTH + countersMetaDataBufferLength + countersValuesBufferLength;
+      // The mapped file must be large enough to hold header + metadata + values.
+      return fileLength >= totalLength;
+    }
+
+    public static boolean isCountersActive(
+        DirectBuffer headerBuffer, long startTimestamp, long pid) {
+      return startTimestamp(headerBuffer) == startTimestamp && pid(headerBuffer) == pid;
+    }
+
+    public static void fillHeaderBuffer(
+        UnsafeBuffer headerBuffer, long startTimestamp, long pid, int countersValuesBufferLength) {
+      headerBuffer.putLong(START_TIMESTAMP_OFFSET, startTimestamp);
+      headerBuffer.putLong(PID_OFFSET, pid);
+      headerBuffer.putInt(COUNTERS_VALUES_BUFFER_LENGTH_OFFSET, countersValuesBufferLength);
+    }
+  }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/Delay.java b/metrics/src/main/java/io/scalecube/metrics/Delay.java
new file mode 100644
index 0000000..85abee6
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/Delay.java
@@ -0,0 +1,34 @@
+package io.scalecube.metrics;
+
+import org.agrona.concurrent.EpochClock;
+
+public class Delay {
+
+  private final EpochClock epochClock;
+  private final long defaultDelay;
+
+  private long deadline;
+
+  public Delay(EpochClock epochClock, long defaultDelay) {
+    this.epochClock = epochClock;
+    this.defaultDelay = defaultDelay;
+  }
+
+  public void delay() {
+    delay(defaultDelay);
+  }
+
+  public void delay(long delay) {
+    deadline = epochClock.time() + delay;
+  }
+
+  public boolean isOverdue() {
+    // Reaching the deadline counts as overdue; keeps isOverdue/isNotOverdue complementary.
+    return epochClock.time() >= deadline;
+  }
+
+  public boolean isNotOverdue() {
+    return !isOverdue();
+  }
+}
diff --git
a/metrics/src/main/java/io/scalecube/metrics/HistogramAggregate.java b/metrics/src/main/java/io/scalecube/metrics/HistogramAggregate.java new file mode 100644 index 0000000..2190e88 --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/HistogramAggregate.java @@ -0,0 +1,57 @@ +package io.scalecube.metrics; + +import static io.scalecube.metrics.HistogramRecorder.NUMBER_OF_SIGNIFICANT_VALUE_DIGITS; + +import io.scalecube.metrics.MetricsRecorder.MetricsPublication; +import org.HdrHistogram.Histogram; +import org.agrona.DirectBuffer; + +class HistogramAggregate { + + private final DirectBuffer keyBuffer; + private final long highestTrackableValue; + private final double conversionFactor; + private final long resolutionMs; + private final MetricsEncoder encoder; + private final MetricsPublication metricsPublication; + + private final Histogram accumulated; + private final Histogram distinct; + + HistogramAggregate( + DirectBuffer keyBuffer, + long highestTrackableValue, + double conversionFactor, + long resolutionMs, + MetricsEncoder encoder, + MetricsPublication metricsPublication) { + this.keyBuffer = keyBuffer; + this.highestTrackableValue = highestTrackableValue; + this.conversionFactor = conversionFactor; + this.resolutionMs = resolutionMs; + this.encoder = encoder; + this.metricsPublication = metricsPublication; + accumulated = new Histogram(highestTrackableValue, NUMBER_OF_SIGNIFICANT_VALUE_DIGITS); + distinct = new Histogram(highestTrackableValue, NUMBER_OF_SIGNIFICANT_VALUE_DIGITS); + } + + long resolutionMs() { + return resolutionMs; + } + + void update(Histogram value) { + accumulated.add(value); + distinct.add(value); + } + + void publish(long timestamp) { + try { + final var length = + encoder.encodeHistogram( + timestamp, keyBuffer, accumulated, distinct, highestTrackableValue, conversionFactor); + metricsPublication.publish(encoder.buffer(), 0, length); + } finally { + distinct.reset(); + } + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/HistogramMetric.java b/metrics/src/main/java/io/scalecube/metrics/HistogramMetric.java new file mode 100644 index 0000000..1a325a7 --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/HistogramMetric.java @@ -0,0 +1,10 @@ +package io.scalecube.metrics; + +import org.agrona.DirectBuffer; + +public interface HistogramMetric { + + DirectBuffer keyBuffer(); + + void record(long value); +} diff --git a/metrics/src/main/java/io/scalecube/metrics/HistogramRecorder.java b/metrics/src/main/java/io/scalecube/metrics/HistogramRecorder.java new file mode 100644 index 0000000..d0522f0 --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/HistogramRecorder.java @@ -0,0 +1,61 @@ +package io.scalecube.metrics; + +import org.HdrHistogram.Histogram; +import org.agrona.DirectBuffer; + +class HistogramRecorder implements HistogramMetric { + + static final int NUMBER_OF_SIGNIFICANT_VALUE_DIGITS = 3; + + private final DirectBuffer keyBuffer; + private final long highestTrackableValue; + private final double conversionFactor; + private final long resolutionMs; + + private volatile Histogram current; + private volatile Histogram swap; + + HistogramRecorder( + DirectBuffer keyBuffer, + long highestTrackableValue, + double conversionFactor, + long resolutionMs) { + this.keyBuffer = keyBuffer; + this.highestTrackableValue = highestTrackableValue; + this.conversionFactor = Math.round(conversionFactor * 1000.0) / 1000.0; + this.resolutionMs = resolutionMs; + current = new Histogram(highestTrackableValue, 
NUMBER_OF_SIGNIFICANT_VALUE_DIGITS);
+    swap = new Histogram(highestTrackableValue, NUMBER_OF_SIGNIFICANT_VALUE_DIGITS);
+  }
+
+  @Override
+  public DirectBuffer keyBuffer() {
+    return keyBuffer;
+  }
+
+  @Override
+  public void record(long value) {
+    final var histogram = current;
+    histogram.recordValue(Math.min(value, highestTrackableValue));
+  }
+
+  long highestTrackableValue() {
+    return highestTrackableValue;
+  }
+
+  double conversionFactor() {
+    return conversionFactor;
+  }
+
+  long resolutionMs() {
+    return resolutionMs;
+  }
+
+  void swapAndUpdate(HistogramAggregate aggregate) {
+    final var value = current;
+    current = swap;
+    swap = value;
+    aggregate.update(value);
+    value.reset();
+  }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/JvmMetricsAgent.java b/metrics/src/main/java/io/scalecube/metrics/JvmMetricsAgent.java
new file mode 100644
index 0000000..6286b9b
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/JvmMetricsAgent.java
@@ -0,0 +1,198 @@
+package io.scalecube.metrics;
+
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadMXBean;
+import org.agrona.CloseHelper;
+import org.agrona.collections.Object2ObjectHashMap;
+import org.agrona.concurrent.Agent;
+import org.agrona.concurrent.AgentTerminationException;
+import org.agrona.concurrent.EpochClock;
+import org.agrona.concurrent.status.AtomicCounter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class JvmMetricsAgent implements Agent {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(JvmMetricsAgent.class);
+
+  public static final int JVM_COUNTER_TYPE_ID = 1;
+
+  public enum State {
+    INIT,
+    RUNNING,
+    CLEANUP,
+    CLOSED
+  }
+
+  private final Object2ObjectHashMap<String, AtomicCounter> gcCounters =
+      new Object2ObjectHashMap<>();
+  private final Delay readInterval;
+  private final ThreadMXBean threadMxBean;
+  private AtomicCounter memFree;
+  private AtomicCounter memAllocated;
+  private AtomicCounter memMax;
+  private AtomicCounter threadCount;
+  private AtomicCounter peakThreadCount;
+  private AtomicCounter daemonThreadCount;
+  private final CounterAllocator allocator;
+  private State state = State.CLOSED;
+  private final boolean gcTelemetryEnabled;
+
+  /**
+   * Publishes memory, thread and gc-related basic telemetry as counters. Counters have a source
+   * tag of "JVM" and type of "memory", "thread", or "gc", respectively.
+   *
+   * @param allocator the allocator
+   * @param clock the clock, ideally a cached epoch clock
+   * @param updatePeriodMs how frequently should telemetry data be updated in the counter files,
+   *     given in milliseconds
+   * @param enableGcTelemetry whether GC telemetry is enabled or not. NOTE that collecting GC
+   *     telemetry data will likely produce garbage.
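+   *
+   * <p>Illustrative wiring (the registry is an assumption of this example):
+   *
+   * <pre>{@code
+   * var agent = new JvmMetricsAgent(
+   *     new CounterAllocator(registry.countersManager()), new SystemEpochClock(), 1000L, false);
+   * }</pre>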
+   */
+  public JvmMetricsAgent(
+      CounterAllocator allocator,
+      EpochClock clock,
+      Long updatePeriodMs,
+      boolean enableGcTelemetry) {
+    this.allocator = allocator;
+    this.readInterval = new Delay(clock, updatePeriodMs);
+    this.threadMxBean = ManagementFactory.getThreadMXBean();
+    this.gcTelemetryEnabled = enableGcTelemetry;
+  }
+
+  @Override
+  public String roleName() {
+    return "JvmMetricsAgent";
+  }
+
+  @Override
+  public void onStart() {
+    if (state != State.CLOSED) {
+      throw new AgentTerminationException("Illegal state: " + state);
+    }
+
+    memFree =
+        allocator.newCounter(
+            JVM_COUNTER_TYPE_ID,
+            "jvm_memory_free_bytes",
+            keyFlyweight -> keyFlyweight.tagsCount(0));
+
+    memAllocated =
+        allocator.newCounter(
+            JVM_COUNTER_TYPE_ID,
+            "jvm_memory_total_bytes",
+            keyFlyweight -> keyFlyweight.tagsCount(0));
+
+    memMax =
+        allocator.newCounter(
+            JVM_COUNTER_TYPE_ID, "jvm_memory_max_bytes", keyFlyweight -> keyFlyweight.tagsCount(0));
+
+    threadCount =
+        allocator.newCounter(
+            JVM_COUNTER_TYPE_ID, "jvm_threads_current", keyFlyweight -> keyFlyweight.tagsCount(0));
+
+    peakThreadCount =
+        allocator.newCounter(
+            JVM_COUNTER_TYPE_ID, "jvm_threads_peak", keyFlyweight -> keyFlyweight.tagsCount(0));
+
+    daemonThreadCount =
+        allocator.newCounter(
+            JVM_COUNTER_TYPE_ID, "jvm_threads_daemon", keyFlyweight -> keyFlyweight.tagsCount(0));
+
+    state(State.INIT);
+  }
+
+  @Override
+  public int doWork() {
+    try {
+      return switch (state) {
+        case INIT -> init();
+        case RUNNING -> running();
+        case CLEANUP -> cleanup();
+        default -> throw new AgentTerminationException("Unknown state: " + state);
+      };
+    } catch (AgentTerminationException e) {
+      throw e;
+    } catch (Exception e) {
+      state(State.CLEANUP);
+      throw e;
+    }
+  }
+
+  private int init() {
+    readInterval.delay();
+    state(State.RUNNING);
+    return 1;
+  }
+
+  private int running() {
+    if (readInterval.isNotOverdue()) {
+      return 0;
+    }
+
+    readInterval.delay();
+
+    final var runtime = Runtime.getRuntime();
+    memFree.set(runtime.freeMemory());
+    memAllocated.set(runtime.totalMemory());
+    memMax.set(runtime.maxMemory());
+    threadCount.set(threadMxBean.getThreadCount());
+    peakThreadCount.set(threadMxBean.getPeakThreadCount());
+    daemonThreadCount.set(threadMxBean.getDaemonThreadCount());
+
+    if (gcTelemetryEnabled) {
+      final var beans = ManagementFactory.getGarbageCollectorMXBeans();
+      for (GarbageCollectorMXBean bean : beans) {
+        final var gcName = MetricNames.sanitizeName(bean.getName());
+        allocateGCCounter("jvm_gc_collections_total", gcName).set(bean.getCollectionCount());
+        allocateGCCounter("jvm_gc_collection_time_ms", gcName).set(bean.getCollectionTime());
+      }
+    }
+
+    return 1;
+  }
+
+  private AtomicCounter allocateGCCounter(String name, String gcName) {
+    // Key by metric name and GC name, otherwise counters for different collectors collide
+    final var key = name + ":" + gcName;
+    final var existing = gcCounters.get(key);
+    if (existing != null) {
+      return existing;
+    }
+
+    final var counter =
+        allocator.newCounter(
+            JVM_COUNTER_TYPE_ID,
+            name,
+            keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("gcName", gcName));
+
+    gcCounters.put(key, counter);
+    return counter;
+  }
+
+  private int cleanup() {
+    State previous = state;
+    if (previous != State.CLOSED) { // when it comes from onClose()
+      readInterval.delay();
+      state(State.INIT);
+    }
+    return 1;
+  }
+
+  @Override
+  public void onClose() {
+    state(State.CLOSED);
+    cleanup();
+    CloseHelper.closeAll(
+        memFree, memAllocated, memMax, threadCount, peakThreadCount, daemonThreadCount);
+    CloseHelper.closeAll(gcCounters.values());
+  }
+
+  private void state(State state) {
+    LOGGER.debug("[{}][state] {}->{}", roleName(), this.state, state);
+    this.state = state;
+  }
+
+  public State state() {
+    return state;
+  }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/Key.java b/metrics/src/main/java/io/scalecube/metrics/Key.java
new file mode 100644
index 0000000..a34c49b
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/Key.java
@@ -0,0 +1,84 @@
+package io.scalecube.metrics;
+
+import java.util.Map;
+import java.util.function.Function;
+
+/**
+ * Represents a key used in metrics/counters, composed of a map of tag-value pairs. Each tag maps
+ * to a typed value which can be accessed via helper methods. Intended for the metrics/counters
+ * consumption side.
+ */
+public record Key(Map<String, Object> tags) {
+
+  /**
+   * Gets {@code Byte} value for the specified tag.
+   *
+   * @param tag tag name
+   * @return {@code Byte} value, or null
+   */
+  public Byte byteValue(String tag) {
+    return (Byte) tags.get(tag);
+  }
+
+  /**
+   * Gets {@code Short} value for the specified tag.
+   *
+   * @param tag tag name
+   * @return {@code Short} value, or null
+   */
+  public Short shortValue(String tag) {
+    return (Short) tags.get(tag);
+  }
+
+  /**
+   * Gets {@code Integer} value for the specified tag.
+   *
+   * @param tag tag name
+   * @return {@code Integer} value, or null
+   */
+  public Integer intValue(String tag) {
+    return (Integer) tags.get(tag);
+  }
+
+  /**
+   * Gets {@code Long} value for the specified tag.
+   *
+   * @param tag tag name
+   * @return {@code Long} value, or null
+   */
+  public Long longValue(String tag) {
+    return (Long) tags.get(tag);
+  }
+
+  /**
+   * Gets {@code Double} value for the specified tag.
+   *
+   * @param tag tag name
+   * @return {@code Double} value, or null
+   */
+  public Double doubleValue(String tag) {
+    return (Double) tags.get(tag);
+  }
+
+  /**
+   * Gets {@code String} value for the specified tag.
+   *
+   * @param tag tag name
+   * @return {@code String} value, or null
+   */
+  public String stringValue(String tag) {
+    return (String) tags.get(tag);
+  }
+
+  /**
+   * Gets an {@code Enum} value from byte representation for the specified tag.
+   *
+   * @param tag tag name
+   * @param enumFunc function to convert the byte value to enum
+   * @param <T> enum type
+   * @return enum value, or null
+   */
+  public <T extends Enum<T>> T enumValue(String tag, Function<Byte, T> enumFunc) {
+    final var byteValue = byteValue(tag);
+    return byteValue != null ? enumFunc.apply(byteValue) : null;
+  }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/KeyCodec.java b/metrics/src/main/java/io/scalecube/metrics/KeyCodec.java
new file mode 100644
index 0000000..d4f6c49
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/KeyCodec.java
@@ -0,0 +1,67 @@
+package io.scalecube.metrics;
+
+import io.scalecube.metrics.sbe.KeyDecoder;
+import java.util.HashMap;
+import org.agrona.DirectBuffer;
+import org.agrona.ExpandableArrayBuffer;
+
+/**
+ * Decodes buffer containing serialized {@link Key} structure. Uses SBE (Simple Binary Encoding) to
+ * deserialize tag-value pairs. Intended for the metrics/counters consumption side.
+ */
+public class KeyCodec {
+
+  private final KeyDecoder keyDecoder = new KeyDecoder();
+  private final ExpandableArrayBuffer valueBuffer = new ExpandableArrayBuffer();
+
+  public KeyCodec() {}
+
+  /**
+   * Decodes key structure from the given buffer and offset.
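+   *
+   * <p>Typical consumption-side usage (a sketch; tag names are illustrative):
+   *
+   * <pre>{@code
+   * KeyCodec codec = new KeyCodec();
+   * Key key = codec.decodeKey(keyBuffer, keyOffset);
+   * String name = key.stringValue("name"); // typed accessors return null if tag is absent
+   * Long nodeId = key.longValue("nodeId");
+   * }</pre>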
+ * + * @param keyBuffer buffer containing the encoded key + * @param keyOffset offset within the buffer + * @return decoded {@link Key} object + */ + public Key decodeKey(DirectBuffer keyBuffer, int keyOffset) { + keyDecoder.wrap(keyBuffer, keyOffset, KeyDecoder.BLOCK_LENGTH, KeyDecoder.SCHEMA_VERSION); + final var tags = new HashMap(); + + keyDecoder + .tags() + .forEach( + decoder -> { + final var tag = decoder.tag(); + final var valueLength = decoder.valueLength(); + decoder.getValue(valueBuffer, 0, valueLength); + + Object value = null; + switch (decoder.valueType()) { + case BYTE: + value = valueBuffer.getByte(0); + break; + case SHORT: + value = valueBuffer.getShort(0); + break; + case INT: + value = valueBuffer.getInt(0); + break; + case LONG: + value = valueBuffer.getLong(0); + break; + case DOUBLE: + value = valueBuffer.getDouble(0); + break; + case STRING: + value = valueBuffer.getStringWithoutLengthAscii(0, valueLength); + break; + default: + break; + } + + tags.put(tag, value); + }); + + return new Key(tags); + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/KeyFlyweight.java b/metrics/src/main/java/io/scalecube/metrics/KeyFlyweight.java new file mode 100644 index 0000000..50722f3 --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/KeyFlyweight.java @@ -0,0 +1,230 @@ +package io.scalecube.metrics; + +import static io.scalecube.metrics.sbe.ValueType.BYTE; +import static io.scalecube.metrics.sbe.ValueType.DOUBLE; +import static io.scalecube.metrics.sbe.ValueType.INT; +import static io.scalecube.metrics.sbe.ValueType.LONG; +import static io.scalecube.metrics.sbe.ValueType.SHORT; +import static io.scalecube.metrics.sbe.ValueType.STRING; +import static org.agrona.BitUtil.SIZE_OF_BYTE; +import static org.agrona.BitUtil.SIZE_OF_DOUBLE; +import static org.agrona.BitUtil.SIZE_OF_INT; +import static org.agrona.BitUtil.SIZE_OF_LONG; +import static org.agrona.BitUtil.SIZE_OF_SHORT; + +import io.scalecube.metrics.sbe.KeyEncoder; +import io.scalecube.metrics.sbe.KeyEncoder.TagsEncoder; +import java.util.function.ToIntFunction; +import org.agrona.ExpandableArrayBuffer; +import org.agrona.MutableDirectBuffer; + +/** + * Flyweight-style encoder for serializing tag-value pairs into a buffer using SBE (Simple Binary + * Encoding). + */ +public class KeyFlyweight { + + private final KeyEncoder keyEncoder = new KeyEncoder(); + private TagsEncoder tagsEncoder; + private final ExpandableArrayBuffer valueBuffer = new ExpandableArrayBuffer(); + private MutableDirectBuffer buffer; + private int offset; + private int length; + + public KeyFlyweight() {} + + /** + * Wraps this flyweight around buffer at given offset. + * + * @param buffer buffer to write into + * @param offset starting offset + * @return this {@link KeyFlyweight} instance + */ + public KeyFlyweight wrap(MutableDirectBuffer buffer, int offset) { + this.buffer = buffer; + this.offset = offset; + this.tagsEncoder = null; + keyEncoder.wrap(buffer, offset); + return self(); + } + + /** + * Specifies the number of tag entries to encode. + * + * @param count number of tags + * @return this {@link KeyFlyweight} instance + */ + public KeyFlyweight tagsCount(int count) { + tagsEncoder = keyEncoder.tagsCount(count); + return self(); + } + + /** + * Sets {@code byte} value for the specified tag. 
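+   *
+   * <p>Example of building a key fluently (a sketch; note {@code tagsCount()} must be called
+   * before setting values):
+   *
+   * <pre>{@code
+   * new KeyFlyweight()
+   *     .wrap(new ExpandableArrayBuffer(), 0)
+   *     .tagsCount(2)
+   *     .byteValue("flags", (byte) 1)
+   *     .stringValue("name", "request_latency");
+   * }</pre>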
+ * + * @param tag tag name + * @param value {@code byte} value + * @return this {@link KeyFlyweight} instance + */ + public KeyFlyweight byteValue(String tag, byte value) { + ensureTagsEncoder(); + tagsEncoder.next(); + tagsEncoder.tag(tag).valueType(BYTE); + valueBuffer.putByte(0, value); + tagsEncoder.putValue(valueBuffer, 0, SIZE_OF_BYTE); + return self(); + } + + /** + * Sets {@code short} value for the specified tag. + * + * @param tag tag name + * @param value {@code short} value + * @return this {@link KeyFlyweight} instance + */ + public KeyFlyweight shortValue(String tag, short value) { + ensureTagsEncoder(); + tagsEncoder.next(); + tagsEncoder.tag(tag).valueType(SHORT); + valueBuffer.putShort(0, value); + tagsEncoder.putValue(valueBuffer, 0, SIZE_OF_SHORT); + return self(); + } + + /** + * Sets {@code int} value for the specified tag. + * + * @param tag tag name + * @param value {@code int} value + * @return this {@link KeyFlyweight} instance + */ + public KeyFlyweight intValue(String tag, int value) { + ensureTagsEncoder(); + tagsEncoder.next(); + tagsEncoder.tag(tag).valueType(INT); + valueBuffer.putInt(0, value); + tagsEncoder.putValue(valueBuffer, 0, SIZE_OF_INT); + return self(); + } + + /** + * Sets {@code long} value for the specified tag. + * + * @param tag tag name + * @param value {@code long} value + * @return this {@link KeyFlyweight} instance + */ + public KeyFlyweight longValue(String tag, long value) { + ensureTagsEncoder(); + tagsEncoder.next(); + tagsEncoder.tag(tag).valueType(LONG); + valueBuffer.putLong(0, value); + tagsEncoder.putValue(valueBuffer, 0, SIZE_OF_LONG); + return self(); + } + + /** + * Sets {@code double} value for the specified tag. + * + * @param tag tag name + * @param value {@code double} value + * @return this {@link KeyFlyweight} instance + */ + public KeyFlyweight doubleValue(String tag, double value) { + ensureTagsEncoder(); + tagsEncoder.next(); + tagsEncoder.tag(tag).valueType(DOUBLE); + valueBuffer.putDouble(0, Math.round(value * 1000.0) / 1000.0); + tagsEncoder.putValue(valueBuffer, 0, SIZE_OF_DOUBLE); + return self(); + } + + /** + * Sets {@code String} value for the specified tag. + * + * @param tag tag name + * @param value {@code String} value + * @return this {@link KeyFlyweight} instance + */ + public KeyFlyweight stringValue(String tag, String value) { + ensureTagsEncoder(); + tagsEncoder.next(); + tagsEncoder.tag(tag).valueType(STRING); + final var length = valueBuffer.putStringWithoutLengthAscii(0, value); + tagsEncoder.putValue(valueBuffer, 0, length); + return self(); + } + + /** + * Sets {@code CharSequence} value for the specified tag. + * + * @param tag tag name + * @param value {@code CharSequence} value + * @return this {@link KeyFlyweight} instance + */ + public KeyFlyweight stringValue(String tag, CharSequence value) { + ensureTagsEncoder(); + tagsEncoder.next(); + tagsEncoder.tag(tag).valueType(STRING); + final var length = valueBuffer.putStringWithoutLengthAscii(0, value); + tagsEncoder.putValue(valueBuffer, 0, length); + return self(); + } + + /** + * Sets an enum tag value encoded as byte. 
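+   *
+   * <p>Encode/decode sketch (assumes a hypothetical {@code Side} enum):
+   *
+   * <pre>{@code
+   * keyFlyweight.tagsCount(1).enumValue("side", Side.BUY, Enum::ordinal);
+   * // consumption side: key.enumValue("side", b -> Side.values()[b])
+   * }</pre>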
+   *
+   * @param tag tag name
+   * @param enumValue enum instance
+   * @param encoder function converting enum to byte
+   * @param <T> enum type
+   * @return this {@link KeyFlyweight} instance
+   */
+  public <T extends Enum<T>> KeyFlyweight enumValue(
+      String tag, T enumValue, ToIntFunction<T> encoder) {
+    ensureTagsEncoder();
+    tagsEncoder.next();
+    tagsEncoder.tag(tag).valueType(BYTE);
+    valueBuffer.putByte(0, (byte) encoder.applyAsInt(enumValue));
+    tagsEncoder.putValue(valueBuffer, 0, SIZE_OF_BYTE);
+    return self();
+  }
+
+  /**
+   * Returns wrapped buffer.
+   *
+   * @return wrapped buffer
+   */
+  public MutableDirectBuffer buffer() {
+    return buffer;
+  }
+
+  /**
+   * Returns wrapped buffer starting offset.
+   *
+   * @return starting offset
+   */
+  public int offset() {
+    return offset;
+  }
+
+  /**
+   * Returns encoded length.
+   *
+   * @return encoded length
+   */
+  public int length() {
+    return length;
+  }
+
+  private KeyFlyweight self() {
+    length = keyEncoder.encodedLength();
+    return this;
+  }
+
+  private void ensureTagsEncoder() {
+    if (tagsEncoder == null) {
+      throw new IllegalStateException("tagsCount() must be called before setting values");
+    }
+  }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/MetricNames.java b/metrics/src/main/java/io/scalecube/metrics/MetricNames.java
new file mode 100644
index 0000000..00d6d8f
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/MetricNames.java
@@ -0,0 +1,49 @@
+package io.scalecube.metrics;
+
+/**
+ * Utility class for sanitizing metric names. Converts arbitrary strings (including CamelCase
+ * identifiers) into snake_case-compliant names suitable for use in systems like Prometheus.
+ */
+public class MetricNames {
+
+  private MetricNames() {
+    // Do not instantiate
+  }
+
+  /**
+   * Sanitizes a string into a valid metric name.
+   *
    + *
+   * <ul>
+   *   <li>Converts CamelCase to snake_case (preserving acronyms)
+   *   <li>Replaces invalid characters with underscores
+   *   <li>Converts the entire name to lowercase
+   *   <li>Collapses multiple consecutive underscores
+   *   <li>Ensures the name starts with a letter or underscore
+   * </ul>
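+   *
+   * <p>For example (expected results, per the rules above):
+   *
+   * <pre>{@code
+   * MetricNames.sanitizeName("G1 Young Generation"); // "g1_young_generation"
+   * MetricNames.sanitizeName("HTTPRequestCount");    // "http_request_count"
+   * MetricNames.sanitizeName("9lives");              // "_9lives"
+   * }</pre>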
+ * + * @param value input string to sanitize + * @return sanitized metric name, defaults to "_" if input is null or empty + */ + public static String sanitizeName(String value) { + if (value == null || value.isEmpty()) { + return "_"; + } + + // Convert CamelCase to snake_case with acronym preservation + String snake = + value.replaceAll("([a-z0-9])([A-Z])", "$1_$2").replaceAll("([A-Z]+)([A-Z][a-z])", "$1_$2"); + + // Replace disallowed characters and lowercase + var sanitizedValue = snake.replaceAll("[^a-zA-Z0-9_]", "_").toLowerCase(); + + // Collapse consecutive underscores + sanitizedValue = sanitizedValue.replaceAll("_+", "_"); + + // Ensure first character is valid + if (!Character.isLetter(sanitizedValue.charAt(0)) && sanitizedValue.charAt(0) != '_') { + sanitizedValue = "_" + sanitizedValue; + } + + return sanitizedValue; + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/MetricsEncoder.java b/metrics/src/main/java/io/scalecube/metrics/MetricsEncoder.java new file mode 100644 index 0000000..48fb94b --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/MetricsEncoder.java @@ -0,0 +1,59 @@ +package io.scalecube.metrics; + +import io.scalecube.metrics.sbe.HistogramEncoder; +import io.scalecube.metrics.sbe.MessageHeaderEncoder; +import io.scalecube.metrics.sbe.TpsEncoder; +import java.nio.ByteBuffer; +import org.HdrHistogram.Histogram; +import org.agrona.DirectBuffer; +import org.agrona.ExpandableArrayBuffer; +import org.agrona.MutableDirectBuffer; +import org.agrona.concurrent.UnsafeBuffer; + +class MetricsEncoder { + + private final MessageHeaderEncoder headerEncoder = new MessageHeaderEncoder(); + private final HistogramEncoder histogramEncoder = new HistogramEncoder(); + private final TpsEncoder tpsEncoder = new TpsEncoder(); + + private final MutableDirectBuffer buffer = new ExpandableArrayBuffer(1024 * 1024); + private final ByteBuffer dataBuffer = ByteBuffer.allocate(1024 * 1024); + private final UnsafeBuffer unsafeBuffer = new UnsafeBuffer(); + + MutableDirectBuffer buffer() { + return buffer; + } + + int encodeHistogram( + long timestamp, + DirectBuffer keyBuffer, + Histogram accumulated, + Histogram distinct, + long highestTrackableValue, + double conversionFactor) { + histogramEncoder.wrapAndApplyHeader(buffer, 0, headerEncoder); + histogramEncoder.timestamp(timestamp); + histogramEncoder.highestTrackableValue(highestTrackableValue); + histogramEncoder.conversionFactor(conversionFactor); + histogramEncoder.putKey(keyBuffer, 0, keyBuffer.capacity()); + + accumulated.encodeIntoByteBuffer(dataBuffer.clear()); + unsafeBuffer.wrap(dataBuffer.flip(), 0, dataBuffer.limit()); + histogramEncoder.putAccumulated(unsafeBuffer, 0, unsafeBuffer.capacity()); + + distinct.encodeIntoByteBuffer(dataBuffer.clear()); + unsafeBuffer.wrap(dataBuffer.flip(), 0, dataBuffer.limit()); + histogramEncoder.putDistinct(unsafeBuffer, 0, unsafeBuffer.capacity()); + + return headerEncoder.encodedLength() + histogramEncoder.encodedLength(); + } + + int encodeTps(long timestamp, DirectBuffer keyBuffer, long value) { + tpsEncoder.wrapAndApplyHeader(buffer, 0, headerEncoder); + tpsEncoder.timestamp(timestamp); + tpsEncoder.value(value); + tpsEncoder.putKey(keyBuffer, 0, keyBuffer.capacity()); + + return headerEncoder.encodedLength() + tpsEncoder.encodedLength(); + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/MetricsHandler.java b/metrics/src/main/java/io/scalecube/metrics/MetricsHandler.java new file mode 100644 index 0000000..76c43ed --- /dev/null +++ 
b/metrics/src/main/java/io/scalecube/metrics/MetricsHandler.java
@@ -0,0 +1,51 @@
+package io.scalecube.metrics;
+
+import org.HdrHistogram.Histogram;
+import org.agrona.DirectBuffer;
+
+/**
+ * Callback interface for handling metrics (histograms, tps values). Used as part of the metrics
+ * processing pipeline.
+ *
+ * @see MetricsReader
+ */
+public interface MetricsHandler {
+
+  /**
+   * Callback for handling a histogram metric.
+   *
+   * @param timestamp timestamp
+   * @param keyBuffer keyBuffer
+   * @param keyOffset keyOffset
+   * @param keyLength keyLength
+   * @param accumulated accumulated histogram
+   * @param distinct distinct histogram
+   * @param highestTrackableValue highestTrackableValue
+   * @param conversionFactor conversionFactor
+   */
+  default void onHistogram(
+      long timestamp,
+      DirectBuffer keyBuffer,
+      int keyOffset,
+      int keyLength,
+      Histogram accumulated,
+      Histogram distinct,
+      long highestTrackableValue,
+      double conversionFactor) {
+    // no-op
+  }
+
+  /**
+   * Callback for handling a tps metric.
+   *
+   * @param timestamp timestamp
+   * @param keyBuffer keyBuffer
+   * @param keyOffset keyOffset
+   * @param keyLength keyLength
+   * @param value value
+   */
+  default void onTps(
+      long timestamp, DirectBuffer keyBuffer, int keyOffset, int keyLength, long value) {
+    // no-op
+  }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/MetricsReader.java b/metrics/src/main/java/io/scalecube/metrics/MetricsReader.java
new file mode 100644
index 0000000..b54b0c4
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/MetricsReader.java
@@ -0,0 +1,119 @@
+package io.scalecube.metrics;
+
+import static org.HdrHistogram.Histogram.decodeFromByteBuffer;
+
+import io.scalecube.metrics.sbe.HistogramDecoder;
+import io.scalecube.metrics.sbe.MessageHeaderDecoder;
+import io.scalecube.metrics.sbe.TpsDecoder;
+import java.nio.ByteBuffer;
+import org.agrona.BufferUtil;
+import org.agrona.MutableDirectBuffer;
+import org.agrona.concurrent.AtomicBuffer;
+import org.agrona.concurrent.MessageHandler;
+import org.agrona.concurrent.UnsafeBuffer;
+import org.agrona.concurrent.broadcast.BroadcastReceiver;
+import org.agrona.concurrent.broadcast.CopyBroadcastReceiver;
+
+/**
+ * {@link MessageHandler} implementation that consumes SBE metrics messages (histograms, tps values)
+ * from the {@link BroadcastReceiver}, and dispatches them to the supplied {@link MetricsHandler}.
+ */
+public class MetricsReader implements MessageHandler, AutoCloseable {
+
+  private final MessageHeaderDecoder headerDecoder = new MessageHeaderDecoder();
+  private final HistogramDecoder histogramDecoder = new HistogramDecoder();
+  private final TpsDecoder tpsDecoder = new TpsDecoder();
+
+  private ByteBuffer scratchBuffer;
+  private final CopyBroadcastReceiver broadcastReceiver;
+  private MetricsHandler metricsHandler;
+  private boolean isClosed = false;
+
+  /**
+   * Constructor.
+   *
+   * @param broadcastBuffer buffer from which to consume SBE metrics messages
+   */
+  public MetricsReader(AtomicBuffer broadcastBuffer) {
+    scratchBuffer = ByteBuffer.allocateDirect(1024 * 1024);
+    broadcastReceiver =
+        new CopyBroadcastReceiver(
+            new BroadcastReceiver(broadcastBuffer), new UnsafeBuffer(scratchBuffer));
+  }
+
+  /**
+   * Receives one SBE metrics message from the broadcast buffer.
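+   *
+   * <p>Polling sketch (handler shown inline; a real handler would usually decode the key too):
+   *
+   * <pre>{@code
+   * MetricsReader reader = new MetricsReader(broadcastBuffer);
+   * reader.read(new MetricsHandler() {
+   *   public void onTps(long ts, DirectBuffer key, int offset, int length, long value) {
+   *     System.out.println(ts + " tps=" + value);
+   *   }
+   * });
+   * }</pre>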
+ * + * @param handler callback handler to be called for each message received + * @return number of messages that have been received + */ + public int read(MetricsHandler handler) { + metricsHandler = handler; + return broadcastReceiver.receive(this); + } + + @Override + public void onMessage(int msgTypeId, MutableDirectBuffer buffer, int index, int length) { + headerDecoder.wrap(buffer, index); + switch (headerDecoder.templateId()) { + case HistogramDecoder.TEMPLATE_ID: + onHistogram(histogramDecoder.wrapAndApplyHeader(buffer, index, headerDecoder)); + break; + case TpsDecoder.TEMPLATE_ID: + onTps(tpsDecoder.wrapAndApplyHeader(buffer, index, headerDecoder)); + break; + default: + break; + } + } + + private void onHistogram(HistogramDecoder decoder) { + final var timestamp = decoder.timestamp(); + final var highestTrackableValue = decoder.highestTrackableValue(); + final var conversionFactor = decoder.conversionFactor(); + + final var keyBuffer = decoder.buffer(); + final var keyOffset = decoder.limit() + HistogramDecoder.keyHeaderLength(); + final var keyLength = decoder.keyLength(); + decoder.skipKey(); + + final var accumulatedBytes = new byte[decoder.accumulatedLength()]; + decoder.getAccumulated(accumulatedBytes, 0, accumulatedBytes.length); + final var accumulated = decodeFromByteBuffer(ByteBuffer.wrap(accumulatedBytes), 1); + + final var distinctBytes = new byte[decoder.distinctLength()]; + decoder.getDistinct(distinctBytes, 0, distinctBytes.length); + final var distinct = decodeFromByteBuffer(ByteBuffer.wrap(distinctBytes), 1); + + metricsHandler.onHistogram( + timestamp, + keyBuffer, + keyOffset, + keyLength, + accumulated, + distinct, + highestTrackableValue, + conversionFactor); + } + + private void onTps(TpsDecoder decoder) { + final var timestamp = decoder.timestamp(); + final var value = decoder.value(); + + final var keyBuffer = decoder.buffer(); + final var keyOffset = decoder.limit() + TpsDecoder.keyHeaderLength(); + final var keyLength = decoder.keyLength(); + decoder.skipKey(); + + metricsHandler.onTps(timestamp, keyBuffer, keyOffset, keyLength, value); + } + + @Override + public void close() { + if (!isClosed) { + isClosed = true; + BufferUtil.free(scratchBuffer); + scratchBuffer = null; + } + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/MetricsReaderAgent.java b/metrics/src/main/java/io/scalecube/metrics/MetricsReaderAgent.java new file mode 100644 index 0000000..fcd77c8 --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/MetricsReaderAgent.java @@ -0,0 +1,124 @@ +package io.scalecube.metrics; + +import java.time.Duration; +import org.agrona.CloseHelper; +import org.agrona.concurrent.Agent; +import org.agrona.concurrent.AgentTerminationException; +import org.agrona.concurrent.AtomicBuffer; +import org.agrona.concurrent.EpochClock; +import org.agrona.concurrent.broadcast.BroadcastReceiver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Agent that consumes metrics (histograms, tps values) from the {@link BroadcastReceiver}, and + * dispatches them to user-supplied {@link MetricsHandler}. 
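+ *
+ * <p>Run sketch (illustrative; the broadcast buffer comes from a {@link MetricsTransmitter}):
+ *
+ * <pre>{@code
+ * var agent = new MetricsReaderAgent(
+ *     "metrics-reader", broadcastBuffer, SystemEpochClock.INSTANCE,
+ *     Duration.ofSeconds(3), handler);
+ * AgentRunner.startOnThread(
+ *     new AgentRunner(new BackoffIdleStrategy(), Throwable::printStackTrace, null, agent));
+ * }</pre>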
+ */ +public class MetricsReaderAgent implements Agent { + + private static final Logger LOGGER = LoggerFactory.getLogger(MetricsReaderAgent.class); + + public enum State { + INIT, + RUNNING, + CLEANUP, + CLOSED + } + + private final String roleName; + private final AtomicBuffer broadcastBuffer; + private final MetricsHandler metricsHandler; + + private final Delay retryInterval; + private MetricsReader metricsReader; + private State state = State.CLOSED; + + /** + * Constructor. + * + * @param roleName roleName + * @param broadcastBuffer buffer from where to consume SBE metrics messages + * @param epochClock epochClock + * @param retryInterval retryInterval + * @param metricsHandler callback handler for processing metrics (histograms, tps values) + */ + public MetricsReaderAgent( + String roleName, + AtomicBuffer broadcastBuffer, + EpochClock epochClock, + Duration retryInterval, + MetricsHandler metricsHandler) { + this.roleName = roleName; + this.metricsHandler = metricsHandler; + this.broadcastBuffer = broadcastBuffer; + this.retryInterval = new Delay(epochClock, retryInterval.toMillis()); + } + + @Override + public String roleName() { + return roleName; + } + + @Override + public void onStart() { + if (state != State.CLOSED) { + throw new AgentTerminationException("Illegal state: " + state); + } + state(State.INIT); + } + + @Override + public int doWork() { + try { + return switch (state) { + case INIT -> init(); + case RUNNING -> running(); + case CLEANUP -> cleanup(); + default -> throw new AgentTerminationException("Unknown state: " + state); + }; + } catch (AgentTerminationException e) { + throw e; + } catch (Exception e) { + state(State.CLEANUP); + throw e; + } + } + + private int init() { + if (retryInterval.isNotOverdue()) { + return 0; + } + + metricsReader = new MetricsReader(broadcastBuffer); + + state(State.RUNNING); + LOGGER.info("[{}] Initialized, now running", roleName()); + return 1; + } + + private int running() { + return metricsReader.read(metricsHandler); + } + + private int cleanup() { + CloseHelper.quietCloseAll(metricsReader); + + State previous = state; + if (previous != State.CLOSED) { // when it comes from onClose() + retryInterval.delay(); + state(State.INIT); + } + return 1; + } + + @Override + public void onClose() { + state(State.CLOSED); + cleanup(); + } + + private void state(State state) { + LOGGER.debug("[{}][state] {}->{}", roleName(), this.state, state); + this.state = state; + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/MetricsRecorder.java b/metrics/src/main/java/io/scalecube/metrics/MetricsRecorder.java new file mode 100644 index 0000000..ec87b75 --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/MetricsRecorder.java @@ -0,0 +1,529 @@ +package io.scalecube.metrics; + +import static java.util.concurrent.atomic.AtomicIntegerFieldUpdater.newUpdater; +import static org.agrona.BitUtil.CACHE_LINE_LENGTH; +import static org.agrona.IoUtil.delete; +import static org.agrona.IoUtil.ensureDirectoryExists; +import static org.agrona.IoUtil.mapExistingFile; +import static org.agrona.IoUtil.mapNewFile; +import static org.agrona.concurrent.ringbuffer.RingBufferDescriptor.TRAILER_LENGTH; + +import java.io.File; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.util.ConcurrentModificationException; +import java.util.Properties; +import java.util.Queue; +import java.util.UUID; +import java.util.concurrent.ConcurrentLinkedDeque; +import 
java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.function.Consumer; +import org.agrona.BitUtil; +import org.agrona.BufferUtil; +import org.agrona.CloseHelper; +import org.agrona.DirectBuffer; +import org.agrona.ErrorHandler; +import org.agrona.ExpandableArrayBuffer; +import org.agrona.LangUtil; +import org.agrona.SystemUtil; +import org.agrona.concurrent.AgentInvoker; +import org.agrona.concurrent.AgentRunner; +import org.agrona.concurrent.BackoffIdleStrategy; +import org.agrona.concurrent.CachedEpochClock; +import org.agrona.concurrent.EpochClock; +import org.agrona.concurrent.IdleStrategy; +import org.agrona.concurrent.SystemEpochClock; +import org.agrona.concurrent.UnsafeBuffer; +import org.agrona.concurrent.ringbuffer.ManyToOneRingBuffer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Entry point for allocating and aggregating metrics (histograms, tps values). Responsible for + * publishing aggregated metrics data to the many-to-one ring-buffer for further processing. Can run + * in multiple instances, but metrics aggregation occurs only per concrete instance. + * + * @see HistogramMetric + * @see TpsMetric + * @see MetricsTransmitter + */ +public class MetricsRecorder implements AutoCloseable { + + private static final Logger LOGGER = LoggerFactory.getLogger(MetricsRecorder.class); + + private final Context context; + + private final AgentInvoker agentInvoker; + private final AgentRunner agentRunner; + private final Queue metricsQueue = new ConcurrentLinkedDeque<>(); + + private MetricsRecorder(Context context) { + try { + context.conclude(); + this.context = context; + + final var agent = + new MetricsRecorderAgent( + metricsQueue, + context.epochClock(), + context.cachedEpochClock(), + context.metricsPublication()); + + if (context.useAgentInvoker()) { + agentRunner = null; + agentInvoker = new AgentInvoker(context.errorHandler(), null, agent); + } else { + agentInvoker = null; + agentRunner = new AgentRunner(context.idleStrategy(), context.errorHandler(), null, agent); + } + } catch (ConcurrentModificationException ex) { + throw ex; + } catch (Exception ex) { + context.close(); + throw ex; + } + } + + /** + * Launch {@link MetricsRecorder} with default {@link Context}. + * + * @return newly started {@link MetricsRecorder} + */ + public static MetricsRecorder launch() { + return launch(new Context()); + } + + /** + * Launch {@link MetricsRecorder} with provided {@link Context}. + * + * @param context context + * @return newly started {@link MetricsRecorder} + */ + public static MetricsRecorder launch(Context context) { + final var metricsRegistry = new MetricsRecorder(context); + if (metricsRegistry.agentInvoker != null) { + metricsRegistry.agentInvoker.start(); + } else { + AgentRunner.startOnThread(metricsRegistry.agentRunner); + } + return metricsRegistry; + } + + /** + * Allocates new {@link HistogramMetric}. 
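+   *
+   * <p>Usage sketch (tag names and values are illustrative):
+   *
+   * <pre>{@code
+   * MetricsRecorder recorder = MetricsRecorder.launch();
+   * HistogramMetric latency = recorder.newHistogram(
+   *     key -> key.tagsCount(1).stringValue("name", "latency_us"),
+   *     TimeUnit.SECONDS.toMicros(1), 1.0, 1000);
+   * latency.record(42);
+   * }</pre>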
+ * + * @param consumer consumer for {@link KeyFlyweight} + * @param highestTrackableValue highestTrackableValue for {@link org.HdrHistogram.Histogram} + * @param conversionFactor multiplier for data conversion of the histogram values + * @param resolutionMs window size of measurement in milliseconds, it controls how often + * histograms are cut off and flushed + * @return newly created {@link HistogramMetric} + */ + public HistogramMetric newHistogram( + Consumer consumer, + long highestTrackableValue, + double conversionFactor, + long resolutionMs) { + final var keyFlyweight = new KeyFlyweight(); + final var keyBuffer = new ExpandableArrayBuffer(); + consumer.accept(keyFlyweight.wrap(keyBuffer, 0)); + final var metric = + new HistogramRecorder( + keyFlyweight.buffer(), highestTrackableValue, conversionFactor, resolutionMs); + metricsQueue.add(metric); + return metric; + } + + /** + * Allocates new {@link TpsMetric}. + * + * @param consumer consumer for {@link KeyFlyweight} + * @return newly created {@link TpsMetric} + */ + public TpsMetric newTps(Consumer consumer) { + final var keyFlyweight = new KeyFlyweight(); + final var keyBuffer = new ExpandableArrayBuffer(); + consumer.accept(keyFlyweight.wrap(keyBuffer, 0)); + final var metric = new TpsRecorder(keyFlyweight.buffer()); + metricsQueue.add(metric); + return metric; + } + + /** + * Returns {@link Context} instance. + * + * @return {@link Context} instance + */ + public Context context() { + return context; + } + + /** + * Returns {@link AgentInvoker} instance when running without threads, or null if running with + * {@link AgentRunner}. + * + * @return {@link AgentInvoker} instance, or null + */ + public AgentInvoker agentInvoker() { + return agentInvoker; + } + + @Override + public void close() { + CloseHelper.quietCloseAll(agentInvoker, agentRunner); + context.close(); + } + + public static class Context { + + public static final String METRICS_DIRECTORY_NAME_PROP_NAME = + "io.scalecube.metrics.recorder.metricsDirectoryName"; + public static final String DIR_DELETE_ON_SHUTDOWN_PROP_NAME = + "io.scalecube.metrics.recorder.dirDeleteOnShutdown"; + public static final String METRICS_BUFFER_LENGTH_PROP_NAME = + "io.scalecube.metrics.recorder.metricsBufferLength"; + public static final String IDLE_STRATEGY_PROP_NAME = + "io.scalecube.metrics.recorder.idleStrategy"; + + public static final String METRICS_FILE = "metrics.dat"; + public static final String DEFAULT_METRICS_DIR_NAME; + public static final int DEFAULT_METRICS_BUFFER_LENGTH = 32 * 1024 * 1024; + + static { + String baseDirName = null; + + if (SystemUtil.isLinux()) { + final File devShmDir = new File("/dev/shm"); + if (devShmDir.exists()) { + baseDirName = "/dev/shm/metrics"; + } + } + + if (baseDirName == null) { + baseDirName = SystemUtil.tmpDirName() + "metrics"; + } + + DEFAULT_METRICS_DIR_NAME = baseDirName + '-' + System.getProperty("user.name", "default"); + } + + private static final AtomicIntegerFieldUpdater IS_CONCLUDED_UPDATER = + newUpdater(Context.class, "isConcluded"); + private static final AtomicIntegerFieldUpdater IS_CLOSED_UPDATER = + newUpdater(Context.class, "isClosed"); + + private volatile int isConcluded; + private volatile int isClosed; + + private File metricsDir; + private String metricsDirectoryName = System.getProperty(METRICS_DIRECTORY_NAME_PROP_NAME); + private boolean dirDeleteOnShutdown = Boolean.getBoolean(DIR_DELETE_ON_SHUTDOWN_PROP_NAME); + private EpochClock epochClock; + private CachedEpochClock cachedEpochClock; + private int 
metricsBufferLength = Integer.getInteger(METRICS_BUFFER_LENGTH_PROP_NAME, 0); + private MappedByteBuffer metricsByteBuffer; + private ManyToOneRingBuffer metricsBuffer; + private boolean useAgentInvoker; + private ErrorHandler errorHandler; + private IdleStrategy idleStrategy; + + public Context() {} + + public Context(Properties props) { + metricsDirectoryName(props.getProperty(METRICS_DIRECTORY_NAME_PROP_NAME)); + dirDeleteOnShutdown(props.getProperty(DIR_DELETE_ON_SHUTDOWN_PROP_NAME)); + metricsBufferLength(props.getProperty(METRICS_BUFFER_LENGTH_PROP_NAME)); + idleStrategy(props.getProperty(IDLE_STRATEGY_PROP_NAME)); + } + + private void conclude() { + if (0 != IS_CONCLUDED_UPDATER.getAndSet(this, 1)) { + throw new ConcurrentModificationException(); + } + + concludeMetricsDirectory(); + concludeMetricsBuffer(); + + if (epochClock == null) { + epochClock = SystemEpochClock.INSTANCE; + } + + if (cachedEpochClock == null) { + cachedEpochClock = new CachedEpochClock(); + } + + if (errorHandler == null) { + errorHandler = ex -> LOGGER.error("Exception occurred: ", ex); + } + + if (idleStrategy == null) { + idleStrategy = new BackoffIdleStrategy(); + } + } + + private void concludeMetricsDirectory() { + if (metricsDirectoryName == null) { + metricsDirectoryName = DEFAULT_METRICS_DIR_NAME; + } + + if (metricsDir == null) { + try { + metricsDir = new File(metricsDirectoryName).getCanonicalFile(); + } catch (IOException e) { + LangUtil.rethrowUnchecked(e); + } + } + + if (metricsDir.isDirectory()) { + final var file = new File(metricsDir, METRICS_FILE); + if (file.exists()) { + final var buffer = mapExistingFile(file, METRICS_FILE); + try { + if (!LayoutDescriptor.isMetricsHeaderLengthSufficient(buffer.capacity())) { + delete(metricsDir, false); + } else { + final var headerBuffer = LayoutDescriptor.createHeaderBuffer(buffer); + final var startTimestamp = ManagementFactory.getRuntimeMXBean().getStartTime(); + final var pid = ManagementFactory.getRuntimeMXBean().getPid(); + if (!LayoutDescriptor.isMetricsActive(headerBuffer, startTimestamp, pid)) { + delete(metricsDir, false); + } + } + } finally { + BufferUtil.free(buffer); + } + } + } + + ensureDirectoryExists(metricsDir, "metrics"); + } + + private void concludeMetricsBuffer() { + if (metricsBufferLength == 0) { + metricsBufferLength = DEFAULT_METRICS_BUFFER_LENGTH; + } + + final var min = DEFAULT_METRICS_BUFFER_LENGTH; + if (metricsBufferLength < min) { + throw new IllegalArgumentException("metricsBufferLength must be at least " + min); + } + if (!BitUtil.isPowerOfTwo(metricsBufferLength)) { + throw new IllegalArgumentException("metricsBufferLength must be power of 2"); + } + + final var file = new File(metricsDir, METRICS_FILE); + final var headerLength = LayoutDescriptor.HEADER_LENGTH; + final var bufferLength = metricsBufferLength + TRAILER_LENGTH; + final var totalLength = headerLength + bufferLength; + + if (!file.exists()) { + final var buffer = mapNewFile(file, totalLength); + try { + final var headerBuffer = LayoutDescriptor.createHeaderBuffer(buffer); + final var startTimestamp = ManagementFactory.getRuntimeMXBean().getStartTime(); + final var pid = ManagementFactory.getRuntimeMXBean().getPid(); + LayoutDescriptor.fillHeaderBuffer(headerBuffer, startTimestamp, pid, bufferLength); + } finally { + BufferUtil.free(buffer); + } + } + + metricsByteBuffer = mapExistingFile(file, METRICS_FILE); + metricsBuffer = + new ManyToOneRingBuffer(new UnsafeBuffer(metricsByteBuffer, headerLength, bufferLength)); + } + + public File metricsDir() { + 
return metricsDir; + } + + public Context metricsDir(File metricsDir) { + this.metricsDir = metricsDir; + return this; + } + + public String metricsDirectoryName() { + return metricsDirectoryName; + } + + public Context metricsDirectoryName(String metricsDirectoryName) { + this.metricsDirectoryName = metricsDirectoryName; + return this; + } + + public boolean dirDeleteOnShutdown() { + return dirDeleteOnShutdown; + } + + public Context dirDeleteOnShutdown(boolean dirDeleteOnShutdown) { + this.dirDeleteOnShutdown = dirDeleteOnShutdown; + return this; + } + + public Context dirDeleteOnShutdown(String dirDeleteOnShutdown) { + if (dirDeleteOnShutdown != null) { + this.dirDeleteOnShutdown = Boolean.parseBoolean(dirDeleteOnShutdown); + } + return this; + } + + public EpochClock epochClock() { + return epochClock; + } + + public Context epochClock(EpochClock epochClock) { + this.epochClock = epochClock; + return this; + } + + public CachedEpochClock cachedEpochClock() { + return cachedEpochClock; + } + + public Context cachedEpochClock(CachedEpochClock cachedEpochClock) { + this.cachedEpochClock = cachedEpochClock; + return this; + } + + public int metricsBufferLength() { + return metricsBufferLength; + } + + public Context metricsBufferLength(int metricsBufferLength) { + this.metricsBufferLength = metricsBufferLength; + return this; + } + + public Context metricsBufferLength(String metricsBufferLength) { + if (metricsBufferLength != null) { + this.metricsBufferLength = Integer.parseInt(metricsBufferLength); + } + return this; + } + + public boolean useAgentInvoker() { + return useAgentInvoker; + } + + public Context useAgentInvoker(boolean useAgentInvoker) { + this.useAgentInvoker = useAgentInvoker; + return this; + } + + public ErrorHandler errorHandler() { + return errorHandler; + } + + public Context errorHandler(ErrorHandler errorHandler) { + this.errorHandler = errorHandler; + return this; + } + + public Context idleStrategy(IdleStrategy idleStrategy) { + this.idleStrategy = idleStrategy; + return this; + } + + public Context idleStrategy(String idleStrategy) { + if (idleStrategy != null) { + try { + this.idleStrategy = + (IdleStrategy) Class.forName(idleStrategy).getConstructor().newInstance(); + } catch (Exception ex) { + LangUtil.rethrowUnchecked(ex); + } + } + return this; + } + + public IdleStrategy idleStrategy() { + return idleStrategy; + } + + MetricsPublication metricsPublication() { + return new MetricsPublication(metricsBuffer); + } + + public static String generateMetricsDirectoryName() { + return DEFAULT_METRICS_DIR_NAME + "-" + UUID.randomUUID(); + } + + private void close() { + if (IS_CLOSED_UPDATER.compareAndSet(this, 0, 1)) { + BufferUtil.free(metricsByteBuffer); + metricsByteBuffer = null; + if (dirDeleteOnShutdown && metricsDir != null) { + delete(metricsDir, false); + } + } + } + } + + public static class LayoutDescriptor { + + public static final int HEADER_LENGTH = CACHE_LINE_LENGTH * 2; + public static final int START_TIMESTAMP_OFFSET = 0; + public static final int PID_OFFSET = 8; + public static final int METRICS_BUFFER_LENGTH_OFFSET = 16; + + public static UnsafeBuffer createHeaderBuffer(ByteBuffer buffer) { + return new UnsafeBuffer(buffer, 0, HEADER_LENGTH); + } + + public static long startTimestamp(DirectBuffer headerBuffer) { + return headerBuffer.getLong(START_TIMESTAMP_OFFSET); + } + + public static long pid(DirectBuffer headerBuffer) { + return headerBuffer.getLong(PID_OFFSET); + } + + public static long metricsBufferLength(DirectBuffer headerBuffer) { + 
return headerBuffer.getInt(METRICS_BUFFER_LENGTH_OFFSET); + } + + public static boolean isMetricsHeaderLengthSufficient(int length) { + return length >= HEADER_LENGTH; + } + + public static boolean isMetricsFileLengthSufficient(DirectBuffer headerBuffer, int fileLength) { + final var totalLength = HEADER_LENGTH + metricsBufferLength(headerBuffer); + return totalLength >= fileLength; + } + + public static boolean isMetricsActive( + DirectBuffer headerBuffer, long startTimestamp, long pid) { + return startTimestamp(headerBuffer) == startTimestamp && pid(headerBuffer) == pid; + } + + public static void fillHeaderBuffer( + UnsafeBuffer headerBuffer, long startTimestamp, long pid, int metricsBufferLength) { + headerBuffer.putLong(START_TIMESTAMP_OFFSET, startTimestamp); + headerBuffer.putLong(PID_OFFSET, pid); + headerBuffer.putInt(METRICS_BUFFER_LENGTH_OFFSET, metricsBufferLength); + } + } + + static class MetricsPublication { + + private final ManyToOneRingBuffer metricsBuffer; + + MetricsPublication(ManyToOneRingBuffer metricsBuffer) { + this.metricsBuffer = metricsBuffer; + } + + void publish(DirectBuffer buffer, int offset, int length) { + final var index = metricsBuffer.tryClaim(1, length); + if (index > 0) { + try { + metricsBuffer.buffer().putBytes(index, buffer, offset, length); + metricsBuffer.commit(index); + } catch (Exception ex) { + metricsBuffer.abort(index); + LangUtil.rethrowUnchecked(ex); + } + } + } + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/MetricsRecorderAgent.java b/metrics/src/main/java/io/scalecube/metrics/MetricsRecorderAgent.java new file mode 100644 index 0000000..2915140 --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/MetricsRecorderAgent.java @@ -0,0 +1,139 @@ +package io.scalecube.metrics; + +import io.scalecube.metrics.MetricsRecorder.MetricsPublication; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.TimeUnit; +import org.agrona.DeadlineTimerWheel; +import org.agrona.DirectBuffer; +import org.agrona.collections.Long2ObjectHashMap; +import org.agrona.collections.Object2ObjectHashMap; +import org.agrona.concurrent.Agent; +import org.agrona.concurrent.CachedEpochClock; +import org.agrona.concurrent.EpochClock; + +class MetricsRecorderAgent implements Agent { + + private static final int TIMER_POLL_LIMIT = 10; + private static final int TIMER_TICK_RESOLUTION = 8192; + private static final int TIMER_TICKS_PER_WHEEL = 128; + private static final long TPS_RESOLUTION = TimeUnit.SECONDS.toMillis(1); + + private final Queue queue; + private final EpochClock epochClock; + private final CachedEpochClock cachedEpochClock; + private final MetricsPublication metricsPublication; + + private final MetricsEncoder metricsEncoder = new MetricsEncoder(); + private final Map> metrics = new Object2ObjectHashMap<>(); + private final Map aggregates = new Object2ObjectHashMap<>(); + private final Long2ObjectHashMap scheduledTimers = new Long2ObjectHashMap<>(); + private final DeadlineTimerWheel timerWheel; + + MetricsRecorderAgent( + Queue queue, + EpochClock epochClock, + CachedEpochClock cachedEpochClock, + MetricsPublication metricsPublication) { + this.queue = queue; + this.epochClock = epochClock; + this.cachedEpochClock = cachedEpochClock; + this.metricsPublication = metricsPublication; + timerWheel = + new DeadlineTimerWheel( + TimeUnit.MILLISECONDS, 0, TIMER_TICK_RESOLUTION, TIMER_TICKS_PER_WHEEL); + } + + @Override + public String roleName() { + return 
"MetricsRecorderAgent"; + } + + @Override + public int doWork() { + final var time = epochClock.time(); + cachedEpochClock.update(time); + + final var metric = queue.poll(); + if (metric != null) { + if (metric instanceof HistogramRecorder) { + add((HistogramRecorder) metric); + } + if (metric instanceof TpsRecorder) { + add((TpsRecorder) metric); + } + } + + return timerWheel.poll(time, this::onTimerExpiry, TIMER_POLL_LIMIT); + } + + private void add(HistogramRecorder metric) { + final var keyBuffer = metric.keyBuffer(); + final var list = metrics.computeIfAbsent(keyBuffer, k -> new ArrayList<>()); + list.add(metric); + + if (!aggregates.containsKey(keyBuffer)) { + aggregates.put( + keyBuffer, + new HistogramAggregate( + keyBuffer, + metric.highestTrackableValue(), + metric.conversionFactor(), + metric.resolutionMs(), + metricsEncoder, + metricsPublication)); + schedule(keyBuffer, metric.resolutionMs()); + } + } + + private void add(TpsRecorder metric) { + final var keyBuffer = metric.keyBuffer(); + final var list = metrics.computeIfAbsent(keyBuffer, k -> new ArrayList<>()); + list.add(metric); + + if (!aggregates.containsKey(keyBuffer)) { + aggregates.put(keyBuffer, new TpsAggregate(keyBuffer, metricsEncoder, metricsPublication)); + schedule(keyBuffer, TPS_RESOLUTION); + } + } + + private boolean onTimerExpiry(TimeUnit timeUnit, long time, long timerId) { + final var keyBuffer = scheduledTimers.remove(timerId); + if (keyBuffer == null) { + return true; + } + final var aggregate = aggregates.get(keyBuffer); + if (aggregate == null) { + return true; + } + final var list = metrics.get(keyBuffer); + if (list == null) { + return true; + } + + if (aggregate instanceof HistogramAggregate agg) { + for (int i = 0; i < list.size(); i++) { + ((HistogramRecorder) list.get(i)).swapAndUpdate(agg); + } + agg.publish(time); + schedule(keyBuffer, agg.resolutionMs()); + } + if (aggregate instanceof TpsAggregate agg) { + for (int i = 0; i < list.size(); i++) { + ((TpsRecorder) list.get(i)).swapAndUpdate(agg); + } + agg.publish(time); + schedule(keyBuffer, TPS_RESOLUTION); + } + + return true; + } + + private void schedule(DirectBuffer keyBuffer, long resolutionMs) { + final var deadline = cachedEpochClock.time() + resolutionMs; + final var timerId = timerWheel.scheduleTimer(deadline); + scheduledTimers.put(timerId, keyBuffer); + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/MetricsTransmitter.java b/metrics/src/main/java/io/scalecube/metrics/MetricsTransmitter.java new file mode 100644 index 0000000..a106d9e --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/MetricsTransmitter.java @@ -0,0 +1,369 @@ +package io.scalecube.metrics; + +import static java.util.concurrent.atomic.AtomicIntegerFieldUpdater.newUpdater; +import static org.agrona.IoUtil.ensureDirectoryExists; +import static org.agrona.concurrent.broadcast.BroadcastBufferDescriptor.TRAILER_LENGTH; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.ConcurrentModificationException; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import org.agrona.BitUtil; +import org.agrona.CloseHelper; +import org.agrona.ErrorHandler; +import org.agrona.LangUtil; +import org.agrona.SystemUtil; +import org.agrona.concurrent.AgentInvoker; +import org.agrona.concurrent.AgentRunner; +import org.agrona.concurrent.AtomicBuffer; +import org.agrona.concurrent.BackoffIdleStrategy; +import org.agrona.concurrent.EpochClock; +import 
org.agrona.concurrent.IdleStrategy; +import org.agrona.concurrent.SystemEpochClock; +import org.agrona.concurrent.UnsafeBuffer; +import org.agrona.concurrent.broadcast.BroadcastTransmitter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Component that reads metrics data (histograms, tps values) from many-to-one ring-buffer produced + * by {@link MetricsRecorder}(s), and publishing those metrics to one-to-many ring-buffer for + * further processing by multiple subscribers. + * + * @see MetricsRecorder + */ +public class MetricsTransmitter implements AutoCloseable { + + private static final Logger LOGGER = LoggerFactory.getLogger(MetricsTransmitter.class); + + private final Context context; + + private final AgentInvoker agentInvoker; + private final AgentRunner agentRunner; + + private MetricsTransmitter(Context context) { + context.conclude(); + this.context = context; + + final var agent = + new MetricsTransmitterAgent( + context.metricsDir(), + context.warnIfMetricsNotExists(), + context.epochClock(), + context.retryInterval(), + context.heartbeatTimeout(), + context.broadcastTransmitter()); + + if (context.useAgentInvoker()) { + agentRunner = null; + agentInvoker = new AgentInvoker(context.errorHandler(), null, agent); + } else { + agentInvoker = null; + agentRunner = new AgentRunner(context.idleStrategy(), context.errorHandler(), null, agent); + } + } + + /** + * Launch {@link MetricsTransmitter} with default {@link Context}. + * + * @return newly started {@link MetricsTransmitter} + */ + public static MetricsTransmitter launch() { + return launch(new Context()); + } + + /** + * Launch {@link MetricsTransmitter} with provided {@link Context}. + * + * @param context context + * @return newly started {@link MetricsTransmitter} + */ + public static MetricsTransmitter launch(Context context) { + final var metricsRegistry = new MetricsTransmitter(context); + if (metricsRegistry.agentInvoker != null) { + metricsRegistry.agentInvoker.start(); + } else { + AgentRunner.startOnThread(metricsRegistry.agentRunner); + } + return metricsRegistry; + } + + /** + * Returns {@link Context} instance. + * + * @return {@link Context} instance + */ + public Context context() { + return context; + } + + /** + * Returns {@link AgentInvoker} instance when running without threads, or null if running with + * {@link AgentRunner}. 
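+   *
+   * <p>Invoker-mode sketch (duty cycle driven by the caller):
+   *
+   * <pre>{@code
+   * MetricsTransmitter transmitter =
+   *     MetricsTransmitter.launch(new MetricsTransmitter.Context().useAgentInvoker(true));
+   * AgentInvoker invoker = transmitter.agentInvoker();
+   * invoker.invoke(); // call periodically from the owning thread
+   * }</pre>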
+ * + * @return {@link AgentInvoker} instance, or null + */ + public AgentInvoker agentInvoker() { + return agentInvoker; + } + + @Override + public void close() { + CloseHelper.quietCloseAll(agentInvoker, agentRunner); + } + + public static class Context { + + public static final String METRICS_DIRECTORY_NAME_PROP_NAME = + "io.scalecube.metrics.transmitter.metricsDirectoryName"; + public static final String WARN_IF_METRICS_NOT_EXISTS_PROP_NAME = + "io.scalecube.metrics.transmitter.warnIfMetricsNotExists"; + public static final String RETRY_INTERVAL_PROP_NAME = + "io.scalecube.metrics.transmitter.retryInterval"; + public static final String HEARTBEAT_TIMEOUT_PROP_NAME = + "io.scalecube.metrics.transmitter.heartbeatTimeout"; + public static final String BROADCAST_BUFFER_LENGTH_PROP_NAME = + "io.scalecube.metrics.transmitter.broadcastBufferLength"; + public static final String IDLE_STRATEGY_PROP_NAME = + "io.scalecube.metrics.transmitter.idleStrategy"; + + public static final int DEFAULT_METRICS_BROADCAST_BUFFER_LENGTH = 32 * 1024 * 1024; + + private static final AtomicIntegerFieldUpdater IS_CONCLUDED_UPDATER = + newUpdater(Context.class, "isConcluded"); + + private volatile int isConcluded; + + private File metricsDir; + private String metricsDirectoryName = System.getProperty(METRICS_DIRECTORY_NAME_PROP_NAME); + private boolean warnIfMetricsNotExists = + Boolean.getBoolean(WARN_IF_METRICS_NOT_EXISTS_PROP_NAME); + private EpochClock epochClock; + private Duration retryInterval; + private Duration heartbeatTimeout; + private int broadcastBufferLength = Integer.getInteger(BROADCAST_BUFFER_LENGTH_PROP_NAME, 0); + private AtomicBuffer broadcastBuffer; + private BroadcastTransmitter broadcastTransmitter; + private boolean useAgentInvoker; + private ErrorHandler errorHandler; + private IdleStrategy idleStrategy; + + public Context() {} + + public Context(Properties props) { + metricsDirectoryName(props.getProperty(METRICS_DIRECTORY_NAME_PROP_NAME)); + warnIfMetricsNotExists(props.getProperty(WARN_IF_METRICS_NOT_EXISTS_PROP_NAME)); + retryInterval(props.getProperty(RETRY_INTERVAL_PROP_NAME)); + heartbeatTimeout(props.getProperty(HEARTBEAT_TIMEOUT_PROP_NAME)); + broadcastBufferLength(props.getProperty(BROADCAST_BUFFER_LENGTH_PROP_NAME)); + idleStrategy(props.getProperty(IDLE_STRATEGY_PROP_NAME)); + } + + private void conclude() { + if (0 != IS_CONCLUDED_UPDATER.getAndSet(this, 1)) { + throw new ConcurrentModificationException(); + } + + concludeMetricsDirectory(); + concludeTransmitter(); + + if (epochClock == null) { + epochClock = SystemEpochClock.INSTANCE; + } + + if (retryInterval == null) { + retryInterval = Duration.ofSeconds(3); + } + + if (heartbeatTimeout == null) { + heartbeatTimeout = Duration.ofSeconds(1); + } + + if (errorHandler == null) { + errorHandler = ex -> LOGGER.error("Exception occurred: ", ex); + } + + if (idleStrategy == null) { + idleStrategy = new BackoffIdleStrategy(); + } + } + + private void concludeMetricsDirectory() { + if (metricsDirectoryName == null) { + metricsDirectoryName = MetricsRecorder.Context.DEFAULT_METRICS_DIR_NAME; + } + + if (metricsDir == null) { + try { + metricsDir = new File(metricsDirectoryName).getCanonicalFile(); + } catch (IOException e) { + LangUtil.rethrowUnchecked(e); + } + } + + ensureDirectoryExists(metricsDir, "metrics"); + } + + private void concludeTransmitter() { + if (broadcastBufferLength == 0) { + broadcastBufferLength = DEFAULT_METRICS_BROADCAST_BUFFER_LENGTH; + } + + final var min = DEFAULT_METRICS_BROADCAST_BUFFER_LENGTH; + if 
(broadcastBufferLength < min) { + throw new IllegalArgumentException("broadcastBufferLength must be at least " + min); + } + if (!BitUtil.isPowerOfTwo(broadcastBufferLength)) { + throw new IllegalArgumentException("broadcastBufferLength must be power of 2"); + } + + final var broadcastByteBuffer = + ByteBuffer.allocateDirect(broadcastBufferLength + TRAILER_LENGTH); + broadcastBuffer = new UnsafeBuffer(broadcastByteBuffer); + broadcastTransmitter = new BroadcastTransmitter(broadcastBuffer); + } + + public File metricsDir() { + return metricsDir; + } + + public Context metricsDir(File metricsDir) { + this.metricsDir = metricsDir; + return this; + } + + public String metricsDirectoryName() { + return metricsDirectoryName; + } + + public Context metricsDirectoryName(String metricsDirectoryName) { + this.metricsDirectoryName = metricsDirectoryName; + return this; + } + + public boolean warnIfMetricsNotExists() { + return warnIfMetricsNotExists; + } + + public Context warnIfMetricsNotExists(boolean warnIfMetricsNotExists) { + this.warnIfMetricsNotExists = warnIfMetricsNotExists; + return this; + } + + public Context warnIfMetricsNotExists(String warnIfMetricsNotExists) { + if (warnIfMetricsNotExists != null) { + this.warnIfMetricsNotExists = Boolean.parseBoolean(warnIfMetricsNotExists); + } + return this; + } + + public EpochClock epochClock() { + return epochClock; + } + + public Context epochClock(EpochClock epochClock) { + this.epochClock = epochClock; + return this; + } + + public Duration retryInterval() { + return retryInterval; + } + + public Context retryInterval(Duration retryInterval) { + this.retryInterval = retryInterval; + return this; + } + + public Context retryInterval(String retryInterval) { + if (retryInterval != null) { + this.retryInterval = + Duration.ofNanos(SystemUtil.parseDuration("retryInterval", retryInterval)); + } + return this; + } + + public Duration heartbeatTimeout() { + return heartbeatTimeout; + } + + public Context heartbeatTimeout(Duration heartbeatTimeout) { + this.heartbeatTimeout = heartbeatTimeout; + return this; + } + + public Context heartbeatTimeout(String heartbeatTimeout) { + if (heartbeatTimeout != null) { + this.heartbeatTimeout = + Duration.ofNanos(SystemUtil.parseDuration("heartbeatTimeout", heartbeatTimeout)); + } + return this; + } + + public boolean useAgentInvoker() { + return useAgentInvoker; + } + + public Context useAgentInvoker(boolean useAgentInvoker) { + this.useAgentInvoker = useAgentInvoker; + return this; + } + + public ErrorHandler errorHandler() { + return errorHandler; + } + + public Context errorHandler(ErrorHandler errorHandler) { + this.errorHandler = errorHandler; + return this; + } + + public Context idleStrategy(IdleStrategy idleStrategy) { + this.idleStrategy = idleStrategy; + return this; + } + + public Context idleStrategy(String idleStrategy) { + if (idleStrategy != null) { + try { + this.idleStrategy = + (IdleStrategy) Class.forName(idleStrategy).getConstructor().newInstance(); + } catch (Exception ex) { + LangUtil.rethrowUnchecked(ex); + } + } + return this; + } + + public IdleStrategy idleStrategy() { + return idleStrategy; + } + + public int broadcastBufferLength() { + return broadcastBufferLength; + } + + public Context broadcastBufferLength(int broadcastBufferLength) { + this.broadcastBufferLength = broadcastBufferLength; + return this; + } + + public Context broadcastBufferLength(String broadcastBufferLength) { + if (broadcastBufferLength != null) { + this.broadcastBufferLength = 
Integer.parseInt(broadcastBufferLength); + } + return this; + } + + public AtomicBuffer broadcastBuffer() { + return broadcastBuffer; + } + + BroadcastTransmitter broadcastTransmitter() { + return broadcastTransmitter; + } + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/MetricsTransmitterAgent.java b/metrics/src/main/java/io/scalecube/metrics/MetricsTransmitterAgent.java new file mode 100644 index 0000000..f020b0d --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/MetricsTransmitterAgent.java @@ -0,0 +1,199 @@ +package io.scalecube.metrics; + +import static io.scalecube.metrics.MetricsRecorder.Context.METRICS_FILE; +import static org.agrona.IoUtil.mapExistingFile; + +import io.scalecube.metrics.MetricsRecorder.LayoutDescriptor; +import java.io.File; +import java.nio.MappedByteBuffer; +import java.time.Duration; +import org.agrona.BufferUtil; +import org.agrona.MutableDirectBuffer; +import org.agrona.concurrent.Agent; +import org.agrona.concurrent.AgentTerminationException; +import org.agrona.concurrent.EpochClock; +import org.agrona.concurrent.MessageHandler; +import org.agrona.concurrent.UnsafeBuffer; +import org.agrona.concurrent.broadcast.BroadcastTransmitter; +import org.agrona.concurrent.ringbuffer.ManyToOneRingBuffer; +import org.agrona.concurrent.ringbuffer.RingBuffer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +class MetricsTransmitterAgent implements Agent, MessageHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(MetricsTransmitterAgent.class); + + public enum State { + INIT, + RUNNING, + CLEANUP, + CLOSED + } + + private final File metricsDir; + private final boolean warnIfMetricsNotExists; + private final BroadcastTransmitter broadcastTransmitter; + + private final Delay retryInterval; + private final Delay heartbeatTimeout; + private File metricsFile; + private RingBuffer metricsBuffer; + private MappedByteBuffer metricsByteBuffer; + private final UnsafeBuffer headerBuffer = new UnsafeBuffer(); + private long metricsStartTimestamp = -1; + private long metricsPid = -1; + private State state = State.CLOSED; + + MetricsTransmitterAgent( + File metricsDir, + boolean warnIfMetricsNotExists, + EpochClock epochClock, + Duration retryInterval, + Duration heartbeatTimeout, + BroadcastTransmitter broadcastTransmitter) { + this.metricsDir = metricsDir; + this.warnIfMetricsNotExists = warnIfMetricsNotExists; + this.broadcastTransmitter = broadcastTransmitter; + this.retryInterval = new Delay(epochClock, retryInterval.toMillis()); + this.heartbeatTimeout = new Delay(epochClock, heartbeatTimeout.toMillis()); + } + + @Override + public String roleName() { + return "MetricsTransmitterAgent"; + } + + @Override + public void onStart() { + if (state != State.CLOSED) { + throw new AgentTerminationException("Illegal state: " + state); + } + state(State.INIT); + } + + @Override + public int doWork() { + try { + return switch (state) { + case INIT -> init(); + case RUNNING -> running(); + case CLEANUP -> cleanup(); + default -> throw new AgentTerminationException("Unknown state: " + state); + }; + } catch (AgentTerminationException e) { + throw e; + } catch (Exception e) { + state(State.CLEANUP); + throw e; + } + } + + private int init() { + if (retryInterval.isNotOverdue()) { + return 0; + } + + metricsFile = new File(metricsDir, METRICS_FILE); + + if (!isActive(metricsFile)) { + state(State.CLEANUP); + return 0; + } + + metricsByteBuffer = mapExistingFile(metricsFile, METRICS_FILE); + headerBuffer.wrap(metricsByteBuffer, 0, 
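+ // init() maps the recorder's metrics file, reads the header (start timestamp and pid
+ // identify the producing process), then wraps everything past the header as the
+ // ring buffer this agent drains into the broadcast transmitter.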
LayoutDescriptor.HEADER_LENGTH); + metricsStartTimestamp = LayoutDescriptor.startTimestamp(headerBuffer); + metricsPid = LayoutDescriptor.pid(headerBuffer); + + final var headerLength = LayoutDescriptor.HEADER_LENGTH; + final var totalLength = metricsByteBuffer.capacity(); + final var length = totalLength - headerLength; + + metricsBuffer = + new ManyToOneRingBuffer(new UnsafeBuffer(metricsByteBuffer, headerLength, length)); + + state(State.RUNNING); + LOGGER.info("[{}] Initialized, now running", roleName()); + return 1; + } + + private int running() { + if (heartbeatTimeout.isOverdue()) { + heartbeatTimeout.delay(); + if (!isActive(metricsFile)) { + state(State.CLEANUP); + LOGGER.warn("[{}] {} is not active, proceed to cleanup", roleName(), metricsFile); + return 0; + } + } + return metricsBuffer.read(this, 100); + } + + private boolean isActive(File metricsFile) { + if (!metricsFile.exists()) { + if (warnIfMetricsNotExists) { + LOGGER.warn("[{}] {} not exists", roleName(), metricsFile); + } + return false; + } + + final var buffer = mapExistingFile(metricsFile, METRICS_FILE); + try { + if (!LayoutDescriptor.isMetricsHeaderLengthSufficient(buffer.capacity())) { + LOGGER.warn("[{}] {} has not sufficient length", roleName(), metricsFile); + return false; + } + headerBuffer.wrap(buffer, 0, LayoutDescriptor.HEADER_LENGTH); + if (!LayoutDescriptor.isMetricsFileLengthSufficient(headerBuffer, buffer.capacity())) { + LOGGER.warn("[{}] {} has not sufficient length", roleName(), metricsFile); + return false; + } + if (metricsStartTimestamp != -1 + && !LayoutDescriptor.isMetricsActive(headerBuffer, metricsStartTimestamp, metricsPid)) { + LOGGER.warn("[{}] {} is not active", roleName(), metricsFile); + return false; + } + } finally { + BufferUtil.free(buffer); + } + + return true; + } + + @Override + public void onMessage(int msgTypeId, MutableDirectBuffer buffer, int index, int length) { + heartbeatTimeout.delay(); + broadcastTransmitter.transmit(msgTypeId, buffer, index, length); + } + + private int cleanup() { + BufferUtil.free(metricsByteBuffer); + metricsByteBuffer = null; + metricsFile = null; + metricsStartTimestamp = -1; + metricsPid = -1; + + State previous = state; + if (previous != State.CLOSED) { // when it comes from onClose() + retryInterval.delay(); + state(State.INIT); + } + return 1; + } + + @Override + public void onClose() { + state(State.CLOSED); + cleanup(); + } + + private void state(State state) { + LOGGER.debug("[{}][state] {}->{}", roleName(), this.state, state); + this.state = state; + } + + public State state() { + return state; + } +} diff --git a/metrics/src/main/java/io/scalecube/metrics/PropertiesRegistry.java b/metrics/src/main/java/io/scalecube/metrics/PropertiesRegistry.java new file mode 100644 index 0000000..b93f862 --- /dev/null +++ b/metrics/src/main/java/io/scalecube/metrics/PropertiesRegistry.java @@ -0,0 +1,87 @@ +package io.scalecube.metrics; + +import static io.scalecube.metrics.CounterDescriptor.byType; + +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; +import java.util.function.Predicate; +import org.agrona.concurrent.status.AtomicCounter; +import org.agrona.concurrent.status.CountersManager; +import org.agrona.concurrent.status.CountersReader; + +public class PropertiesRegistry { + + public static final int PROPERTY_COUNTER_TYPE_ID = 2; + + private final CountersManager countersManager; + + private final ThreadLocal counterAllocatorHolder; + private final Map counters = 
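+ // Design note: each property is materialized as a counter of PROPERTY_COUNTER_TYPE_ID
+ // whose label carries the "name=value" pair; readers recover the value by splitting
+ // the label on '=', so the split-based parsing below assumes values contain no '='.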
new ConcurrentHashMap<>();
+
+ public PropertiesRegistry(CountersManager countersManager) {
+ this.countersManager = countersManager;
+ this.counterAllocatorHolder =
+ ThreadLocal.withInitial(() -> new CounterAllocator(this.countersManager));
+ }
+
+ public void put(String name, Object value) {
+ Objects.requireNonNull(name, "name");
+ Objects.requireNonNull(value, "value");
+ counters.compute(
+ name,
+ (k, counter) -> {
+ if (counter == null) {
+ final var counterAllocator = counterAllocatorHolder.get();
+ return counterAllocator.newCounter(
+ PROPERTY_COUNTER_TYPE_ID,
+ name + "=" + value,
+ keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("visibility", "private"));
+ } else {
+ countersManager.setCounterLabel(counter.id(), name + "=" + value);
+ return counter;
+ }
+ });
+ }
+
+ public static Byte getByteProperty(CountersReader countersReader, String name) {
+ return getProperty(countersReader, name, Byte::parseByte);
+ }
+
+ public static Short getShortProperty(CountersReader countersReader, String name) {
+ return getProperty(countersReader, name, Short::parseShort);
+ }
+
+ public static Integer getIntProperty(CountersReader countersReader, String name) {
+ return getProperty(countersReader, name, Integer::parseInt);
+ }
+
+ public static Long getLongProperty(CountersReader countersReader, String name) {
+ return getProperty(countersReader, name, Long::parseLong);
+ }
+
+ public static Double getDoubleProperty(CountersReader countersReader, String name) {
+ return getProperty(countersReader, name, Double::parseDouble);
+ }
+
+ public static <T extends Enum<T>> T getEnumProperty(
+ CountersReader countersReader, String name, Function<String, T> enumFunc) {
+ return getProperty(countersReader, name, enumFunc);
+ }
+
+ public static String getProperty(CountersReader countersReader, String name) {
+ return getProperty(countersReader, name, s -> s);
+ }
+
+ public static <T> T getProperty(
+ CountersReader countersReader, String name, Function<String, T> converter) {
+ final var counter = CounterDescriptor.findFirstCounter(countersReader, byPropertyName(name));
+ return counter != null ?
converter.apply(counter.label().split("=")[1]) : null;
+ }
+
+ public static Predicate<CounterDescriptor> byPropertyName(String name) {
+ return byType(PropertiesRegistry.PROPERTY_COUNTER_TYPE_ID)
+ .and(descriptor -> name.equals(descriptor.label().split("=")[0]));
+ }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/TpsAggregate.java b/metrics/src/main/java/io/scalecube/metrics/TpsAggregate.java
new file mode 100644
index 0000000..8d744a7
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/TpsAggregate.java
@@ -0,0 +1,35 @@
+package io.scalecube.metrics;
+
+import io.scalecube.metrics.MetricsRecorder.MetricsPublication;
+import org.agrona.DirectBuffer;
+import org.agrona.collections.MutableLong;
+
+class TpsAggregate {
+
+ private final DirectBuffer keyBuffer;
+ private final MetricsEncoder encoder;
+ private final MetricsPublication metricsPublication;
+
+ private final MutableLong counter = new MutableLong();
+
+ TpsAggregate(
+ DirectBuffer keyBuffer, MetricsEncoder encoder, MetricsPublication metricsPublication) {
+ this.keyBuffer = keyBuffer;
+ this.encoder = encoder;
+ this.metricsPublication = metricsPublication;
+ }
+
+ void update(long value) {
+ counter.addAndGet(value);
+ }
+
+ void publish(long timestamp) {
+ try {
+ final var value = counter.get();
+ final var length = encoder.encodeTps(timestamp, keyBuffer, value);
+ metricsPublication.publish(encoder.buffer(), 0, length);
+ } finally {
+ counter.set(0);
+ }
+ }
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/TpsMetric.java b/metrics/src/main/java/io/scalecube/metrics/TpsMetric.java
new file mode 100644
index 0000000..1e65507
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/TpsMetric.java
@@ -0,0 +1,10 @@
+package io.scalecube.metrics;
+
+import org.agrona.DirectBuffer;
+
+public interface TpsMetric {
+
+ DirectBuffer keyBuffer();
+
+ void record();
+}
diff --git a/metrics/src/main/java/io/scalecube/metrics/TpsRecorder.java b/metrics/src/main/java/io/scalecube/metrics/TpsRecorder.java
new file mode 100644
index 0000000..05c7465
--- /dev/null
+++ b/metrics/src/main/java/io/scalecube/metrics/TpsRecorder.java
@@ -0,0 +1,36 @@
+package io.scalecube.metrics;
+
+import org.agrona.DirectBuffer;
+import org.agrona.collections.MutableLong;
+
+class TpsRecorder implements TpsMetric {
+
+ private final DirectBuffer keyBuffer;
+
+ private volatile MutableLong current;
+ private volatile MutableLong swap;
+
+ TpsRecorder(DirectBuffer keyBuffer) {
+ this.keyBuffer = keyBuffer;
+ current = new MutableLong();
+ swap = new MutableLong();
+ }
+
+ @Override
+ public DirectBuffer keyBuffer() {
+ return keyBuffer;
+ }
+
+ @Override
+ public void record() {
+ current.increment();
+ }
+
+ void swapAndUpdate(TpsAggregate aggregate) {
+ final var counter = current;
+ current = swap;
+ swap = counter;
+ aggregate.update(counter.get());
+ counter.set(0);
+ }
+}
diff --git a/metrics/src/main/resources/metrics-schema.xml b/metrics/src/main/resources/metrics-schema.xml
new file mode 100644
index 0000000..73163eb
--- /dev/null
+++ b/metrics/src/main/resources/metrics-schema.xml
@@ -0,0 +1,75 @@
+<!-- 75-line XML message schema; the element markup was lost in extraction (only the numeric codes 1-6 of an enum survive), so the body is not reproduced here. -->
diff --git a/metrics/src/test/java/io/scalecube/metrics/ConcurrentCountersTest.java b/metrics/src/test/java/io/scalecube/metrics/ConcurrentCountersTest.java
new file mode 100644
index 0000000..3c7351a
--- /dev/null
+++ b/metrics/src/test/java/io/scalecube/metrics/ConcurrentCountersTest.java
@@ -0,0 +1,133 @@ +package io.scalecube.metrics; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertSame; + +import org.agrona.CloseHelper; +import org.agrona.concurrent.status.CountersManager; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +class ConcurrentCountersTest { + + private final CountersRegistry countersRegistry = CountersRegistry.create(); + private final CountersManager countersManager = countersRegistry.countersManager(); + private final ConcurrentCounters concurrentCounters = new ConcurrentCounters(countersManager); + + @AfterEach + void afterEach() { + CloseHelper.quietCloseAll(countersRegistry); + } + + @Test + void shouldCreateCounterWithName() { + var counter = concurrentCounters.counter("test-counter", null); + assertNotNull(counter, "Counter should not be null"); + + counter.increment(); + assertEquals(1, counter.get(), "Counter should increment correctly"); + } + + @Test + void shouldReturnSameCounterForSameNameAndType() { + var counter1 = concurrentCounters.counter("same-counter", null); + var counter2 = concurrentCounters.counter("same-counter", null); + + assertSame(counter1, counter2, "Same name and type should return same counter instance"); + + counter1.increment(); + assertEquals(1, counter2.get(), "Incrementing one should reflect on the other"); + } + + @Test + void shouldCreateDistinctCountersForDifferentNames() { + var counter1 = concurrentCounters.counter("counter-1", null); + var counter2 = concurrentCounters.counter("counter-2", null); + + assertNotSame(counter1, counter2, "Different names should create distinct counters"); + } + + @Test + void shouldIncludeKeyAttributes() { + var counter = + concurrentCounters.counter("key-counter", key -> key.tagsCount(1).intValue("k", 42)); + + // Retrieve it again with same key attributes + var sameCounter = + concurrentCounters.counter("key-counter", key -> key.tagsCount(1).intValue("k", 42)); + + assertSame(counter, sameCounter, "Same name and key attributes should return same counter"); + + // Changing the key attribute should create a new counter + var differentCounter = + concurrentCounters.counter("key-counter", key -> key.tagsCount(1).intValue("k", 43)); + assertNotSame(counter, differentCounter, "Different key attributes should create new counter"); + } + + @Test + void shouldWorkConcurrently() throws InterruptedException { + final var threads = 10; + final var iterations = 1000; + final var counter = concurrentCounters.counter("concurrent-counter", null); + + Thread[] workers = new Thread[threads]; + for (int i = 0; i < threads; i++) { + workers[i] = + new Thread( + () -> { + for (int j = 0; j < iterations; j++) { + counter.increment(); + } + }); + workers[i].start(); + } + + for (var t : workers) { + t.join(); + } + + assertEquals( + threads * iterations, + counter.get(), + "Counter should be incremented correctly concurrently"); + } + + @Test + void shouldCreateDistinctCountersForDifferentTypeIds() { + final var typeId1 = 1; + final var typeId2 = 2; + final var name = "typed-counter"; + + final var counter1 = concurrentCounters.counter(typeId1, name, null); + final var counter2 = concurrentCounters.counter(typeId2, name, null); + + assertNotSame( + counter1, counter2, "Same name but different typeId should create distinct counters"); + + // increment both and verify independent values + 
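+ // (the distinctness asserted here suggests the cache key includes typeId along with
+ // name and key attributes — inferred from observed behavior, not from
+ // ConcurrentCounters internals shown in this patch)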
counter1.increment(); + counter2.increment(); + counter2.increment(); + + assertEquals(1, counter1.get(), "Counter1 should have its own value"); + assertEquals(2, counter2.get(), "Counter2 should have its own value"); + } + + @Test + void shouldDistinguishByTypeIdAndKeyAttributes() { + int typeId = 10; + String name = "complex-counter"; + + var counter1 = + concurrentCounters.counter(typeId, name, key -> key.tagsCount(1).intValue("g", 100)); + var counter2 = + concurrentCounters.counter(typeId, name, key -> key.tagsCount(1).intValue("g", 200)); + + assertNotSame( + counter1, + counter2, + "Different key attributes with same typeId and name should create distinct counters"); + } +} diff --git a/metrics/src/test/java/io/scalecube/metrics/CounterAllocatorTest.java b/metrics/src/test/java/io/scalecube/metrics/CounterAllocatorTest.java new file mode 100644 index 0000000..b1104ca --- /dev/null +++ b/metrics/src/test/java/io/scalecube/metrics/CounterAllocatorTest.java @@ -0,0 +1,52 @@ +package io.scalecube.metrics; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import io.scalecube.metrics.CountersRegistry.Context; +import java.util.ArrayList; +import java.util.List; +import org.agrona.CloseHelper; +import org.agrona.concurrent.status.AtomicCounter; +import org.agrona.concurrent.status.CountersReader; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +public class CounterAllocatorTest { + + private static final int TYPE_ID = 100; + + private CountersRegistry countersRegistry; + + @AfterEach + void tearDown() { + CloseHelper.quietCloseAll(countersRegistry); + } + + @ValueSource(ints = {2 * 1024 * 1024, 4 * 1024 * 1024, 8 * 1024 * 1024, 16 * 1024 * 1024}) + @ParameterizedTest + void testAllocator(int countersValuesBufferLength) { + countersRegistry = + CountersRegistry.create( + new Context().countersValuesBufferLength(countersValuesBufferLength)); + final var countersManager = countersRegistry.countersManager(); + final var counterAllocator = new CounterAllocator(countersManager); + final var maxCounters = countersValuesBufferLength / CountersReader.COUNTER_LENGTH; + + for (int times = 0; times < 3; times++) { + List counters = new ArrayList<>(maxCounters); + for (int i = 0; i < maxCounters; i++) { + final var counter = counterAllocator.newCounter(TYPE_ID, "name-" + times + "-" + i, null); + counter.set(times * i); + counters.add(counter); + } + for (int i = 0; i < counters.size(); i++) { + final var counter = counters.get(i); + final var descriptor = CounterDescriptor.getCounter(countersManager, counter.id()); + assertEquals(TYPE_ID, descriptor.typeId()); + assertEquals((long) times * i, descriptor.value()); + } + CloseHelper.quietCloseAll(counters); + } + } +} diff --git a/metrics/src/test/java/io/scalecube/metrics/CounterDescriptorTest.java b/metrics/src/test/java/io/scalecube/metrics/CounterDescriptorTest.java new file mode 100644 index 0000000..84c36ac --- /dev/null +++ b/metrics/src/test/java/io/scalecube/metrics/CounterDescriptorTest.java @@ -0,0 +1,173 @@ +package io.scalecube.metrics; + +import static org.junit.jupiter.api.Assertions.*; + +import org.agrona.CloseHelper; +import org.agrona.concurrent.status.CountersManager; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class CounterDescriptorTest { + + private static final int TYPE_ID = 100; + private static final int ANOTHER_TYPE_ID = 200; + 
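+ // beforeEach below allocates one counter per lookup flavor — by name, by value,
+ // by type id, and one per tag type — so each finder test matches a single,
+ // unambiguous counter.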
private static final int VALUE = 100500; + + private final CountersRegistry countersRegistry = CountersRegistry.create(); + private final CountersManager countersManager = countersRegistry.countersManager(); + private final CounterAllocator counterAllocator = new CounterAllocator(countersManager); + + @BeforeEach + void beforeEach() { + counterAllocator.newCounter(TYPE_ID, "foo", null); + counterAllocator.newCounter(TYPE_ID, "bar", null); + counterAllocator.newCounter(TYPE_ID, "baz", null); + counterAllocator.newCounter(TYPE_ID, "by_name", null); + counterAllocator.newCounter(TYPE_ID, "by_value", null).set(VALUE); + counterAllocator.newCounter(ANOTHER_TYPE_ID, "another_type_id", null); + counterAllocator.newCounter( + TYPE_ID, "by_byte_tag", k -> k.tagsCount(1).byteValue("byte_tag", (byte) 1)); + counterAllocator.newCounter( + TYPE_ID, "by_short_tag", k -> k.tagsCount(1).shortValue("short_tag", (short) 1)); + counterAllocator.newCounter(TYPE_ID, "by_int_tag", k -> k.tagsCount(1).intValue("int_tag", 1)); + counterAllocator.newCounter( + TYPE_ID, "by_long_tag", k -> k.tagsCount(1).longValue("long_tag", 1L)); + counterAllocator.newCounter( + TYPE_ID, "by_double_tag", k -> k.tagsCount(1).doubleValue("double_tag", 1.0)); + counterAllocator.newCounter( + TYPE_ID, "by_string_tag", k -> k.tagsCount(1).stringValue("string_tag", "42")); + } + + @AfterEach + void afterEach() { + CloseHelper.quietCloseAll(countersRegistry); + } + + @Test + void findFirstCounter() { + final var firstCounter = + CounterDescriptor.findFirstCounter(countersManager, descriptor -> true); + assertNotNull(firstCounter); + assertEquals(TYPE_ID, firstCounter.typeId()); + assertEquals("foo", firstCounter.label()); + } + + @Test + void findLastCounter() { + final var lastCounter = CounterDescriptor.findLastCounter(countersManager, descriptor -> true); + assertNotNull(lastCounter); + assertEquals(TYPE_ID, lastCounter.typeId()); + assertEquals("by_string_tag", lastCounter.label()); + } + + @Test + void findAllCounters() { + final var allCounters = + CounterDescriptor.findAllCounters( + countersManager, descriptor -> descriptor.label().startsWith("b")); + assertEquals(10, allCounters.size()); + } + + @Test + void byName() { + final var counter = + CounterDescriptor.findFirstCounter(countersManager, CounterDescriptor.byName("by_name")); + assertNotNull(counter); + assertEquals("by_name", counter.label()); + } + + @Test + void byValue() { + final var counter = + CounterDescriptor.findFirstCounter(countersManager, CounterDescriptor.byValue(VALUE)); + assertNotNull(counter); + assertEquals("by_value", counter.label()); + assertEquals(VALUE, counter.value()); + } + + @Test + void byType() { + final var counter = + CounterDescriptor.findFirstCounter( + countersManager, CounterDescriptor.byType(ANOTHER_TYPE_ID)); + assertNotNull(counter); + assertEquals(ANOTHER_TYPE_ID, counter.typeId()); + assertEquals("another_type_id", counter.label()); + } + + @Test + void byByteTag() { + final var counter = + CounterDescriptor.findFirstCounter( + countersManager, CounterDescriptor.byTag("byte_tag", (byte) 1)); + assertNotNull(counter); + assertEquals("by_byte_tag", counter.label()); + } + + @Test + void byShortTag() { + final var counter = + CounterDescriptor.findFirstCounter( + countersManager, CounterDescriptor.byTag("short_tag", (short) 1)); + assertNotNull(counter); + assertEquals("by_short_tag", counter.label()); + } + + @Test + void byIntTag() { + final var counter = + CounterDescriptor.findFirstCounter(countersManager, 
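+ // byTag is overloaded per value type — the literal's static type ((byte) 1,
+ // (short) 1, 1, 1L, 1.0, "42") selects the matching tag encoding.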
CounterDescriptor.byTag("int_tag", 1)); + assertNotNull(counter); + assertEquals("by_int_tag", counter.label()); + } + + @Test + void byLongTag() { + final var counter = + CounterDescriptor.findFirstCounter( + countersManager, CounterDescriptor.byTag("long_tag", 1L)); + assertNotNull(counter); + assertEquals("by_long_tag", counter.label()); + } + + @Test + void byDoubleTag() { + final var counter = + CounterDescriptor.findFirstCounter( + countersManager, CounterDescriptor.byTag("double_tag", 1.0)); + assertNotNull(counter); + assertEquals("by_double_tag", counter.label()); + } + + @Test + void byStringTag() { + final var counter = + CounterDescriptor.findFirstCounter( + countersManager, CounterDescriptor.byTag("string_tag", "42")); + assertNotNull(counter); + assertEquals("by_string_tag", counter.label()); + } + + @Test + void byEnumTag() { + // Allocate a counter with a byte representing enum ordinal + counterAllocator.newCounter( + TYPE_ID, + "by_enum_tag", + k -> k.tagsCount(1).byteValue("enum_tag", (byte) SampleEnum.B.ordinal())); + + final var counter = + CounterDescriptor.findFirstCounter( + countersManager, + CounterDescriptor.byTag("enum_tag", SampleEnum.B, b -> SampleEnum.values()[b])); + assertNotNull(counter); + assertEquals("by_enum_tag", counter.label()); + } + + enum SampleEnum { + A, + B, + C + } +} diff --git a/metrics/src/test/java/io/scalecube/metrics/CountersReaderAgentTest.java b/metrics/src/test/java/io/scalecube/metrics/CountersReaderAgentTest.java new file mode 100644 index 0000000..cb2f12f --- /dev/null +++ b/metrics/src/test/java/io/scalecube/metrics/CountersReaderAgentTest.java @@ -0,0 +1,110 @@ +package io.scalecube.metrics; + +import static io.scalecube.metrics.CountersRegistry.Context.COUNTERS_FILE; +import static io.scalecube.metrics.CountersRegistry.Context.DEFAULT_COUNTERS_DIR_NAME; +import static org.agrona.IoUtil.delete; +import static org.agrona.IoUtil.mapExistingFile; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +import io.scalecube.metrics.CountersReaderAgent.State; +import io.scalecube.metrics.CountersRegistry.Context; +import io.scalecube.metrics.CountersRegistry.LayoutDescriptor; +import java.io.File; +import java.time.Duration; +import org.agrona.BufferUtil; +import org.agrona.concurrent.CachedEpochClock; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class CountersReaderAgentTest { + + private static final Duration READ_INTERVAL = Duration.ofSeconds(3); + private static final long OLD_START_TIMESTAMP = 10042; + private static final long OLD_PID = 100500; + private static final int OLD_BUFFER_LENGTH = 8 * 1024 * 1024; + + private final CachedEpochClock epochClock = new CachedEpochClock(); + private final CountersHandler countersHandler = mock(CountersHandler.class); + private CountersReaderAgent agent; + + @BeforeEach + void beforeEach() { + delete(new File(DEFAULT_COUNTERS_DIR_NAME), true); + agent = + new CountersReaderAgent( + "CountersReaderAgent", + new File(DEFAULT_COUNTERS_DIR_NAME), + true, + epochClock, + READ_INTERVAL, + countersHandler); + agent.onStart(); + } + + @AfterEach + void afterEach() { + if (agent != null) { + agent.onClose(); + } + } + + @Test + void testWorkWithCounters() { + try (final var countersRegistry = CountersRegistry.create()) { + 
agent.doWork(); + assertEquals(State.RUNNING, agent.state()); + epochClock.advance(READ_INTERVAL.toMillis() + 1); + agent.doWork(); + assertEquals(State.RUNNING, agent.state()); + verify(countersHandler).accept(anyLong(), anyList()); + } + } + + @Test + void testStartWithoutCounters() { + agent.doWork(); + assertEquals(State.CLEANUP, agent.state()); + } + + @Test + void testWorkWhenCountersShutdown() { + try (final var countersRegistry = + CountersRegistry.create(new Context().dirDeleteOnShutdown(true))) { + agent.doWork(); + assertEquals(State.RUNNING, agent.state()); + } + epochClock.advance(READ_INTERVAL.toMillis() + 1); + agent.doWork(); + assertEquals(State.CLEANUP, agent.state()); + } + + @Test + void testWorkWhenCountersRestarted() { + try (final var countersRegistry = CountersRegistry.create(new Context())) { + agent.doWork(); + assertEquals(State.RUNNING, agent.state()); + } + try (final var countersRegistry = CountersRegistry.create(new Context())) { + updateCountersHeader(OLD_START_TIMESTAMP, OLD_PID, OLD_BUFFER_LENGTH); + epochClock.advance(READ_INTERVAL.toMillis() + 1); + agent.doWork(); + assertEquals(State.CLEANUP, agent.state()); + } + } + + private static void updateCountersHeader(long startTimestamp, long pid, int bufferLength) { + final var file = new File(DEFAULT_COUNTERS_DIR_NAME, COUNTERS_FILE); + final var mappedByteBuffer = mapExistingFile(file, COUNTERS_FILE); + try { + final var headerBuffer = LayoutDescriptor.createHeaderBuffer(mappedByteBuffer); + LayoutDescriptor.fillHeaderBuffer(headerBuffer, startTimestamp, pid, bufferLength); + } finally { + BufferUtil.free(mappedByteBuffer); + } + } +} diff --git a/metrics/src/test/java/io/scalecube/metrics/CountersRegistryTest.java b/metrics/src/test/java/io/scalecube/metrics/CountersRegistryTest.java new file mode 100644 index 0000000..660ff64 --- /dev/null +++ b/metrics/src/test/java/io/scalecube/metrics/CountersRegistryTest.java @@ -0,0 +1,301 @@ +package io.scalecube.metrics; + +import static io.scalecube.metrics.CountersRegistry.Context.COUNTERS_DIR_NAME_PROP_NAME; +import static io.scalecube.metrics.CountersRegistry.Context.COUNTERS_VALUES_BUFFER_LENGTH_PROP_NAME; +import static io.scalecube.metrics.CountersRegistry.Context.DIR_DELETE_ON_SHUTDOWN_PROP_NAME; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import io.scalecube.metrics.CountersRegistry.LayoutDescriptor; +import java.nio.charset.StandardCharsets; +import java.nio.file.Paths; +import java.util.Properties; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import org.agrona.CloseHelper; +import org.agrona.SystemUtil; +import org.agrona.collections.MutableInteger; +import org.agrona.concurrent.UnsafeBuffer; +import org.agrona.concurrent.status.CountersManager; +import org.agrona.concurrent.status.CountersReader; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class CountersRegistryTest { + + private static final int TYPE_ID = 100; + + private static final AtomicInteger INT_COUNTER = new AtomicInteger(1); + private static final AtomicLong LONG_COUNTER = new AtomicLong(1000L); + private static final AtomicInteger PATH_COUNTER = new AtomicInteger(1); + private static final Random RANDOM = new Random(12345L); + + 
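+ // the fixed seed keeps nextBoolean() — and thus the dirDeleteOnShutdown value in
+ // testPopulateFromProperties — reproducible across runs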
private final KeyCodec keyCodec = new KeyCodec(); + private CountersRegistry countersRegistry; + private CountersManager countersManager; + + @BeforeEach + void beforeEach() { + countersRegistry = CountersRegistry.create(); + countersManager = countersRegistry.countersManager(); + } + + @AfterEach + void afterEach() { + CloseHelper.quietClose(countersRegistry); + } + + @Test + void testPopulateFromProperties() { + // given + final var countersDirectoryName = nextPath(); + final var countersValuesBufferLength = nextLong(); + final var dirDeleteOnShutdown = nextBoolean(); + + Properties props = new Properties(); + props.setProperty(COUNTERS_DIR_NAME_PROP_NAME, countersDirectoryName); + props.setProperty( + COUNTERS_VALUES_BUFFER_LENGTH_PROP_NAME, String.valueOf(countersValuesBufferLength)); + props.setProperty(DIR_DELETE_ON_SHUTDOWN_PROP_NAME, String.valueOf(dirDeleteOnShutdown)); + + // when + CountersRegistry.Context context = new CountersRegistry.Context(props); + + // then + assertEquals(countersDirectoryName, context.countersDirectoryName()); + assertEquals(countersValuesBufferLength, context.countersValuesBufferLength()); + assertEquals(dirDeleteOnShutdown, context.dirDeleteOnShutdown()); + } + + @Test + void testLabel() { + final String label = "ABC"; + final var counter = countersManager.newCounter(label); + assertNotNull(counter); + assertEquals(label, countersManager.getCounterLabel(counter.id())); + } + + @Test + void testBufferLengthTooSmall() { + var context = new CountersRegistry.Context().countersValuesBufferLength(1024); + assertThrows(IllegalArgumentException.class, () -> CountersRegistry.create(context)); + } + + @Test + void testBufferLengthNotPowerOfTwo() { + var context = new CountersRegistry.Context().countersValuesBufferLength(12345); + assertThrows(IllegalArgumentException.class, () -> CountersRegistry.create(context)); + } + + @Test + void testCounterValueUpdates() { + final var counter = countersManager.newCounter("update_counter", TYPE_ID); + for (int i = 0; i < 100; i++) { + counter.set(i); + assertEquals(i, countersManager.getCounterValue(counter.id())); + } + } + + @Test + void testCounterWithKeyFlyweight() { + final var counter = + countersManager.newCounter( + "fly", + TYPE_ID, + buffer -> + new KeyFlyweight() + .wrap(buffer, 0) + .tagsCount(3) + .intValue("tag1", 1) + .intValue("tag2", 2) + .intValue("tag3", 3)); + counter.set(101); + + final var descriptor = CounterDescriptor.getCounter(countersManager, counter.id()); + + assertEquals(TYPE_ID, descriptor.typeId()); + assertEquals(101, descriptor.value()); + + final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0); + assertEquals(1, key.intValue("tag1")); + assertEquals(2, key.intValue("tag2")); + assertEquals(3, key.intValue("tag3")); + } + + @Test + void testCounterWithEmptyKeyFlyweight() { + final var counter = countersManager.newCounter("fly", TYPE_ID); + counter.set(101); + + final var descriptor = CounterDescriptor.getCounter(countersManager, counter.id()); + + assertEquals(TYPE_ID, descriptor.typeId()); + assertEquals(101, descriptor.value()); + + final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0); + assertNotNull(key, "key"); + assertEquals(0, key.tags().size(), "tags.size"); + } + + @Test + void testCounterWithoutKeyFlyweight() { + final var sessionCounter = countersManager.newCounter("session_counter"); + final var counterDescriptor = + CounterDescriptor.getCounter(countersManager, sessionCounter.id()); + final var key = keyCodec.decodeKey(counterDescriptor.keyBuffer(), 0); + 
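+ // a counter allocated without any key flyweight still decodes to an empty Key
+ // (zero tags) rather than null, as the assertions below verify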
assertNotNull(key, "key"); + assertEquals(0, key.tags().size(), "tags.size"); + } + + @Test + void testHeaderLayoutParsing() { + final var context = countersRegistry.context(); + final var mappedBuffer = context.countersMetaDataBuffer().byteBuffer(); + final var headerBuffer = LayoutDescriptor.createHeaderBuffer(mappedBuffer); + + assertEquals( + context.countersValuesBufferLength(), + LayoutDescriptor.countersValuesBufferLength(headerBuffer)); + } + + @Test + void testCounterAllocation() { + final var countersValuesBufferLength = countersRegistry.context().countersValuesBufferLength(); + final var capacity = countersValuesBufferLength / CountersReader.COUNTER_LENGTH; + + for (int i = 0; i < capacity; i++) { + final var counter = countersManager.newCounter("counter_" + i, TYPE_ID); + counter.set(i); + } + + final var capCounter = new MutableInteger(); + countersManager.forEach( + (counterId, typeId, keyBuffer, label) -> { + assertTrue(counterId >= 0, "counterId"); + assertTrue(typeId >= 0, "typeId"); + final var counterValue = countersManager.getCounterValue(counterId); + assertEquals(capCounter.value, counterValue, "counterValue"); + assertEquals("counter_" + capCounter.value, label, "label"); + capCounter.increment(); + }); + + assertEquals(capacity, capCounter.value); + } + + @Test + void testCapacityLimit() { + final var countersValuesBufferLength = countersRegistry.context().countersValuesBufferLength(); + final var capacity = countersValuesBufferLength / CountersReader.COUNTER_LENGTH; + + for (int i = 0; i < capacity; i++) { + countersManager.newCounter("counter_" + i); + } + + // Allocate one more + assertThrows(IllegalStateException.class, () -> countersManager.newCounter("foo")); + } + + @Test + void testCounterMetadata() { + final String keyString = "This is key"; + final var keyBuffer = new UnsafeBuffer(keyString.getBytes(StandardCharsets.US_ASCII)); + + final String labelString = "This is label"; + final var labelBuffer = new UnsafeBuffer(labelString.getBytes(StandardCharsets.US_ASCII)); + + final var typeId = 100; + final var value = 42; + final var counter = + countersManager.newCounter( + typeId, keyBuffer, 0, keyBuffer.capacity(), labelBuffer, 0, labelBuffer.capacity()); + counter.set(value); + + final var counterDescriptor = CounterDescriptor.getCounter(countersManager, counter.id()); + + assertEquals( + keyString, + counterDescriptor.keyBuffer().getStringWithoutLengthAscii(0, keyBuffer.capacity()), + "keyString"); + assertEquals(labelString, counterDescriptor.label(), "labelString"); + assertEquals(counter.id(), counterDescriptor.counterId(), "counterId"); + assertEquals(typeId, counterDescriptor.typeId(), "typeId"); + assertEquals(value, counterDescriptor.value(), "value"); + } + + @Test + void testCounterAllocatorWithoutKey() { + final var counterAllocator = new CounterAllocator(countersRegistry.countersManager()); + final var fooCounter = counterAllocator.newCounter(TYPE_ID, "foo_counter", null); + fooCounter.set(100500); + final var counterDescriptor = CounterDescriptor.getCounter(countersManager, fooCounter.id()); + assertNotNull(counterDescriptor); + assertEquals("foo_counter", counterDescriptor.label(), "labelString"); + assertEquals(TYPE_ID, counterDescriptor.typeId(), "typeId"); + assertEquals(100500, counterDescriptor.value(), "value"); + + final var key = keyCodec.decodeKey(counterDescriptor.keyBuffer(), 0); + assertEquals(0, key.tags().size(), "tags.size"); + } + + @Test + void testCounterAllocatorWithEmptyKey() { + final var counterAllocator = new 
CounterAllocator(countersRegistry.countersManager()); + final var fooCounter = counterAllocator.newCounter(TYPE_ID, "foo_counter", keyFlyweight -> {}); + fooCounter.set(100500); + final var counterDescriptor = CounterDescriptor.getCounter(countersManager, fooCounter.id()); + assertNotNull(counterDescriptor); + assertEquals("foo_counter", counterDescriptor.label(), "labelString"); + assertEquals(TYPE_ID, counterDescriptor.typeId(), "typeId"); + assertEquals(100500, counterDescriptor.value(), "value"); + + final var key = keyCodec.decodeKey(counterDescriptor.keyBuffer(), 0); + assertEquals(0, key.tags().size(), "tags.size"); + } + + @Test + void testCounterAllocatorWithKey() { + final var counterAllocator = new CounterAllocator(countersRegistry.countersManager()); + final var fooCounter = + counterAllocator.newCounter( + TYPE_ID, + "fly", + keyFlyweight -> + keyFlyweight + .tagsCount(3) + .intValue("tag1", 1) + .intValue("tag2", 2) + .intValue("tag3", 3)); + fooCounter.set(101); + + final var descriptor = CounterDescriptor.getCounter(countersManager, fooCounter.id()); + + assertEquals(TYPE_ID, descriptor.typeId()); + assertEquals(101, descriptor.value()); + + final var key = keyCodec.decodeKey(descriptor.keyBuffer(), 0); + assertEquals(1, key.intValue("tag1")); + assertEquals(2, key.intValue("tag2")); + assertEquals(3, key.intValue("tag3")); + } + + private static int nextInt() { + return INT_COUNTER.getAndIncrement(); + } + + private static long nextLong() { + return LONG_COUNTER.getAndAdd(100L); + } + + private static String nextPath() { + return Paths.get(SystemUtil.tmpDirName() + "test-path-" + PATH_COUNTER.getAndIncrement()) + .toString(); + } + + private static boolean nextBoolean() { + return RANDOM.nextBoolean(); + } +} diff --git a/metrics/src/test/java/io/scalecube/metrics/HistogramMetricTest.java b/metrics/src/test/java/io/scalecube/metrics/HistogramMetricTest.java new file mode 100644 index 0000000..1d4c525 --- /dev/null +++ b/metrics/src/test/java/io/scalecube/metrics/HistogramMetricTest.java @@ -0,0 +1,331 @@ +package io.scalecube.metrics; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import io.scalecube.metrics.MetricsTransmitter.Context; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import org.HdrHistogram.Histogram; +import org.agrona.CloseHelper; +import org.agrona.DirectBuffer; +import org.agrona.concurrent.CachedEpochClock; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; + +class HistogramMetricTest { + + private static final long HIGHEST_TRACKABLE_VALUE = TimeUnit.SECONDS.toNanos(1); + private static final double CONVERSION_FACTOR = 1e-3; + private static final long RESOLUTION = TimeUnit.SECONDS.toMillis(1); + + private final CachedEpochClock epochClock = new CachedEpochClock(); + private final MetricsHandlerImpl metricsHandler = new MetricsHandlerImpl(); + private MetricsRecorder metricsRecorder; + private MetricsTransmitter metricsTransmitter; + private MetricsReader metricsReader; + + @BeforeEach + void beforeEach() { + metricsRecorder = + MetricsRecorder.launch( + new MetricsRecorder.Context().useAgentInvoker(true).epochClock(epochClock)); + metricsRecorder.agentInvoker().invoke(); // init delays + + metricsTransmitter = + 
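+ // recorder and transmitter both run on agent invokers driven by hand, against a
+ // CachedEpochClock, so each test advances time and pumps the pipeline
+ // deterministically: record -> recorder invoke -> transmitter invoke -> read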
MetricsTransmitter.launch(new Context().useAgentInvoker(true).epochClock(epochClock)); + metricsTransmitter.agentInvoker().invoke(); // kick-off + + metricsReader = new MetricsReader(metricsTransmitter.context().broadcastBuffer()); + } + + @AfterEach + void afterEach() { + CloseHelper.quietCloseAll(metricsReader, metricsTransmitter, metricsRecorder); + } + + @Nested + class Single { + + @Test + void testRecord() { + final var name = "foo"; + final var histogram = + metricsRecorder.newHistogram( + keyFlyweight -> + keyFlyweight + .tagsCount(3) + .stringValue("name", name) + .stringValue("kind", "k") + .stringValue("type", "t"), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION); + metricsRecorder.agentInvoker().invoke(); // on-board + + histogram.record(100); + histogram.record(200); + histogram.record(300); + histogram.record(200); + + advanceClock(RESOLUTION); + metricsRecorder.agentInvoker().invoke(); + metricsTransmitter.agentInvoker().invoke(); + + metricsReader.read(metricsHandler.reset()); + metricsHandler.assertHasRead(); + metricsHandler.assertName(name); + assertEquals(4, metricsHandler.accumulated.getTotalCount(), "accumulated.totalCount"); + assertEquals(4, metricsHandler.distinct.getTotalCount(), "distinct.totalCount"); + metricsHandler.assertKey( + key -> { + Assertions.assertEquals(name, key.stringValue("name"), "name"); + Assertions.assertEquals("k", key.stringValue("kind"), "kind"); + Assertions.assertEquals("t", key.stringValue("type"), "type"); + }); + } + + @Test + void testRepeatedSemantic() { + final var name = "foo"; + final var histogram = + metricsRecorder.newHistogram( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION); + metricsRecorder.agentInvoker().invoke(); // on-board + + for (int i = 1; i <= 4; i++) { + histogram.record(100); + histogram.record(200); + histogram.record(300); + histogram.record(200); + + advanceClock(RESOLUTION); + metricsRecorder.agentInvoker().invoke(); + metricsTransmitter.agentInvoker().invoke(); + + metricsReader.read(metricsHandler.reset()); + metricsHandler.assertHasRead(); + metricsHandler.assertName(name); + assertEquals(i * 4, metricsHandler.accumulated.getTotalCount(), "accumulated.totalCount"); + assertEquals(4, metricsHandler.distinct.getTotalCount(), "distinct.totalCount"); + } + } + } + + @Nested + class Aggregation { + + @Test + void testRecord() { + final var name = "foo"; + final var foo1 = + metricsRecorder.newHistogram( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION); + final var foo2 = + metricsRecorder.newHistogram( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION); + metricsRecorder.agentInvoker().invoke(); // on-board + metricsRecorder.agentInvoker().invoke(); // on-board + + // foo1 + + foo1.record(100); + foo1.record(200); + foo1.record(300); + foo1.record(200); + + // foo2 + + foo2.record(100); + foo2.record(200); + foo2.record(300); + foo2.record(200); + + // publish + + advanceClock(RESOLUTION); + metricsRecorder.agentInvoker().invoke(); + metricsTransmitter.agentInvoker().invoke(); + + metricsReader.read(metricsHandler.reset()); + metricsHandler.assertHasRead(); + metricsHandler.assertName(name); + assertEquals(8, metricsHandler.accumulated.getTotalCount(), "accumulated.totalCount"); + assertEquals(8, metricsHandler.distinct.getTotalCount(), 
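+ // "accumulated" spans the recorder's lifetime while "distinct" covers only the last
+ // resolution window — compare the repeated-semantic tests, where accumulated grows
+ // each iteration but distinct stays constant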
"distinct.totalCount"); + } + + @Test + void testRecordWithTags() { + final var name = "foo"; + final var foo1 = + metricsRecorder.newHistogram( + keyFlyweight -> + keyFlyweight + .tagsCount(3) + .stringValue("name", name) + .stringValue("kind", "k") + .stringValue("type", "t"), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION); + final var foo2 = + metricsRecorder.newHistogram( + keyFlyweight -> + keyFlyweight + .tagsCount(3) + .stringValue("name", name) + .stringValue("kind", "k") + .stringValue("type", "t"), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION); + metricsRecorder.agentInvoker().invoke(); // on-board + metricsRecorder.agentInvoker().invoke(); // on-board + + // foo1 + + foo1.record(100); + foo1.record(200); + foo1.record(300); + foo1.record(200); + + // foo2 + + foo2.record(100); + foo2.record(200); + foo2.record(300); + foo2.record(200); + + // publish + + advanceClock(RESOLUTION); + metricsRecorder.agentInvoker().invoke(); + metricsTransmitter.agentInvoker().invoke(); + + metricsReader.read(metricsHandler.reset()); + metricsHandler.assertHasRead(); + metricsHandler.assertName(name); + assertEquals(8, metricsHandler.accumulated.getTotalCount(), "accumulated.totalCount"); + assertEquals(8, metricsHandler.distinct.getTotalCount(), "distinct.totalCount"); + metricsHandler.assertKey( + key -> { + Assertions.assertEquals(name, key.stringValue("name"), "name"); + Assertions.assertEquals("k", key.stringValue("kind"), "kind"); + Assertions.assertEquals("t", key.stringValue("type"), "type"); + }); + } + + @Test + void testRepeatedSemantic() { + final var name = "foo"; + final var foo1 = + metricsRecorder.newHistogram( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION); + final var foo2 = + metricsRecorder.newHistogram( + keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name), + HIGHEST_TRACKABLE_VALUE, + CONVERSION_FACTOR, + RESOLUTION); + metricsRecorder.agentInvoker().invoke(); // on-board + metricsRecorder.agentInvoker().invoke(); // on-board + + for (int i = 1; i <= 4; i++) { + // foo1 + + foo1.record(100); + foo1.record(200); + foo1.record(300); + foo1.record(200); + + // foo2 + + foo2.record(100); + foo2.record(200); + foo2.record(300); + foo2.record(200); + + // publish + + advanceClock(RESOLUTION); + metricsRecorder.agentInvoker().invoke(); + metricsTransmitter.agentInvoker().invoke(); + + metricsReader.read(metricsHandler.reset()); + metricsHandler.assertHasRead(); + metricsHandler.assertName(name); + assertEquals(i * 8, metricsHandler.accumulated.getTotalCount(), "accumulated.totalCount"); + assertEquals(8, metricsHandler.distinct.getTotalCount(), "distinct.totalCount"); + } + } + } + + private void advanceClock(long step) { + epochClock.advance(step); + } + + private static class MetricsHandlerImpl implements MetricsHandler { + + long timestamp; + Key key; + Histogram accumulated; + Histogram distinct; + long highestTrackableValue; + double conversionFactor; + + @Override + public void onHistogram( + long timestamp, + DirectBuffer keyBuffer, + int keyOffset, + int keyLength, + Histogram accumulated, + Histogram distinct, + long highestTrackableValue, + double conversionFactor) { + this.timestamp = timestamp; + this.key = new KeyCodec().decodeKey(keyBuffer, keyOffset); + this.accumulated = accumulated; + this.distinct = distinct; + this.highestTrackableValue = highestTrackableValue; + this.conversionFactor = conversionFactor; + } + + MetricsHandlerImpl 
reset() {
+ timestamp = 0;
+ key = null;
+ accumulated = null;
+ distinct = null;
+ return this;
+ }
+
+ void assertHasRead() {
+ assertTrue(timestamp > 0);
+ }
+
+ void assertName(String name) {
+ assertNotNull(key, "key");
+ assertEquals(name, key.stringValue("name"), "name");
+ }
+
+ void assertKey(Consumer<Key> consumer) {
+ consumer.accept(key);
+ }
+ }
+}
diff --git a/metrics/src/test/java/io/scalecube/metrics/JvmMetricsAgentTest.java b/metrics/src/test/java/io/scalecube/metrics/JvmMetricsAgentTest.java
new file mode 100644
index 0000000..18164c2
--- /dev/null
+++ b/metrics/src/test/java/io/scalecube/metrics/JvmMetricsAgentTest.java
@@ -0,0 +1,46 @@
+package io.scalecube.metrics;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.agrona.concurrent.CachedEpochClock;
+import org.junit.jupiter.api.Test;
+
+public class JvmMetricsAgentTest {
+
+ public JvmMetricsAgentTest() {}
+
+ @Test
+ public void testAgent() {
+ final var clock = new CachedEpochClock();
+
+ final var reg = CountersRegistry.create();
+
+ JvmMetricsAgent cut =
+ new JvmMetricsAgent(new CounterAllocator(reg.countersManager()), clock, 1000L, true);
+ cut.onStart();
+ assertEquals(JvmMetricsAgent.State.INIT, cut.state());
+ int ret = cut.doWork();
+ assertEquals(JvmMetricsAgent.State.RUNNING, cut.state());
+ assertEquals(1, ret);
+ ret = cut.doWork();
+ assertEquals(JvmMetricsAgent.State.RUNNING, cut.state());
+ assertEquals(0, ret);
+ ret = cut.doWork();
+ assertEquals(JvmMetricsAgent.State.RUNNING, cut.state());
+ assertEquals(0, ret);
+
+ clock.advance(1001);
+ ret = cut.doWork();
+ assertEquals(1, ret);
+
+ reg.countersManager()
+ .forEach(
+ (long value, int counterId, String label) -> {
+ final var descriptor = CounterDescriptor.getCounter(reg.countersManager(), counterId);
+ final var key = new KeyCodec().decodeKey(descriptor.keyBuffer(), 0);
+ System.out.println(
+ String.format(
+ "label %s, id:%d, tags %s, value: %d", label, counterId, key.tags(), value));
+ });
+ }
+}
diff --git a/metrics/src/test/java/io/scalecube/metrics/KeyEqualityTest.java b/metrics/src/test/java/io/scalecube/metrics/KeyEqualityTest.java
new file mode 100644
index 0000000..e188e9f
--- /dev/null
+++ b/metrics/src/test/java/io/scalecube/metrics/KeyEqualityTest.java
@@ -0,0 +1,39 @@
+package io.scalecube.metrics;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.junit.jupiter.api.Test;
+
+class KeyEqualityTest {
+
+ @Test
+ void testEqualityWithSameTags() {
+ Map<String, Object> tags1 = new HashMap<>();
+ tags1.put("id", 123);
+ tags1.put("name", "alpha");
+
+ Map<String, Object> tags2 = new HashMap<>();
+ tags2.put("id", 123);
+ tags2.put("name", "alpha");
+
+ Key key1 = new Key(tags1);
+ Key key2 = new Key(tags2);
+
+ assertEquals(key1, key2, "Keys with same tag values should be equal");
+ assertEquals(key1.hashCode(), key2.hashCode(), "Equal keys must have same hashCode");
+ }
+
+ @Test
+ void testInequalityWithDifferentTags() {
+ Map<String, Object> tags1 = Map.of("id", 123, "name", "alpha");
+ Map<String, Object> tags2 = Map.of("id", 123, "name", "beta");
+
+ Key key1 = new Key(tags1);
+ Key key2 = new Key(tags2);
+
+ assertNotEquals(key1, key2, "Keys with different tags should not be equal");
+ }
+}
diff --git a/metrics/src/test/java/io/scalecube/metrics/KeyFlyweightTest.java b/metrics/src/test/java/io/scalecube/metrics/KeyFlyweightTest.java
new file mode 100644
index 0000000..8a3fd0d
--- /dev/null
+++
b/metrics/src/test/java/io/scalecube/metrics/KeyFlyweightTest.java @@ -0,0 +1,232 @@ +package io.scalecube.metrics; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.util.ArrayList; +import java.util.Map; +import org.agrona.ExpandableArrayBuffer; +import org.agrona.concurrent.UnsafeBuffer; +import org.junit.jupiter.api.Test; + +class KeyFlyweightTest { + + private final ExpandableArrayBuffer keyBuffer = new ExpandableArrayBuffer(); + private final KeyFlyweight keyFlyweight = new KeyFlyweight(); + private final KeyCodec keyCodec = new KeyCodec(); + + @Test + void testEncodeTags() { + keyFlyweight + .wrap(keyBuffer, 0) + .tagsCount(4) + .intValue("tag1", 100) + .longValue("tag2", 100L) + .doubleValue("tag3", 100.0) + .stringValue("tag4", "hello"); + + final var key = keyCodec.decodeKey(keyBuffer, 0); + final var tags = key.tags(); + assertEquals(4, tags.size(), "tags.size"); + assertEquals(100, tags.get("tag1")); + assertEquals(100L, tags.get("tag2")); + assertEquals(100.0, tags.get("tag3")); + assertEquals("hello", tags.get("tag4")); + } + + @Test + void testEncodeAllSupportedTypes() { + keyFlyweight + .wrap(keyBuffer, 0) + .tagsCount(6) + .longValue("tag1", 100L) + .doubleValue("tag2", 100.500) + .stringValue("tag3", "hello") + .longValue("tag4", 100L) + .doubleValue("tag5", 100.500) + .stringValue("tag6", "hello"); + + final var key = keyCodec.decodeKey(keyBuffer, 0); + final var tags = key.tags(); + assertEquals(6, tags.size(), "tags.size"); + assertEquals(100L, tags.get("tag1")); + assertEquals(100.500, tags.get("tag2")); + assertEquals("hello", tags.get("tag3")); + assertEquals(100L, tags.get("tag4")); + assertEquals(100.500, tags.get("tag5")); + assertEquals("hello", tags.get("tag6")); + } + + @Test + void testEncodeOnlyByteTags() { + keyFlyweight + .wrap(keyBuffer, 0) + .tagsCount(3) + .byteValue("tag1", (byte) 1) + .byteValue("tag2", (byte) 2) + .byteValue("tag3", (byte) 3); + + final var key = keyCodec.decodeKey(keyBuffer, 0); + final var tags = key.tags(); + assertEquals(3, tags.size(), "tags.size"); + assertEquals((byte) 1, key.byteValue("tag1")); + assertEquals((byte) 2, key.byteValue("tag2")); + assertEquals((byte) 3, key.byteValue("tag3")); + } + + @Test + void testEncodeOnlyShortTags() { + keyFlyweight + .wrap(keyBuffer, 0) + .tagsCount(3) + .shortValue("tag1", (short) 1) + .shortValue("tag2", (short) 2) + .shortValue("tag3", (short) 3); + + final var key = keyCodec.decodeKey(keyBuffer, 0); + final var tags = key.tags(); + assertEquals(3, tags.size(), "tags.size"); + assertEquals((short) 1, key.shortValue("tag1")); + assertEquals((short) 2, key.shortValue("tag2")); + assertEquals((short) 3, key.shortValue("tag3")); + } + + @Test + void testEncodeOnlyIntTags() { + keyFlyweight + .wrap(keyBuffer, 0) + .tagsCount(3) + .intValue("tag1", 1) + .intValue("tag2", 2) + .intValue("tag3", 3); + + final var key = keyCodec.decodeKey(keyBuffer, 0); + final var tags = key.tags(); + assertEquals(3, tags.size(), "tags.size"); + assertEquals(1, key.intValue("tag1")); + assertEquals(2, key.intValue("tag2")); + assertEquals(3, key.intValue("tag3")); + } + + @Test + void testEncodeOnlyLongTags() { + keyFlyweight + .wrap(keyBuffer, 0) + .tagsCount(3) + .longValue("tag1", 1L) + .longValue("tag2", 2L) + .longValue("tag3", 3L); + + final var key = keyCodec.decodeKey(keyBuffer, 0); + final var tags = key.tags(); + assertEquals(3, tags.size(), "tags.size"); + assertEquals(1, key.longValue("tag1")); + assertEquals(2, key.longValue("tag2")); + assertEquals(3, 
key.longValue("tag3")); + } + + @Test + void testEncodeOnlyDoubleTags() { + keyFlyweight + .wrap(keyBuffer, 0) + .tagsCount(3) + .doubleValue("tag1", 1.0) + .doubleValue("tag2", 2.0) + .doubleValue("tag3", 3.0); + + final var key = keyCodec.decodeKey(keyBuffer, 0); + final var tags = key.tags(); + assertEquals(3, tags.size(), "tags.size"); + assertEquals(1.0, key.doubleValue("tag1")); + assertEquals(2.0, key.doubleValue("tag2")); + assertEquals(3.0, key.doubleValue("tag3")); + } + + @Test + void testEncodeOnlyStringTags() { + keyFlyweight + .wrap(keyBuffer, 0) + .tagsCount(3) + .stringValue("tag1", "foo") + .stringValue("tag2", "bar") + .stringValue("tag3", "baz"); + + final var key = keyCodec.decodeKey(keyBuffer, 0); + final var tags = key.tags(); + assertEquals(3, tags.size(), "tags.size"); + assertEquals("foo", key.stringValue("tag1")); + assertEquals("bar", key.stringValue("tag2")); + assertEquals("baz", key.stringValue("tag3")); + } + + @Test + void testEquality() { + final var n = 100; + final var bufferList = new ArrayList(); + + for (int i = 0, offset = 0; i < n; i++, offset += keyFlyweight.length()) { + keyFlyweight.wrap(keyBuffer, offset).tagsCount(1).stringValue("tag1", "10"); + bufferList.add( + new UnsafeBuffer(keyFlyweight.buffer(), keyFlyweight.offset(), keyFlyweight.length())); + } + + for (UnsafeBuffer b1 : bufferList) { + for (UnsafeBuffer b2 : bufferList) { + if (b1 != b2) { + assertEquals(b1, b2); + } + } + } + } + + @Test + void testOverwriteBuffer() { + keyFlyweight.wrap(keyBuffer, 0).tagsCount(2).intValue("tag1", 42).stringValue("tag2", "foo"); + + var key1 = keyCodec.decodeKey(keyBuffer, 0); + + keyFlyweight.wrap(keyBuffer, 0).tagsCount(1).stringValue("tag1", "bar"); + + var key2 = keyCodec.decodeKey(keyBuffer, 0); + + assertEquals(2, key1.tags().size()); + assertEquals(1, key2.tags().size()); + assertEquals("bar", key2.tags().get("tag1")); + } + + @Test + void testLongStringValue() { + String longString = "a".repeat(1024); + keyFlyweight.wrap(keyBuffer, 0).tagsCount(1).stringValue("tag1", longString); + var key = keyCodec.decodeKey(keyBuffer, 0); + assertEquals(longString, key.stringValue("tag1")); + } + + @Test + void testEncodingWithOffsets() { + var offset1 = 0; + var offset2 = 512; + + keyFlyweight.wrap(keyBuffer, offset1).tagsCount(1).intValue("tag1", 123); + keyFlyweight.wrap(keyBuffer, offset2).tagsCount(1).intValue("tag1", 456); + + assertEquals(123, keyCodec.decodeKey(keyBuffer, offset1).intValue("tag1")); + assertEquals(456, keyCodec.decodeKey(keyBuffer, offset2).intValue("tag1")); + } + + @Test + void testEnumRoundTrip() { + enum Color { + RED, + GREEN, + BLUE + } + + final var toCode = Map.of(Color.RED, (byte) 1, Color.GREEN, (byte) 2, Color.BLUE, (byte) 3); + final var fromCode = Map.of((byte) 1, Color.RED, (byte) 2, Color.GREEN, (byte) 3, Color.BLUE); + + keyFlyweight.wrap(keyBuffer, 0).tagsCount(1).enumValue("tag1", Color.GREEN, toCode::get); + var key = keyCodec.decodeKey(keyBuffer, 0); + var decoded = key.enumValue("tag1", fromCode::get); + assertEquals(Color.GREEN, decoded); + } +} diff --git a/metrics/src/test/java/io/scalecube/metrics/LoggingExtension.java b/metrics/src/test/java/io/scalecube/metrics/LoggingExtension.java new file mode 100644 index 0000000..6882931 --- /dev/null +++ b/metrics/src/test/java/io/scalecube/metrics/LoggingExtension.java @@ -0,0 +1,51 @@ +package io.scalecube.metrics; + +import java.lang.reflect.Method; +import org.junit.jupiter.api.extension.AfterAllCallback; +import org.junit.jupiter.api.extension.AfterEachCallback; 
+import org.junit.jupiter.api.extension.BeforeAllCallback;
+import org.junit.jupiter.api.extension.BeforeEachCallback;
+import org.junit.jupiter.api.extension.ExtensionContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A JUnit 5 extension that merely logs the test's name at start and finish. Make sure to start the
+ * JVM with {@code -Djunit.jupiter.extensions.autodetection.enabled=true} to activate this
+ * extension.
+ */
+public class LoggingExtension
+    implements AfterEachCallback, BeforeEachCallback, AfterAllCallback, BeforeAllCallback {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(LoggingExtension.class);
+
+  @Override
+  public void beforeAll(ExtensionContext context) {
+    LOGGER.info(
+        "***** Setup: {} *****", context.getTestClass().map(Class::getSimpleName).orElse(""));
+  }
+
+  @Override
+  public void afterEach(ExtensionContext context) {
+    LOGGER.info(
+        "***** Test finished: {}.{}.{} *****",
+        context.getTestClass().map(Class::getSimpleName).orElse(""),
+        context.getTestMethod().map(Method::getName).orElse(""),
+        context.getDisplayName());
+  }
+
+  @Override
+  public void beforeEach(ExtensionContext context) {
+    LOGGER.info(
+        "***** Test started: {}.{}.{} *****",
+        context.getTestClass().map(Class::getSimpleName).orElse(""),
+        context.getTestMethod().map(Method::getName).orElse(""),
+        context.getDisplayName());
+  }
+
+  @Override
+  public void afterAll(ExtensionContext context) {
+    LOGGER.info(
+        "***** TearDown: {} *****", context.getTestClass().map(Class::getSimpleName).orElse(""));
+  }
+}
diff --git a/metrics/src/test/java/io/scalecube/metrics/MetricNamesTest.java b/metrics/src/test/java/io/scalecube/metrics/MetricNamesTest.java
new file mode 100644
index 0000000..e45cb34
--- /dev/null
+++ b/metrics/src/test/java/io/scalecube/metrics/MetricNamesTest.java
@@ -0,0 +1,79 @@
+package io.scalecube.metrics;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+class MetricNamesTest {
+
+  @Test
+  void testSanitizeName_validName() {
+    Assertions.assertEquals("valid_name", MetricNames.sanitizeName("valid_name"));
+  }
+
+  @Test
+  void testSanitizeName_null() {
+    assertEquals("_", MetricNames.sanitizeName(null));
+  }
+
+  @Test
+  void testSanitizeName_empty() {
+    assertEquals("_", MetricNames.sanitizeName(""));
+  }
+
+  @Test
+  void testSanitizeName_illegalCharacters() {
+    assertEquals("a_b_c", MetricNames.sanitizeName("a!b@c"));
+  }
+
+  @Test
+  void testSanitizeName_upperCase() {
+    assertEquals("abc", MetricNames.sanitizeName("ABC"));
+  }
+
+  @Test
+  void testSanitizeName_startsWithDash() {
+    assertEquals("_abc", MetricNames.sanitizeName("-abc"));
+  }
+
+  @Test
+  void testSanitizeName_startsWithDigit() {
+    assertEquals("_123abc", MetricNames.sanitizeName("123abc"));
+  }
+
+  @Test
+  void testSanitizeName_allInvalid() {
+    assertEquals("_", MetricNames.sanitizeName("!!!"));
+  }
+
+  @Test
+  void testSanitizeName_camelCaseToSnakeCase() {
+    assertEquals("cool_market_agent", MetricNames.sanitizeName("CoolMarketAgent"));
+  }
+
+  @Test
+  void testSanitizeName_acronymPreserved() {
+    assertEquals("http_request", MetricNames.sanitizeName("HTTPRequest"));
+  }
+
+  @Test
+  void testSanitizeName_acronymWithDigit() {
+    assertEquals("http2_connection_pool", MetricNames.sanitizeName("HTTP2ConnectionPool"));
+  }
+
+  @Test
+  void testSanitizeName_mixedAcronymCamelCase() {
+    assertEquals("json_to_http_bridge", MetricNames.sanitizeName("JsonToHTTPBridge"));
+  }
+
+  @Test
+  void testSanitizeName_digitInMiddle() {
+    assertEquals("metric2025_value", MetricNames.sanitizeName("Metric2025Value"));
+  }
+
+  @Test
+  void testSanitizeName_multipleUpperCaseBlocks() {
+    assertEquals("ssl_tls_config", MetricNames.sanitizeName("SSL_TLSConfig"));
+  }
+}
diff --git a/metrics/src/test/java/io/scalecube/metrics/MetricsRecorderTest.java b/metrics/src/test/java/io/scalecube/metrics/MetricsRecorderTest.java
new file mode 100644
index 0000000..3b104db
--- /dev/null
+++ b/metrics/src/test/java/io/scalecube/metrics/MetricsRecorderTest.java
@@ -0,0 +1,190 @@
+package io.scalecube.metrics;
+
+import static io.scalecube.metrics.MetricsRecorder.Context.DEFAULT_METRICS_DIR_NAME;
+import static io.scalecube.metrics.MetricsRecorder.Context.DIR_DELETE_ON_SHUTDOWN_PROP_NAME;
+import static io.scalecube.metrics.MetricsRecorder.Context.IDLE_STRATEGY_PROP_NAME;
+import static io.scalecube.metrics.MetricsRecorder.Context.METRICS_BUFFER_LENGTH_PROP_NAME;
+import static io.scalecube.metrics.MetricsRecorder.Context.METRICS_DIRECTORY_NAME_PROP_NAME;
+import static io.scalecube.metrics.MetricsRecorder.Context.METRICS_FILE;
+import static org.agrona.IoUtil.delete;
+import static org.agrona.IoUtil.mapExistingFile;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import io.scalecube.metrics.MetricsRecorder.Context;
+import io.scalecube.metrics.MetricsRecorder.LayoutDescriptor;
+import java.io.File;
+import java.lang.management.ManagementFactory;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import org.agrona.BufferUtil;
+import org.agrona.CloseHelper;
+import org.agrona.SystemUtil;
+import org.agrona.concurrent.UnsafeBuffer;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+class MetricsRecorderTest {
+
+  private static final long OLD_START_TIMESTAMP = 10042;
+  private static final long OLD_PID = 100500;
+  private static final int OLD_BUFFER_LENGTH = 8 * 1024 * 1024;
+  private static final long START_TIMESTAMP = ManagementFactory.getRuntimeMXBean().getStartTime();
+  private static final long PID = ManagementFactory.getRuntimeMXBean().getPid();
+
+  private static final AtomicInteger INT_COUNTER = new AtomicInteger(1);
+  private static final AtomicLong LONG_COUNTER = new AtomicLong(1000L);
+  private static final AtomicInteger PATH_COUNTER = new AtomicInteger(1);
+  private static final Random RANDOM = new Random(12345L);
+
+  private final List<AutoCloseable> resources = new ArrayList<>();
+
+  @BeforeEach
+  void beforeEach() {
+    CloseHelper.quietCloseAll(resources);
+    resources.clear();
+    delete(new File(DEFAULT_METRICS_DIR_NAME), true);
+  }
+
+  @Test
+  void testPopulateFromProperties() {
+    // given
+    final var metricsDirectoryName = nextPath();
+    final var dirDeleteOnShutdown = nextBoolean();
+    final var metricsBufferLength = nextInt();
+    final var idleStrategy = "org.agrona.concurrent.SleepingIdleStrategy";
+
+    Properties props = new Properties();
+    props.setProperty(METRICS_DIRECTORY_NAME_PROP_NAME, metricsDirectoryName);
+    props.setProperty(DIR_DELETE_ON_SHUTDOWN_PROP_NAME, String.valueOf(dirDeleteOnShutdown));
+    props.setProperty(METRICS_BUFFER_LENGTH_PROP_NAME, String.valueOf(metricsBufferLength));
+    props.setProperty(IDLE_STRATEGY_PROP_NAME, idleStrategy);
+
+    // when
+    Context context = new Context(props);
+
+    // then
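+    // each Context getter must echo the value parsed from the corresponding property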
+    assertEquals(metricsDirectoryName, context.metricsDirectoryName());
+    assertEquals(dirDeleteOnShutdown, context.dirDeleteOnShutdown());
+    assertEquals(metricsBufferLength, context.metricsBufferLength());
+    assertEquals(idleStrategy, context.idleStrategy().getClass().getName());
+  }
+
+  @Test
+  void testStartManyFromScratch() {
+    final var foo = addResource(MetricsRecorder.launch(new Context().dirDeleteOnShutdown(true)));
+    final var bar = addResource(MetricsRecorder.launch(new Context().dirDeleteOnShutdown(true)));
+    final var baz = addResource(MetricsRecorder.launch(new Context().dirDeleteOnShutdown(true)));
+
+    final var fileList =
+        List.of(
+            new File(foo.context().metricsDir(), METRICS_FILE),
+            new File(bar.context().metricsDir(), METRICS_FILE),
+            new File(baz.context().metricsDir(), METRICS_FILE));
+
+    for (var f1 : fileList) {
+      for (var f2 : fileList) {
+        if (f1 != f2) {
+          assertTrue(f1.exists());
+          assertTrue(f2.exists());
+          assertEquals(f1, f2);
+          final var b1 = new UnsafeBuffer(mapExistingFile(f1, f1.getName()));
+          final var b2 = new UnsafeBuffer(mapExistingFile(f2, f2.getName()));
+          assertEquals(b1, b2);
+        }
+      }
+    }
+
+    assertMetricsHeader(START_TIMESTAMP, PID);
+  }
+
+  @Test
+  void testStartManyAfterRestart() {
+    var foo = addResource(MetricsRecorder.launch(new Context().dirDeleteOnShutdown(false)));
+    var bar = addResource(MetricsRecorder.launch(new Context().dirDeleteOnShutdown(false)));
+    var baz = addResource(MetricsRecorder.launch(new Context().dirDeleteOnShutdown(false)));
+
+    // Stop
+
+    CloseHelper.quietCloseAll(resources);
+    resources.clear();
+
+    // Restart
+
+    updateMetricsHeader(OLD_START_TIMESTAMP, OLD_PID, OLD_BUFFER_LENGTH);
+    foo = addResource(MetricsRecorder.launch(new Context().dirDeleteOnShutdown(true)));
+    bar = addResource(MetricsRecorder.launch(new Context().dirDeleteOnShutdown(true)));
+    baz = addResource(MetricsRecorder.launch(new Context().dirDeleteOnShutdown(true)));
+
+    final var fileList =
+        List.of(
+            new File(foo.context().metricsDir(), METRICS_FILE),
+            new File(bar.context().metricsDir(), METRICS_FILE),
+            new File(baz.context().metricsDir(), METRICS_FILE));
+
+    for (var f1 : fileList) {
+      for (var f2 : fileList) {
+        if (f1 != f2) {
+          assertTrue(f1.exists());
+          assertTrue(f2.exists());
+          assertEquals(f1, f2);
+          final var b1 = new UnsafeBuffer(mapExistingFile(f1, f1.getName()));
+          final var b2 = new UnsafeBuffer(mapExistingFile(f2, f2.getName()));
+          assertEquals(b1, b2);
+        }
+      }
+    }
+
+    assertMetricsHeader(START_TIMESTAMP, PID);
+  }
+
+  private static void updateMetricsHeader(long startTimestamp, long pid, int bufferLength) {
+    final var file = new File(DEFAULT_METRICS_DIR_NAME, METRICS_FILE);
+    final var mappedByteBuffer = mapExistingFile(file, METRICS_FILE);
+    try {
+      final var headerBuffer = LayoutDescriptor.createHeaderBuffer(mappedByteBuffer);
+      LayoutDescriptor.fillHeaderBuffer(headerBuffer, startTimestamp, pid, bufferLength);
+    } finally {
+      BufferUtil.free(mappedByteBuffer);
+    }
+  }
+
+  private static void assertMetricsHeader(long startTimestamp, long pid) {
+    final var file = new File(DEFAULT_METRICS_DIR_NAME, METRICS_FILE);
+    final var mappedByteBuffer = mapExistingFile(file, METRICS_FILE);
+    try {
+      final var headerBuffer = LayoutDescriptor.createHeaderBuffer(mappedByteBuffer);
+      assertEquals(startTimestamp, LayoutDescriptor.startTimestamp(headerBuffer), "startTimestamp");
+      assertEquals(pid, LayoutDescriptor.pid(headerBuffer), "pid");
+    } finally {
+      BufferUtil.free(mappedByteBuffer);
+    }
+  }
+
+  private <T extends AutoCloseable> T addResource(T resource) {
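+    // track the resource so the next test's beforeEach() closes it before starting over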
+    resources.add(resource);
+    return resource;
+  }
+
+  private static int nextInt() {
+    return INT_COUNTER.getAndIncrement();
+  }
+
+  private static long nextLong() {
+    return LONG_COUNTER.getAndAdd(100L);
+  }
+
+  private static String nextPath() {
+    return Paths.get(SystemUtil.tmpDirName() + "test-path-" + PATH_COUNTER.getAndIncrement())
+        .toString();
+  }
+
+  private static boolean nextBoolean() {
+    return RANDOM.nextBoolean();
+  }
+}
diff --git a/metrics/src/test/java/io/scalecube/metrics/MetricsTransmitterTest.java b/metrics/src/test/java/io/scalecube/metrics/MetricsTransmitterTest.java
new file mode 100644
index 0000000..920dfba
--- /dev/null
+++ b/metrics/src/test/java/io/scalecube/metrics/MetricsTransmitterTest.java
@@ -0,0 +1,168 @@
+package io.scalecube.metrics;
+
+import static io.scalecube.metrics.MetricsTransmitter.Context.BROADCAST_BUFFER_LENGTH_PROP_NAME;
+import static io.scalecube.metrics.MetricsTransmitter.Context.HEARTBEAT_TIMEOUT_PROP_NAME;
+import static io.scalecube.metrics.MetricsTransmitter.Context.IDLE_STRATEGY_PROP_NAME;
+import static io.scalecube.metrics.MetricsTransmitter.Context.METRICS_DIRECTORY_NAME_PROP_NAME;
+import static io.scalecube.metrics.MetricsTransmitter.Context.RETRY_INTERVAL_PROP_NAME;
+import static io.scalecube.metrics.MetricsTransmitter.Context.WARN_IF_METRICS_NOT_EXISTS_PROP_NAME;
+import static org.agrona.IoUtil.delete;
+import static org.agrona.IoUtil.mapExistingFile;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import io.scalecube.metrics.CountersRegistry.Context;
+import io.scalecube.metrics.MetricsRecorder.LayoutDescriptor;
+import io.scalecube.metrics.MetricsTransmitterAgent.State;
+import java.io.File;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import org.agrona.BufferUtil;
+import org.agrona.CloseHelper;
+import org.agrona.SystemUtil;
+import org.agrona.concurrent.CachedEpochClock;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+class MetricsTransmitterTest {
+
+  private static final Duration RETRY_INTERVAL = Duration.ofSeconds(3);
+  private static final Duration HEARTBEAT_TIMEOUT = Duration.ofSeconds(1);
+  private static final long OLD_START_TIMESTAMP = 10042;
+  private static final long OLD_PID = 100500;
+  private static final int OLD_BUFFER_LENGTH = 8 * 1024 * 1024;
+
+  private static final AtomicInteger INT_COUNTER = new AtomicInteger(1);
+  private static final AtomicLong LONG_COUNTER = new AtomicLong(1000L);
+  private static final AtomicInteger PATH_COUNTER = new AtomicInteger(1);
+  private static final Random RANDOM = new Random(12345L);
+
+  private final CachedEpochClock epochClock = new CachedEpochClock();
+  private MetricsTransmitter metricsTransmitter;
+  private MetricsTransmitterAgent agent;
+
+  @BeforeEach
+  void beforeEach() {
+    delete(new File(MetricsRecorder.Context.DEFAULT_METRICS_DIR_NAME), true);
+    metricsTransmitter =
+        MetricsTransmitter.launch(
+            new MetricsTransmitter.Context()
+                .useAgentInvoker(true)
+                .epochClock(epochClock)
+                .retryInterval(RETRY_INTERVAL)
+                .heartbeatTimeout(HEARTBEAT_TIMEOUT));
+    agent = (MetricsTransmitterAgent) metricsTransmitter.agentInvoker().agent();
+  }
+
+  @AfterEach
+  void afterEach() {
+    CloseHelper.quietCloseAll(metricsTransmitter);
+  }
+
+  @Test
+  void testPopulateFromProperties() {
+    // given
+    final var metricsDirectoryName = nextPath();
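+    // values need only be distinct; they come from the next*() helpers at the bottom of the class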
+    final var warnIfMetricsNotExists = nextBoolean();
+    final var retryInterval = nextLong();
+    final var heartbeatTimeout = nextLong();
+    final var broadcastBufferLength = nextInt();
+    final var idleStrategy = "org.agrona.concurrent.SleepingIdleStrategy";
+
+    Properties props = new Properties();
+    props.setProperty(METRICS_DIRECTORY_NAME_PROP_NAME, metricsDirectoryName);
+    props.setProperty(WARN_IF_METRICS_NOT_EXISTS_PROP_NAME, String.valueOf(warnIfMetricsNotExists));
+    props.setProperty(RETRY_INTERVAL_PROP_NAME, String.valueOf(retryInterval));
+    props.setProperty(HEARTBEAT_TIMEOUT_PROP_NAME, String.valueOf(heartbeatTimeout));
+    props.setProperty(BROADCAST_BUFFER_LENGTH_PROP_NAME, String.valueOf(broadcastBufferLength));
+    props.setProperty(IDLE_STRATEGY_PROP_NAME, idleStrategy);
+
+    // when
+    MetricsTransmitter.Context context = new MetricsTransmitter.Context(props);
+
+    // then
+    assertEquals(metricsDirectoryName, context.metricsDirectoryName());
+    assertEquals(warnIfMetricsNotExists, context.warnIfMetricsNotExists());
+    assertEquals(retryInterval, context.retryInterval().toNanos());
+    assertEquals(heartbeatTimeout, context.heartbeatTimeout().toNanos());
+    assertEquals(broadcastBufferLength, context.broadcastBufferLength());
+    assertEquals(idleStrategy, context.idleStrategy().getClass().getName());
+  }
+
+  @Test
+  void testWorkWithMetrics() {
+    try (final var metricsRecorder = MetricsRecorder.launch()) {
+      agent.doWork();
+      assertEquals(State.RUNNING, agent.state());
+      epochClock.advance(HEARTBEAT_TIMEOUT.toMillis() + 1);
+      agent.doWork();
+      assertEquals(State.RUNNING, agent.state());
+    }
+  }
+
+  @Test
+  void testStartWithoutMetrics() {
+    agent.doWork();
+    assertEquals(State.CLEANUP, agent.state());
+  }
+
+  @Test
+  void testWorkWhenMetricsShutdown() {
+    try (final var metricsRecorder =
+        MetricsRecorder.launch(new MetricsRecorder.Context().dirDeleteOnShutdown(true))) {
+      agent.doWork();
+      assertEquals(State.RUNNING, agent.state());
+    }
+    epochClock.advance(HEARTBEAT_TIMEOUT.toMillis() + 1);
+    agent.doWork();
+    assertEquals(State.CLEANUP, agent.state());
+  }
+
+  @Test
+  void testWorkWhenMetricsRestarted() {
+    try (final var metricsRecorder = MetricsRecorder.launch()) {
+      agent.doWork();
+      assertEquals(State.RUNNING, agent.state());
+    }
+    try (final var countersRegistry = CountersRegistry.create(new Context())) {
+      updateMetricsHeader(OLD_START_TIMESTAMP, OLD_PID, OLD_BUFFER_LENGTH);
+      epochClock.advance(HEARTBEAT_TIMEOUT.toMillis() + 1);
+      agent.doWork();
+      assertEquals(State.CLEANUP, agent.state());
+    }
+  }
+
+  private static void updateMetricsHeader(long startTimestamp, long pid, int bufferLength) {
+    final var metricsDirName = MetricsRecorder.Context.DEFAULT_METRICS_DIR_NAME;
+    final var metricsFile = MetricsRecorder.Context.METRICS_FILE;
+    final var file = new File(metricsDirName, metricsFile);
+    final var mappedByteBuffer = mapExistingFile(file, metricsFile);
+    try {
+      final var headerBuffer = LayoutDescriptor.createHeaderBuffer(mappedByteBuffer);
+      LayoutDescriptor.fillHeaderBuffer(headerBuffer, startTimestamp, pid, bufferLength);
+    } finally {
+      BufferUtil.free(mappedByteBuffer);
+    }
+  }
+
+  private static int nextInt() {
+    return INT_COUNTER.getAndIncrement();
+  }
+
+  private static long nextLong() {
+    return LONG_COUNTER.getAndAdd(100L);
+  }
+
+  private static String nextPath() {
+    return Paths.get(SystemUtil.tmpDirName() + "test-path-" + PATH_COUNTER.getAndIncrement())
+        .toString();
+  }
+
+  private static boolean nextBoolean() {
+    return RANDOM.nextBoolean();
+  }
+}
diff --git a/metrics/src/test/java/io/scalecube/metrics/PropertiesRegistryTest.java b/metrics/src/test/java/io/scalecube/metrics/PropertiesRegistryTest.java
new file mode 100644
index 0000000..c3b94f8
--- /dev/null
+++ b/metrics/src/test/java/io/scalecube/metrics/PropertiesRegistryTest.java
@@ -0,0 +1,107 @@
+package io.scalecube.metrics;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+import java.util.UUID;
+import org.agrona.CloseHelper;
+import org.agrona.concurrent.status.CountersManager;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+class PropertiesRegistryTest {
+
+  private CountersRegistry countersRegistry;
+  private CountersManager countersManager;
+  private PropertiesRegistry propertiesRegistry;
+
+  @BeforeEach
+  void beforeEach() {
+    countersRegistry = CountersRegistry.create();
+    countersManager = countersRegistry.countersManager();
+    propertiesRegistry = new PropertiesRegistry(countersManager);
+  }
+
+  @AfterEach
+  void afterEach() {
+    CloseHelper.quietCloseAll(countersRegistry);
+  }
+
+  @Test
+  void stringProperty() {
+    final var value = UUID.randomUUID();
+    final var name = "string_property";
+    propertiesRegistry.put(name, value);
+    assertEquals(value.toString(), PropertiesRegistry.getProperty(countersManager, name));
+  }
+
+  @Test
+  void byteProperty() {
+    final String name = "byte_property";
+    byte value = 42;
+    propertiesRegistry.put(name, value);
+    assertEquals(Byte.valueOf(value), PropertiesRegistry.getByteProperty(countersManager, name));
+  }
+
+  @Test
+  void shortProperty() {
+    final String name = "short_property";
+    short value = 12345;
+    propertiesRegistry.put(name, value);
+    assertEquals(Short.valueOf(value), PropertiesRegistry.getShortProperty(countersManager, name));
+  }
+
+  @Test
+  void intProperty() {
+    final String name = "int_property";
+    int value = 123456789;
+    propertiesRegistry.put(name, value);
+    assertEquals(Integer.valueOf(value), PropertiesRegistry.getIntProperty(countersManager, name));
+  }
+
+  @Test
+  void longProperty() {
+    final String name = "long_property";
+    long value = 9876543210L;
+    propertiesRegistry.put(name, value);
+    assertEquals(Long.valueOf(value), PropertiesRegistry.getLongProperty(countersManager, name));
+  }
+
+  @Test
+  void doubleProperty() {
+    final String name = "double_property";
+    double value = 3.14159;
+    propertiesRegistry.put(name, value);
+    assertEquals(
+        Double.valueOf(value), PropertiesRegistry.getDoubleProperty(countersManager, name));
+  }
+
+  @Test
+  void enumProperty() {
+    final String name = "enum_property";
+    SampleEnum value = SampleEnum.B;
+    propertiesRegistry.put(name, value);
+    SampleEnum result =
+        PropertiesRegistry.getEnumProperty(countersManager, name, SampleEnum::valueOf);
+    assertEquals(value, result);
+  }
+
+  @Test
+  void unknownPropertyReturnsNull() {
+    assertNull(PropertiesRegistry.getProperty(countersManager, "nonexistent"));
+    assertNull(PropertiesRegistry.getByteProperty(countersManager, "nonexistent"));
+    assertNull(PropertiesRegistry.getShortProperty(countersManager, "nonexistent"));
+    assertNull(PropertiesRegistry.getIntProperty(countersManager, "nonexistent"));
+    assertNull(PropertiesRegistry.getLongProperty(countersManager, "nonexistent"));
+    assertNull(PropertiesRegistry.getDoubleProperty(countersManager, "nonexistent"));
+    assertNull(
+        PropertiesRegistry.getEnumProperty(countersManager, "nonexistent", SampleEnum::valueOf));
+  }
+
+  enum SampleEnum {
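+    // minimal enum fixture for the enum-property round-trip tests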
+    A,
+    B,
+    C
+  }
+}
diff --git a/metrics/src/test/java/io/scalecube/metrics/TpsMetricTest.java b/metrics/src/test/java/io/scalecube/metrics/TpsMetricTest.java
new file mode 100644
index 0000000..61b0076
--- /dev/null
+++ b/metrics/src/test/java/io/scalecube/metrics/TpsMetricTest.java
@@ -0,0 +1,309 @@
+package io.scalecube.metrics;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import io.scalecube.metrics.MetricsTransmitter.Context;
+import java.util.function.Consumer;
+import org.agrona.CloseHelper;
+import org.agrona.DirectBuffer;
+import org.agrona.concurrent.CachedEpochClock;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Nested;
+import org.junit.jupiter.api.Test;
+
+class TpsMetricTest {
+
+  private final CachedEpochClock epochClock = new CachedEpochClock();
+  private final MetricsHandlerImpl metricsHandler = new MetricsHandlerImpl();
+  private MetricsRecorder metricsRecorder;
+  private MetricsTransmitter metricsTransmitter;
+  private MetricsReader metricsReader;
+
+  @BeforeEach
+  void beforeEach() {
+    metricsRecorder =
+        MetricsRecorder.launch(
+            new MetricsRecorder.Context().useAgentInvoker(true).epochClock(epochClock));
+    metricsRecorder.agentInvoker().invoke(); // init delays
+
+    metricsTransmitter =
+        MetricsTransmitter.launch(new Context().useAgentInvoker(true).epochClock(epochClock));
+    metricsTransmitter.agentInvoker().invoke(); // kick-off
+
+    metricsReader = new MetricsReader(metricsTransmitter.context().broadcastBuffer());
+  }
+
+  @AfterEach
+  void afterEach() {
+    CloseHelper.quietCloseAll(metricsReader, metricsTransmitter, metricsRecorder);
+  }
+
+  @Nested
+  class Single {
+
+    @Test
+    void testRecord() {
+      final var name = "foo";
+      final var tps =
+          metricsRecorder.newTps(
+              keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name));
+      metricsRecorder.agentInvoker().invoke(); // on-board
+
+      tps.record();
+      tps.record();
+      tps.record();
+      tps.record();
+
+      advanceClock();
+      metricsRecorder.agentInvoker().invoke();
+      metricsTransmitter.agentInvoker().invoke();
+
+      metricsReader.read(metricsHandler.reset());
+      metricsHandler.assertHasRead();
+      metricsHandler.assertName(name);
+      metricsHandler.assertValue(4);
+    }
+
+    @Test
+    void testRecordWithTags() {
+      final var name = "foo";
+      final var tps =
+          metricsRecorder.newTps(
+              keyFlyweight ->
+                  keyFlyweight
+                      .tagsCount(3)
+                      .stringValue("name", name)
+                      .stringValue("kind", "k")
+                      .intValue("mp_id", 100500));
+      metricsRecorder.agentInvoker().invoke(); // on-board
+
+      tps.record();
+      tps.record();
+      tps.record();
+      tps.record();
+
+      advanceClock();
+      metricsRecorder.agentInvoker().invoke();
+      metricsTransmitter.agentInvoker().invoke();
+
+      metricsReader.read(metricsHandler.reset());
+      metricsHandler.assertHasRead();
+      metricsHandler.assertName(name);
+      metricsHandler.assertValue(4);
+      metricsHandler.assertKey(
+          key -> {
+            Assertions.assertEquals(name, key.stringValue("name"), "name");
+            Assertions.assertEquals("k", key.stringValue("kind"), "kind");
+            Assertions.assertEquals(100500, key.intValue("mp_id"), "mpId");
+          });
+    }
+
+    @Test
+    void testRepeatedSemantic() {
+      final var name = "foo";
+      final var tps =
+          metricsRecorder.newTps(
+              keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name));
+      metricsRecorder.agentInvoker().invoke(); // on-board
+
+      for (int i = 1; i <= 4; i++) {
+        tps.record();
+        tps.record();
+        tps.record();
+        tps.record();
+
+        advanceClock();
+        metricsRecorder.agentInvoker().invoke();
+        metricsTransmitter.agentInvoker().invoke();
+
+        metricsReader.read(metricsHandler.reset());
+        metricsHandler.assertHasRead();
+        metricsHandler.assertName(name);
+        metricsHandler.assertValue(4);
+      }
+    }
+  }
+
+  @Nested
+  class Aggregation {
+
+    @Test
+    void testRecord() {
+      final var name = "foo";
+      final var foo1 =
+          metricsRecorder.newTps(
+              keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name));
+      final var foo2 =
+          metricsRecorder.newTps(
+              keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name));
+      metricsRecorder.agentInvoker().invoke(); // on-board
+      metricsRecorder.agentInvoker().invoke(); // on-board
+
+      // foo1
+
+      foo1.record();
+      foo1.record();
+      foo1.record();
+      foo1.record();
+
+      // foo2
+
+      foo2.record();
+      foo2.record();
+      foo2.record();
+      foo2.record();
+
+      // publish
+
+      advanceClock();
+      metricsRecorder.agentInvoker().invoke();
+      metricsTransmitter.agentInvoker().invoke();
+
+      metricsReader.read(metricsHandler.reset());
+      metricsHandler.assertHasRead();
+      metricsHandler.assertName(name);
+      metricsHandler.assertValue(8);
+    }
+
+    @Test
+    void testRecordWithTags() {
+      final var name = "foo";
+      final var foo1 =
+          metricsRecorder.newTps(
+              keyFlyweight ->
+                  keyFlyweight
+                      .tagsCount(3)
+                      .stringValue("name", name)
+                      .stringValue("kind", "k")
+                      .intValue("mp_id", 100500));
+      final var foo2 =
+          metricsRecorder.newTps(
+              keyFlyweight ->
+                  keyFlyweight
+                      .tagsCount(3)
+                      .stringValue("name", name)
+                      .stringValue("kind", "k")
+                      .intValue("mp_id", 100500));
+      metricsRecorder.agentInvoker().invoke(); // on-board
+      metricsRecorder.agentInvoker().invoke(); // on-board
+
+      // foo1
+
+      foo1.record();
+      foo1.record();
+      foo1.record();
+      foo1.record();
+
+      // foo2
+
+      foo2.record();
+      foo2.record();
+      foo2.record();
+      foo2.record();
+
+      // publish
+
+      advanceClock();
+      metricsRecorder.agentInvoker().invoke();
+      metricsTransmitter.agentInvoker().invoke();
+
+      metricsReader.read(metricsHandler.reset());
+      metricsHandler.assertHasRead();
+      metricsHandler.assertName(name);
+      metricsHandler.assertValue(8);
+      metricsHandler.assertKey(
+          key -> {
+            Assertions.assertEquals(name, key.stringValue("name"), "name");
+            Assertions.assertEquals("k", key.stringValue("kind"), "kind");
+            Assertions.assertEquals(100500, key.intValue("mp_id"), "mpId");
+          });
+    }
+
+    @Test
+    void testRepeatedSemantic() {
+      final var name = "foo";
+      final var foo1 =
+          metricsRecorder.newTps(
+              keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name));
+      final var foo2 =
+          metricsRecorder.newTps(
+              keyFlyweight -> keyFlyweight.tagsCount(1).stringValue("name", name));
+      metricsRecorder.agentInvoker().invoke(); // on-board
+      metricsRecorder.agentInvoker().invoke(); // on-board
+
+      for (int i = 1; i <= 4; i++) {
+        // foo1
+
+        foo1.record();
+        foo1.record();
+        foo1.record();
+        foo1.record();
+
+        // foo2
+
+        foo2.record();
+        foo2.record();
+        foo2.record();
+        foo2.record();
+
+        // publish
+
+        advanceClock();
+        metricsRecorder.agentInvoker().invoke();
+        metricsTransmitter.agentInvoker().invoke();
+
+        metricsReader.read(metricsHandler.reset());
+        metricsHandler.assertHasRead();
+        metricsHandler.assertName(name);
+        metricsHandler.assertValue(8);
+      }
+    }
+  }
+
+  private void advanceClock() {
+    epochClock.advance(1000);
+  }
+
+  private static class MetricsHandlerImpl implements MetricsHandler {
+
+    long timestamp;
+    Key key;
+    long value = -1;
+
+    @Override
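+    // captures the last transmitted TPS sample; the assert* helpers below inspect these fields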
+    public void onTps(
+        long timestamp, DirectBuffer keyBuffer, int keyOffset, int keyLength, long value) {
+      this.timestamp = timestamp;
+      this.key = new KeyCodec().decodeKey(keyBuffer, keyOffset);
+      this.value = value;
+    }
+
+    MetricsHandlerImpl reset() {
+      timestamp = 0;
+      key = null;
+      value = -1;
+      return this;
+    }
+
+    void assertHasRead() {
+      assertTrue(timestamp > 0);
+    }
+
+    void assertName(String name) {
+      assertNotNull(key, "key");
+      assertEquals(name, key.stringValue("name"), "name");
+    }
+
+    void assertKey(Consumer<Key> consumer) {
+      consumer.accept(key);
+    }
+
+    void assertValue(long value) {
+      assertEquals(value, this.value, "value");
+    }
+  }
+}
diff --git a/metrics/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension b/metrics/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension
new file mode 100644
index 0000000..13e78de
--- /dev/null
+++ b/metrics/src/test/resources/META-INF/services/org.junit.jupiter.api.extension.Extension
@@ -0,0 +1 @@
+io.scalecube.metrics.LoggingExtension
diff --git a/metrics/src/test/resources/junit-platform.properties b/metrics/src/test/resources/junit-platform.properties
new file mode 100644
index 0000000..6efc0d5
--- /dev/null
+++ b/metrics/src/test/resources/junit-platform.properties
@@ -0,0 +1 @@
+junit.jupiter.extensions.autodetection.enabled=true
diff --git a/metrics/src/test/resources/log4j2-test.xml b/metrics/src/test/resources/log4j2-test.xml
new file mode 100644
index 0000000..ed8e666
--- /dev/null
+++ b/metrics/src/test/resources/log4j2-test.xml
@@ -0,0 +1,41 @@
+%level{length=1} %d{ISO8601} %c{1.} %m [%t]%n
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000..be2acbd
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,241 @@
+  4.0.0
+
+  io.scalecube
+  scalecube-parent
+  0.3.12
+
+  scalecube-metrics-parent
+  0.1.0-SNAPSHOT
+  pom
+  ${project.artifactId}
+
+  github
+  GitHub Packages
+  https://maven.pkg.github.com/scalecube/packages
+  false
+
+  false
+  oss.jfrog
+  jfrog
+  https://oss.jfrog.org/libs-release
+
+  false
+  bintray
+  bintray
+  https://jcenter.bintray.com
+
+  false
+  central
+  central
+  https://repo1.maven.org
+
+  https://github.com/scalecube/scalecube-services
+  scm:git:https://github.com/scalecube/scalecube-services.git
+  scm:git:https://github.com/scalecube/scalecube-services.git
+  HEAD
+
+  2.2.4
+  1.35.6
+  2.1.12
+  1.7.36
+
+  5.13.4
+  5.19.0
+  1.3
+  1.21.3
+  1.48.6
+  1.15.1
+  1.37
+  2.17.2
+  3.4.2
+  2.19.2
+
+  https://maven.pkg.github.com/scalecube/scalecube-metrics
+  checkstyle-suppressions.xml
+
+  metrics
+  metrics-benchmarks
+  metrics-examples
+  metrics-mimir
+  metrics-aeron
+  metrics-prometheus
+
+  org.agrona
+  agrona
+  ${agrona.version}
+
+  io.aeron
+  aeron-agent
+  ${aeron.version}
+
+  io.aeron
+  aeron-all
+  ${aeron.version}
+
+  io.aeron
+  aeron-driver
+  ${aeron.version}
+
+  io.aeron
+  aeron-client
+  ${aeron.version}
+
+  io.aeron
+  aeron-cluster
+  ${aeron.version}
+
+  io.aeron
+  aeron-archive
+  ${aeron.version}
+
+  net.bytebuddy
+  byte-buddy
+  ${byte-buddy.version}
+
+  net.bytebuddy
+  byte-buddy-agent
+  ${byte-buddy.version}
+
+  org.hdrhistogram
+  HdrHistogram
+  ${hdrhistogram.version}
+
+  org.openjdk.jmh
+  jmh-core
+  ${jmh.version}
+
+  org.openjdk.jmh
+  jmh-generator-annprocess
+  ${jmh.version}
+
+  org.slf4j
+  slf4j-api
+  ${slf4j.version}
+
+  org.apache.logging.log4j
+  log4j-bom
+  ${log4j.version}
+  pom
+  import
+
+  com.lmax
+  disruptor
+  ${disruptor.version}
+
+  com.fasterxml.jackson
+  jackson-bom
+  ${jackson.version}
+  pom
+  import
+
+  org.junit
+  junit-bom
+  ${junit.version}
+  pom
+  import
+
+  org.mockito
+  mockito-core
+  ${mockito.version}
+
+  org.mockito
+  mockito-junit-jupiter
+  ${mockito.version}
+
+  net.bytebuddy
+  *
+
+  org.testcontainers
+  testcontainers-bom
+  ${testcontainers.version}
+  pom
+  import
+
+  maven-surefire-plugin
+
+  -javaagent:${settings.localRepository}/org/mockito/mockito-core/${mockito.version}/mockito-core-${mockito.version}.jar
+  --add-exports java.base/jdk.internal.misc=ALL-UNNAMED
+  --add-exports java.base/sun.nio.ch=ALL-UNNAMED
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..bd6f234
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+requests==2.32.4