From 608bc1f1cb5f0175d7d6e5a8bef00a38b9fda012 Mon Sep 17 00:00:00 2001 From: dalthecow Date: Mon, 16 Jun 2025 10:39:17 -0400 Subject: [PATCH 01/20] add html save functionality --- pyproject.toml | 1 + src/guidellm/__main__.py | 2 +- src/guidellm/benchmark/output.py | 50 +++++++- src/guidellm/config.py | 19 ++- src/guidellm/presentation/__init__.py | 18 +++ src/guidellm/presentation/builder.py | 26 ++++ src/guidellm/presentation/data_models.py | 149 +++++++++++++++++++++++ src/guidellm/presentation/injector.py | 57 +++++++++ tests/unit/test_config.py | 34 ++++++ 9 files changed, 348 insertions(+), 8 deletions(-) create mode 100644 src/guidellm/presentation/__init__.py create mode 100644 src/guidellm/presentation/builder.py create mode 100644 src/guidellm/presentation/data_models.py create mode 100644 src/guidellm/presentation/injector.py diff --git a/pyproject.toml b/pyproject.toml index a78b1fc5..36ab1e8f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,6 +56,7 @@ dependencies = [ "pyyaml>=6.0.0", "rich", "transformers", + "pyhumps>=3.8.0", ] [project.optional-dependencies] diff --git a/src/guidellm/__main__.py b/src/guidellm/__main__.py index 7dc06835..8ef06f93 100644 --- a/src/guidellm/__main__.py +++ b/src/guidellm/__main__.py @@ -206,7 +206,7 @@ def cli(): help=( "The path to save the output to. If it is a directory, " "it will save benchmarks.json under it. " - "Otherwise, json, yaml, or csv files are supported for output types " + "Otherwise, json, yaml, csv, or html files are supported for output types " "which will be read from the extension for the file path." 
), ) diff --git a/src/guidellm/benchmark/output.py b/src/guidellm/benchmark/output.py index 4847160d..86a80f73 100644 --- a/src/guidellm/benchmark/output.py +++ b/src/guidellm/benchmark/output.py @@ -1,6 +1,7 @@ import csv import json import math +import humps from collections import OrderedDict from datetime import datetime from pathlib import Path @@ -27,7 +28,8 @@ ) from guidellm.scheduler import strategy_display_str from guidellm.utils import Colors, split_text_list_by_length - +from guidellm.utils.injector import create_report +from guidellm.presentation import UIDataBuilder __all__ = [ "GenerativeBenchmarksConsole", "GenerativeBenchmarksReport", @@ -67,6 +69,9 @@ def load_file(path: Union[str, Path]) -> "GenerativeBenchmarksReport": if type_ == "csv": raise ValueError(f"CSV file type is not supported for loading: {path}.") + + if type_ == "html": + raise ValueError(f"HTML file type is not supported for loading: {path}.") raise ValueError(f"Unsupported file type: {type_} for {path}.") @@ -114,6 +119,9 @@ def save_file(self, path: Union[str, Path]) -> Path: if type_ == "csv": return self.save_csv(path) + if type_ == "html": + return self.save_html(path) + raise ValueError(f"Unsupported file type: {type_} for {path}.") def save_json(self, path: Union[str, Path]) -> Path: @@ -220,11 +228,44 @@ def save_csv(self, path: Union[str, Path]) -> Path: return path + def save_html(self, path: str | Path) -> Path: + """ + Download html, inject report data and save to a file. + If the file is a directory, it will create the report in a file named + benchmarks.html under the directory. + + :param path: The path to create the report at. + :return: The path to the report. 
+ """ + + # json_data = json.dumps(data, indent=2) + # thing = f'window.{variable_name} = {json_data};' + + data_builder = UIDataBuilder(self.benchmarks) + data = data_builder.to_dict() + camel_data = humps.camelize(data) + ui_api_data = { + f"window.{humps.decamelize(k)} = {{}};": f'window.{humps.decamelize(k)} = {json.dumps(v, indent=2)};\n' + for k, v in camel_data.items() + } + print("________") + print("________") + print("________") + print("________") + print("ui_api_data") + print(ui_api_data) + print("________") + print("________") + print("________") + print("________") + create_report(ui_api_data, path) + return path + @staticmethod def _file_setup( path: Union[str, Path], - default_file_type: Literal["json", "yaml", "csv"] = "json", - ) -> tuple[Path, Literal["json", "yaml", "csv"]]: + default_file_type: Literal["json", "yaml", "csv", "html"] = "json", + ) -> tuple[Path, Literal["json", "yaml", "csv", "html"]]: path = Path(path) if not isinstance(path, Path) else path if path.is_dir(): @@ -242,6 +283,9 @@ def _file_setup( if path_suffix in [".csv"]: return path, "csv" + if path_suffix in [".html"]: + return path, "html" + raise ValueError(f"Unsupported file extension: {path_suffix} for {path}.") @staticmethod diff --git a/src/guidellm/config.py b/src/guidellm/config.py index ed7e782b..ef2db2ab 100644 --- a/src/guidellm/config.py +++ b/src/guidellm/config.py @@ -30,10 +30,10 @@ class Environment(str, Enum): ENV_REPORT_MAPPING = { - Environment.PROD: "https://guidellm.neuralmagic.com/local-report/index.html", - Environment.STAGING: "https://staging.guidellm.neuralmagic.com/local-report/index.html", - Environment.DEV: "https://dev.guidellm.neuralmagic.com/local-report/index.html", - Environment.LOCAL: "tests/dummy/report.html", + Environment.PROD: "https://neuralmagic.github.io/ui/latest/index.html", + Environment.STAGING: "https://neuralmagic.github.io/ui/staging/latest/index.html", + Environment.DEV: "https://neuralmagic.github.io/ui/dev/index.html", + 
Environment.LOCAL: "https://neuralmagic.github.io/ui/dev/index.html", } @@ -86,6 +86,12 @@ class OpenAISettings(BaseModel): base_url: str = "http://localhost:8000" max_output_tokens: int = 16384 +class ReportGenerationSettings(BaseModel): + """ + Report generation settings for the application + """ + + source: str = "" class Settings(BaseSettings): """ @@ -140,6 +146,9 @@ class Settings(BaseSettings): ) openai: OpenAISettings = OpenAISettings() + # Report settings + report_generation: ReportGenerationSettings = ReportGenerationSettings() + # Output settings table_border_char: str = "=" table_headers_border_char: str = "-" @@ -148,6 +157,8 @@ class Settings(BaseSettings): @model_validator(mode="after") @classmethod def set_default_source(cls, values): + if not values.report_generation.source: + values.report_generation.source = ENV_REPORT_MAPPING.get(values.env) return values def generate_env_file(self) -> str: diff --git a/src/guidellm/presentation/__init__.py b/src/guidellm/presentation/__init__.py new file mode 100644 index 00000000..633b4c60 --- /dev/null +++ b/src/guidellm/presentation/__init__.py @@ -0,0 +1,18 @@ +from .builder import UIDataBuilder +from .data_models import (Bucket, Model, Dataset, RunInfo, TokenDistribution, TokenDetails, Server, WorkloadDetails, BenchmarkDatum) +from .injector import (create_report, inject_data) + +__all__ = [ + "UIDataBuilder", + "Bucket", + "Model", + "Dataset", + "RunInfo", + "TokenDistribution", + "TokenDetails", + "Server", + "WorkloadDetails", + "BenchmarkDatum", + "create_report", + "inject_data", +] diff --git a/src/guidellm/presentation/builder.py b/src/guidellm/presentation/builder.py new file mode 100644 index 00000000..9bf29656 --- /dev/null +++ b/src/guidellm/presentation/builder.py @@ -0,0 +1,26 @@ +from typing import Any +from .data_models import RunInfo, WorkloadDetails, BenchmarkDatum +from guidellm.benchmark.benchmark import GenerativeBenchmark + +__all__ = ["UIDataBuilder"] + + +class UIDataBuilder: + def 
__init__(self, benchmarks: list[GenerativeBenchmark]): + self.benchmarks = benchmarks + + def build_run_info(self): + return RunInfo.from_benchmarks(self.benchmarks) + + def build_workload_details(self): + return WorkloadDetails.from_benchmarks(self.benchmarks) + + def build_benchmarks(self): + return [ BenchmarkDatum.from_benchmark(b) for b in self.benchmarks ] + + def to_dict(self) -> dict[str, Any]: + return { + "run_info": self.build_run_info().dict(), + "workload_details": self.build_workload_details().dict(), + "benchmarks": [b.dict() for b in self.build_benchmarks()], + } \ No newline at end of file diff --git a/src/guidellm/presentation/data_models.py b/src/guidellm/presentation/data_models.py new file mode 100644 index 00000000..0eaa86d2 --- /dev/null +++ b/src/guidellm/presentation/data_models.py @@ -0,0 +1,149 @@ +from collections import defaultdict +from math import ceil +from pydantic import BaseModel +import random +from typing import List, Optional, Tuple + +from guidellm.benchmark.benchmark import GenerativeBenchmark +from guidellm.objects.statistics import DistributionSummary + +__all__ = ["Bucket", "Model", "Dataset", "RunInfo", "TokenDistribution", "TokenDetails", "Server", "WorkloadDetails", "BenchmarkDatum"] + +class Bucket(BaseModel): + value: float + count: int + + @staticmethod + def from_data( + data: List[float], + bucket_width: Optional[float] = None, + n_buckets: Optional[int] = None + ) -> Tuple[List["Bucket"], float]: + if not data: + return [], 1.0 + + min_v = min(data) + max_v = max(data) + range_v = max_v - min_v + + if bucket_width is None: + if n_buckets is None: + n_buckets = 10 + bucket_width = range_v / n_buckets + else: + n_buckets = ceil(range_v / bucket_width) + + bucket_counts = defaultdict(int) + for val in data: + idx = int((val - min_v) // bucket_width) + if idx >= n_buckets: + idx = n_buckets - 1 + bucket_start = min_v + idx * bucket_width + bucket_counts[bucket_start] += 1 + + buckets = [Bucket(value=start, 
count=count) for start, count in sorted(bucket_counts.items())] + return buckets, bucket_width + + +class Model(BaseModel): + name: str + size: int + +class Dataset(BaseModel): + name: str + +class RunInfo(BaseModel): + model: Model + task: str + timestamp: float + dataset: Dataset + + @classmethod + def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): + model = benchmarks[0].worker.backend_model or 'N/A' + timestamp = max(bm.run_stats.start_time for bm in benchmarks if bm.run_stats.start_time is not None) + return cls( + model=Model(name=model, size=0), + task='N/A', + timestamp=timestamp, + dataset=Dataset(name="N/A") + ) + +class TokenDistribution(BaseModel): + statistics: Optional[DistributionSummary] = None + buckets: list[Bucket] + bucket_width: float + + +class TokenDetails(BaseModel): + samples: list[str] + token_distributions: TokenDistribution + +class Server(BaseModel): + target: str + +class RequestOverTime(BaseModel): + num_benchmarks: int + requests_over_time: TokenDistribution + +class WorkloadDetails(BaseModel): + prompts: TokenDetails + generations: TokenDetails + requests_over_time: RequestOverTime + rate_type: str + server: Server + @classmethod + def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): + target = benchmarks[0].worker.backend_target + rate_type = benchmarks[0].args.profile.type_ + successful_requests = [req for bm in benchmarks for req in bm.requests.successful] + sample_indices = random.sample(range(len(successful_requests)), min(5, len(successful_requests))) + sample_prompts = [successful_requests[i].prompt.replace("\n", " ").replace("\"", "'") for i in sample_indices] + sample_outputs = [successful_requests[i].output.replace("\n", " ").replace("\"", "'") for i in sample_indices] + + prompt_tokens = [req.prompt_tokens for bm in benchmarks for req in bm.requests.successful] + output_tokens = [req.output_tokens for bm in benchmarks for req in bm.requests.successful] + + prompt_token_buckets, _prompt_token_bucket_width = 
Bucket.from_data(prompt_tokens, 1) + output_token_buckets, _output_token_bucket_width = Bucket.from_data(output_tokens, 1) + + prompt_token_stats = DistributionSummary.from_values(prompt_tokens) + output_token_stats = DistributionSummary.from_values(output_tokens) + prompt_token_distributions = TokenDistribution(statistics=prompt_token_stats, buckets=prompt_token_buckets, bucket_width=1) + output_token_distributions = TokenDistribution(statistics=output_token_stats, buckets=output_token_buckets, bucket_width=1) + + min_start_time = benchmarks[0].run_stats.start_time + + all_req_times = [ + req.start_time - min_start_time + for bm in benchmarks + for req in bm.requests.successful + if req.start_time is not None + ] + number_of_buckets = len(benchmarks) + request_over_time_buckets, bucket_width = Bucket.from_data(all_req_times, None, number_of_buckets) + request_over_time_distribution = TokenDistribution(buckets=request_over_time_buckets, bucket_width=bucket_width) + return cls( + prompts=TokenDetails(samples=sample_prompts, token_distributions=prompt_token_distributions), + generations=TokenDetails(samples=sample_outputs, token_distributions=output_token_distributions), + requests_over_time=RequestOverTime(requests_over_time=request_over_time_distribution, num_benchmarks=number_of_buckets), + rate_type=rate_type, + server=Server(target=target) + ) + +class BenchmarkDatum(BaseModel): + requests_per_second: float + tpot: DistributionSummary + ttft: DistributionSummary + throughput: DistributionSummary + time_per_request: DistributionSummary + + @classmethod + def from_benchmark(cls, bm: GenerativeBenchmark): + return cls( + requests_per_second=bm.metrics.requests_per_second.successful.mean, + tpot=bm.metrics.inter_token_latency_ms.successful, + ttft=bm.metrics.time_to_first_token_ms.successful, + throughput=bm.metrics.output_tokens_per_second.successful, + time_per_request=bm.metrics.request_latency.successful, + ) diff --git a/src/guidellm/presentation/injector.py 
b/src/guidellm/presentation/injector.py new file mode 100644 index 00000000..ffa5ca35 --- /dev/null +++ b/src/guidellm/presentation/injector.py @@ -0,0 +1,57 @@ +from pathlib import Path +from typing import Union + +from guidellm.config import settings +from guidellm.utils.text import load_text + +__all__ = ["create_report", "inject_data"] + + +def create_report(js_data: dict, output_path: Union[str, Path]) -> Path: + """ + Creates a report from the dictionary and saves it to the output path. + + :param js_data: dict with match str and json data to inject + :type js_data: dict + :param output_path: the path, either a file or a directory, + to save the report to. If a directory, the report will be saved + as "report.html" inside of the directory. + :type output_path: str + :return: the path to the saved report + :rtype: str + """ + + if not isinstance(output_path, Path): + output_path = Path(output_path) + + if output_path.is_dir(): + output_path = output_path / "report.html" + + html_content = load_text(settings.report_generation.source) + report_content = inject_data( + js_data, + html_content, + ) + + output_path.parent.mkdir(parents=True, exist_ok=True) + output_path.write_text(report_content) + print(f'Report saved to {output_path}') + return output_path + +def inject_data( + js_data: dict, + html: str, +) -> str: + """ + Injects the json data into the HTML while replacing the placeholder. 
+ + :param js_data: the json data to inject + :type js_data: dict + :param html: the html to inject the data into + :type html: str + :return: the html with the json data injected + :rtype: str + """ + for placeholder, script in js_data.items(): + html = html.replace(placeholder, script) + return html \ No newline at end of file diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 316f13e4..9ec41a8d 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -5,6 +5,7 @@ Environment, LoggingSettings, OpenAISettings, + ReportGenerationSettings, Settings, print_config, reload_settings, @@ -18,6 +19,10 @@ def test_default_settings(): assert settings.env == Environment.PROD assert settings.logging == LoggingSettings() assert settings.openai == OpenAISettings() + assert ( + settings.report_generation.source + == "https://neuralmagic.github.io/ui/latest/index.html" + ) @@ -29,6 +34,7 @@ def test_settings_from_env_variables(mocker): "GUIDELLM__logging__disabled": "true", "GUIDELLM__OPENAI__API_KEY": "test_key", "GUIDELLM__OPENAI__BASE_URL": "http://test.url", + "GUIDELLM__REPORT_GENERATION__SOURCE": "http://custom.url", }, ) @@ -37,6 +43,31 @@ def test_settings_from_env_variables(mocker): assert settings.logging.disabled is True assert settings.openai.api_key == "test_key" assert settings.openai.base_url == "http://test.url" + assert settings.report_generation.source == "http://custom.url" + + +@pytest.mark.smoke() +def test_report_generation_default_source(): + settings = Settings(env=Environment.LOCAL) + assert settings.report_generation.source == "https://neuralmagic.github.io/ui/dev/index.html" + + settings = Settings(env=Environment.DEV) + assert ( + settings.report_generation.source + == "https://neuralmagic.github.io/ui/dev/index.html" + ) + + settings = Settings(env=Environment.STAGING) + assert ( + settings.report_generation.source + == "https://neuralmagic.github.io/ui/staging/latest/index.html" + ) + + 
settings = Settings(env=Environment.PROD) + assert ( + settings.report_generation.source + == "https://neuralmagic.github.io/ui/latest/index.html" + ) @pytest.mark.sanity @@ -59,6 +90,9 @@ def test_openai_settings(): assert openai_settings.api_key == "test_api_key" assert openai_settings.base_url == "http://test.api" +def test_report_generation_settings(): + report_settings = ReportGenerationSettings(source="http://custom.report") + assert report_settings.source == "http://custom.report" @pytest.mark.sanity def test_generate_env_file(): From a33b55f11bf761a40f9b53f30085fe97ed372ef1 Mon Sep 17 00:00:00 2001 From: dalthecow Date: Wed, 18 Jun 2025 02:04:08 -0400 Subject: [PATCH 02/20] push up hopefully working build to test with guidellm after the preview is live in gh-pages branch --- .github/workflows/development.yml | 33 +++++++++++- .github/workflows/main.yml | 30 +++++++++++ src/guidellm/objects/statistics.py | 5 ++ src/guidellm/presentation/__init__.py | 4 +- src/guidellm/presentation/data_models.py | 52 +++++++++++++------ .../store/slices/benchmarks/benchmarks.api.ts | 10 ++-- .../benchmarks/benchmarks.interfaces.ts | 30 ++++------- .../slices/benchmarks/benchmarks.selectors.ts | 20 ++++--- .../workloadDetails.interfaces.ts | 17 +----- tests/unit/objects/test_statistics.py | 13 +++++ 10 files changed, 143 insertions(+), 71 deletions(-) diff --git a/.github/workflows/development.yml b/.github/workflows/development.yml index 5cf899f8..bc53dd75 100644 --- a/.github/workflows/development.yml +++ b/.github/workflows/development.yml @@ -29,6 +29,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci @@ -59,6 +64,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci @@ -89,6 +99,11 @@ jobs: - name: 
Check out code uses: actions/checkout@v3 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci @@ -119,6 +134,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci @@ -149,6 +169,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci @@ -219,6 +244,11 @@ jobs: with: fetch-depth: 0 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Check if UI-related files changed id: check-changes run: | @@ -250,7 +280,8 @@ jobs: # Set asset prefix and base path with PR number ASSET_PREFIX=https://neuralmagic.github.io/guidellm/ui/pr/${PR_NUMBER} - USE_MOCK_DATA=true + # temporarily setting to false to test if this build works with guidellm + USE_MOCK_DATA=false BASE_PATH=/ui/pr/${PR_NUMBER} GIT_SHA=${{ github.sha }} export ASSET_PREFIX=${ASSET_PREFIX} diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 209f11c9..80c5aaa4 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -30,6 +30,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci @@ -60,6 +65,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci @@ -90,6 +100,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci @@ -120,6 +135,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 + 
- name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci @@ -150,6 +170,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci @@ -165,6 +190,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 + - name: Set up Node.js 22 + uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install dependencies run: npm ci diff --git a/src/guidellm/objects/statistics.py b/src/guidellm/objects/statistics.py index 552b5c20..7831b2cf 100644 --- a/src/guidellm/objects/statistics.py +++ b/src/guidellm/objects/statistics.py @@ -37,6 +37,9 @@ class Percentiles(StandardBaseModel): p25: float = Field( description="The 25th percentile of the distribution.", ) + p50: float = Field( + description="The 50th percentile of the distribution.", + ) p75: float = Field( description="The 75th percentile of the distribution.", ) @@ -159,6 +162,7 @@ def from_distribution_function( p05=cdf[np.argmax(cdf[:, 1] >= 0.05), 0].item(), # noqa: PLR2004 p10=cdf[np.argmax(cdf[:, 1] >= 0.1), 0].item(), # noqa: PLR2004 p25=cdf[np.argmax(cdf[:, 1] >= 0.25), 0].item(), # noqa: PLR2004 + p50=cdf[np.argmax(cdf[:, 1] >= 0.50), 0].item(), # noqa: PLR2004 p75=cdf[np.argmax(cdf[:, 1] >= 0.75), 0].item(), # noqa: PLR2004 p90=cdf[np.argmax(cdf[:, 1] >= 0.9), 0].item(), # noqa: PLR2004 p95=cdf[np.argmax(cdf[:, 1] >= 0.95), 0].item(), # noqa: PLR2004 @@ -172,6 +176,7 @@ def from_distribution_function( p05=0, p10=0, p25=0, + p50=0, p75=0, p90=0, p95=0, diff --git a/src/guidellm/presentation/__init__.py b/src/guidellm/presentation/__init__.py index 633b4c60..4e3868fd 100644 --- a/src/guidellm/presentation/__init__.py +++ b/src/guidellm/presentation/__init__.py @@ -1,5 +1,5 @@ from .builder import UIDataBuilder -from .data_models import (Bucket, Model, Dataset, RunInfo, TokenDistribution, TokenDetails, 
Server, WorkloadDetails, BenchmarkDatum) +from .data_models import (Bucket, Model, Dataset, RunInfo, Distribution, TokenDetails, Server, WorkloadDetails, BenchmarkDatum) from .injector import (create_report, inject_data) __all__ = [ @@ -8,7 +8,7 @@ "Model", "Dataset", "RunInfo", - "TokenDistribution", + "Distribution", "TokenDetails", "Server", "WorkloadDetails", diff --git a/src/guidellm/presentation/data_models.py b/src/guidellm/presentation/data_models.py index 0eaa86d2..273978c5 100644 --- a/src/guidellm/presentation/data_models.py +++ b/src/guidellm/presentation/data_models.py @@ -1,13 +1,13 @@ from collections import defaultdict from math import ceil -from pydantic import BaseModel +from pydantic import computed_field, BaseModel import random from typing import List, Optional, Tuple from guidellm.benchmark.benchmark import GenerativeBenchmark from guidellm.objects.statistics import DistributionSummary -__all__ = ["Bucket", "Model", "Dataset", "RunInfo", "TokenDistribution", "TokenDetails", "Server", "WorkloadDetails", "BenchmarkDatum"] +__all__ = ["Bucket", "Model", "Dataset", "RunInfo", "Distribution", "TokenDetails", "Server", "WorkloadDetails", "BenchmarkDatum"] class Bucket(BaseModel): value: float @@ -69,7 +69,7 @@ def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): dataset=Dataset(name="N/A") ) -class TokenDistribution(BaseModel): +class Distribution(BaseModel): statistics: Optional[DistributionSummary] = None buckets: list[Bucket] bucket_width: float @@ -77,14 +77,14 @@ class TokenDistribution(BaseModel): class TokenDetails(BaseModel): samples: list[str] - token_distributions: TokenDistribution + token_distributions: Distribution class Server(BaseModel): target: str class RequestOverTime(BaseModel): num_benchmarks: int - requests_over_time: TokenDistribution + requests_over_time: Distribution class WorkloadDetails(BaseModel): prompts: TokenDetails @@ -109,8 +109,8 @@ def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): 
prompt_token_stats = DistributionSummary.from_values(prompt_tokens) output_token_stats = DistributionSummary.from_values(output_tokens) - prompt_token_distributions = TokenDistribution(statistics=prompt_token_stats, buckets=prompt_token_buckets, bucket_width=1) - output_token_distributions = TokenDistribution(statistics=output_token_stats, buckets=output_token_buckets, bucket_width=1) + prompt_token_distributions = Distribution(statistics=prompt_token_stats, buckets=prompt_token_buckets, bucket_width=1) + output_token_distributions = Distribution(statistics=output_token_stats, buckets=output_token_buckets, bucket_width=1) min_start_time = benchmarks[0].run_stats.start_time @@ -122,7 +122,7 @@ def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): ] number_of_buckets = len(benchmarks) request_over_time_buckets, bucket_width = Bucket.from_data(all_req_times, None, number_of_buckets) - request_over_time_distribution = TokenDistribution(buckets=request_over_time_buckets, bucket_width=bucket_width) + request_over_time_distribution = Distribution(buckets=request_over_time_buckets, bucket_width=bucket_width) return cls( prompts=TokenDetails(samples=sample_prompts, token_distributions=prompt_token_distributions), generations=TokenDetails(samples=sample_outputs, token_distributions=output_token_distributions), @@ -131,19 +131,39 @@ def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): server=Server(target=target) ) +class TabularDistributionSummary(DistributionSummary): + """ + Same fields as `DistributionSummary`, but adds a ready-to-serialize/iterate + `percentile_rows` helper. 
+ """ + + @computed_field + @property + def percentile_rows(self) -> list[dict[str, float]]: + return [ + {"percentile": name, "value": value} + for name, value in self.percentiles.model_dump().items() + ] + + @classmethod + def from_distribution_summary( + cls, distribution: DistributionSummary + ) -> "TabularDistributionSummary": + return cls(**distribution.model_dump()) + class BenchmarkDatum(BaseModel): requests_per_second: float - tpot: DistributionSummary - ttft: DistributionSummary - throughput: DistributionSummary - time_per_request: DistributionSummary + tpot: TabularDistributionSummary + ttft: TabularDistributionSummary + throughput: TabularDistributionSummary + time_per_request: TabularDistributionSummary @classmethod def from_benchmark(cls, bm: GenerativeBenchmark): return cls( requests_per_second=bm.metrics.requests_per_second.successful.mean, - tpot=bm.metrics.inter_token_latency_ms.successful, - ttft=bm.metrics.time_to_first_token_ms.successful, - throughput=bm.metrics.output_tokens_per_second.successful, - time_per_request=bm.metrics.request_latency.successful, + tpot=TabularDistributionSummary.from_distribution_summary(bm.metrics.inter_token_latency_ms.successful), + ttft=TabularDistributionSummary.from_distribution_summary(bm.metrics.time_to_first_token_ms.successful), + throughput=TabularDistributionSummary.from_distribution_summary(bm.metrics.output_tokens_per_second.successful), + time_per_request=TabularDistributionSummary.from_distribution_summary(bm.metrics.request_latency.successful), ) diff --git a/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts b/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts index 67d867d7..7198d966 100644 --- a/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts +++ b/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts @@ -1,7 +1,7 @@ import { ThunkDispatch, UnknownAction } from '@reduxjs/toolkit'; import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query/react'; -import { Benchmarks, MetricData } 
from './benchmarks.interfaces'; +import { Benchmarks, Statistics } from './benchmarks.interfaces'; import { formatNumber } from '../../../utils/helpers'; import { defaultPercentile } from '../slo/slo.constants'; import { setSloData } from '../slo/slo.slice'; @@ -13,14 +13,14 @@ const fetchBenchmarks = () => { }; const getAverageValueForPercentile = ( - firstMetric: MetricData, - lastMetric: MetricData, + firstMetric: Statistics, + lastMetric: Statistics, percentile: string ) => { - const firstPercentile = firstMetric.percentiles.find( + const firstPercentile = firstMetric.percentileRows.find( (p) => p.percentile === percentile ); - const lastPercentile = lastMetric.percentiles.find( + const lastPercentile = lastMetric.percentileRows.find( (p) => p.percentile === percentile ); return ((firstPercentile?.value ?? 0) + (lastPercentile?.value ?? 0)) / 2; diff --git a/src/ui/lib/store/slices/benchmarks/benchmarks.interfaces.ts b/src/ui/lib/store/slices/benchmarks/benchmarks.interfaces.ts index 4dc755b2..602ae17e 100644 --- a/src/ui/lib/store/slices/benchmarks/benchmarks.interfaces.ts +++ b/src/ui/lib/store/slices/benchmarks/benchmarks.interfaces.ts @@ -1,44 +1,32 @@ export type Name = 'benchmarks'; -interface Statistics { +export interface Statistics { total: number; mean: number; std: number; median: number; min: number; max: number; + percentileRows: Percentile[]; + percentiles: Record; } export type PercentileValues = 'p50' | 'p90' | 'p95' | 'p99'; interface Percentile { - percentile: string; + percentile: PercentileValues; value: number; } -interface Bucket { - value: number; - count: number; -} - -export interface MetricData { - statistics: Statistics; - percentiles: Percentile[]; - buckets: Bucket[]; - bucketWidth: number; -} - export interface BenchmarkMetrics { - ttft: MetricData; - tpot: MetricData; - timePerRequest: MetricData; - throughput: MetricData; + ttft: Statistics; + tpot: Statistics; + timePerRequest: Statistics; + throughput: Statistics; } export 
interface Benchmark extends BenchmarkMetrics { requestsPerSecond: number; } -export type Benchmarks = { - benchmarks: Benchmark[]; -}; +export type Benchmarks = Benchmark[]; diff --git a/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts b/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts index 9453f772..2ba57e18 100644 --- a/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts +++ b/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts @@ -14,7 +14,7 @@ export const selectBenchmarks = (state: RootState) => state.benchmarks.data; export const selectMetricsSummaryLineData = createSelector( [selectBenchmarks, selectSloState], (benchmarks, sloState) => { - const sortedByRPS = benchmarks?.benchmarks + const sortedByRPS = benchmarks ?.slice() ?.sort((bm1, bm2) => (bm1.requestsPerSecond > bm2.requestsPerSecond ? 1 : -1)); const selectedPercentile = sloState.enforcedPercentile; @@ -34,7 +34,7 @@ export const selectMetricsSummaryLineData = createSelector( metrics.forEach((metric) => { const data: Point[] = []; sortedByRPS?.forEach((benchmark) => { - const percentile = benchmark[metric].percentiles.find( + const percentile = benchmark[metric].percentileRows.find( (p) => p.percentile === selectedPercentile ); data.push({ @@ -58,7 +58,7 @@ const getDefaultMetricValues = () => ({ export const selectInterpolatedMetrics = createSelector( [selectBenchmarks, selectSloState], (benchmarks, sloState) => { - const sortedByRPS = benchmarks?.benchmarks + const sortedByRPS = benchmarks ?.slice() ?.sort((bm1, bm2) => (bm1.requestsPerSecond > bm2.requestsPerSecond ? 
1 : -1)); const requestRates = sortedByRPS?.map((bm) => bm.requestsPerSecond) || []; @@ -92,15 +92,13 @@ export const selectInterpolatedMetrics = createSelector( return metricData; } metrics.forEach((metric) => { - const meanValues = sortedByRPS.map((bm) => bm[metric].statistics.mean); + const meanValues = sortedByRPS.map((bm) => bm[metric].mean); const interpolateMeanAt = createMonotoneSpline(requestRates, meanValues); const interpolatedMeanValue: number = interpolateMeanAt(currentRequestRate) || 0; const percentiles: PercentileValues[] = ['p50', 'p90', 'p95', 'p99']; const valuesByPercentile = percentiles.map((p) => { const bmValuesAtP = sortedByRPS.map((bm) => { - const result = - bm[metric].percentiles.find((percentile) => percentile.percentile === p) - ?.value || 0; + const result = bm[metric].percentiles[p] || 0; return result; }); const interpolateValueAtP = createMonotoneSpline(requestRates, bmValuesAtP); @@ -126,7 +124,7 @@ export const selectMetricsDetailsLineData = createSelector( [selectBenchmarks], (benchmarks) => { const sortedByRPS = - benchmarks?.benchmarks + benchmarks ?.slice() ?.sort((bm1, bm2) => bm1.requestsPerSecond > bm2.requestsPerSecond ? 
1 : -1 @@ -152,16 +150,16 @@ export const selectMetricsDetailsLineData = createSelector( } const data: { [key: string]: { data: Point[]; id: string; solid?: boolean } } = {}; - sortedByRPS[0].ttft.percentiles.forEach((p) => { + sortedByRPS[0].ttft.percentileRows.forEach((p) => { data[p.percentile] = { data: [], id: p.percentile }; }); data.mean = { data: [], id: 'mean', solid: true }; sortedByRPS?.forEach((benchmark) => { const rps = benchmark.requestsPerSecond; - benchmark[prop].percentiles.forEach((p) => { + benchmark[prop].percentileRows.forEach((p) => { data[p.percentile].data.push({ x: rps, y: p.value }); }); - const mean = benchmark[prop].statistics.mean; + const mean = benchmark[prop].mean; data.mean.data.push({ x: rps, y: mean }); }); lineData[prop] = Object.keys(data).map((key) => { diff --git a/src/ui/lib/store/slices/workloadDetails/workloadDetails.interfaces.ts b/src/ui/lib/store/slices/workloadDetails/workloadDetails.interfaces.ts index 2aa7619f..bbe5d7df 100644 --- a/src/ui/lib/store/slices/workloadDetails/workloadDetails.interfaces.ts +++ b/src/ui/lib/store/slices/workloadDetails/workloadDetails.interfaces.ts @@ -1,18 +1,6 @@ -export type Name = 'workloadDetails'; - -interface Statistics { - total: number; - mean: number; - std: number; - median: number; - min: number; - max: number; -} +import { Statistics } from '../benchmarks'; -interface Percentile { - percentile: string; - value: number; -} +export type Name = 'workloadDetails'; interface Bucket { value: number; @@ -21,7 +9,6 @@ interface Bucket { interface Distribution { statistics: Statistics; - percentiles: Percentile[]; buckets: Bucket[]; bucketWidth: number; } diff --git a/tests/unit/objects/test_statistics.py b/tests/unit/objects/test_statistics.py index f3332758..fa8cccd0 100644 --- a/tests/unit/objects/test_statistics.py +++ b/tests/unit/objects/test_statistics.py @@ -21,6 +21,7 @@ def create_default_percentiles() -> Percentiles: p05=5.0, p10=10.0, p25=25.0, + p50=50.0, p75=75.0, 
p90=90.0, p95=95.0, @@ -52,6 +53,7 @@ def test_percentiles_initialization(): assert percentiles.p05 == 5.0 assert percentiles.p10 == 10.0 assert percentiles.p25 == 25.0 + assert percentiles.p50 == 50.0 assert percentiles.p75 == 75.0 assert percentiles.p90 == 90.0 assert percentiles.p95 == 95.0 @@ -67,6 +69,7 @@ def test_percentiles_invalid_initialization(): "p05": 5.0, "p10": 10.0, "p25": 25.0, + "p50": 50.0, "p75": 75.0, "p90": 90.0, "p95": 95.0, @@ -108,6 +111,7 @@ def test_distribution_summary_initilaization(): assert distribution_summary.percentiles.p05 == 5.0 assert distribution_summary.percentiles.p10 == 10.0 assert distribution_summary.percentiles.p25 == 25.0 + assert distribution_summary.percentiles.p50 == 50.0 assert distribution_summary.percentiles.p75 == 75.0 assert distribution_summary.percentiles.p90 == 90.0 assert distribution_summary.percentiles.p95 == 95.0 @@ -175,6 +179,9 @@ def test_distribution_summary_from_distribution_function(): assert distribution_summary.percentiles.p25 == pytest.approx( np.percentile(values, 25.0) ) + assert distribution_summary.percentiles.p50 == pytest.approx( + np.percentile(values, 50.0) + ) assert distribution_summary.percentiles.p75 == pytest.approx( np.percentile(values, 75.0) ) @@ -226,6 +233,9 @@ def test_distribution_summary_from_values(): assert distribution_summary.percentiles.p25 == pytest.approx( np.percentile(values, 25.0) ) + assert distribution_summary.percentiles.p50 == pytest.approx( + np.percentile(values, 50.0) + ) assert distribution_summary.percentiles.p75 == pytest.approx( np.percentile(values, 75.0) ) @@ -284,6 +294,7 @@ def test_distribution_summary_from_request_times_concurrency(): assert distribution_summary.percentiles.p05 == pytest.approx(10) assert distribution_summary.percentiles.p10 == pytest.approx(10) assert distribution_summary.percentiles.p25 == pytest.approx(10) + assert distribution_summary.percentiles.p50 == pytest.approx(10) assert distribution_summary.percentiles.p75 == 
pytest.approx(10) assert distribution_summary.percentiles.p90 == pytest.approx(10) assert distribution_summary.percentiles.p95 == pytest.approx(10) @@ -318,6 +329,7 @@ def test_distribution_summary_from_request_times_rate(): assert distribution_summary.percentiles.p05 == pytest.approx(10.0) assert distribution_summary.percentiles.p10 == pytest.approx(10.0) assert distribution_summary.percentiles.p25 == pytest.approx(10.0) + assert distribution_summary.percentiles.p50 == pytest.approx(10.0) assert distribution_summary.percentiles.p75 == pytest.approx(10.0) assert distribution_summary.percentiles.p90 == pytest.approx(10.0) assert distribution_summary.percentiles.p95 == pytest.approx(10.0) @@ -358,6 +370,7 @@ def test_distribution_summary_from_iterable_request_times(): assert distribution_summary.percentiles.p05 == pytest.approx(80.0) assert distribution_summary.percentiles.p10 == pytest.approx(80.0) assert distribution_summary.percentiles.p25 == pytest.approx(80.0) + assert distribution_summary.percentiles.p50 == pytest.approx(80.0) assert distribution_summary.percentiles.p75 == pytest.approx(80.0) assert distribution_summary.percentiles.p90 == pytest.approx(160.0) assert distribution_summary.percentiles.p95 == pytest.approx(160.0) From 2f46ad2a6ac39296456230ef7b561f1f3d6adb99 Mon Sep 17 00:00:00 2001 From: dalthecow Date: Wed, 18 Jun 2025 02:41:14 -0400 Subject: [PATCH 03/20] fix broken test, type issues --- .../MetricsSummary.component.tsx | 2 +- .../WorkloadMetrics.component.tsx | 6 +- .../store/slices/benchmarks/benchmarks.api.ts | 4 +- .../slices/benchmarks/benchmarks.constants.ts | 4 +- .../workloadDetails.constants.ts | 11 +- tests/ui/integration/page.test.tsx | 5 +- tests/ui/unit/mocks/mockBenchmarks.ts | 148 +++++++++++------- 7 files changed, 105 insertions(+), 75 deletions(-) diff --git a/src/ui/lib/components/MetricsSummary/MetricsSummary.component.tsx b/src/ui/lib/components/MetricsSummary/MetricsSummary.component.tsx index ae9a428b..d6bf3725 100644 --- 
a/src/ui/lib/components/MetricsSummary/MetricsSummary.component.tsx +++ b/src/ui/lib/components/MetricsSummary/MetricsSummary.component.tsx @@ -95,7 +95,7 @@ export const Component = () => { }, ]; - if ((data?.benchmarks?.length ?? 0) <= 1) { + if ((data?.length ?? 0) <= 1) { return <>; } diff --git a/src/ui/lib/components/WorkloadMetrics/WorkloadMetrics.component.tsx b/src/ui/lib/components/WorkloadMetrics/WorkloadMetrics.component.tsx index b717bb11..7be48983 100644 --- a/src/ui/lib/components/WorkloadMetrics/WorkloadMetrics.component.tsx +++ b/src/ui/lib/components/WorkloadMetrics/WorkloadMetrics.component.tsx @@ -48,10 +48,8 @@ export const Component = () => { throughput: throughputAtRPS, } = useSelector(selectInterpolatedMetrics); - const minX = Math.floor( - Math.min(...(data?.benchmarks?.map((bm) => bm.requestsPerSecond) || [])) - ); - if ((data?.benchmarks?.length ?? 0) <= 1) { + const minX = Math.floor(Math.min(...(data?.map((bm) => bm.requestsPerSecond) || []))); + if ((data?.length ?? 
0) <= 1) { return <>; } return ( diff --git a/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts b/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts index 7198d966..5c5e6776 100644 --- a/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts +++ b/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts @@ -32,8 +32,8 @@ const setDefaultSLOs = ( dispatch: ThunkDispatch ) => { // temporarily set default slo values, long term the backend should set default slos that will not just be the avg at the default percentile - const firstBM = data.benchmarks[0]; - const lastBM = data.benchmarks[data.benchmarks.length - 1]; + const firstBM = data[0]; + const lastBM = data[data.length - 1]; const ttftAvg = getAverageValueForPercentile( firstBM.ttft, diff --git a/src/ui/lib/store/slices/benchmarks/benchmarks.constants.ts b/src/ui/lib/store/slices/benchmarks/benchmarks.constants.ts index deb444b2..38bddb74 100644 --- a/src/ui/lib/store/slices/benchmarks/benchmarks.constants.ts +++ b/src/ui/lib/store/slices/benchmarks/benchmarks.constants.ts @@ -2,6 +2,4 @@ import { Benchmarks, Name } from './benchmarks.interfaces'; export const name: Readonly = 'benchmarks'; -export const initialState: Benchmarks = { - benchmarks: [], -}; +export const initialState: Benchmarks = []; diff --git a/src/ui/lib/store/slices/workloadDetails/workloadDetails.constants.ts b/src/ui/lib/store/slices/workloadDetails/workloadDetails.constants.ts index c45efa76..e6604add 100644 --- a/src/ui/lib/store/slices/workloadDetails/workloadDetails.constants.ts +++ b/src/ui/lib/store/slices/workloadDetails/workloadDetails.constants.ts @@ -1,5 +1,5 @@ import { Name, WorkloadDetails } from './workloadDetails.interfaces'; - +import { PercentileValues } from '../benchmarks/benchmarks.interfaces'; export const name: Readonly = 'workloadDetails'; export const initialState: WorkloadDetails = { @@ -13,8 +13,9 @@ export const initialState: WorkloadDetails = { median: 0, min: 0, max: 0, + percentiles: {} as Record, + 
percentileRows: [], }, - percentiles: [], buckets: [], bucketWidth: 0, }, @@ -29,8 +30,9 @@ export const initialState: WorkloadDetails = { median: 0, min: 0, max: 0, + percentiles: {} as Record, + percentileRows: [], }, - percentiles: [], buckets: [], bucketWidth: 0, }, @@ -45,8 +47,9 @@ export const initialState: WorkloadDetails = { median: 0, min: 0, max: 0, + percentiles: {} as Record, + percentileRows: [], }, - percentiles: [], buckets: [], bucketWidth: 0, }, diff --git a/tests/ui/integration/page.test.tsx b/tests/ui/integration/page.test.tsx index 85c4bee8..cbd8f324 100644 --- a/tests/ui/integration/page.test.tsx +++ b/tests/ui/integration/page.test.tsx @@ -17,10 +17,7 @@ const route = (input: RequestInfo) => { if (url.endsWith('/run-info')) return jsonResponse({}); if (url.endsWith('/workload-details')) return jsonResponse({}); - if (url.endsWith('/benchmarks')) - return jsonResponse({ - benchmarks: mockBenchmarks, - }); + if (url.endsWith('/benchmarks')) return jsonResponse(mockBenchmarks); /* fall-through → 404 */ return { ok: false, status: 404, json: () => Promise.resolve({}) }; diff --git a/tests/ui/unit/mocks/mockBenchmarks.ts b/tests/ui/unit/mocks/mockBenchmarks.ts index 5acd7d12..884e8b89 100644 --- a/tests/ui/unit/mocks/mockBenchmarks.ts +++ b/tests/ui/unit/mocks/mockBenchmarks.ts @@ -2,15 +2,19 @@ export const mockBenchmarks = [ { requestsPerSecond: 0.6668550387660497, tpot: { - statistics: { - total: 80, - mean: 23.00635663936911, - median: 22.959455611213805, - min: 22.880917503720237, - max: 24.14080301920573, - std: 0.18918760384209338, + total: 80, + mean: 23.00635663936911, + median: 22.959455611213805, + min: 22.880917503720237, + max: 24.14080301920573, + std: 0.18918760384209338, + percentiles: { + p50: 22.959455611213805, + p90: 23.01789086962503, + p95: 23.30297423947242, + p99: 24.14080301920573, }, - percentiles: [ + percentileRows: [ { percentile: 'p50', value: 22.959455611213805, @@ -30,15 +34,19 @@ export const mockBenchmarks = [ ], 
}, ttft: { - statistics: { - total: 80, - mean: 49.64659512042999, - median: 49.23129081726074, - min: 44.538259506225586, - max: 55.47308921813965, - std: 1.7735485090634995, + total: 80, + mean: 49.64659512042999, + median: 49.23129081726074, + min: 44.538259506225586, + max: 55.47308921813965, + std: 1.7735485090634995, + percentiles: { + p50: 49.23129081726074, + p90: 50.16160011291504, + p95: 54.918766021728516, + p99: 55.47308921813965, }, - percentiles: [ + percentileRows: [ { percentile: 'p50', value: 49.23129081726074, @@ -58,15 +66,19 @@ export const mockBenchmarks = [ ], }, throughput: { - statistics: { - total: 210, - mean: 42.58702991319684, - median: 43.536023084668, - min: 0.0, - max: 43.68247620237872, - std: 4.559764488536857, + total: 210, + mean: 42.58702991319684, + median: 43.536023084668, + min: 0.0, + max: 43.68247620237872, + std: 4.559764488536857, + percentiles: { + p50: 43.536023084668, + p90: 43.62613633999709, + p95: 43.64020767654067, + p99: 43.68202126662431, }, - percentiles: [ + percentileRows: [ { percentile: 'p50', value: 43.536023084668, @@ -86,15 +98,19 @@ export const mockBenchmarks = [ ], }, timePerRequest: { - statistics: { - total: 80, - mean: 1496.706646680832, - median: 1496.1087703704834, - min: 1490.584135055542, - max: 1505.8784484863281, - std: 3.4553340533022667, + total: 80, + mean: 1496.706646680832, + median: 1496.1087703704834, + min: 1490.584135055542, + max: 1505.8784484863281, + std: 3.4553340533022667, + percentiles: { + p50: 1496.1087703704834, + p90: 1500.9305477142334, + p95: 1505.3200721740723, + p99: 1505.8784484863281, }, - percentiles: [ + percentileRows: [ { percentile: 'p50', value: 1496.1087703704834, @@ -117,15 +133,19 @@ export const mockBenchmarks = [ { requestsPerSecond: 28.075330129628725, tpot: { - statistics: { - total: 3416, - mean: 126.08707076148656, - median: 125.30853256346687, - min: 23.034303907364134, - max: 138.08223756693178, - std: 3.508992115582193, + total: 3416, + mean: 
126.08707076148656, + median: 125.30853256346687, + min: 23.034303907364134, + max: 138.08223756693178, + std: 3.508992115582193, + percentiles: { + p50: 125.30853256346687, + p90: 129.21135009281218, + p95: 129.52291770059554, + p99: 132.21229490686636, }, - percentiles: [ + percentileRows: [ { percentile: 'p50', value: 125.30853256346687, @@ -145,15 +165,19 @@ export const mockBenchmarks = [ ], }, ttft: { - statistics: { - total: 3416, - mean: 8585.486161415694, - median: 8965.316534042358, - min: 110.53991317749023, - max: 12575.379610061646, - std: 1929.5632525234505, + total: 3416, + mean: 8585.486161415694, + median: 8965.316534042358, + min: 110.53991317749023, + max: 12575.379610061646, + std: 1929.5632525234505, + percentiles: { + p50: 8965.316534042358, + p90: 9231.79316520691, + p95: 9485.00108718872, + p99: 12096.465587615967, }, - percentiles: [ + percentileRows: [ { percentile: 'p50', value: 8965.316534042358, @@ -181,7 +205,13 @@ export const mockBenchmarks = [ max: 838860.8, std: 5196.545581836957, }, - percentiles: [ + percentiles: { + p50: 670.1236619268253, + p90: 4068.1901066925316, + p95: 6374.322188449848, + p99: 16194.223938223939, + }, + percentileRows: [ { percentile: 'p50', value: 670.1236619268253, @@ -201,15 +231,19 @@ export const mockBenchmarks = [ ], }, timePerRequest: { - statistics: { - total: 3416, - mean: 16526.811318389147, - median: 17058.441638946533, - min: 1711.3444805145264, - max: 20646.55351638794, - std: 2054.9553770234484, + total: 3416, + mean: 16526.811318389147, + median: 17058.441638946533, + min: 1711.3444805145264, + max: 20646.55351638794, + std: 2054.9553770234484, + percentiles: { + p50: 17058.441638946533, + p90: 17143.84412765503, + p95: 17248.060703277588, + p99: 20116.52660369873, }, - percentiles: [ + percentileRows: [ { percentile: 'p50', value: 17058.441638946533, From dd1cef3ace37dec14925eeef64fc2b49802031e3 Mon Sep 17 00:00:00 2001 From: dalthecow Date: Wed, 18 Jun 2025 02:45:14 -0400 Subject: [PATCH 
04/20] run precommit, style fixes, but not all passing --- src/guidellm/benchmark/output.py | 9 +- src/guidellm/config.py | 2 + src/guidellm/presentation/__init__.py | 24 +- src/guidellm/presentation/builder.py | 16 +- src/guidellm/presentation/data_models.py | 307 ++++++++++++++--------- src/guidellm/presentation/injector.py | 7 +- tests/unit/test_config.py | 9 +- 7 files changed, 231 insertions(+), 143 deletions(-) diff --git a/src/guidellm/benchmark/output.py b/src/guidellm/benchmark/output.py index 86a80f73..7aa69072 100644 --- a/src/guidellm/benchmark/output.py +++ b/src/guidellm/benchmark/output.py @@ -1,12 +1,12 @@ import csv import json import math -import humps from collections import OrderedDict from datetime import datetime from pathlib import Path from typing import Any, Literal, Optional, Union +import humps import yaml from pydantic import Field from rich.console import Console @@ -26,10 +26,11 @@ StandardBaseModel, StatusDistributionSummary, ) +from guidellm.presentation import UIDataBuilder from guidellm.scheduler import strategy_display_str from guidellm.utils import Colors, split_text_list_by_length from guidellm.utils.injector import create_report -from guidellm.presentation import UIDataBuilder + __all__ = [ "GenerativeBenchmarksConsole", "GenerativeBenchmarksReport", @@ -69,7 +70,7 @@ def load_file(path: Union[str, Path]) -> "GenerativeBenchmarksReport": if type_ == "csv": raise ValueError(f"CSV file type is not supported for loading: {path}.") - + if type_ == "html": raise ValueError(f"HTML file type is not supported for loading: {path}.") @@ -245,7 +246,7 @@ def save_html(self, path: str | Path) -> Path: data = data_builder.to_dict() camel_data = humps.camelize(data) ui_api_data = { - f"window.{humps.decamelize(k)} = {{}};": f'window.{humps.decamelize(k)} = {json.dumps(v, indent=2)};\n' + f"window.{humps.decamelize(k)} = {{}};": f"window.{humps.decamelize(k)} = {json.dumps(v, indent=2)};\n" for k, v in camel_data.items() } print("________") 
diff --git a/src/guidellm/config.py b/src/guidellm/config.py index ef2db2ab..678fd1cc 100644 --- a/src/guidellm/config.py +++ b/src/guidellm/config.py @@ -86,6 +86,7 @@ class OpenAISettings(BaseModel): base_url: str = "http://localhost:8000" max_output_tokens: int = 16384 + class ReportGenerationSettings(BaseModel): """ Report generation settings for the application @@ -93,6 +94,7 @@ class ReportGenerationSettings(BaseModel): source: str = "" + class Settings(BaseSettings): """ All the settings are powered by pydantic_settings and could be diff --git a/src/guidellm/presentation/__init__.py b/src/guidellm/presentation/__init__.py index 4e3868fd..872188db 100644 --- a/src/guidellm/presentation/__init__.py +++ b/src/guidellm/presentation/__init__.py @@ -1,18 +1,28 @@ from .builder import UIDataBuilder -from .data_models import (Bucket, Model, Dataset, RunInfo, Distribution, TokenDetails, Server, WorkloadDetails, BenchmarkDatum) -from .injector import (create_report, inject_data) +from .data_models import ( + BenchmarkDatum, + Bucket, + Dataset, + Distribution, + Model, + RunInfo, + Server, + TokenDetails, + WorkloadDetails, +) +from .injector import create_report, inject_data __all__ = [ - "UIDataBuilder", + "BenchmarkDatum", "Bucket", - "Model", "Dataset", - "RunInfo", "Distribution", - "TokenDetails", + "Model", + "RunInfo", "Server", + "TokenDetails", + "UIDataBuilder", "WorkloadDetails", - "BenchmarkDatum", "create_report", "inject_data", ] diff --git a/src/guidellm/presentation/builder.py b/src/guidellm/presentation/builder.py index 9bf29656..bcb85457 100644 --- a/src/guidellm/presentation/builder.py +++ b/src/guidellm/presentation/builder.py @@ -1,7 +1,9 @@ from typing import Any -from .data_models import RunInfo, WorkloadDetails, BenchmarkDatum + from guidellm.benchmark.benchmark import GenerativeBenchmark +from .data_models import BenchmarkDatum, RunInfo, WorkloadDetails + __all__ = ["UIDataBuilder"] @@ -16,11 +18,11 @@ def build_workload_details(self): return 
WorkloadDetails.from_benchmarks(self.benchmarks) def build_benchmarks(self): - return [ BenchmarkDatum.from_benchmark(b) for b in self.benchmarks ] - + return [BenchmarkDatum.from_benchmark(b) for b in self.benchmarks] + def to_dict(self) -> dict[str, Any]: return { - "run_info": self.build_run_info().dict(), - "workload_details": self.build_workload_details().dict(), - "benchmarks": [b.dict() for b in self.build_benchmarks()], - } \ No newline at end of file + "run_info": self.build_run_info().dict(), + "workload_details": self.build_workload_details().dict(), + "benchmarks": [b.dict() for b in self.build_benchmarks()], + } diff --git a/src/guidellm/presentation/data_models.py b/src/guidellm/presentation/data_models.py index 273978c5..d2a5d86c 100644 --- a/src/guidellm/presentation/data_models.py +++ b/src/guidellm/presentation/data_models.py @@ -1,135 +1,193 @@ +import random from collections import defaultdict from math import ceil -from pydantic import computed_field, BaseModel -import random from typing import List, Optional, Tuple +from pydantic import BaseModel, computed_field + from guidellm.benchmark.benchmark import GenerativeBenchmark from guidellm.objects.statistics import DistributionSummary -__all__ = ["Bucket", "Model", "Dataset", "RunInfo", "Distribution", "TokenDetails", "Server", "WorkloadDetails", "BenchmarkDatum"] +__all__ = [ + "BenchmarkDatum", + "Bucket", + "Dataset", + "Distribution", + "Model", + "RunInfo", + "Server", + "TokenDetails", + "WorkloadDetails", +] + class Bucket(BaseModel): - value: float - count: int - - @staticmethod - def from_data( - data: List[float], - bucket_width: Optional[float] = None, - n_buckets: Optional[int] = None - ) -> Tuple[List["Bucket"], float]: - if not data: - return [], 1.0 - - min_v = min(data) - max_v = max(data) - range_v = max_v - min_v - - if bucket_width is None: - if n_buckets is None: - n_buckets = 10 - bucket_width = range_v / n_buckets - else: - n_buckets = ceil(range_v / bucket_width) - - 
bucket_counts = defaultdict(int) - for val in data: - idx = int((val - min_v) // bucket_width) - if idx >= n_buckets: - idx = n_buckets - 1 - bucket_start = min_v + idx * bucket_width - bucket_counts[bucket_start] += 1 - - buckets = [Bucket(value=start, count=count) for start, count in sorted(bucket_counts.items())] - return buckets, bucket_width + value: float + count: int + + @staticmethod + def from_data( + data: List[float], + bucket_width: Optional[float] = None, + n_buckets: Optional[int] = None, + ) -> Tuple[List["Bucket"], float]: + if not data: + return [], 1.0 + + min_v = min(data) + max_v = max(data) + range_v = max_v - min_v + + if bucket_width is None: + if n_buckets is None: + n_buckets = 10 + bucket_width = range_v / n_buckets + else: + n_buckets = ceil(range_v / bucket_width) + + bucket_counts = defaultdict(int) + for val in data: + idx = int((val - min_v) // bucket_width) + if idx >= n_buckets: + idx = n_buckets - 1 + bucket_start = min_v + idx * bucket_width + bucket_counts[bucket_start] += 1 + + buckets = [ + Bucket(value=start, count=count) + for start, count in sorted(bucket_counts.items()) + ] + return buckets, bucket_width class Model(BaseModel): - name: str - size: int + name: str + size: int + class Dataset(BaseModel): - name: str + name: str + class RunInfo(BaseModel): - model: Model - task: str - timestamp: float - dataset: Dataset - - @classmethod - def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): - model = benchmarks[0].worker.backend_model or 'N/A' - timestamp = max(bm.run_stats.start_time for bm in benchmarks if bm.start_time is not None) - return cls( - model=Model(name=model, size=0), - task='N/A', - timestamp=timestamp, - dataset=Dataset(name="N/A") - ) + model: Model + task: str + timestamp: float + dataset: Dataset + + @classmethod + def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): + model = benchmarks[0].worker.backend_model or "N/A" + timestamp = max( + bm.run_stats.start_time for bm in 
benchmarks if bm.start_time is not None + ) + return cls( + model=Model(name=model, size=0), + task="N/A", + timestamp=timestamp, + dataset=Dataset(name="N/A"), + ) + class Distribution(BaseModel): - statistics: Optional[DistributionSummary] = None - buckets: list[Bucket] - bucket_width: float + statistics: Optional[DistributionSummary] = None + buckets: list[Bucket] + bucket_width: float class TokenDetails(BaseModel): - samples: list[str] - token_distributions: Distribution + samples: list[str] + token_distributions: Distribution + class Server(BaseModel): - target: str + target: str + class RequestOverTime(BaseModel): - num_benchmarks: int - requests_over_time: Distribution + num_benchmarks: int + requests_over_time: Distribution + class WorkloadDetails(BaseModel): - prompts: TokenDetails - generations: TokenDetails - requests_over_time: RequestOverTime - rate_type: str - server: Server - @classmethod - def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): - target = benchmarks[0].worker.backend_target - rate_type = benchmarks[0].args.profile.type_ - successful_requests = [req for bm in benchmarks for req in bm.requests.successful] - sample_indices = random.sample(range(len(successful_requests)), min(5, len(successful_requests))) - sample_prompts = [successful_requests[i].prompt.replace("\n", " ").replace("\"", "'") for i in sample_indices] - sample_outputs = [successful_requests[i].output.replace("\n", " ").replace("\"", "'") for i in sample_indices] - - prompt_tokens = [req.prompt_tokens for bm in benchmarks for req in bm.requests.successful] - output_tokens = [req.output_tokens for bm in benchmarks for req in bm.requests.successful] - - prompt_token_buckets, _prompt_token_bucket_width = Bucket.from_data(prompt_tokens, 1) - output_token_buckets, _output_token_bucket_width = Bucket.from_data(output_tokens, 1) - - prompt_token_stats = DistributionSummary.from_values(prompt_tokens) - output_token_stats = DistributionSummary.from_values(output_tokens) - 
prompt_token_distributions = Distribution(statistics=prompt_token_stats, buckets=prompt_token_buckets, bucket_width=1) - output_token_distributions = Distribution(statistics=output_token_stats, buckets=output_token_buckets, bucket_width=1) - - min_start_time = benchmarks[0].run_stats.start_time - - all_req_times = [ - req.start_time - min_start_time - for bm in benchmarks - for req in bm.requests.successful - if req.start_time is not None - ] - number_of_buckets = len(benchmarks) - request_over_time_buckets, bucket_width = Bucket.from_data(all_req_times, None, number_of_buckets) - request_over_time_distribution = Distribution(buckets=request_over_time_buckets, bucket_width=bucket_width) - return cls( - prompts=TokenDetails(samples=sample_prompts, token_distributions=prompt_token_distributions), - generations=TokenDetails(samples=sample_outputs, token_distributions=output_token_distributions), - requests_over_time=RequestOverTime(requests_over_time=request_over_time_distribution, num_benchmarks=number_of_buckets), - rate_type=rate_type, - server=Server(target=target) - ) + prompts: TokenDetails + generations: TokenDetails + requests_over_time: RequestOverTime + rate_type: str + server: Server + + @classmethod + def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): + target = benchmarks[0].worker.backend_target + rate_type = benchmarks[0].args.profile.type_ + successful_requests = [ + req for bm in benchmarks for req in bm.requests.successful + ] + sample_indices = random.sample( + range(len(successful_requests)), min(5, len(successful_requests)) + ) + sample_prompts = [ + successful_requests[i].prompt.replace("\n", " ").replace('"', "'") + for i in sample_indices + ] + sample_outputs = [ + successful_requests[i].output.replace("\n", " ").replace('"', "'") + for i in sample_indices + ] + + prompt_tokens = [ + req.prompt_tokens for bm in benchmarks for req in bm.requests.successful + ] + output_tokens = [ + req.output_tokens for bm in benchmarks for req in 
bm.requests.successful + ] + + prompt_token_buckets, _prompt_token_bucket_width = Bucket.from_data( + prompt_tokens, 1 + ) + output_token_buckets, _output_token_bucket_width = Bucket.from_data( + output_tokens, 1 + ) + + prompt_token_stats = DistributionSummary.from_values(prompt_tokens) + output_token_stats = DistributionSummary.from_values(output_tokens) + prompt_token_distributions = Distribution( + statistics=prompt_token_stats, buckets=prompt_token_buckets, bucket_width=1 + ) + output_token_distributions = Distribution( + statistics=output_token_stats, buckets=output_token_buckets, bucket_width=1 + ) + + min_start_time = benchmarks[0].run_stats.start_time + + all_req_times = [ + req.start_time - min_start_time + for bm in benchmarks + for req in bm.requests.successful + if req.start_time is not None + ] + number_of_buckets = len(benchmarks) + request_over_time_buckets, bucket_width = Bucket.from_data( + all_req_times, None, number_of_buckets + ) + request_over_time_distribution = Distribution( + buckets=request_over_time_buckets, bucket_width=bucket_width + ) + return cls( + prompts=TokenDetails( + samples=sample_prompts, token_distributions=prompt_token_distributions + ), + generations=TokenDetails( + samples=sample_outputs, token_distributions=output_token_distributions + ), + requests_over_time=RequestOverTime( + requests_over_time=request_over_time_distribution, + num_benchmarks=number_of_buckets, + ), + rate_type=rate_type, + server=Server(target=target), + ) + class TabularDistributionSummary(DistributionSummary): """ @@ -151,19 +209,28 @@ def from_distribution_summary( ) -> "TabularDistributionSummary": return cls(**distribution.model_dump()) + class BenchmarkDatum(BaseModel): - requests_per_second: float - tpot: TabularDistributionSummary - ttft: TabularDistributionSummary - throughput: TabularDistributionSummary - time_per_request: TabularDistributionSummary - - @classmethod - def from_benchmark(cls, bm: GenerativeBenchmark): - return cls( - 
requests_per_second=bm.metrics.requests_per_second.successful.mean, - tpot=TabularDistributionSummary.from_distribution_summary(bm.metrics.inter_token_latency_ms.successful), - ttft=TabularDistributionSummary.from_distribution_summary(bm.metrics.time_to_first_token_ms.successful), - throughput=TabularDistributionSummary.from_distribution_summary(bm.metrics.output_tokens_per_second.successful), - time_per_request=TabularDistributionSummary.from_distribution_summary(bm.metrics.request_latency.successful), - ) + requests_per_second: float + tpot: TabularDistributionSummary + ttft: TabularDistributionSummary + throughput: TabularDistributionSummary + time_per_request: TabularDistributionSummary + + @classmethod + def from_benchmark(cls, bm: GenerativeBenchmark): + return cls( + requests_per_second=bm.metrics.requests_per_second.successful.mean, + tpot=TabularDistributionSummary.from_distribution_summary( + bm.metrics.inter_token_latency_ms.successful + ), + ttft=TabularDistributionSummary.from_distribution_summary( + bm.metrics.time_to_first_token_ms.successful + ), + throughput=TabularDistributionSummary.from_distribution_summary( + bm.metrics.output_tokens_per_second.successful + ), + time_per_request=TabularDistributionSummary.from_distribution_summary( + bm.metrics.request_latency.successful + ), + ) diff --git a/src/guidellm/presentation/injector.py b/src/guidellm/presentation/injector.py index ffa5ca35..873b2578 100644 --- a/src/guidellm/presentation/injector.py +++ b/src/guidellm/presentation/injector.py @@ -20,7 +20,7 @@ def create_report(js_data: dict, output_path: Union[str, Path]) -> Path: :return: the path to the saved report :rtype: str """ - + if not isinstance(output_path, Path): output_path = Path(output_path) @@ -35,9 +35,10 @@ def create_report(js_data: dict, output_path: Union[str, Path]) -> Path: output_path.parent.mkdir(parents=True, exist_ok=True) output_path.write_text(report_content) - print(f'Report saved to {output_path}') + print(f"Report 
saved to {output_path}") return output_path + def inject_data( js_data: dict, html: str, @@ -54,4 +55,4 @@ def inject_data( """ for placeholder, script in js_data.items(): html = html.replace(placeholder, script) - return html \ No newline at end of file + return html diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 9ec41a8d..ca084ec5 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -46,10 +46,13 @@ def test_settings_from_env_variables(mocker): assert settings.report_generation.source == "http://custom.url" -@pytest.mark.smoke() +@pytest.mark.smoke def test_report_generation_default_source(): settings = Settings(env=Environment.LOCAL) - assert settings.report_generation.source == "https://neuralmagic.github.io/ui/dev/index.html" + assert ( + settings.report_generation.source + == "https://neuralmagic.github.io/ui/dev/index.html" + ) settings = Settings(env=Environment.DEV) assert ( @@ -90,10 +93,12 @@ def test_openai_settings(): assert openai_settings.api_key == "test_api_key" assert openai_settings.base_url == "http://test.api" + def test_report_generation_settings(): report_settings = ReportGenerationSettings(source="http://custom.report") assert report_settings.source == "http://custom.report" + @pytest.mark.sanity def test_generate_env_file(): settings = Settings() From db9d9573d3854f5ff0f394687462d998a7e78276 Mon Sep 17 00:00:00 2001 From: dalthecow Date: Wed, 18 Jun 2025 14:14:44 -0400 Subject: [PATCH 05/20] get ui working locally --- benchmarks.html | 819 ++++++++++++++++++ src/guidellm/config.py | 10 +- src/guidellm/presentation/builder.py | 6 +- src/ui/.env.local | 2 +- src/ui/lib/store/benchmarksWindowData.ts | 484 +++++++---- .../store/slices/benchmarks/benchmarks.api.ts | 29 +- .../slices/benchmarks/benchmarks.selectors.ts | 1 + 7 files changed, 1167 insertions(+), 184 deletions(-) create mode 100644 benchmarks.html diff --git a/benchmarks.html b/benchmarks.html new file mode 100644 index 
00000000..3c02dc09 --- /dev/null +++ b/benchmarks.html @@ -0,0 +1,819 @@ +GuideLLM \ No newline at end of file diff --git a/src/guidellm/config.py b/src/guidellm/config.py index 678fd1cc..f8a2eecd 100644 --- a/src/guidellm/config.py +++ b/src/guidellm/config.py @@ -30,10 +30,10 @@ class Environment(str, Enum): ENV_REPORT_MAPPING = { - Environment.PROD: "https://neuralmagic.github.io/ui/latest/index.html", - Environment.STAGING: "https://neuralmagic.github.io/ui/staging/latest/index.html", - Environment.DEV: "https://neuralmagic.github.io/ui/dev/index.html", - Environment.LOCAL: "https://neuralmagic.github.io/ui/dev/index.html", + Environment.PROD: "https://neuralmagic.github.io/guidellm/ui/latest/index.html", + Environment.STAGING: "https://neuralmagic.github.io/guidellm/ui/staging/latest/index.html", + Environment.DEV: "https://neuralmagic.github.io/guidellm/ui/pr/191/index.html", + Environment.LOCAL: "https://neuralmagic.github.io/guidellm/ui/dev/index.html", } @@ -117,7 +117,7 @@ class Settings(BaseSettings): ) # general settings - env: Environment = Environment.PROD + env: Environment = Environment.DEV default_async_loop_sleep: float = 10e-5 logging: LoggingSettings = LoggingSettings() default_sweep_number: int = 10 diff --git a/src/guidellm/presentation/builder.py b/src/guidellm/presentation/builder.py index bcb85457..986939a4 100644 --- a/src/guidellm/presentation/builder.py +++ b/src/guidellm/presentation/builder.py @@ -22,7 +22,7 @@ def build_benchmarks(self): def to_dict(self) -> dict[str, Any]: return { - "run_info": self.build_run_info().dict(), - "workload_details": self.build_workload_details().dict(), - "benchmarks": [b.dict() for b in self.build_benchmarks()], + "run_info": self.build_run_info().model_dump(), + "workload_details": self.build_workload_details().model_dump(), + "benchmarks": [b.model_dump() for b in self.build_benchmarks()], } diff --git a/src/ui/.env.local b/src/ui/.env.local index 44ab168b..b9d5ff2b 100644 --- a/src/ui/.env.local +++ 
b/src/ui/.env.local @@ -1,4 +1,4 @@ ASSET_PREFIX=http://localhost:3000 BASE_PATH=http://localhost:3000 NEXT_PUBLIC_USE_MOCK_API=true -USE_MOCK_DATA=true +USE_MOCK_DATA=false diff --git a/src/ui/lib/store/benchmarksWindowData.ts b/src/ui/lib/store/benchmarksWindowData.ts index e8a5cc40..7bcb209a 100644 --- a/src/ui/lib/store/benchmarksWindowData.ts +++ b/src/ui/lib/store/benchmarksWindowData.ts @@ -1,17 +1,20 @@ -export const benchmarksScript = `window.benchmarks = { - "benchmarks": [ +export const benchmarksScript = `window.benchmarks = [ { "requestsPerSecond": 0.6668550387660497, "tpot": { - "statistics": { "total": 80, "mean": 23.00635663936911, "median": 22.959455611213805, "min": 22.880917503720237, "max": 24.14080301920573, - "std": 0.18918760384209338 - }, - "percentiles": [ + "std": 0.18918760384209338, + "percentiles": { + "p50": 22.959455611213805, + "p90": 23.01789086962503, + "p95": 23.30297423947242, + "p99": 24.14080301920573, + }, + "percentileRows": [ { "percentile": "p50", "value": 22.959455611213805 @@ -31,15 +34,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "ttft": { - "statistics": { "total": 80, "mean": 49.64659512042999, "median": 49.23129081726074, "min": 44.538259506225586, "max": 55.47308921813965, - "std": 1.7735485090634995 - }, - "percentiles": [ + "std": 1.7735485090634995, + "percentiles": { + "p50": 49.23129081726074, + "p90": 50.16160011291504, + "p95": 54.918766021728516, + "p99": 55.47308921813965, + }, + "percentileRows": [ { "percentile": "p50", "value": 49.23129081726074 @@ -59,15 +66,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "throughput": { - "statistics": { "total": 210, "mean": 42.58702991319684, "median": 43.536023084668, "min": 0.0, "max": 43.68247620237872, - "std": 4.559764488536857 + "std": 4.559764488536857, + "percentiles": { + "p50": 43.536023084668, + "p90": 43.62613633999709, + "p95": 43.64020767654067, + "p99": 43.68202126662431, }, - "percentiles": [ + "percentileRows": [ 
{ "percentile": "p50", "value": 43.536023084668 @@ -87,15 +98,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "timePerRequest": { - "statistics": { "total": 80, "mean": 1496.706646680832, "median": 1496.1087703704834, "min": 1490.584135055542, "max": 1505.8784484863281, - "std": 3.4553340533022667 - }, - "percentiles": [ + "std": 3.4553340533022667, + "percentiles": { + "p50": 1496.1087703704834, + "p90": 1500.9305477142334, + "p95": 1505.3200721740723, + "p99": 1505.8784484863281, + }, + "percentileRows": [ { "percentile": "p50", "value": 1496.1087703704834 @@ -118,15 +133,19 @@ export const benchmarksScript = `window.benchmarks = { { "requestsPerSecond": 28.075330129628725, "tpot": { - "statistics": { "total": 3416, "mean": 126.08707076148656, "median": 125.30853256346687, "min": 23.034303907364134, "max": 138.08223756693178, - "std": 3.508992115582193 - }, - "percentiles": [ + "std": 3.508992115582193, + "percentiles": { + "p50": 125.30853256346687, + "p90": 129.21135009281218, + "p95": 129.52291770059554, + "p99": 132.21229490686636, + }, + "percentileRows": [ { "percentile": "p50", "value": 125.30853256346687 @@ -146,15 +165,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "ttft": { - "statistics": { "total": 3416, "mean": 8585.486161415694, "median": 8965.316534042358, "min": 110.53991317749023, "max": 12575.379610061646, - "std": 1929.5632525234505 - }, - "percentiles": [ + "std": 1929.5632525234505, + "percentiles": { + "p50": 8965.316534042358, + "p90": 9231.79316520691, + "p95": 9485.00108718872, + "p99": 12096.465587615967, + }, + "percentileRows": [ { "percentile": "p50", "value": 8965.316534042358 @@ -174,15 +197,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "throughput": { - "statistics": { "total": 15981, "mean": 1795.4403743554367, "median": 670.1236619268253, "min": 0.0, "max": 838860.8, - "std": 5196.545581836957 - }, - "percentiles": [ + "std": 5196.545581836957, + "percentiles": { + "p50": 
670.1236619268253, + "p90": 4068.1901066925316, + "p95": 6374.322188449848, + "p99": 16194.223938223939, + }, + "percentileRows": [ { "percentile": "p50", "value": 670.1236619268253 @@ -202,15 +229,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "timePerRequest": { - "statistics": { "total": 3416, "mean": 16526.811318389147, "median": 17058.441638946533, "min": 1711.3444805145264, "max": 20646.55351638794, - "std": 2054.9553770234484 - }, - "percentiles": [ + "std": 2054.9553770234484, + "percentiles": { + "p50": 17058.441638946533, + "p90": 17143.84412765503, + "p95": 17248.060703277588, + "p99": 20116.52660369873, + }, + "percentileRows": [ { "percentile": "p50", "value": 17058.441638946533 @@ -233,15 +264,19 @@ export const benchmarksScript = `window.benchmarks = { { "requestsPerSecond": 4.071681142252993, "tpot": { - "statistics": { "total": 488, "mean": 24.898151556004148, "median": 24.889995181371294, "min": 24.822999560643755, "max": 26.217273871103924, - "std": 0.11227504505081555 - }, - "percentiles": [ + "std": 0.11227504505081555, + "percentiles": { + "p50": 24.889995181371294, + "p90": 24.90483389960395, + "p95": 24.965975019666885, + "p99": 25.306613214554325, + }, + "percentileRows": [ { "percentile": "p50", "value": 24.889995181371294 @@ -261,15 +296,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "ttft": { - "statistics": { "total": 488, "mean": 58.341102033364976, "median": 58.38632583618164, "min": 44.857025146484375, "max": 111.23061180114746, - "std": 8.190008649880411 - }, - "percentiles": [ + "std": 8.190008649880411, + "percentiles": { + "p50": 58.38632583618164, + "p90": 67.66843795776367, + "p95": 68.76754760742188, + "p99": 71.46525382995605, + }, + "percentileRows": [ { "percentile": "p50", "value": 58.38632583618164 @@ -289,15 +328,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "throughput": { - "statistics": { "total": 11338, "mean": 260.42072092623033, "median": 47.630070406540995, 
"min": 0.0, "max": 838860.8, - "std": 886.8274389295076 - }, - "percentiles": [ + "std": 886.8274389295076, + "percentiles": { + "p50": 47.630070406540995, + "p90": 604.8895298528987, + "p95": 1621.9273008507348, + "p99": 3054.846321922797, + }, + "percentileRows": [ { "percentile": "p50", "value": 47.630070406540995 @@ -317,15 +360,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "timePerRequest": { - "statistics": { "total": 488, "mean": 1626.5668087318297, "median": 1626.236915588379, "min": 1611.9341850280762, "max": 1690.2406215667725, - "std": 8.871477705542668 - }, - "percentiles": [ + "std": 8.871477705542668, + "percentiles": { + "p50": 1626.236915588379, + "p90": 1635.761022567749, + "p95": 1637.390375137329, + "p99": 1643.500804901123, + }, + "percentileRows": [ { "percentile": "p50", "value": 1626.236915588379 @@ -348,15 +395,19 @@ export const benchmarksScript = `window.benchmarks = { { "requestsPerSecond": 7.466101414346809, "tpot": { - "statistics": { "total": 895, "mean": 27.56459906601014, "median": 27.525402250744047, "min": 26.69054911686824, "max": 29.5785041082473, - "std": 0.18545649185329754 - }, - "percentiles": [ + "std": 0.18545649185329754, + "percentiles": { + "p50": 27.525402250744047, + "p90": 27.62497795952691, + "p95": 27.947206345815506, + "p99": 28.41202157442687, + }, + "percentileRows": [ { "percentile": "p50", "value": 27.525402250744047 @@ -376,15 +427,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "ttft": { - "statistics": { "total": 895, "mean": 64.73036744741088, "median": 62.484025955200195, "min": 48.038482666015625, "max": 256.4809322357178, - "std": 21.677914089867077 - }, - "percentiles": [ + "std": 21.677914089867077, + "percentiles": { + "p50": 62.484025955200195, + "p90": 72.04723358154297, + "p95": 72.50738143920898, + "p99": 229.35032844543457, + }, + "percentileRows": [ { "percentile": "p50", "value": 62.484025955200195 @@ -404,15 +459,19 @@ export const benchmarksScript = 
`window.benchmarks = { ] }, "throughput": { - "statistics": { "total": 12465, "mean": 477.5134940335642, "median": 49.76925541382379, "min": 0.0, "max": 1677721.6, - "std": 2472.852317203968 - }, - "percentiles": [ + "std": 2472.852317203968, + "percentiles": { + "p50": 49.76925541382379, + "p90": 1191.5636363636363, + "p95": 2501.075730471079, + "p99": 7025.634840871022, + }, + "percentileRows": [ { "percentile": "p50", "value": 49.76925541382379 @@ -432,15 +491,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "timePerRequest": { - "statistics": { "total": 895, "mean": 1800.9132816804852, "median": 1797.5835800170898, "min": 1756.2305927276611, "max": 1994.28129196167, - "std": 24.24935353039552 - }, - "percentiles": [ + "std": 24.24935353039552, + "percentiles": { + "p50": 1797.5835800170898, + "p90": 1808.2549571990967, + "p95": 1813.141107559204, + "p99": 1967.8056240081787, + }, + "percentileRows": [ { "percentile": "p50", "value": 1797.5835800170898 @@ -463,15 +526,19 @@ export const benchmarksScript = `window.benchmarks = { { "requestsPerSecond": 10.83989165148388, "tpot": { - "statistics": { "total": 1300, "mean": 31.6048062981453, "median": 31.577579558841766, "min": 30.171105355927438, "max": 33.10690323511759, - "std": 0.15146862300990216 - }, - "percentiles": [ + "std": 0.15146862300990216, + "percentiles": { + "p50": 31.577579558841766, + "p90": 31.63230986822219, + "p95": 31.682415614052424, + "p99": 32.138043834317116, + }, + "percentileRows": [ { "percentile": "p50", "value": 31.577579558841766 @@ -491,15 +558,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "ttft": { - "statistics": { "total": 1300, "mean": 66.61205951984113, "median": 65.78803062438965, "min": 51.81550979614258, "max": 244.69709396362305, - "std": 14.858653160342651 - }, - "percentiles": [ + "std": 14.858653160342651, + "percentiles": { + "p50": 65.78803062438965, + "p90": 76.70044898986816, + "p95": 77.78120040893555, + "p99": 88.29903602600098, 
+ }, + "percentileRows": [ { "percentile": "p50", "value": 65.78803062438965 @@ -519,15 +590,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "throughput": { - "statistics": { "total": 12708, "mean": 693.3695002980695, "median": 55.59272071785492, "min": 0.0, "max": 838860.8, - "std": 2454.288991845712 - }, - "percentiles": [ + "std": 2454.288991845712, + "percentiles": { + "p50": 55.59272071785492, + "p90": 1897.875113122172, + "p95": 2931.030048916841, + "p99": 7108.989830508474, + }, + "percentileRows": [ { "percentile": "p50", "value": 55.59272071785492 @@ -547,15 +622,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "timePerRequest": { - "statistics": { "total": 1300, "mean": 2057.3723330864545, "median": 2056.5311908721924, "min": 2027.0307064056396, "max": 2233.853578567505, - "std": 16.334707021033957 - }, - "percentiles": [ + "std": 16.334707021033957, + "percentiles": { + "p50": 2056.5311908721924, + "p90": 2065.953254699707, + "p95": 2067.810297012329, + "p99": 2087.8031253814697, + }, + "percentileRows": [ { "percentile": "p50", "value": 2056.5311908721924 @@ -578,15 +657,19 @@ export const benchmarksScript = `window.benchmarks = { { "requestsPerSecond": 14.211845819540324, "tpot": { - "statistics": { "total": 1704, "mean": 35.695500394825224, "median": 35.60370869106717, "min": 34.798149078611345, "max": 38.94662857055664, - "std": 0.24967658675392423 - }, - "percentiles": [ + "std": 0.24967658675392423, + "percentiles": { + "p50": 35.60370869106717, + "p90": 35.84100708128914, + "p95": 36.09923778041716, + "p99": 36.71476489207784, + }, + "percentileRows": [ { "percentile": "p50", "value": 35.60370869106717 @@ -606,15 +689,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "ttft": { - "statistics": { "total": 1704, "mean": 74.19940031750102, "median": 71.50626182556152, "min": 53.643226623535156, "max": 322.6609230041504, - "std": 23.98415146629138 - }, - "percentiles": [ + "std": 23.98415146629138, + 
"percentiles": { + "p50": 71.50626182556152, + "p90": 83.71734619140625, + "p95": 98.2356071472168, + "p99": 113.44718933105469, + }, + "percentileRows": [ { "percentile": "p50", "value": 71.50626182556152 @@ -634,15 +721,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "throughput": { - "statistics": { "total": 15532, "mean": 908.715763654939, "median": 98.84067397195712, "min": 0.0, "max": 838860.8, - "std": 3628.67537220603 - }, - "percentiles": [ + "std": 3628.67537220603, + "percentiles": { + "p50": 98.84067397195712, + "p90": 2205.2071503680336, + "p95": 3775.251125112511, + "p99": 10512.040100250626, + }, + "percentileRows": [ { "percentile": "p50", "value": 98.84067397195712 @@ -662,15 +753,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "timePerRequest": { - "statistics": { "total": 1704, "mean": 2321.92987861208, "median": 2313.3785724639893, "min": 2290.93074798584, "max": 2594.4881439208984, - "std": 29.46118583560937 - }, - "percentiles": [ + "std": 29.46118583560937, + "percentiles": { + "p50": 2313.3785724639893, + "p90": 2339.4439220428467, + "p95": 2341.9249057769775, + "p99": 2370.450496673584, + }, + "percentileRows": [ { "percentile": "p50", "value": 2313.3785724639893 @@ -693,15 +788,19 @@ export const benchmarksScript = `window.benchmarks = { { "requestsPerSecond": 17.5623040970073, "tpot": { - "statistics": { "total": 2106, "mean": 39.546438065771135, "median": 39.47442675393725, "min": 38.74176740646362, "max": 43.32651032341851, - "std": 0.3121106751660994 - }, - "percentiles": [ + "std": 0.3121106751660994, + "percentiles": { + "p50": 39.47442675393725, + "p90": 39.722594003828746, + "p95": 40.083578654697966, + "p99": 40.73049983040231, + }, + "percentileRows": [ { "percentile": "p50", "value": 39.47442675393725 @@ -721,15 +820,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "ttft": { - "statistics": { "total": 2106, "mean": 85.68002797259905, "median": 89.88213539123535, "min": 
57.360172271728516, "max": 362.8504276275635, - "std": 27.802786177158218 - }, - "percentiles": [ + "std": 27.802786177158218, + "percentiles": { + "p50": 89.88213539123535, + "p90": 101.7305850982666, + "p95": 103.26790809631348, + "p99": 138.88931274414062, + }, + "percentileRows": [ { "percentile": "p50", "value": 89.88213539123535 @@ -749,15 +852,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "throughput": { - "statistics": { "total": 15121, "mean": 1123.0284569989917, "median": 99.91909855397003, "min": 0.0, "max": 932067.5555555555, - "std": 4358.833642800455 - }, - "percentiles": [ + "std": 4358.833642800455, + "percentiles": { + "p50": 99.91909855397003, + "p90": 2868.8809849521203, + "p95": 4848.906358381503, + "p99": 12905.55076923077, + }, + "percentileRows": [ { "percentile": "p50", "value": 99.91909855397003 @@ -777,15 +884,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "timePerRequest": { - "statistics": { "total": 2106, "mean": 2575.916517267653, "median": 2573.6281871795654, "min": 2533.904790878296, "max": 2894.4458961486816, - "std": 33.18594265783404 - }, - "percentiles": [ + "std": 33.18594265783404, + "percentiles": { + "p50": 2573.6281871795654, + "p90": 2588.9015197753906, + "p95": 2591.136932373047, + "p99": 2700.568437576294, + }, + "percentileRows": [ { "percentile": "p50", "value": 2573.6281871795654 @@ -808,15 +919,19 @@ export const benchmarksScript = `window.benchmarks = { { "requestsPerSecond": 20.885632360055222, "tpot": { - "statistics": { "total": 2505, "mean": 44.20494748431818, "median": 44.02147020612444, "min": 42.981475591659546, "max": 52.62617986710345, - "std": 1.0422073399474652 - }, - "percentiles": [ + "std": 1.0422073399474652, + "percentiles": { + "p50": 44.02147020612444, + "p90": 44.47330747331892, + "p95": 45.131300316482296, + "p99": 50.400745301019576, + }, + "percentileRows": [ { "percentile": "p50", "value": 44.02147020612444 @@ -836,15 +951,19 @@ export const 
benchmarksScript = `window.benchmarks = { ] }, "ttft": { - "statistics": { "total": 2505, "mean": 98.4621736103903, "median": 95.84355354309082, "min": 61.09285354614258, "max": 524.099588394165, - "std": 34.20521833421915 - }, - "percentiles": [ + "std": 34.20521833421915, + "percentiles": { + "p50": 95.84355354309082, + "p90": 109.4822883605957, + "p95": 111.46354675292969, + "p99": 334.31243896484375, + }, + "percentileRows": [ { "percentile": "p50", "value": 95.84355354309082 @@ -864,15 +983,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "throughput": { - "statistics": { "total": 14779, "mean": 1335.7133120200747, "median": 104.45284522475407, "min": 0.0, "max": 1677721.6, - "std": 5200.1934248077005 - }, - "percentiles": [ + "std": 5200.1934248077005, + "percentiles": { + "p50": 104.45284522475407, + "p90": 3472.1059602649007, + "p95": 5882.6143057503505, + "p99": 15768.060150375939, + }, + "percentileRows": [ { "percentile": "p50", "value": 104.45284522475407 @@ -892,15 +1015,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "timePerRequest": { - "statistics": { "total": 2505, "mean": 2882.6246785070603, "median": 2869.71378326416, "min": 2826.8485069274902, "max": 3324.9876499176025, - "std": 78.07038363701177 - }, - "percentiles": [ + "std": 78.07038363701177, + "percentiles": { + "p50": 2869.71378326416, + "p90": 2888.715982437134, + "p95": 2937.7262592315674, + "p99": 3282.898426055908, + }, + "percentileRows": [ { "percentile": "p50", "value": 2869.71378326416 @@ -923,15 +1050,19 @@ export const benchmarksScript = `window.benchmarks = { { "requestsPerSecond": 24.179871480414207, "tpot": { - "statistics": { "total": 2900, "mean": 51.023722283946924, "median": 50.24327550615583, "min": 47.58137645143451, "max": 60.63385087935651, - "std": 2.0749227872708285 - }, - "percentiles": [ + "std": 2.0749227872708285, + "percentiles": { + "p50": 50.24327550615583, + "p90": 52.928451507810564, + "p95": 57.28437408568367, + "p99": 
58.51330454387362, + }, + "percentileRows": [ { "percentile": "p50", "value": 50.24327550615583 @@ -951,15 +1082,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "ttft": { - "statistics": { "total": 2900, "mean": 123.56691516678907, "median": 115.33927917480469, "min": 88.05131912231445, "max": 594.1901206970215, - "std": 44.50765227271787 - }, - "percentiles": [ + "std": 44.50765227271787, + "percentiles": { + "p50": 115.33927917480469, + "p90": 141.8297290802002, + "p95": 144.49095726013184, + "p99": 375.5221366882324, + }, + "percentileRows": [ { "percentile": "p50", "value": 115.33927917480469 @@ -979,15 +1114,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "throughput": { - "statistics": { "total": 14925, "mean": 1546.3194569459229, "median": 138.59511614843208, "min": 0.0, "max": 1677721.6, - "std": 5844.302138842639 - }, - "percentiles": [ + "std": 5844.302138842639, + "percentiles": { + "p50": 138.59511614843208, + "p90": 3916.250233426704, + "p95": 6678.828025477707, + "p99": 17924.37606837607, + }, + "percentileRows": [ { "percentile": "p50", "value": 138.59511614843208 @@ -1007,15 +1146,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "timePerRequest": { - "statistics": { "total": 2900, "mean": 3336.9750574539444, "median": 3282.672882080078, "min": 3228.010654449463, "max": 3863.8863563537598, - "std": 141.37106520368962 - }, - "percentiles": [ + "std": 141.37106520368962, + "percentiles": { + "p50": 3282.672882080078, + "p90": 3561.7692470550537, + "p95": 3737.921953201294, + "p99": 3811.5434646606445, + }, + "percentileRows": [ { "percentile": "p50", "value": 3282.672882080078 @@ -1038,15 +1181,19 @@ export const benchmarksScript = `window.benchmarks = { { "requestsPerSecond": 27.382251189847466, "tpot": { - "statistics": { "total": 3285, "mean": 62.44881585866599, "median": 60.908238093058266, "min": 58.94644298250713, "max": 72.59870383699061, - "std": 2.9764436606898887 - }, - "percentiles": [ + 
"std": 2.9764436606898887, + "percentiles": { + "p50": 60.908238093058266, + "p90": 68.3861043718126, + "p95": 69.21934324597555, + "p99": 70.13290269034249, + }, + "percentileRows": [ { "percentile": "p50", "value": 60.908238093058266 @@ -1066,15 +1213,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "ttft": { - "statistics": { "total": 3285, "mean": 142.7834399758953, "median": 129.18686866760254, "min": 92.2248363494873, "max": 802.5562763214111, - "std": 54.896961282893 - }, - "percentiles": [ + "std": 54.896961282893, + "percentiles": { + "p50": 129.18686866760254, + "p90": 158.26964378356934, + "p95": 166.79859161376953, + "p99": 422.8503704071045, + }, + "percentileRows": [ { "percentile": "p50", "value": 129.18686866760254 @@ -1094,15 +1245,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "throughput": { - "statistics": { "total": 15706, "mean": 1751.1720673421933, "median": 318.5950626661603, "min": 0.0, "max": 1677721.6, - "std": 6434.120608249914 - }, - "percentiles": [ + "std": 6434.120608249914, + "percentiles": { + "p50": 318.5950626661603, + "p90": 4165.147964250248, + "p95": 7194.346483704974, + "p99": 19878.218009478675, + }, + "percentileRows": [ { "percentile": "p50", "value": 318.5950626661603 @@ -1122,15 +1277,19 @@ export const benchmarksScript = `window.benchmarks = { ] }, "timePerRequest": { - "statistics": { "total": 3285, "mean": 4076.002237894764, "median": 3972.564697265625, "min": 3890.990972518921, "max": 4623.138666152954, - "std": 197.81266460135544 - }, - "percentiles": [ + "std": 197.81266460135544, + "percentiles": { + "p50": 3972.564697265625, + "p90": 4444.445371627808, + "p95": 4506.659030914307, + "p99": 4553.745985031128, + }, + "percentileRows": [ { "percentile": "p50", "value": 3972.564697265625 @@ -1150,5 +1309,4 @@ export const benchmarksScript = `window.benchmarks = { ] } } - ] -};`; +];`; diff --git a/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts 
b/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts index 5c5e6776..838dbc7a 100644 --- a/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts +++ b/src/ui/lib/store/slices/benchmarks/benchmarks.api.ts @@ -8,8 +8,13 @@ import { setSloData } from '../slo/slo.slice'; const USE_MOCK_API = process.env.NEXT_PUBLIC_USE_MOCK_API === 'true'; +// currently the injector requires 'window.benchmarks = {};' to be present in the html, but benchmarks is expected to be null or an array const fetchBenchmarks = () => { - return { data: window.benchmarks as Benchmarks }; + let benchmarks = window.benchmarks; + if (!Array.isArray(benchmarks)) { + benchmarks = []; + } + return { data: benchmarks as Benchmarks }; }; const getAverageValueForPercentile = ( @@ -17,10 +22,10 @@ const getAverageValueForPercentile = ( lastMetric: Statistics, percentile: string ) => { - const firstPercentile = firstMetric.percentileRows.find( + const firstPercentile = firstMetric?.percentileRows.find( (p) => p.percentile === percentile ); - const lastPercentile = lastMetric.percentileRows.find( + const lastPercentile = lastMetric?.percentileRows.find( (p) => p.percentile === percentile ); return ((firstPercentile?.value ?? 0) + (lastPercentile?.value ?? 
0)) / 2; @@ -36,29 +41,29 @@ const setDefaultSLOs = ( const lastBM = data[data.length - 1]; const ttftAvg = getAverageValueForPercentile( - firstBM.ttft, - lastBM.ttft, + firstBM?.ttft, + lastBM?.ttft, defaultPercentile ); const tpotAvg = getAverageValueForPercentile( - firstBM.tpot, - lastBM.tpot, + firstBM?.tpot, + lastBM?.tpot, defaultPercentile ); const timePerRequestAvg = getAverageValueForPercentile( - firstBM.timePerRequest, - lastBM.timePerRequest, + firstBM?.timePerRequest, + lastBM?.timePerRequest, defaultPercentile ); const throughputAvg = getAverageValueForPercentile( - firstBM.throughput, - lastBM.throughput, + firstBM?.throughput, + lastBM?.throughput, defaultPercentile ); dispatch( setSloData({ - currentRequestRate: firstBM.requestsPerSecond, + currentRequestRate: firstBM?.requestsPerSecond, current: { ttft: formatNumber(ttftAvg, 0), tpot: formatNumber(tpotAvg, 0), diff --git a/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts b/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts index 2ba57e18..9418bc64 100644 --- a/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts +++ b/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts @@ -14,6 +14,7 @@ export const selectBenchmarks = (state: RootState) => state.benchmarks.data; export const selectMetricsSummaryLineData = createSelector( [selectBenchmarks, selectSloState], (benchmarks, sloState) => { + console.log('🚀 ~ benchmarks.selectors.ts:18 ~ benchmarks:', benchmarks); const sortedByRPS = benchmarks ?.slice() ?.sort((bm1, bm2) => (bm1.requestsPerSecond > bm2.requestsPerSecond ? 
1 : -1)); From 917f4f62cdec2760b7357b935b2ea5397b18e954 Mon Sep 17 00:00:00 2001 From: dalthecow Date: Wed, 18 Jun 2025 19:15:42 -0400 Subject: [PATCH 06/20] update injector to only read from in <head> --- src/guidellm/presentation/injector.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/src/guidellm/presentation/injector.py b/src/guidellm/presentation/injector.py index 873b2578..e60a72ed 100644 --- a/src/guidellm/presentation/injector.py +++ b/src/guidellm/presentation/injector.py @@ -1,4 +1,5 @@ from pathlib import Path +import re from typing import Union from guidellm.config import settings @@ -13,9 +14,7 @@ def create_report(js_data: dict, output_path: Union[str, Path]) -> Path: :param js_data: dict with match str and json data to inject :type js_data: dict - :param output_path: the path, either a file or a directory, - to save the report to. If a directory, the report will be saved - as "report.html" inside of the directory. + :param output_path: the file to save the report to. :type output_path: str :return: the path to the saved report :rtype: str @@ -24,9 +23,6 @@ def create_report(js_data: dict, output_path: Union[str, Path]) -> Path: if not isinstance(output_path, Path): output_path = Path(output_path) - if output_path.is_dir(): - output_path = output_path / "report.html" - html_content = load_text(settings.report_generation.source) report_content = inject_data( js_data, @@ -38,13 +34,12 @@ def create_report(js_data: dict, output_path: Union[str, Path]) -> Path: print(f"Report saved to {output_path}") return output_path - def inject_data( js_data: dict, html: str, ) -> str: """ - Injects the json data into the HTML while replacing the placeholder. + Injects the json data into the HTML, replacing placeholders only within the <head> section. 
:param js_data: the json data to inject :type js_data: dict @@ -53,6 +48,18 @@ def inject_data( :return: the html with the json data injected :rtype: str """ + head_match = re.search(r"<head[^>]*>(.*?)</head>", html, re.DOTALL | re.IGNORECASE) + if not head_match: + return html # or raise error? + + head_content = head_match.group(1) + + # Replace placeholders only inside the <head> content for placeholder, script in js_data.items(): - html = html.replace(placeholder, script) - return html + head_content = head_content.replace(placeholder, script) + + # Rebuild the HTML + new_head = f"<head>{head_content}</head>" + html = html[:head_match.start()] + new_head + html[head_match.end():] + + return html \ No newline at end of file From 835d292ac64e7228656fca8924b325cf5fbe768a Mon Sep 17 00:00:00 2001 From: dalthecow Date: Wed, 18 Jun 2025 22:45:13 -0400 Subject: [PATCH 07/20] fix case with only one benchmark --- src/guidellm/benchmark/output.py | 2 +- .../store/slices/benchmarks/benchmarks.selectors.ts | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/guidellm/benchmark/output.py b/src/guidellm/benchmark/output.py index 7aa69072..9bd10f1a 100644 --- a/src/guidellm/benchmark/output.py +++ b/src/guidellm/benchmark/output.py @@ -27,9 +27,9 @@ StatusDistributionSummary, ) from guidellm.presentation import UIDataBuilder +from guidellm.presentation.injector import create_report from guidellm.scheduler import strategy_display_str from guidellm.utils import Colors, split_text_list_by_length -from guidellm.utils.injector import create_report __all__ = [ "GenerativeBenchmarksConsole", diff --git a/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts b/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts index 9418bc64..71366448 100644 --- a/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts +++ b/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts @@ -59,11 +59,6 @@ const getDefaultMetricValues = () => ({ export const selectInterpolatedMetrics = 
createSelector( [selectBenchmarks, selectSloState], (benchmarks, sloState) => { - const sortedByRPS = benchmarks - ?.slice() - ?.sort((bm1, bm2) => (bm1.requestsPerSecond > bm2.requestsPerSecond ? 1 : -1)); - const requestRates = sortedByRPS?.map((bm) => bm.requestsPerSecond) || []; - const { enforcedPercentile, currentRequestRate } = sloState; const metricData: { [K in keyof BenchmarkMetrics | 'mean']: { enforcedPercentileValue: number; @@ -77,6 +72,14 @@ export const selectInterpolatedMetrics = createSelector( throughput: getDefaultMetricValues(), mean: getDefaultMetricValues(), }; + if ((benchmarks?.length || 0) < 2) { + return metricData; + } + const sortedByRPS = benchmarks + ?.slice() + ?.sort((bm1, bm2) => (bm1.requestsPerSecond > bm2.requestsPerSecond ? 1 : -1)); + const requestRates = sortedByRPS?.map((bm) => bm.requestsPerSecond) || []; + const { enforcedPercentile, currentRequestRate } = sloState; const metrics: (keyof BenchmarkMetrics)[] = [ 'ttft', 'tpot', From 9b384e9777a3a76469a436c75b36fa9ae21e6c3d Mon Sep 17 00:00:00 2001 From: dalthecow Date: Fri, 20 Jun 2025 16:40:42 -0400 Subject: [PATCH 08/20] fix all known bugs, add details for locally hosted UI, remove broken log scaling on y axis --- .gitignore | 2 + DEVELOPING.md | 10 + README.md | 29 +- package-lock.json | 701 +++++++++++++++++- package.json | 2 + src/guidellm/benchmark/output.py | 10 - src/guidellm/config.py | 8 +- src/guidellm/presentation/data_models.py | 8 +- src/guidellm/presentation/injector.py | 7 +- src/ui/.env.local | 2 +- .../components/CustomTick/index.tsx | 4 +- .../MetricsSummary.component.tsx | 5 + .../RequestOverTime.component.tsx | 2 +- .../WorkloadMetrics.component.tsx | 5 + src/ui/serve.json | 3 + tests/unit/presentation/test_data_models.py | 20 + tests/unit/presentation/test_injectory.py | 70 ++ 17 files changed, 845 insertions(+), 43 deletions(-) create mode 100644 src/ui/serve.json create mode 100644 tests/unit/presentation/test_data_models.py create mode 100644 
tests/unit/presentation/test_injectory.py diff --git a/.gitignore b/.gitignore index 350233a9..ac489723 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ src/guidellm/version.py benchmarks.json benchmarks.yaml benchmarks.csv +benchmarks.html # Byte-compiled / optimized / DLL files __pycache__/ @@ -224,4 +225,5 @@ src/ui/next-env.d.ts !tsconfig.*.json !src/ui/lib !src/ui/public/manifest.json +!src/ui/serve.json .eslintcache diff --git a/DEVELOPING.md b/DEVELOPING.md index dde51744..517ce689 100644 --- a/DEVELOPING.md +++ b/DEVELOPING.md @@ -267,6 +267,16 @@ Reference [https://www.npmjs.com/package/jest-runner-groups](jest-runner-groups) */ ``` +### 🧪 UI Development Notes + +During development, it can be helpful to view sample data. We include a sample benchmark run wired into the Redux store under: + +``` +src/ui/lib/store/[runInfo/workloadDetails/benchmarks]WindowData.ts +``` + +In the future this will be replaced by a configurable untracked file for dev use. + ## Additional Resources - [CONTRIBUTING.md](https://github.com/neuralmagic/guidellm/blob/main/CONTRIBUTING.md): Guidelines for contributing to the project. diff --git a/README.md b/README.md index fb70f072..e28e107b 100644 --- a/README.md +++ b/README.md @@ -157,17 +157,18 @@ The `guidellm benchmark` command is used to run benchmarks against a generative GuideLLM UI is a companion frontend for visualizing the results of a GuideLLM benchmark run. -### 🛠 Running the UI +### 🛠 Generating an HTML report with a benchmark run -1. Use the Hosted Build (Recommended for Most Users) +Set the output to benchmarks.html for your run: +```bash +--output-path=benchmarks.html +``` -After running a benchmark with GuideLLM, a report.html file will be generated (by default at guidellm_report/report.html). This file references the latest stable version of the UI hosted at: +1. Use the Hosted Build (Recommended for Most Users) -``` -https://neuralmagic.github.io/guidellm/ui/dev/ -``` +This is preconfigured.
The latest stable version of the hosted UI (https://neuralmagic.github.io/guidellm/ui/latest) will be used to build the local html file. -Open the file in your browser and you're done—no setup required. +Open benchmarks.html in your browser and you're done—no setup required. 2. Build and Serve the UI Locally (For Development) This option is useful if: @@ -180,20 +181,10 @@ Open the file in your browser and you're done—no setup required. ```bash npm install npm run build -npx serve out -``` - -This will start a local server (e.g., at http://localhost:3000). Then, in your GuideLLM config or CLI flags, point to this local server as the asset base for report generation. - -### 🧪 Development Notes - -During UI development, it can be helpful to view sample data. We include a sample benchmark run wired into the Redux store under: - -``` -src/lib/store/[runInfo/workloadDetails/benchmarks]WindowData.ts +npm run serve ``` -In the future this will be replaced by a configurable untracked file for dev use. +This will start a local server (e.g., at http://localhost:3000). Then, in your GuideLLM config or CLI flags, point to this local server as the asset base for report generation. You can set the Environment to LOCAL before running your benchmarks to accomplish this. 
## Resources diff --git a/package-lock.json b/package-lock.json index e802d52c..4f07a79c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -61,6 +61,7 @@ "jest-runner-groups": "^2.2.0", "jest-transform-stub": "^2.0.0", "prettier": "^3.5.3", + "serve": "^14.2.4", "typescript": "^5", "typescript-eslint": "^8.34.0" }, @@ -5841,6 +5842,13 @@ "win32" ] }, + "node_modules/@zeit/schemas": { + "version": "2.36.0", + "resolved": "https://registry.npmjs.org/@zeit/schemas/-/schemas-2.36.0.tgz", + "integrity": "sha512-7kjMwcChYEzMKjeex9ZFXkt1AyNov9R5HZtjBKVsmVpw7pa7ZtlCGvCBC2vnnXctaYN+aRI61HjIqeetZW5ROg==", + "dev": true, + "license": "MIT" + }, "node_modules/abab": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.6.tgz", @@ -5849,6 +5857,20 @@ "dev": true, "license": "BSD-3-Clause" }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dev": true, + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, "node_modules/acorn": { "version": "8.15.0", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", @@ -5940,6 +5962,16 @@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.1.0" + } + }, "node_modules/ansi-colors": { "version": "4.1.3", "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", @@ -6027,6 +6059,13 @@ ], "license": "MIT" }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": 
"https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, "node_modules/argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", @@ -6576,6 +6615,146 @@ "dev": true, "license": "ISC" }, + "node_modules/boxen": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.0.0.tgz", + "integrity": "sha512-j//dBVuyacJbvW+tvZ9HuH03fZ46QcaKvvhZickZqtB271DxJ7SNRSNxrV/dZX0085m7hISRZWbzWlJvx/rHSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^7.0.0", + "chalk": "^5.0.1", + "cli-boxes": "^3.0.0", + "string-width": "^5.1.2", + "type-fest": "^2.13.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boxen/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/boxen/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/boxen/node_modules/camelcase": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", + "integrity": 
"sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boxen/node_modules/chalk": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.1.tgz", + "integrity": "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/boxen/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boxen/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/boxen/node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + 
"node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boxen/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/brace-expansion": { "version": "1.1.12", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", @@ -6696,6 +6875,16 @@ "node": ">=10.16.0" } }, + "node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/cachedir": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/cachedir/-/cachedir-2.4.0.tgz", @@ -6822,6 +7011,22 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/chalk-template": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/chalk-template/-/chalk-template-0.4.0.tgz", + "integrity": "sha512-/ghrgmhfY8RaSdeo43hNXxpoHAtxdbskUHjPpfqUWGttFgycUhYPGx3YZBCnUCvOa7Doivn1IZec3DEGFoMgLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/chalk-template?sponsor=1" + } + }, "node_modules/char-regex": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", @@ -6875,6 +7080,19 @@ "node": ">=6" } }, + "node_modules/cli-boxes": { + "version": "3.0.0", + 
"resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/cli-cursor": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", @@ -6927,6 +7145,71 @@ "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", "license": "MIT" }, + "node_modules/clipboardy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/clipboardy/-/clipboardy-3.0.0.tgz", + "integrity": "sha512-Su+uU5sr1jkUy1sGRpLKjKrvEOVXgSgiSInwa/qeID6aJ07yh+5NWc3h2QfjHjBnfX4LhtFcuAWKUsJ3r+fjbg==", + "dev": true, + "license": "MIT", + "dependencies": { + "arch": "^2.2.0", + "execa": "^5.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/clipboardy/node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/clipboardy/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": 
"sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/clipboardy/node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, "node_modules/cliui": { "version": "8.0.1", "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", @@ -7054,6 +7337,62 @@ "node": ">=4.0.0" } }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", + "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "accepts": "~1.3.5", + "bytes": "3.0.0", + "compressible": "~2.0.16", + "debug": "2.6.9", + "on-headers": "~1.0.2", + "safe-buffer": "5.1.2", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } 
+ }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "dev": true, + "license": "MIT" + }, + "node_modules/compression/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", @@ -7061,6 +7400,16 @@ "dev": true, "license": "MIT" }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, "node_modules/convert-source-map": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", @@ -7633,6 +7982,16 @@ } } }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -7879,8 +8238,7 @@ "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", "dev": true, - "license": 
"MIT", - "peer": true + "license": "MIT" }, "node_modules/ecc-jsbn": { "version": "0.1.2", @@ -10157,6 +10515,22 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "dev": true, + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -10315,6 +10689,19 @@ "node": ">=8" } }, + "node_modules/is-port-reachable": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-port-reachable/-/is-port-reachable-4.0.0.tgz", + "integrity": "sha512-9UoipoxYmSk6Xy7QFgRv2HDyaysmgSG75TFQs6S+3pDM7ZhKTF/bskZV+0UlABHzKjNVhPjYCLfeZUEg1wXxig==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/is-potential-custom-element-name": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", @@ -10500,6 +10887,19 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/isarray": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", @@ -13246,6 +13646,16 @@ "dev": true, "license": "MIT" 
}, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, "node_modules/next": { "version": "15.3.2", "resolved": "https://registry.npmjs.org/next/-/next-15.3.2.tgz", @@ -13536,6 +13946,16 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -13747,6 +14167,13 @@ "node": ">=0.10.0" } }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==", + "dev": true, + "license": "(WTFPL OR MIT)" + }, "node_modules/path-key": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", @@ -13789,6 +14216,13 @@ "license": "ISC", "peer": true }, + "node_modules/path-to-regexp": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-3.3.0.tgz", + "integrity": "sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw==", + "dev": true, + "license": "MIT" + }, "node_modules/path-type": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", @@ -14200,6 +14634,49 @@ ], "license": "MIT" }, + "node_modules/range-parser": { + "version": "1.2.0", + 
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dev": true, + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true, + "license": "ISC" + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/react": { "version": "18.3.1", "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", @@ -14404,6 +14881,30 @@ "node": ">=4" } }, + "node_modules/registry-auth-token": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-3.3.2.tgz", + "integrity": "sha512-JL39c60XlzCVgNrO+qq68FoNb56w/m7JYvGR2jT5iR1xBrUA3Mfx5Twk5rqTThPmQKMWydGmq8oFtDlxfrmxnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "rc": "^1.1.6", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/registry-url": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/registry-url/-/registry-url-3.1.0.tgz", + "integrity": "sha512-ZbgR5aZEdf4UKZVBPYIgaglBmSF2Hi94s2PcIHhRGFjKYu+chjJdYfHn4rt3hB6eCKLJ8giVIIfgMa1ehDfZKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "rc": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/regjsgen": { "version": "0.8.0", "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.8.0.tgz", @@ -14457,6 +14958,16 @@ "node": ">=0.10.0" } }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/requires-port": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", @@ -14729,6 +15240,108 @@ "semver": "bin/semver.js" } }, + "node_modules/serve": { + "version": "14.2.4", + "resolved": "https://registry.npmjs.org/serve/-/serve-14.2.4.tgz", + "integrity": "sha512-qy1S34PJ/fcY8gjVGszDB3EXiPSk5FKhUa7tQe0UPRddxRidc2V6cNHPNewbE1D7MAkgLuWEt3Vw56vYy73tzQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@zeit/schemas": "2.36.0", + "ajv": "8.12.0", + "arg": "5.0.2", + "boxen": "7.0.0", + "chalk": "5.0.1", + "chalk-template": "0.4.0", + "clipboardy": "3.0.0", + "compression": "1.7.4", + "is-port-reachable": "4.0.0", + "serve-handler": "6.1.6", + "update-check": "1.5.4" + }, + "bin": { + "serve": "build/main.js" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/serve-handler": { + "version": "6.1.6", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.6.tgz", + "integrity": "sha512-x5RL9Y2p5+Sh3D38Fh9i/iQ5ZK+e4xuXRd/pGbM4D13tgo/MGwbttUk8emytcr1YYzBYs+apnUngBDFYfpjPuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "bytes": "3.0.0", + 
"content-disposition": "0.5.2", + "mime-types": "2.1.18", + "minimatch": "3.1.2", + "path-is-inside": "1.0.2", + "path-to-regexp": "3.3.0", + "range-parser": "1.2.0" + } + }, + "node_modules/serve-handler/node_modules/mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-handler/node_modules/mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "mime-db": "~1.33.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/serve/node_modules/chalk": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.0.1.tgz", + "integrity": "sha512-Fo07WOYGqMfCWHOzSXOt2CxDbC6skS/jO9ynEcmpANMoPrD+W1r1K6Vx7iNm+AQmETU1Xr2t+n8nzkV9t6xh3w==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/serve/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true, + "license": "MIT" + }, "node_modules/set-function-length": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", @@ -16056,6 +16669,17 @@ "browserslist": ">= 4.21.0" } }, + "node_modules/update-check": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/update-check/-/update-check-1.5.4.tgz", + "integrity": "sha512-5YHsflzHP4t1G+8WGPlvKbJEbAJGCgw+Em+dGR1KmBUbr1J36SJBqlHLjR7oob7sco5hWHGQVcr9B2poIVDDTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "registry-auth-token": "3.3.2", + "registry-url": "3.1.0" + } + }, "node_modules/uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", @@ -16118,6 +16742,16 @@ "dev": true, "license": "MIT" }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/verror": { "version": "1.10.0", "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", @@ -16308,6 +16942,69 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "dev": true, + "license": "MIT", + "dependencies": { + "string-width": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/widest-line/node_modules/ansi-regex": { + 
"version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/widest-line/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/widest-line/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, "node_modules/word-wrap": { "version": "1.2.5", "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", diff --git a/package.json b/package.json index 1437f86c..98273e12 100644 --- a/package.json +++ b/package.json @@ -5,6 +5,7 @@ "scripts": { "dev": "next dev src/ui", "build": "next build src/ui", + "serve": "serve src/ui/out -c ../serve.json --cors", "lint": "next lint --fix src/ui", "type-check": "tsc -p src/ui/tsconfig.json --noEmit && tsc -p tsconfig.test.json --noEmit && tsc -p tsconfig.cypress.json --noEmit", "format": "prettier --write .", @@ -71,6 +72,7 @@ "jest-runner-groups": "^2.2.0", 
"jest-transform-stub": "^2.0.0", "prettier": "^3.5.3", + "serve": "^14.2.4", "typescript": "^5", "typescript-eslint": "^8.34.0" }, diff --git a/src/guidellm/benchmark/output.py b/src/guidellm/benchmark/output.py index 9bd10f1a..407b13c4 100644 --- a/src/guidellm/benchmark/output.py +++ b/src/guidellm/benchmark/output.py @@ -249,16 +249,6 @@ def save_html(self, path: str | Path) -> Path: f"window.{humps.decamelize(k)} = {{}};": f"window.{humps.decamelize(k)} = {json.dumps(v, indent=2)};\n" for k, v in camel_data.items() } - print("________") - print("________") - print("________") - print("________") - print("ui_api_data") - print(ui_api_data) - print("________") - print("________") - print("________") - print("________") create_report(ui_api_data, path) return path diff --git a/src/guidellm/config.py b/src/guidellm/config.py index f8a2eecd..134d3208 100644 --- a/src/guidellm/config.py +++ b/src/guidellm/config.py @@ -31,9 +31,9 @@ class Environment(str, Enum): ENV_REPORT_MAPPING = { Environment.PROD: "https://neuralmagic.github.io/guidellm/ui/latest/index.html", - Environment.STAGING: "https://neuralmagic.github.io/guidellm/ui/staging/latest/index.html", - Environment.DEV: "https://neuralmagic.github.io/guidellm/ui/pr/191/index.html", - Environment.LOCAL: "https://neuralmagic.github.io/guidellm/ui/dev/index.html", + Environment.STAGING: "https://neuralmagic.github.io/guidellm/ui/release/latest/index.html", + Environment.DEV: "https://neuralmagic.github.io/guidellm/ui/dev/index.html", + Environment.LOCAL: "http://localhost:3000/index.html", } @@ -117,7 +117,7 @@ class Settings(BaseSettings): ) # general settings - env: Environment = Environment.DEV + env: Environment = Environment.PROD default_async_loop_sleep: float = 10e-5 logging: LoggingSettings = LoggingSettings() default_sweep_number: int = 10 diff --git a/src/guidellm/presentation/data_models.py b/src/guidellm/presentation/data_models.py index d2a5d86c..e4b53d66 100644 --- 
a/src/guidellm/presentation/data_models.py +++ b/src/guidellm/presentation/data_models.py @@ -36,7 +36,7 @@ def from_data( min_v = min(data) max_v = max(data) - range_v = max_v - min_v + range_v = (1 + max_v) - min_v if bucket_width is None: if n_buckets is None: @@ -198,10 +198,14 @@ class TabularDistributionSummary(DistributionSummary): @computed_field @property def percentile_rows(self) -> list[dict[str, float]]: - return [ + rows = [ {"percentile": name, "value": value} for name, value in self.percentiles.model_dump().items() ] + filtered_rows = list( + filter(lambda row: row["percentile"] in ["p50", "p90", "p95", "p99"], rows) + ) + return filtered_rows @classmethod def from_distribution_summary( diff --git a/src/guidellm/presentation/injector.py b/src/guidellm/presentation/injector.py index e60a72ed..f5949eee 100644 --- a/src/guidellm/presentation/injector.py +++ b/src/guidellm/presentation/injector.py @@ -1,5 +1,5 @@ -from pathlib import Path import re +from pathlib import Path from typing import Union from guidellm.config import settings @@ -34,6 +34,7 @@ def create_report(js_data: dict, output_path: Union[str, Path]) -> Path: print(f"Report saved to {output_path}") return output_path + def inject_data( js_data: dict, html: str, @@ -60,6 +61,6 @@ def inject_data( # Rebuild the HTML new_head = f"{head_content}" - html = html[:head_match.start()] + new_head + html[head_match.end():] + html = html[: head_match.start()] + new_head + html[head_match.end() :] - return html \ No newline at end of file + return html diff --git a/src/ui/.env.local b/src/ui/.env.local index b9d5ff2b..44ab168b 100644 --- a/src/ui/.env.local +++ b/src/ui/.env.local @@ -1,4 +1,4 @@ ASSET_PREFIX=http://localhost:3000 BASE_PATH=http://localhost:3000 NEXT_PUBLIC_USE_MOCK_API=true -USE_MOCK_DATA=false +USE_MOCK_DATA=true diff --git a/src/ui/lib/components/Charts/MiniCombined/components/CustomTick/index.tsx b/src/ui/lib/components/Charts/MiniCombined/components/CustomTick/index.tsx index 
c7941c97..8aac9fd4 100644 --- a/src/ui/lib/components/Charts/MiniCombined/components/CustomTick/index.tsx +++ b/src/ui/lib/components/Charts/MiniCombined/components/CustomTick/index.tsx @@ -1,5 +1,7 @@ import { useTheme } from '@mui/material'; +import { formatNumber } from '@/lib/utils/helpers'; + import { CustomTickProps } from './CustomTick.interfaces'; const CustomTick = ({ @@ -45,7 +47,7 @@ const CustomTick = ({ fontSize={theme.typography.axisLabel.fontSize} fill={theme.palette.surface.onSurface} > - {tick} + {formatNumber(tick)} ); diff --git a/src/ui/lib/components/MetricsSummary/MetricsSummary.component.tsx b/src/ui/lib/components/MetricsSummary/MetricsSummary.component.tsx index d6bf3725..7a84e581 100644 --- a/src/ui/lib/components/MetricsSummary/MetricsSummary.component.tsx +++ b/src/ui/lib/components/MetricsSummary/MetricsSummary.component.tsx @@ -35,6 +35,7 @@ import { StyledFormControl, } from './MetricsSummary.styles'; import { useSummary } from './useSummary'; +import { ScaleType } from '../Charts/DashedLine/DashedLine.interfaces'; const percentileOptions = ['p50', 'p90', 'p95', 'p99']; @@ -207,6 +208,7 @@ export const Component = () => { data={[{ id: 'ttft', data: lineDataByRps.ttft || [] }]} threshold={ttftSLO} lineColor={LineColor.Primary} + yScaleType={ScaleType.linear} /> @@ -225,6 +227,7 @@ export const Component = () => { data={[{ id: 'tpot', data: lineDataByRps.tpot || [] }]} threshold={tpotSLO} lineColor={LineColor.Secondary} + yScaleType={ScaleType.linear} /> @@ -245,6 +248,7 @@ export const Component = () => { ]} threshold={timePerRequestSLO} lineColor={LineColor.Tertiary} + yScaleType={ScaleType.linear} /> @@ -265,6 +269,7 @@ export const Component = () => { data={[{ id: 'throughput', data: lineDataByRps.throughput || [] }]} threshold={throughputSLO} lineColor={LineColor.Quarternary} + yScaleType={ScaleType.linear} /> diff --git a/src/ui/lib/components/RequestOverTime/RequestOverTime.component.tsx 
b/src/ui/lib/components/RequestOverTime/RequestOverTime.component.tsx index b0633ba1..ac1455a7 100644 --- a/src/ui/lib/components/RequestOverTime/RequestOverTime.component.tsx +++ b/src/ui/lib/components/RequestOverTime/RequestOverTime.component.tsx @@ -50,7 +50,7 @@ export const Component: FC = ({ lines={lines} width={312} height={85} - xLegend="time" + xLegend="time (sec)" margins={{ bottom: 30 }} containerSize={containerSize} /> diff --git a/src/ui/lib/components/WorkloadMetrics/WorkloadMetrics.component.tsx b/src/ui/lib/components/WorkloadMetrics/WorkloadMetrics.component.tsx index 7be48983..36986e36 100644 --- a/src/ui/lib/components/WorkloadMetrics/WorkloadMetrics.component.tsx +++ b/src/ui/lib/components/WorkloadMetrics/WorkloadMetrics.component.tsx @@ -19,6 +19,7 @@ import { BlockHeader } from '../BlockHeader'; import { GraphTitle } from '../GraphTitle'; import { MetricsContainer } from '../MetricsContainer'; import { GraphsWrapper } from './WorkloadMetrics.styles'; +import { ScaleType } from '../Charts/DashedLine/DashedLine.interfaces'; export const columnContent = ( rpsValue: number, @@ -73,6 +74,7 @@ export const Component = () => { xLegend="request per sec" yLegend="ttft (ms)" minX={minX} + yScaleType={ScaleType.linear} /> @@ -93,6 +95,7 @@ export const Component = () => { xLegend="request per sec" yLegend="tpot (ms)" minX={minX} + yScaleType={ScaleType.linear} /> @@ -119,6 +122,7 @@ export const Component = () => { xLegend="request per sec" yLegend="latency (ms)" minX={minX} + yScaleType={ScaleType.linear} /> @@ -138,6 +142,7 @@ export const Component = () => { xLegend="request per sec" yLegend="throughput (tok/s)" minX={minX} + yScaleType={ScaleType.linear} /> diff --git a/src/ui/serve.json b/src/ui/serve.json new file mode 100644 index 00000000..b308df80 --- /dev/null +++ b/src/ui/serve.json @@ -0,0 +1,3 @@ +{ + "cleanUrls": false +} diff --git a/tests/unit/presentation/test_data_models.py b/tests/unit/presentation/test_data_models.py new file mode 
100644 index 00000000..c1663c43 --- /dev/null +++ b/tests/unit/presentation/test_data_models.py @@ -0,0 +1,20 @@ +import pytest + +from guidellm.presentation.data_models import Bucket + + +@pytest.mark.smoke +def test_bucket_from_data(): + buckets, bucket_width = Bucket.from_data([8, 8, 8, 8, 8, 8], 1) + assert len(buckets) == 1 + assert buckets[0].value == 8.0 + assert buckets[0].count == 6 + assert bucket_width == 1 + + buckets, bucket_width = Bucket.from_data([8, 8, 8, 8, 8, 7], 1) + assert len(buckets) == 2 + assert buckets[0].value == 7.0 + assert buckets[0].count == 1 + assert buckets[1].value == 8.0 + assert buckets[1].count == 5 + assert bucket_width == 1 diff --git a/tests/unit/presentation/test_injectory.py b/tests/unit/presentation/test_injectory.py new file mode 100644 index 00000000..2292a2bc --- /dev/null +++ b/tests/unit/presentation/test_injectory.py @@ -0,0 +1,70 @@ +from pathlib import Path + +import pytest +from pydantic import BaseModel + +from guidellm.config import settings +from guidellm.presentation.injector import create_report, inject_data + + +class ExampleModel(BaseModel): + name: str + version: str + + +@pytest.mark.smoke +def test_inject_data(): + model = ExampleModel(name="Example App", version="1.0.0") + html = "window.report_data = {};" + expected_html = 'window.report_data = {"name":"Example App","version":"1.0.0"};' + + result = inject_data( + model, + html, + settings.report_generation.report_html_match, + settings.report_generation.report_html_placeholder, + ) + assert result == expected_html + + +@pytest.mark.smoke +def test_create_report_to_file(tmpdir): + model = ExampleModel(name="Example App", version="1.0.0") + html_content = "window.report_data = {};" + expected_html_content = ( + 'window.report_data = {"name":"Example App","version":"1.0.0"};' + ) + + mock_html_path = tmpdir.join("template.html") + mock_html_path.write(html_content) + settings.report_generation.source = str(mock_html_path) + + output_path = 
tmpdir.join("output.html") + result_path = create_report(model, str(output_path)) + result_content = result_path.read_text() + + assert result_path == output_path + assert result_content == expected_html_content + + +@pytest.mark.smoke +def test_create_report_to_directory(tmpdir): + model = ExampleModel(name="Example App", version="1.0.0") + html_content = "window.report_data = {};" + expected_html_content = ( + 'window.report_data = {"name":"Example App","version":"1.0.0"};' + ) + + mock_html_path = tmpdir.join("template.html") + mock_html_path.write(html_content) + settings.report_generation.source = str(mock_html_path) + + output_dir = tmpdir.mkdir("output_dir") + output_path = Path(output_dir) / "report.html" + result_path = create_report(model, str(output_dir)) + + with Path(result_path).open("r") as file: + result_content = file.read() + + assert result_path == output_path + assert result_content == expected_html_content From 1f642557c5ea9703dedcc8ed769dfab7d7c6b4ec Mon Sep 17 00:00:00 2001 From: dalthecow Date: Fri, 20 Jun 2025 16:59:04 -0400 Subject: [PATCH 09/20] remove benchmarks.html --- benchmarks.html | 819 ------------------------------------------------ 1 file changed, 819 deletions(-) delete mode 100644 benchmarks.html diff --git a/benchmarks.html b/benchmarks.html deleted file mode 100644 index 3c02dc09..00000000 --- a/benchmarks.html +++ /dev/null @@ -1,819 +0,0 @@ -GuideLLM \ No newline at end of file From 7368fdc5c9e5f6df8794d3619626da37e2c31c29 Mon Sep 17 00:00:00 2001 From: dalthecow Date: Fri, 20 Jun 2025 17:01:56 -0400 Subject: [PATCH 10/20] set mock data flag back in pr build --- .github/workflows/development.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/development.yml b/.github/workflows/development.yml index bc53dd75..761eaeaf 100644 --- a/.github/workflows/development.yml +++ b/.github/workflows/development.yml @@ -280,8 +280,7 @@ jobs: # Set asset prefix and base path with PR number 
ASSET_PREFIX=https://neuralmagic.github.io/guidellm/ui/pr/${PR_NUMBER} - # temporarily setting to false to test if this build works with guidellm - USE_MOCK_DATA=false + USE_MOCK_DATA=true BASE_PATH=/ui/pr/${PR_NUMBER} GIT_SHA=${{ github.sha }} export ASSET_PREFIX=${ASSET_PREFIX} From c62e7b9fea057583ff1cb7afa37fbfd3a9d8b7fd Mon Sep 17 00:00:00 2001 From: dalthecow Date: Fri, 20 Jun 2025 17:26:11 -0400 Subject: [PATCH 11/20] remove comments in save html method --- src/guidellm/benchmark/output.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/guidellm/benchmark/output.py b/src/guidellm/benchmark/output.py index 407b13c4..45a7f770 100644 --- a/src/guidellm/benchmark/output.py +++ b/src/guidellm/benchmark/output.py @@ -232,16 +232,11 @@ def save_csv(self, path: Union[str, Path]) -> Path: def save_html(self, path: str | Path) -> Path: """ Download html, inject report data and save to a file. - If the file is a directory, it will create the report in a file named - benchmarks.html under the directory. :param path: The path to create the report at. :return: The path to the report. 
""" - # json_data = json.dumps(data, indent=2) - # thing = f'window.{variable_name} = {json_data};' - data_builder = UIDataBuilder(self.benchmarks) data = data_builder.to_dict() camel_data = humps.camelize(data) From d4b04813ac6068d862d88e5efeb6ec510aaf63c0 Mon Sep 17 00:00:00 2001 From: dalthecow Date: Fri, 20 Jun 2025 17:31:47 -0400 Subject: [PATCH 12/20] remove console.log --- src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts b/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts index 71366448..53d54f40 100644 --- a/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts +++ b/src/ui/lib/store/slices/benchmarks/benchmarks.selectors.ts @@ -14,7 +14,6 @@ export const selectBenchmarks = (state: RootState) => state.benchmarks.data; export const selectMetricsSummaryLineData = createSelector( [selectBenchmarks, selectSloState], (benchmarks, sloState) => { - console.log('🚀 ~ benchmarks.selectors.ts:18 ~ benchmarks:', benchmarks); const sortedByRPS = benchmarks ?.slice() ?.sort((bm1, bm2) => (bm1.requestsPerSecond > bm2.requestsPerSecond ? 
1 : -1)); From d4af2274f0f03d056eaa8ae7a39e7de6f2bf0952 Mon Sep 17 00:00:00 2001 From: dalthecow Date: Mon, 23 Jun 2025 12:52:54 -0400 Subject: [PATCH 13/20] fix broken unit tests --- src/guidellm/benchmark/output.py | 2 +- src/guidellm/presentation/builder.py | 10 ++-- src/guidellm/presentation/data_models.py | 25 +++----- src/guidellm/presentation/injector.py | 3 - tests/unit/presentation/test_injector.py | 63 ++++++++++++++++++++ tests/unit/presentation/test_injectory.py | 70 ----------------------- tests/unit/test_config.py | 10 ++-- 7 files changed, 80 insertions(+), 103 deletions(-) create mode 100644 tests/unit/presentation/test_injector.py delete mode 100644 tests/unit/presentation/test_injectory.py diff --git a/src/guidellm/benchmark/output.py b/src/guidellm/benchmark/output.py index 45a7f770..ca891079 100644 --- a/src/guidellm/benchmark/output.py +++ b/src/guidellm/benchmark/output.py @@ -236,7 +236,7 @@ def save_html(self, path: str | Path) -> Path: :param path: The path to create the report at. :return: The path to the report. 
""" - + from guidellm.presentation import UIDataBuilder data_builder = UIDataBuilder(self.benchmarks) data = data_builder.to_dict() camel_data = humps.camelize(data) diff --git a/src/guidellm/presentation/builder.py b/src/guidellm/presentation/builder.py index 986939a4..84ae8217 100644 --- a/src/guidellm/presentation/builder.py +++ b/src/guidellm/presentation/builder.py @@ -1,14 +1,12 @@ -from typing import Any +from typing import Any, TYPE_CHECKING -from guidellm.benchmark.benchmark import GenerativeBenchmark +if TYPE_CHECKING: + from guidellm.benchmark.benchmark import GenerativeBenchmark from .data_models import BenchmarkDatum, RunInfo, WorkloadDetails -__all__ = ["UIDataBuilder"] - - class UIDataBuilder: - def __init__(self, benchmarks: list[GenerativeBenchmark]): + def __init__(self, benchmarks: list["GenerativeBenchmark"]): self.benchmarks = benchmarks def build_run_info(self): diff --git a/src/guidellm/presentation/data_models.py b/src/guidellm/presentation/data_models.py index e4b53d66..223f1378 100644 --- a/src/guidellm/presentation/data_models.py +++ b/src/guidellm/presentation/data_models.py @@ -1,25 +1,14 @@ import random from collections import defaultdict from math import ceil -from typing import List, Optional, Tuple +from typing import List, Optional, Tuple, TYPE_CHECKING from pydantic import BaseModel, computed_field -from guidellm.benchmark.benchmark import GenerativeBenchmark -from guidellm.objects.statistics import DistributionSummary - -__all__ = [ - "BenchmarkDatum", - "Bucket", - "Dataset", - "Distribution", - "Model", - "RunInfo", - "Server", - "TokenDetails", - "WorkloadDetails", -] +if TYPE_CHECKING: + from guidellm.benchmark.benchmark import GenerativeBenchmark +from guidellm.objects.statistics import DistributionSummary class Bucket(BaseModel): value: float @@ -76,7 +65,7 @@ class RunInfo(BaseModel): dataset: Dataset @classmethod - def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): + def from_benchmarks(cls, benchmarks: 
list["GenerativeBenchmark"]): model = benchmarks[0].worker.backend_model or "N/A" timestamp = max( bm.run_stats.start_time for bm in benchmarks if bm.start_time is not None @@ -117,7 +106,7 @@ class WorkloadDetails(BaseModel): server: Server @classmethod - def from_benchmarks(cls, benchmarks: list[GenerativeBenchmark]): + def from_benchmarks(cls, benchmarks: list["GenerativeBenchmark"]): target = benchmarks[0].worker.backend_target rate_type = benchmarks[0].args.profile.type_ successful_requests = [ @@ -222,7 +211,7 @@ class BenchmarkDatum(BaseModel): time_per_request: TabularDistributionSummary @classmethod - def from_benchmark(cls, bm: GenerativeBenchmark): + def from_benchmark(cls, bm: "GenerativeBenchmark"): return cls( requests_per_second=bm.metrics.requests_per_second.successful.mean, tpot=TabularDistributionSummary.from_distribution_summary( diff --git a/src/guidellm/presentation/injector.py b/src/guidellm/presentation/injector.py index f5949eee..a3ec079c 100644 --- a/src/guidellm/presentation/injector.py +++ b/src/guidellm/presentation/injector.py @@ -5,8 +5,6 @@ from guidellm.config import settings from guidellm.utils.text import load_text -__all__ = ["create_report", "inject_data"] - def create_report(js_data: dict, output_path: Union[str, Path]) -> Path: """ @@ -31,7 +29,6 @@ def create_report(js_data: dict, output_path: Union[str, Path]) -> Path: output_path.parent.mkdir(parents=True, exist_ok=True) output_path.write_text(report_content) - print(f"Report saved to {output_path}") return output_path diff --git a/tests/unit/presentation/test_injector.py b/tests/unit/presentation/test_injector.py new file mode 100644 index 00000000..f3bf621b --- /dev/null +++ b/tests/unit/presentation/test_injector.py @@ -0,0 +1,63 @@ +from pathlib import Path + +import pytest +from pydantic import BaseModel + +from guidellm.config import settings +from guidellm.presentation.injector import create_report, inject_data + + +class ExampleModel(BaseModel): + name: str + 
version: str + + +@pytest.mark.smoke +def test_inject_data(): + html = "" + expected_html = '' + js_data = { "window.run_info = {};": "window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };" } + result = inject_data( + js_data, + html, + ) + assert result == expected_html + + +@pytest.mark.smoke +def test_create_report_to_file(tmpdir): + js_data = { "window.run_info = {};": "window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };" } + html_content = "" + expected_html_content = '' + + mock_html_path = tmpdir.join("template.html") + mock_html_path.write(html_content) + settings.report_generation.source = str(mock_html_path) + + output_path = tmpdir.join("output.html") + result_path = create_report(js_data, str(output_path)) + result_content = result_path.read_text() + + assert result_path == output_path + assert result_content == expected_html_content + + +@pytest.mark.smoke +def test_create_report_with_file_nested_in_dir(tmpdir): + js_data = { "window.run_info = {};": "window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };" } + html_content = "" + expected_html_content = '' + + output_dir = tmpdir.mkdir("output_dir") + mock_html_path = tmpdir.join("template.html") + mock_html_path.write(html_content) + settings.report_generation.source = str(mock_html_path) + + output_path = Path(output_dir) / "report.html" + result_path = create_report(js_data, str(output_path)) + + with Path(result_path).open("r") as file: + result_content = file.read() + + assert result_path == output_path + assert result_content == expected_html_content diff --git a/tests/unit/presentation/test_injectory.py b/tests/unit/presentation/test_injectory.py deleted file mode 100644 index 2292a2bc..00000000 --- a/tests/unit/presentation/test_injectory.py +++ /dev/null @@ -1,70 +0,0 @@ -from pathlib import Path - -import pytest -from pydantic import BaseModel - -from guidellm.config import settings 
-from guidellm.presentation.injector import create_report, inject_data - - -class ExampleModel(BaseModel): - name: str - version: str - - -@pytest.mark.smoke -def test_inject_data(): - model = ExampleModel(name="Example App", version="1.0.0") - html = "window.report_data = {};" - expected_html = 'window.report_data = {"name":"Example App","version":"1.0.0"};' - - result = inject_data( - model, - html, - settings.report_generation.report_html_match, - settings.report_generation.report_html_placeholder, - ) - assert result == expected_html - - -@pytest.mark.smoke -def test_create_report_to_file(tmpdir): - model = ExampleModel(name="Example App", version="1.0.0") - html_content = "window.report_data = {};" - expected_html_content = ( - 'window.report_data = {"name":"Example App","version":"1.0.0"};' - ) - - mock_html_path = tmpdir.join("template.html") - mock_html_path.write(html_content) - settings.report_generation.source = str(mock_html_path) - - output_path = tmpdir.join("output.html") - result_path = create_report(model, str(output_path)) - result_content = result_path.read_text() - - assert result_path == output_path - assert result_content == expected_html_content - - -@pytest.mark.smoke -def test_create_report_to_directory(tmpdir): - model = ExampleModel(name="Example App", version="1.0.0") - html_content = "window.report_data = {};" - expected_html_content = ( - 'window.report_data = {"name":"Example App","version":"1.0.0"};' - ) - - mock_html_path = tmpdir.join("template.html") - mock_html_path.write(html_content) - settings.report_generation.source = str(mock_html_path) - - output_dir = tmpdir.mkdir("output_dir") - output_path = Path(output_dir) / "report.html" - result_path = create_report(model, str(output_dir)) - - with Path(result_path).open("r") as file: - result_content = file.read() - - assert result_path == output_path - assert result_content == expected_html_content diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 
ca084ec5..24ee1f8a 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -21,7 +21,7 @@ def test_default_settings(): assert settings.openai == OpenAISettings() assert ( settings.report_generation.source - == "https://guidellm.neuralmagic.com/local-report/index.html" + == "https://neuralmagic.github.io/guidellm/ui/latest/index.html" ) @@ -51,25 +51,25 @@ def test_report_generation_default_source(): settings = Settings(env=Environment.LOCAL) assert ( settings.report_generation.source - == "https://neuralmagic.github.io/ui/dev/index.html" + == "http://localhost:3000/index.html" ) settings = Settings(env=Environment.DEV) assert ( settings.report_generation.source - == "https://neuralmagic.github.io/ui/dev/index.html" + == "https://neuralmagic.github.io/guidellm/ui/dev/index.html" ) settings = Settings(env=Environment.STAGING) assert ( settings.report_generation.source - == "https://neuralmagic.github.io/ui/staging/latest/index.html" + == "https://neuralmagic.github.io/guidellm/ui/release/latest/index.html" ) settings = Settings(env=Environment.PROD) assert ( settings.report_generation.source - == "https://neuralmagic.github.io/ui/latest/index.html" + == "https://neuralmagic.github.io/guidellm/ui/latest/index.html" ) From 8fae0a1755f623def7b70c0d644f57d4eaf7347e Mon Sep 17 00:00:00 2001 From: dalthecow Date: Mon, 23 Jun 2025 14:24:41 -0400 Subject: [PATCH 14/20] try to fix type error in test --- src/guidellm/benchmark/output.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/guidellm/benchmark/output.py b/src/guidellm/benchmark/output.py index ca891079..dd7982ee 100644 --- a/src/guidellm/benchmark/output.py +++ b/src/guidellm/benchmark/output.py @@ -229,7 +229,7 @@ def save_csv(self, path: Union[str, Path]) -> Path: return path - def save_html(self, path: str | Path) -> Path: + def save_html(self, path: Union[str, Path]) -> Path: """ Download html, inject report data and save to a file. 
@@ -244,8 +244,7 @@ def save_html(self, path: str | Path) -> Path: f"window.{humps.decamelize(k)} = {{}};": f"window.{humps.decamelize(k)} = {json.dumps(v, indent=2)};\n" for k, v in camel_data.items() } - create_report(ui_api_data, path) - return path + return create_report(ui_api_data, path) @staticmethod def _file_setup( From 904a4dd41033b3dc066ed028bd35aaabc333fa6b Mon Sep 17 00:00:00 2001 From: dalthecow Date: Mon, 23 Jun 2025 15:01:10 -0400 Subject: [PATCH 15/20] fix type issues --- src/guidellm/presentation/data_models.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/guidellm/presentation/data_models.py b/src/guidellm/presentation/data_models.py index 223f1378..d29d09ea 100644 --- a/src/guidellm/presentation/data_models.py +++ b/src/guidellm/presentation/data_models.py @@ -1,7 +1,7 @@ import random from collections import defaultdict from math import ceil -from typing import List, Optional, Tuple, TYPE_CHECKING +from typing import List, Optional, Tuple, TYPE_CHECKING, Union from pydantic import BaseModel, computed_field @@ -11,12 +11,12 @@ from guidellm.objects.statistics import DistributionSummary class Bucket(BaseModel): - value: float + value: Union[float, int] count: int @staticmethod def from_data( - data: List[float], + data: Union[List[float], List[int]], bucket_width: Optional[float] = None, n_buckets: Optional[int] = None, ) -> Tuple[List["Bucket"], float]: @@ -34,7 +34,7 @@ def from_data( else: n_buckets = ceil(range_v / bucket_width) - bucket_counts = defaultdict(int) + bucket_counts: defaultdict[Union[float, int], int] = defaultdict(int) for val in data: idx = int((val - min_v) // bucket_width) if idx >= n_buckets: @@ -125,10 +125,10 @@ def from_benchmarks(cls, benchmarks: list["GenerativeBenchmark"]): ] prompt_tokens = [ - req.prompt_tokens for bm in benchmarks for req in bm.requests.successful + float(req.prompt_tokens) for bm in benchmarks for req in bm.requests.successful ] output_tokens = [ - 
req.output_tokens for bm in benchmarks for req in bm.requests.successful + float(req.output_tokens) for bm in benchmarks for req in bm.requests.successful ] prompt_token_buckets, _prompt_token_bucket_width = Bucket.from_data( @@ -184,7 +184,6 @@ class TabularDistributionSummary(DistributionSummary): `percentile_rows` helper. """ - @computed_field @property def percentile_rows(self) -> list[dict[str, float]]: rows = [ From f41fea64332dd803dc0b7437bf914105e9e26edf Mon Sep 17 00:00:00 2001 From: dalthecow Date: Mon, 23 Jun 2025 19:06:51 -0400 Subject: [PATCH 16/20] fix all quality/typing issues --- README.md | 1 + pyproject.toml | 2 +- src/guidellm/benchmark/output.py | 13 +++++---- src/guidellm/presentation/builder.py | 3 +- src/guidellm/presentation/data_models.py | 20 +++++++------ src/guidellm/presentation/injector.py | 7 ++--- tests/unit/presentation/__init__.py | 0 tests/unit/presentation/test_injector.py | 36 ++++++++++++++++++++---- tests/unit/test_config.py | 5 +--- 9 files changed, 57 insertions(+), 30 deletions(-) create mode 100644 tests/unit/presentation/__init__.py diff --git a/README.md b/README.md index e28e107b..0af5c35b 100644 --- a/README.md +++ b/README.md @@ -160,6 +160,7 @@ GuideLLM UI is a companion frontend for visualizing the results of a GuideLLM be ### 🛠 Generating an HTML report with a benchmark run Set the output to benchmarks.html for your run: + ```base --output-path=benchmarks.html ``` diff --git a/pyproject.toml b/pyproject.toml index 36ab1e8f..aef3f44a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,10 +53,10 @@ dependencies = [ "protobuf", "pydantic>=2.0.0", "pydantic-settings>=2.0.0", + "pyhumps>=3.8.0", "pyyaml>=6.0.0", "rich", "transformers", - "pyhumps>=3.8.0", ] [project.optional-dependencies] diff --git a/src/guidellm/benchmark/output.py b/src/guidellm/benchmark/output.py index dd7982ee..792097cb 100644 --- a/src/guidellm/benchmark/output.py +++ b/src/guidellm/benchmark/output.py @@ -6,7 +6,7 @@ from pathlib import Path 
from typing import Any, Literal, Optional, Union -import humps +import humps # type: ignore[import-not-found] import yaml from pydantic import Field from rich.console import Console @@ -236,14 +236,15 @@ def save_html(self, path: Union[str, Path]) -> Path: :param path: The path to create the report at. :return: The path to the report. """ - from guidellm.presentation import UIDataBuilder + data_builder = UIDataBuilder(self.benchmarks) data = data_builder.to_dict() camel_data = humps.camelize(data) - ui_api_data = { - f"window.{humps.decamelize(k)} = {{}};": f"window.{humps.decamelize(k)} = {json.dumps(v, indent=2)};\n" - for k, v in camel_data.items() - } + ui_api_data = {} + for k, v in camel_data.items(): + key = f"window.{humps.decamelize(k)} = {{}};" + value = f"window.{humps.decamelize(k)} = {json.dumps(v, indent=2)};\n" + ui_api_data[key] = value return create_report(ui_api_data, path) @staticmethod diff --git a/src/guidellm/presentation/builder.py b/src/guidellm/presentation/builder.py index 84ae8217..a27d7cec 100644 --- a/src/guidellm/presentation/builder.py +++ b/src/guidellm/presentation/builder.py @@ -1,10 +1,11 @@ -from typing import Any, TYPE_CHECKING +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from guidellm.benchmark.benchmark import GenerativeBenchmark from .data_models import BenchmarkDatum, RunInfo, WorkloadDetails + class UIDataBuilder: def __init__(self, benchmarks: list["GenerativeBenchmark"]): self.benchmarks = benchmarks diff --git a/src/guidellm/presentation/data_models.py b/src/guidellm/presentation/data_models.py index d29d09ea..0af7c3d0 100644 --- a/src/guidellm/presentation/data_models.py +++ b/src/guidellm/presentation/data_models.py @@ -1,25 +1,26 @@ import random from collections import defaultdict from math import ceil -from typing import List, Optional, Tuple, TYPE_CHECKING, Union +from typing import TYPE_CHECKING, Optional, Union -from pydantic import BaseModel, computed_field +from pydantic import BaseModel if 
TYPE_CHECKING: from guidellm.benchmark.benchmark import GenerativeBenchmark from guidellm.objects.statistics import DistributionSummary + class Bucket(BaseModel): value: Union[float, int] count: int @staticmethod def from_data( - data: Union[List[float], List[int]], + data: Union[list[float], list[int]], bucket_width: Optional[float] = None, n_buckets: Optional[int] = None, - ) -> Tuple[List["Bucket"], float]: + ) -> tuple[list["Bucket"], float]: if not data: return [], 1.0 @@ -125,10 +126,14 @@ def from_benchmarks(cls, benchmarks: list["GenerativeBenchmark"]): ] prompt_tokens = [ - float(req.prompt_tokens) for bm in benchmarks for req in bm.requests.successful + float(req.prompt_tokens) + for bm in benchmarks + for req in bm.requests.successful ] output_tokens = [ - float(req.output_tokens) for bm in benchmarks for req in bm.requests.successful + float(req.output_tokens) + for bm in benchmarks + for req in bm.requests.successful ] prompt_token_buckets, _prompt_token_bucket_width = Bucket.from_data( @@ -190,10 +195,9 @@ def percentile_rows(self) -> list[dict[str, float]]: {"percentile": name, "value": value} for name, value in self.percentiles.model_dump().items() ] - filtered_rows = list( + return list( filter(lambda row: row["percentile"] in ["p50", "p90", "p95", "p99"], rows) ) - return filtered_rows @classmethod def from_distribution_summary( diff --git a/src/guidellm/presentation/injector.py b/src/guidellm/presentation/injector.py index a3ec079c..a2a4855a 100644 --- a/src/guidellm/presentation/injector.py +++ b/src/guidellm/presentation/injector.py @@ -37,7 +37,8 @@ def inject_data( html: str, ) -> str: """ - Injects the json data into the HTML, replacing placeholders only within the section. + Injects the json data into the HTML, + replacing placeholders only within the section. 
:param js_data: the json data to inject :type js_data: dict @@ -58,6 +59,4 @@ def inject_data( # Rebuild the HTML new_head = f"{head_content}" - html = html[: head_match.start()] + new_head + html[head_match.end() :] - - return html + return html[: head_match.start()] + new_head + html[head_match.end() :] diff --git a/tests/unit/presentation/__init__.py b/tests/unit/presentation/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/presentation/test_injector.py b/tests/unit/presentation/test_injector.py index f3bf621b..cdaa7619 100644 --- a/tests/unit/presentation/test_injector.py +++ b/tests/unit/presentation/test_injector.py @@ -15,8 +15,16 @@ class ExampleModel(BaseModel): @pytest.mark.smoke def test_inject_data(): html = "" - expected_html = '' - js_data = { "window.run_info = {};": "window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };" } + expected_html = ( + "" + ) + js_data = { + "window.run_info = {};": "window.run_info =" + '{ "model": { "name": "neuralmagic/Qwen2.5-7B-quantized.w8a8" } };' + } result = inject_data( js_data, html, @@ -26,9 +34,17 @@ def test_inject_data(): @pytest.mark.smoke def test_create_report_to_file(tmpdir): - js_data = { "window.run_info = {};": "window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };" } + js_data = { + "window.run_info = {};": "window.run_info =" + '{ "model": { "name": "neuralmagic/Qwen2.5-7B-quantized.w8a8" } };' + } html_content = "" - expected_html_content = '' + expected_html_content = ( + "" + ) mock_html_path = tmpdir.join("template.html") mock_html_path.write(html_content) @@ -44,9 +60,17 @@ def test_create_report_to_file(tmpdir): @pytest.mark.smoke def test_create_report_with_file_nested_in_dir(tmpdir): - js_data = { "window.run_info = {};": "window.run_info = { \"model\": { \"name\": \"neuralmagic/Qwen2.5-7B-quantized.w8a8\" } };" } + js_data = { + "window.run_info = {};": "window.run_info =" + '{ "model": { 
"name": "neuralmagic/Qwen2.5-7B-quantized.w8a8" } };' + } html_content = "" - expected_html_content = '' + expected_html_content = ( + "" + ) output_dir = tmpdir.mkdir("output_dir") mock_html_path = tmpdir.join("template.html") diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 24ee1f8a..d9ccd885 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -49,10 +49,7 @@ def test_settings_from_env_variables(mocker): @pytest.mark.smoke def test_report_generation_default_source(): settings = Settings(env=Environment.LOCAL) - assert ( - settings.report_generation.source - == "http://localhost:3000/index.html" - ) + assert settings.report_generation.source == "http://localhost:3000/index.html" settings = Settings(env=Environment.DEV) assert ( From bf09d6577a6512d77114b624b5d758d7ecaf43ba Mon Sep 17 00:00:00 2001 From: dalthecow Date: Mon, 23 Jun 2025 20:07:50 -0400 Subject: [PATCH 17/20] fix trailing line issue --- src/ui/.env.local | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ui/.env.local b/src/ui/.env.local index 44ab168b..da69b612 100644 --- a/src/ui/.env.local +++ b/src/ui/.env.local @@ -1,4 +1,4 @@ ASSET_PREFIX=http://localhost:3000 BASE_PATH=http://localhost:3000 NEXT_PUBLIC_USE_MOCK_API=true -USE_MOCK_DATA=true +USE_MOCK_DATA=true \ No newline at end of file From a4f60acd8ea7fbe34bfa0858cb62c3d65da37f46 Mon Sep 17 00:00:00 2001 From: dalthecow Date: Tue, 24 Jun 2025 18:22:11 -0400 Subject: [PATCH 18/20] add computed field back into data_models (this will fail the CI checks, need to find better solution) --- README.md | 8 +++++++- src/guidellm/presentation/data_models.py | 3 ++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 0af5c35b..91da7273 100644 --- a/README.md +++ b/README.md @@ -185,7 +185,13 @@ npm run build npm run serve ``` -This will start a local server (e.g., at http://localhost:3000). 
Then, in your GuideLLM config or CLI flags, point to this local server as the asset base for report generation. You can set the Environment to LOCAL before running your benchmarks to accomplish this.
+This will start a local server (e.g., at http://localhost:3000). Then set the Environment to LOCAL before running your benchmarks.
+
+```bash
+export GUIDELLM__ENV=local
+```
+
+Alternatively, in config.py update the ENV_REPORT_MAPPING used as the asset base for report generation to the LOCAL option.
 
 ## Resources
 
diff --git a/src/guidellm/presentation/data_models.py b/src/guidellm/presentation/data_models.py
index 0af7c3d0..4e3d9f61 100644
--- a/src/guidellm/presentation/data_models.py
+++ b/src/guidellm/presentation/data_models.py
@@ -3,7 +3,7 @@
 from math import ceil
 from typing import TYPE_CHECKING, Optional, Union
 
-from pydantic import BaseModel
+from pydantic import BaseModel, computed_field
 
 if TYPE_CHECKING:
     from guidellm.benchmark.benchmark import GenerativeBenchmark
@@ -189,6 +189,7 @@ class TabularDistributionSummary(DistributionSummary):
     `percentile_rows` helper.
""" + @computed_field @property def percentile_rows(self) -> list[dict[str, float]]: rows = [ From 1b66b68cc6091aeaeffabb5e31f13b816245969e Mon Sep 17 00:00:00 2001 From: dalthecow Date: Tue, 24 Jun 2025 18:27:09 -0400 Subject: [PATCH 19/20] remove @property --- src/guidellm/presentation/data_models.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/guidellm/presentation/data_models.py b/src/guidellm/presentation/data_models.py index 4e3d9f61..ff5221e3 100644 --- a/src/guidellm/presentation/data_models.py +++ b/src/guidellm/presentation/data_models.py @@ -190,7 +190,6 @@ class TabularDistributionSummary(DistributionSummary): """ @computed_field - @property def percentile_rows(self) -> list[dict[str, float]]: rows = [ {"percentile": name, "value": value} From e9972e05695ebb360be87f64043b7505917260cd Mon Sep 17 00:00:00 2001 From: dalthecow Date: Mon, 30 Jun 2025 11:49:05 -0400 Subject: [PATCH 20/20] fix log scaled y axis in charts --- package-lock.json | 30 + src/ui/.env.development | 2 +- src/ui/.env.example | 1 + src/ui/.env.local | 6 +- src/ui/.env.production | 2 +- src/ui/.env.staging | 2 +- .../DashedLine/DashedLine.component.tsx | 9 +- .../components/Charts/DashedLine/helpers.ts | 28 +- .../MetricLine/MetricLine.component.tsx | 4 +- .../PageHeader/PageHeader.component.tsx | 12 +- .../WorkloadMetrics.component.tsx | 5 - src/ui/lib/store/benchmarksWindowData.ts | 3042 ++++++++++------- .../Charts/DashedLine/helpers.test.ts | 52 +- 13 files changed, 1867 insertions(+), 1328 deletions(-) diff --git a/package-lock.json b/package-lock.json index 849396a4..a210be66 100644 --- a/package-lock.json +++ b/package-lock.json @@ -17593,6 +17593,36 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/@next/swc-linux-x64-gnu": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.3.2.tgz", + "integrity": 
"sha512-uRBo6THWei0chz+Y5j37qzx+BtoDRFIkDzZjlpCItBRXyMPIg079eIkOCl3aqr2tkxL4HFyJ4GHDes7W8HuAUg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "15.3.2", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.3.2.tgz", + "integrity": "sha512-+uxFlPuCNx/T9PdMClOqeE8USKzj8tVz37KflT3Kdbx/LOlZBRI2yxuIcmx1mPNK8DwSOMNCr4ureSet7eyC0w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } } } } diff --git a/src/ui/.env.development b/src/ui/.env.development index 938fd8bd..2d03b789 100644 --- a/src/ui/.env.development +++ b/src/ui/.env.development @@ -1,3 +1,3 @@ -ASSET_PREFIX=https://neuralmagic.github.io/ui/dev +ASSET_PREFIX=https://neuralmagic.github.io/guidellm/ui/dev BASE_PATH=/ui/dev NEXT_PUBLIC_USE_MOCK_API=true diff --git a/src/ui/.env.example b/src/ui/.env.example index 06812a30..b9d5ff2b 100644 --- a/src/ui/.env.example +++ b/src/ui/.env.example @@ -1,3 +1,4 @@ ASSET_PREFIX=http://localhost:3000 BASE_PATH=http://localhost:3000 NEXT_PUBLIC_USE_MOCK_API=true +USE_MOCK_DATA=false diff --git a/src/ui/.env.local b/src/ui/.env.local index 44ab168b..60b459a6 100644 --- a/src/ui/.env.local +++ b/src/ui/.env.local @@ -1,4 +1,4 @@ -ASSET_PREFIX=http://localhost:3000 -BASE_PATH=http://localhost:3000 +ASSET_PREFIX=http://localhost:3001 +BASE_PATH=http://localhost:3001 NEXT_PUBLIC_USE_MOCK_API=true -USE_MOCK_DATA=true +USE_MOCK_DATA=false diff --git a/src/ui/.env.production b/src/ui/.env.production index 2aa8deb6..981e7d86 100644 --- a/src/ui/.env.production +++ b/src/ui/.env.production @@ -1,3 +1,3 @@ -ASSET_PREFIX=https://neuralmagic.github.io/ui/latest +ASSET_PREFIX=https://neuralmagic.github.io/guidellm/ui/latest BASE_PATH=/ui/latest NEXT_PUBLIC_USE_MOCK_API=true diff --git a/src/ui/.env.staging b/src/ui/.env.staging index a2280817..416e04c3 100644 --- 
a/src/ui/.env.staging +++ b/src/ui/.env.staging @@ -1,3 +1,3 @@ -ASSET_PREFIX=https://neuralmagic.github.io/ui/release/latest +ASSET_PREFIX=https://neuralmagic.github.io/guidellm/ui/release/latest BASE_PATH=/ui/release/latest NEXT_PUBLIC_USE_MOCK_API=true diff --git a/src/ui/lib/components/Charts/DashedLine/DashedLine.component.tsx b/src/ui/lib/components/Charts/DashedLine/DashedLine.component.tsx index a3b9ab87..650c62ee 100644 --- a/src/ui/lib/components/Charts/DashedLine/DashedLine.component.tsx +++ b/src/ui/lib/components/Charts/DashedLine/DashedLine.component.tsx @@ -7,7 +7,7 @@ import { DashedLineProps, ScaleType } from './DashedLine.interfaces'; import { spacedLogValues } from './helpers'; export const getMinTick = (data: readonly Serie[]) => { - return Math.max( + return Math.min( ...data.map((lineData) => Math.min(...lineData.data.map((point) => point.y as number)) ) @@ -80,11 +80,16 @@ export const Component = ({ let extraLeftAxisOptions = {}; let extraYScaleOptions = {}; if (yScaleType === ScaleType.log) { - const ticks = spacedLogValues(getMinTick(data), getMaxTick(data), 6); + const ticks = spacedLogValues( + Math.floor(getMinTick(data)), + Math.ceil(getMaxTick(data)), + 6 + ); extraLeftAxisOptions = { tickValues: ticks, }; extraYScaleOptions = { + min: ticks[0], max: ticks[ticks.length - 1], }; } diff --git a/src/ui/lib/components/Charts/DashedLine/helpers.ts b/src/ui/lib/components/Charts/DashedLine/helpers.ts index c73405ed..0c5a6bd0 100644 --- a/src/ui/lib/components/Charts/DashedLine/helpers.ts +++ b/src/ui/lib/components/Charts/DashedLine/helpers.ts @@ -13,6 +13,21 @@ const allowedMultipliers = [ 1, 1.2, 1.4, 1.5, 1.6, 1.8, 2, 2.5, 3, 3.5, 4, 5, 6, 7, 7.5, 8, 9, 10, ]; +export function roundDownNice(x: number) { + if (x <= 0) { + return x; + } + const exponent = Math.floor(Math.log10(x)); + const base = Math.pow(10, exponent); + const fraction = x / base; + for (const m of allowedMultipliers.slice().reverse()) { + if (m <= fraction) { + return 
Math.floor(m * base); + } + } + return Math.floor(10 * base); +} + export function roundUpNice(x: number) { if (x <= 0) { return x; @@ -22,10 +37,10 @@ export function roundUpNice(x: number) { const fraction = x / base; for (const m of allowedMultipliers) { if (m >= fraction) { - return Math.round(m * base); + return Math.ceil(m * base); } } - return Math.round(10 * base); + return Math.ceil(10 * base); } export function roundNearestNice(x: number) { @@ -51,11 +66,14 @@ export function spacedLogValues(min: number, max: number, steps: number) { if (steps < 2) { return []; } + if (steps > max - min) { + steps = max - min + 1; + } if (min === 0) { const nonzeroCount = steps - 1; - const exponent = Math.floor(Math.log10(max)) - (nonzeroCount - 1); - const lowerNonZero = roundNearestNice(Math.pow(10, exponent)); + const exponent = Math.log10(max) / (nonzeroCount - 1); + const lowerNonZero = roundDownNice(Math.pow(10, exponent)); const upperTick = roundUpNice(max); const r = Math.pow(upperTick / lowerNonZero, 1 / (nonzeroCount - 1)); const ticks = [0]; @@ -65,7 +83,7 @@ export function spacedLogValues(min: number, max: number, steps: number) { } return ticks; } else { - const lowerTick = roundUpNice(min); + const lowerTick = roundNearestNice(min); const upperTick = roundUpNice(max); const r = Math.pow(upperTick / lowerTick, 1 / (steps - 1)); const ticks = []; diff --git a/src/ui/lib/components/Charts/MetricLine/MetricLine.component.tsx b/src/ui/lib/components/Charts/MetricLine/MetricLine.component.tsx index 06bb386e..8b1b4df2 100644 --- a/src/ui/lib/components/Charts/MetricLine/MetricLine.component.tsx +++ b/src/ui/lib/components/Charts/MetricLine/MetricLine.component.tsx @@ -59,8 +59,8 @@ export const Component: FC = ({ xScale={{ type: 'linear', min: minX }} yScale={{ type: yScaleType, - min: 'auto', - max: 'auto', + min: minY, + max: maxY, ...extraYScaleOptions, }} axisBottom={null} diff --git a/src/ui/lib/components/PageHeader/PageHeader.component.tsx 
b/src/ui/lib/components/PageHeader/PageHeader.component.tsx index 48af2ffc..a443fb7c 100644 --- a/src/ui/lib/components/PageHeader/PageHeader.component.tsx +++ b/src/ui/lib/components/PageHeader/PageHeader.component.tsx @@ -32,11 +32,11 @@ const Component = () => { variant="metric2" withTooltip /> - + />*/} {/**/} {/* { {/* key="Version"*/} {/* />*/} {/**/} - + {/* - - + */} + {/* { } /> - + */} { xLegend="request per sec" yLegend="ttft (ms)" minX={minX} - yScaleType={ScaleType.linear} /> @@ -95,7 +93,6 @@ export const Component = () => { xLegend="request per sec" yLegend="tpot (ms)" minX={minX} - yScaleType={ScaleType.linear} /> @@ -122,7 +119,6 @@ export const Component = () => { xLegend="request per sec" yLegend="latency (ms)" minX={minX} - yScaleType={ScaleType.linear} /> @@ -142,7 +138,6 @@ export const Component = () => { xLegend="request per sec" yLegend="throughput (tok/s)" minX={minX} - yScaleType={ScaleType.linear} /> diff --git a/src/ui/lib/store/benchmarksWindowData.ts b/src/ui/lib/store/benchmarksWindowData.ts index 7bcb209a..eb198179 100644 --- a/src/ui/lib/store/benchmarksWindowData.ts +++ b/src/ui/lib/store/benchmarksWindowData.ts @@ -1,1312 +1,1752 @@ -export const benchmarksScript = `window.benchmarks = [ - { - "requestsPerSecond": 0.6668550387660497, - "tpot": { - "total": 80, - "mean": 23.00635663936911, - "median": 22.959455611213805, - "min": 22.880917503720237, - "max": 24.14080301920573, - "std": 0.18918760384209338, - "percentiles": { - "p50": 22.959455611213805, - "p90": 23.01789086962503, - "p95": 23.30297423947242, - "p99": 24.14080301920573, +export const benchmarksScript = `window.benchmarks = [ + { + requestsPerSecond: 11.411616848282272, + tpot: { + mean: 8.758024845683707, + median: 8.788176945277623, + mode: 7.119315011160714, + variance: 0.3289263782653722, + stdDev: 0.5735210355909992, + min: 7.119315011160714, + max: 10.9208311353411, + count: 141, + totalSum: 1234.8815032414027, + percentiles: { + p001: 7.119315011160714, + 
p01: 7.164444242204938, + p05: 7.513999938964844, + p10: 8.169140134538923, + p25: 8.586951664515905, + p50: 8.788176945277623, + p75: 9.003571101597377, + p90: 9.308576583862305, + p95: 9.504761014665876, + p99: 10.393142700195312, + p999: 10.9208311353411, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 8.788176945277623, + }, + { + percentile: 'p90', + value: 9.308576583862305, + }, + { + percentile: 'p95', + value: 9.504761014665876, + }, + { + percentile: 'p99', + value: 10.393142700195312, + }, + ], + }, + ttft: { + mean: 24.680864726398006, + median: 23.874998092651367, + mode: 22.01700210571289, + variance: 6.960879030702799, + stdDev: 2.6383477842587015, + min: 22.01700210571289, + max: 38.763999938964844, + count: 141, + totalSum: 3480.001926422119, + percentiles: { + p001: 22.01700210571289, + p01: 22.218942642211914, + p05: 22.49598503112793, + p10: 22.723674774169922, + p25: 23.18596839904785, + p50: 23.874998092651367, + p75: 24.981975555419922, + p90: 27.83489227294922, + p95: 30.942916870117188, + p99: 34.74783897399902, + p999: 38.763999938964844, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 23.874998092651367, + }, + { + percentile: 'p90', + value: 27.83489227294922, + }, + { + percentile: 'p95', + value: 30.942916870117188, + }, + { + percentile: 'p99', + value: 34.74783897399902, + }, + ], + }, + throughput: { + mean: 91.21200133343346, + median: 111.06031880527459, + mode: 112.25822337606724, + variance: 1131.7836977561178, + stdDev: 33.64199307050814, + min: 0.0, + max: 132.49633560778366, + count: 388, + totalSum: 33659.178494722335, + percentiles: { + p001: 0.0, + p01: 29.332438178359627, + p05: 37.242645687749174, + p10: 39.52416132679985, + p25: 42.258712582994974, + p50: 111.06031880527459, + p75: 114.19908516663037, + p90: 116.31136130445634, + p95: 117.65228611500702, + p99: 122.08714888662495, + p999: 132.49633560778366, + }, + 
cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 111.06031880527459, + }, + { + percentile: 'p90', + value: 116.31136130445634, + }, + { + percentile: 'p95', + value: 117.65228611500702, + }, + { + percentile: 'p99', + value: 122.08714888662495, + }, + ], + }, + timePerRequest: { + mean: 0.0869351363351159, + median: 0.08605790138244629, + mode: 0.08000683784484863, + variance: 1.7069091574769884e-5, + stdDev: 0.004131475713927153, + min: 0.08000683784484863, + max: 0.10561299324035645, + count: 141, + totalSum: 12.257854223251343, + percentiles: { + p001: 0.08000683784484863, + p01: 0.08143115043640137, + p05: 0.08298420906066895, + p10: 0.08342385292053223, + p25: 0.08434486389160156, + p50: 0.08605790138244629, + p75: 0.08783197402954102, + p90: 0.09138989448547363, + p95: 0.09475302696228027, + p99: 0.10389590263366699, + p999: 0.10561299324035645, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 0.08605790138244629, + }, + { + percentile: 'p90', + value: 0.09138989448547363, + }, + { + percentile: 'p95', + value: 0.09475302696228027, + }, + { + percentile: 'p99', + value: 0.10389590263366699, + }, + ], }, - "percentileRows": [ - { - "percentile": "p50", - "value": 22.959455611213805 - }, - { - "percentile": "p90", - "value": 23.01789086962503 - }, - { - "percentile": "p95", - "value": 23.30297423947242 - }, - { - "percentile": "p99", - "value": 24.14080301920573 - } - ] - }, - "ttft": { - "total": 80, - "mean": 49.64659512042999, - "median": 49.23129081726074, - "min": 44.538259506225586, - "max": 55.47308921813965, - "std": 1.7735485090634995, - "percentiles": { - "p50": 49.23129081726074, - "p90": 50.16160011291504, - "p95": 54.918766021728516, - "p99": 55.47308921813965, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 49.23129081726074 - }, - { - "percentile": "p90", - "value": 50.16160011291504 - }, - { - "percentile": "p95", - "value": 
54.918766021728516 - }, - { - "percentile": "p99", - "value": 55.47308921813965 - } - ] - }, - "throughput": { - "total": 210, - "mean": 42.58702991319684, - "median": 43.536023084668, - "min": 0.0, - "max": 43.68247620237872, - "std": 4.559764488536857, - "percentiles": { - "p50": 43.536023084668, - "p90": 43.62613633999709, - "p95": 43.64020767654067, - "p99": 43.68202126662431, }, - "percentileRows": [ - { - "percentile": "p50", - "value": 43.536023084668 - }, - { - "percentile": "p90", - "value": 43.62613633999709 - }, - { - "percentile": "p95", - "value": 43.64020767654067 - }, - { - "percentile": "p99", - "value": 43.68202126662431 - } - ] - }, - "timePerRequest": { - "total": 80, - "mean": 1496.706646680832, - "median": 1496.1087703704834, - "min": 1490.584135055542, - "max": 1505.8784484863281, - "std": 3.4553340533022667, - "percentiles": { - "p50": 1496.1087703704834, - "p90": 1500.9305477142334, - "p95": 1505.3200721740723, - "p99": 1505.8784484863281, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 1496.1087703704834 - }, - { - "percentile": "p90", - "value": 1500.9305477142334 - }, - { - "percentile": "p95", - "value": 1505.3200721740723 - }, - { - "percentile": "p99", - "value": 1505.8784484863281 - } - ] - } - }, - { - "requestsPerSecond": 28.075330129628725, - "tpot": { - "total": 3416, - "mean": 126.08707076148656, - "median": 125.30853256346687, - "min": 23.034303907364134, - "max": 138.08223756693178, - "std": 3.508992115582193, - "percentiles": { - "p50": 125.30853256346687, - "p90": 129.21135009281218, - "p95": 129.52291770059554, - "p99": 132.21229490686636, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 125.30853256346687 - }, - { - "percentile": "p90", - "value": 129.21135009281218 - }, - { - "percentile": "p95", - "value": 129.52291770059554 - }, - { - "percentile": "p99", - "value": 132.21229490686636 - } - ] - }, - "ttft": { - "total": 3416, - "mean": 8585.486161415694, - "median": 8965.316534042358, - 
"min": 110.53991317749023, - "max": 12575.379610061646, - "std": 1929.5632525234505, - "percentiles": { - "p50": 8965.316534042358, - "p90": 9231.79316520691, - "p95": 9485.00108718872, - "p99": 12096.465587615967, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 8965.316534042358 - }, - { - "percentile": "p90", - "value": 9231.79316520691 - }, - { - "percentile": "p95", - "value": 9485.00108718872 - }, - { - "percentile": "p99", - "value": 12096.465587615967 - } - ] - }, - "throughput": { - "total": 15981, - "mean": 1795.4403743554367, - "median": 670.1236619268253, - "min": 0.0, - "max": 838860.8, - "std": 5196.545581836957, - "percentiles": { - "p50": 670.1236619268253, - "p90": 4068.1901066925316, - "p95": 6374.322188449848, - "p99": 16194.223938223939, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 670.1236619268253 - }, - { - "percentile": "p90", - "value": 4068.1901066925316 - }, - { - "percentile": "p95", - "value": 6374.322188449848 - }, - { - "percentile": "p99", - "value": 16194.223938223939 - } - ] - }, - "timePerRequest": { - "total": 3416, - "mean": 16526.811318389147, - "median": 17058.441638946533, - "min": 1711.3444805145264, - "max": 20646.55351638794, - "std": 2054.9553770234484, - "percentiles": { - "p50": 17058.441638946533, - "p90": 17143.84412765503, - "p95": 17248.060703277588, - "p99": 20116.52660369873, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 17058.441638946533 - }, - { - "percentile": "p90", - "value": 17143.84412765503 - }, - { - "percentile": "p95", - "value": 17248.060703277588 - }, - { - "percentile": "p99", - "value": 20116.52660369873 - } - ] - } - }, - { - "requestsPerSecond": 4.071681142252993, - "tpot": { - "total": 488, - "mean": 24.898151556004148, - "median": 24.889995181371294, - "min": 24.822999560643755, - "max": 26.217273871103924, - "std": 0.11227504505081555, - "percentiles": { - "p50": 24.889995181371294, - "p90": 24.90483389960395, - "p95": 24.965975019666885, 
- "p99": 25.306613214554325, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 24.889995181371294 - }, - { - "percentile": "p90", - "value": 24.90483389960395 - }, - { - "percentile": "p95", - "value": 24.965975019666885 - }, - { - "percentile": "p99", - "value": 25.306613214554325 - } - ] - }, - "ttft": { - "total": 488, - "mean": 58.341102033364976, - "median": 58.38632583618164, - "min": 44.857025146484375, - "max": 111.23061180114746, - "std": 8.190008649880411, - "percentiles": { - "p50": 58.38632583618164, - "p90": 67.66843795776367, - "p95": 68.76754760742188, - "p99": 71.46525382995605, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 58.38632583618164 - }, - { - "percentile": "p90", - "value": 67.66843795776367 - }, - { - "percentile": "p95", - "value": 68.76754760742188 - }, - { - "percentile": "p99", - "value": 71.46525382995605 - } - ] - }, - "throughput": { - "total": 11338, - "mean": 260.42072092623033, - "median": 47.630070406540995, - "min": 0.0, - "max": 838860.8, - "std": 886.8274389295076, - "percentiles": { - "p50": 47.630070406540995, - "p90": 604.8895298528987, - "p95": 1621.9273008507348, - "p99": 3054.846321922797, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 47.630070406540995 - }, - { - "percentile": "p90", - "value": 604.8895298528987 - }, - { - "percentile": "p95", - "value": 1621.9273008507348 - }, - { - "percentile": "p99", - "value": 3054.846321922797 - } - ] - }, - "timePerRequest": { - "total": 488, - "mean": 1626.5668087318297, - "median": 1626.236915588379, - "min": 1611.9341850280762, - "max": 1690.2406215667725, - "std": 8.871477705542668, - "percentiles": { - "p50": 1626.236915588379, - "p90": 1635.761022567749, - "p95": 1637.390375137329, - "p99": 1643.500804901123, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 1626.236915588379 - }, - { - "percentile": "p90", - "value": 1635.761022567749 - }, - { - "percentile": "p95", - "value": 1637.390375137329 - }, - { 
- "percentile": "p99", - "value": 1643.500804901123 - } - ] - } - }, - { - "requestsPerSecond": 7.466101414346809, - "tpot": { - "total": 895, - "mean": 27.56459906601014, - "median": 27.525402250744047, - "min": 26.69054911686824, - "max": 29.5785041082473, - "std": 0.18545649185329754, - "percentiles": { - "p50": 27.525402250744047, - "p90": 27.62497795952691, - "p95": 27.947206345815506, - "p99": 28.41202157442687, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 27.525402250744047 - }, - { - "percentile": "p90", - "value": 27.62497795952691 - }, - { - "percentile": "p95", - "value": 27.947206345815506 - }, - { - "percentile": "p99", - "value": 28.41202157442687 - } - ] - }, - "ttft": { - "total": 895, - "mean": 64.73036744741088, - "median": 62.484025955200195, - "min": 48.038482666015625, - "max": 256.4809322357178, - "std": 21.677914089867077, - "percentiles": { - "p50": 62.484025955200195, - "p90": 72.04723358154297, - "p95": 72.50738143920898, - "p99": 229.35032844543457, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 62.484025955200195 - }, - { - "percentile": "p90", - "value": 72.04723358154297 - }, - { - "percentile": "p95", - "value": 72.50738143920898 - }, - { - "percentile": "p99", - "value": 229.35032844543457 - } - ] - }, - "throughput": { - "total": 12465, - "mean": 477.5134940335642, - "median": 49.76925541382379, - "min": 0.0, - "max": 1677721.6, - "std": 2472.852317203968, - "percentiles": { - "p50": 49.76925541382379, - "p90": 1191.5636363636363, - "p95": 2501.075730471079, - "p99": 7025.634840871022, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 49.76925541382379 - }, - { - "percentile": "p90", - "value": 1191.5636363636363 - }, - { - "percentile": "p95", - "value": 2501.075730471079 - }, - { - "percentile": "p99", - "value": 7025.634840871022 - } - ] - }, - "timePerRequest": { - "total": 895, - "mean": 1800.9132816804852, - "median": 1797.5835800170898, - "min": 1756.2305927276611, - "max": 
1994.28129196167, - "std": 24.24935353039552, - "percentiles": { - "p50": 1797.5835800170898, - "p90": 1808.2549571990967, - "p95": 1813.141107559204, - "p99": 1967.8056240081787, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 1797.5835800170898 - }, - { - "percentile": "p90", - "value": 1808.2549571990967 - }, - { - "percentile": "p95", - "value": 1813.141107559204 - }, - { - "percentile": "p99", - "value": 1967.8056240081787 - } - ] - } - }, - { - "requestsPerSecond": 10.83989165148388, - "tpot": { - "total": 1300, - "mean": 31.6048062981453, - "median": 31.577579558841766, - "min": 30.171105355927438, - "max": 33.10690323511759, - "std": 0.15146862300990216, - "percentiles": { - "p50": 31.577579558841766, - "p90": 31.63230986822219, - "p95": 31.682415614052424, - "p99": 32.138043834317116, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 31.577579558841766 - }, - { - "percentile": "p90", - "value": 31.63230986822219 - }, - { - "percentile": "p95", - "value": 31.682415614052424 - }, - { - "percentile": "p99", - "value": 32.138043834317116 - } - ] - }, - "ttft": { - "total": 1300, - "mean": 66.61205951984113, - "median": 65.78803062438965, - "min": 51.81550979614258, - "max": 244.69709396362305, - "std": 14.858653160342651, - "percentiles": { - "p50": 65.78803062438965, - "p90": 76.70044898986816, - "p95": 77.78120040893555, - "p99": 88.29903602600098, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 65.78803062438965 - }, - { - "percentile": "p90", - "value": 76.70044898986816 - }, - { - "percentile": "p95", - "value": 77.78120040893555 - }, - { - "percentile": "p99", - "value": 88.29903602600098 - } - ] - }, - "throughput": { - "total": 12708, - "mean": 693.3695002980695, - "median": 55.59272071785492, - "min": 0.0, - "max": 838860.8, - "std": 2454.288991845712, - "percentiles": { - "p50": 55.59272071785492, - "p90": 1897.875113122172, - "p95": 2931.030048916841, - "p99": 7108.989830508474, - }, - 
"percentileRows": [ - { - "percentile": "p50", - "value": 55.59272071785492 - }, - { - "percentile": "p90", - "value": 1897.875113122172 - }, - { - "percentile": "p95", - "value": 2931.030048916841 - }, - { - "percentile": "p99", - "value": 7108.989830508474 - } - ] - }, - "timePerRequest": { - "total": 1300, - "mean": 2057.3723330864545, - "median": 2056.5311908721924, - "min": 2027.0307064056396, - "max": 2233.853578567505, - "std": 16.334707021033957, - "percentiles": { - "p50": 2056.5311908721924, - "p90": 2065.953254699707, - "p95": 2067.810297012329, - "p99": 2087.8031253814697, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 2056.5311908721924 - }, - { - "percentile": "p90", - "value": 2065.953254699707 - }, - { - "percentile": "p95", - "value": 2067.810297012329 - }, - { - "percentile": "p99", - "value": 2087.8031253814697 - } - ] - } - }, - { - "requestsPerSecond": 14.211845819540324, - "tpot": { - "total": 1704, - "mean": 35.695500394825224, - "median": 35.60370869106717, - "min": 34.798149078611345, - "max": 38.94662857055664, - "std": 0.24967658675392423, - "percentiles": { - "p50": 35.60370869106717, - "p90": 35.84100708128914, - "p95": 36.09923778041716, - "p99": 36.71476489207784, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 35.60370869106717 + { + requestsPerSecond: 36.289181300710815, + tpot: { + mean: 588.0161376137819, + median: 461.7137227739607, + mode: 323.1611592429025, + variance: 19560.214456505688, + stdDev: 139.85783659311224, + min: 323.1611592429025, + max: 988.006728036063, + count: 256, + totalSum: 150532.13122912816, + percentiles: { + p001: 323.1611592429025, + p01: 388.2312774658203, + p05: 394.99473571777344, + p10: 460.00800813947404, + p25: 460.8048711504255, + p50: 461.7137227739607, + p75: 726.0744231087821, + p90: 726.3181550162179, + p95: 726.4585835593087, + p99: 726.9112723214286, + p999: 988.006728036063, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + 
percentile: 'p50', + value: 461.7137227739607, + }, + { + percentile: 'p90', + value: 726.3181550162179, + }, + { + percentile: 'p95', + value: 726.4585835593087, + }, + { + percentile: 'p99', + value: 726.9112723214286, + }, + ], + }, + ttft: { + mean: 2801.9311213865876, + median: 3645.9569931030273, + mode: 120.23282051086426, + variance: 876438.9633642528, + stdDev: 936.1831889989548, + min: 120.23282051086426, + max: 4289.892911911011, + count: 256, + totalSum: 717294.3670749664, + percentiles: { + p001: 120.23282051086426, + p01: 1860.9709739685059, + p05: 1867.4709796905518, + p10: 1874.9330043792725, + p25: 1898.1659412384033, + p50: 3645.9569931030273, + p75: 3683.4659576416016, + p90: 3707.062005996704, + p95: 3714.3311500549316, + p99: 4193.973064422607, + p999: 4289.892911911011, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 3645.9569931030273, + }, + { + percentile: 'p90', + value: 3707.062005996704, + }, + { + percentile: 'p95', + value: 3714.3311500549316, + }, + { + percentile: 'p99', + value: 4193.973064422607, + }, + ], + }, + throughput: { + mean: 290.17169579123066, + median: 2.574805445619618, + mode: 1.0106780980195134, + variance: 32683153.434954323, + stdDev: 5716.91817633892, + min: 0.0, + max: 1677721.6, + count: 541, + totalSum: 26954108.473821327, + percentiles: { + p001: 0.0, + p01: 0.0, + p05: 1.0106780980195134, + p10: 1.0106780980195134, + p25: 1.1875075558681243, + p50: 2.574805445619618, + p75: 4.724922214536926, + p90: 7.90451942158339, + p95: 16.513072886113726, + p99: 1795.5068493150684, + p999: 62601.55223880597, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 2.574805445619618, + }, + { + percentile: 'p90', + value: 7.90451942158339, + }, + { + percentile: 'p95', + value: 16.513072886113726, + }, + { + percentile: 'p99', + value: 1795.5068493150684, + }, + ], + }, + timePerRequest: { + mean: 6.962754532694817, + median: 
6.958127975463867, + mode: 6.888041973114014, + variance: 0.002266230397153679, + stdDev: 0.04760494089013954, + min: 6.888041973114014, + max: 7.051568031311035, + count: 256, + totalSum: 1782.465160369873, + percentiles: { + p001: 6.888041973114014, + p01: 6.888926029205322, + p05: 6.892949104309082, + p10: 6.902682065963745, + p25: 6.92248797416687, + p50: 6.958127975463867, + p75: 6.9972498416900635, + p90: 7.035952091217041, + p95: 7.041622877120972, + p99: 7.051159858703613, + p999: 7.051568031311035, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 6.958127975463867, + }, + { + percentile: 'p90', + value: 7.035952091217041, + }, + { + percentile: 'p95', + value: 7.041622877120972, + }, + { + percentile: 'p99', + value: 7.051159858703613, + }, + ], }, - { - "percentile": "p90", - "value": 35.84100708128914 - }, - { - "percentile": "p95", - "value": 36.09923778041716 - }, - { - "percentile": "p99", - "value": 36.71476489207784 - } - ] - }, - "ttft": { - "total": 1704, - "mean": 74.19940031750102, - "median": 71.50626182556152, - "min": 53.643226623535156, - "max": 322.6609230041504, - "std": 23.98415146629138, - "percentiles": { - "p50": 71.50626182556152, - "p90": 83.71734619140625, - "p95": 98.2356071472168, - "p99": 113.44718933105469, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 71.50626182556152 - }, - { - "percentile": "p90", - "value": 83.71734619140625 - }, - { - "percentile": "p95", - "value": 98.2356071472168 - }, - { - "percentile": "p99", - "value": 113.44718933105469 - } - ] - }, - "throughput": { - "total": 15532, - "mean": 908.715763654939, - "median": 98.84067397195712, - "min": 0.0, - "max": 838860.8, - "std": 3628.67537220603, - "percentiles": { - "p50": 98.84067397195712, - "p90": 2205.2071503680336, - "p95": 3775.251125112511, - "p99": 10512.040100250626, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 98.84067397195712 - }, - { - "percentile": "p90", - 
"value": 2205.2071503680336 - }, - { - "percentile": "p95", - "value": 3775.251125112511 - }, - { - "percentile": "p99", - "value": 10512.040100250626 - } - ] - }, - "timePerRequest": { - "total": 1704, - "mean": 2321.92987861208, - "median": 2313.3785724639893, - "min": 2290.93074798584, - "max": 2594.4881439208984, - "std": 29.46118583560937, - "percentiles": { - "p50": 2313.3785724639893, - "p90": 2339.4439220428467, - "p95": 2341.9249057769775, - "p99": 2370.450496673584, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 2313.3785724639893 - }, - { - "percentile": "p90", - "value": 2339.4439220428467 - }, - { - "percentile": "p95", - "value": 2341.9249057769775 - }, - { - "percentile": "p99", - "value": 2370.450496673584 - } - ] - } - }, - { - "requestsPerSecond": 17.5623040970073, - "tpot": { - "total": 2106, - "mean": 39.546438065771135, - "median": 39.47442675393725, - "min": 38.74176740646362, - "max": 43.32651032341851, - "std": 0.3121106751660994, - "percentiles": { - "p50": 39.47442675393725, - "p90": 39.722594003828746, - "p95": 40.083578654697966, - "p99": 40.73049983040231, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 39.47442675393725 - }, - { - "percentile": "p90", - "value": 39.722594003828746 - }, - { - "percentile": "p95", - "value": 40.083578654697966 - }, - { - "percentile": "p99", - "value": 40.73049983040231 - } - ] - }, - "ttft": { - "total": 2106, - "mean": 85.68002797259905, - "median": 89.88213539123535, - "min": 57.360172271728516, - "max": 362.8504276275635, - "std": 27.802786177158218, - "percentiles": { - "p50": 89.88213539123535, - "p90": 101.7305850982666, - "p95": 103.26790809631348, - "p99": 138.88931274414062, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 89.88213539123535 - }, - { - "percentile": "p90", - "value": 101.7305850982666 - }, - { - "percentile": "p95", - "value": 103.26790809631348 - }, - { - "percentile": "p99", - "value": 138.88931274414062 - } - ] - }, - 
"throughput": { - "total": 15121, - "mean": 1123.0284569989917, - "median": 99.91909855397003, - "min": 0.0, - "max": 932067.5555555555, - "std": 4358.833642800455, - "percentiles": { - "p50": 99.91909855397003, - "p90": 2868.8809849521203, - "p95": 4848.906358381503, - "p99": 12905.55076923077, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 99.91909855397003 - }, - { - "percentile": "p90", - "value": 2868.8809849521203 - }, - { - "percentile": "p95", - "value": 4848.906358381503 - }, - { - "percentile": "p99", - "value": 12905.55076923077 - } - ] - }, - "timePerRequest": { - "total": 2106, - "mean": 2575.916517267653, - "median": 2573.6281871795654, - "min": 2533.904790878296, - "max": 2894.4458961486816, - "std": 33.18594265783404, - "percentiles": { - "p50": 2573.6281871795654, - "p90": 2588.9015197753906, - "p95": 2591.136932373047, - "p99": 2700.568437576294, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 2573.6281871795654 - }, - { - "percentile": "p90", - "value": 2588.9015197753906 - }, - { - "percentile": "p95", - "value": 2591.136932373047 - }, - { - "percentile": "p99", - "value": 2700.568437576294 - } - ] - } - }, - { - "requestsPerSecond": 20.885632360055222, - "tpot": { - "total": 2505, - "mean": 44.20494748431818, - "median": 44.02147020612444, - "min": 42.981475591659546, - "max": 52.62617986710345, - "std": 1.0422073399474652, - "percentiles": { - "p50": 44.02147020612444, - "p90": 44.47330747331892, - "p95": 45.131300316482296, - "p99": 50.400745301019576, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 44.02147020612444 - }, - { - "percentile": "p90", - "value": 44.47330747331892 - }, - { - "percentile": "p95", - "value": 45.131300316482296 - }, - { - "percentile": "p99", - "value": 50.400745301019576 - } - ] - }, - "ttft": { - "total": 2505, - "mean": 98.4621736103903, - "median": 95.84355354309082, - "min": 61.09285354614258, - "max": 524.099588394165, - "std": 34.20521833421915, - 
"percentiles": { - "p50": 95.84355354309082, - "p90": 109.4822883605957, - "p95": 111.46354675292969, - "p99": 334.31243896484375, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 95.84355354309082 - }, - { - "percentile": "p90", - "value": 109.4822883605957 - }, - { - "percentile": "p95", - "value": 111.46354675292969 - }, - { - "percentile": "p99", - "value": 334.31243896484375 - } - ] - }, - "throughput": { - "total": 14779, - "mean": 1335.7133120200747, - "median": 104.45284522475407, - "min": 0.0, - "max": 1677721.6, - "std": 5200.1934248077005, - "percentiles": { - "p50": 104.45284522475407, - "p90": 3472.1059602649007, - "p95": 5882.6143057503505, - "p99": 15768.060150375939, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 104.45284522475407 - }, - { - "percentile": "p90", - "value": 3472.1059602649007 - }, - { - "percentile": "p95", - "value": 5882.6143057503505 - }, - { - "percentile": "p99", - "value": 15768.060150375939 - } - ] - }, - "timePerRequest": { - "total": 2505, - "mean": 2882.6246785070603, - "median": 2869.71378326416, - "min": 2826.8485069274902, - "max": 3324.9876499176025, - "std": 78.07038363701177, - "percentiles": { - "p50": 2869.71378326416, - "p90": 2888.715982437134, - "p95": 2937.7262592315674, - "p99": 3282.898426055908, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 2869.71378326416 - }, - { - "percentile": "p90", - "value": 2888.715982437134 - }, - { - "percentile": "p95", - "value": 2937.7262592315674 - }, - { - "percentile": "p99", - "value": 3282.898426055908 - } - ] - } - }, - { - "requestsPerSecond": 24.179871480414207, - "tpot": { - "total": 2900, - "mean": 51.023722283946924, - "median": 50.24327550615583, - "min": 47.58137645143451, - "max": 60.63385087935651, - "std": 2.0749227872708285, - "percentiles": { - "p50": 50.24327550615583, - "p90": 52.928451507810564, - "p95": 57.28437408568367, - "p99": 58.51330454387362, - }, - "percentileRows": [ - { - "percentile": "p50", - 
"value": 50.24327550615583 - }, - { - "percentile": "p90", - "value": 52.928451507810564 - }, - { - "percentile": "p95", - "value": 57.28437408568367 - }, - { - "percentile": "p99", - "value": 58.51330454387362 - } - ] - }, - "ttft": { - "total": 2900, - "mean": 123.56691516678907, - "median": 115.33927917480469, - "min": 88.05131912231445, - "max": 594.1901206970215, - "std": 44.50765227271787, - "percentiles": { - "p50": 115.33927917480469, - "p90": 141.8297290802002, - "p95": 144.49095726013184, - "p99": 375.5221366882324, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 115.33927917480469 - }, - { - "percentile": "p90", - "value": 141.8297290802002 - }, - { - "percentile": "p95", - "value": 144.49095726013184 - }, - { - "percentile": "p99", - "value": 375.5221366882324 - } - ] - }, - "throughput": { - "total": 14925, - "mean": 1546.3194569459229, - "median": 138.59511614843208, - "min": 0.0, - "max": 1677721.6, - "std": 5844.302138842639, - "percentiles": { - "p50": 138.59511614843208, - "p90": 3916.250233426704, - "p95": 6678.828025477707, - "p99": 17924.37606837607, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 138.59511614843208 - }, - { - "percentile": "p90", - "value": 3916.250233426704 - }, - { - "percentile": "p95", - "value": 6678.828025477707 - }, - { - "percentile": "p99", - "value": 17924.37606837607 - } - ] - }, - "timePerRequest": { - "total": 2900, - "mean": 3336.9750574539444, - "median": 3282.672882080078, - "min": 3228.010654449463, - "max": 3863.8863563537598, - "std": 141.37106520368962, - "percentiles": { - "p50": 3282.672882080078, - "p90": 3561.7692470550537, - "p95": 3737.921953201294, - "p99": 3811.5434646606445, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 3282.672882080078 - }, - { - "percentile": "p90", - "value": 3561.7692470550537 - }, - { - "percentile": "p95", - "value": 3737.921953201294 - }, - { - "percentile": "p99", - "value": 3811.5434646606445 - } - ] - } - }, - { - 
"requestsPerSecond": 27.382251189847466, - "tpot": { - "total": 3285, - "mean": 62.44881585866599, - "median": 60.908238093058266, - "min": 58.94644298250713, - "max": 72.59870383699061, - "std": 2.9764436606898887, - "percentiles": { - "p50": 60.908238093058266, - "p90": 68.3861043718126, - "p95": 69.21934324597555, - "p99": 70.13290269034249, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 60.908238093058266 - }, - { - "percentile": "p90", - "value": 68.3861043718126 - }, - { - "percentile": "p95", - "value": 69.21934324597555 - }, - { - "percentile": "p99", - "value": 70.13290269034249 - } - ] - }, - "ttft": { - "total": 3285, - "mean": 142.7834399758953, - "median": 129.18686866760254, - "min": 92.2248363494873, - "max": 802.5562763214111, - "std": 54.896961282893, - "percentiles": { - "p50": 129.18686866760254, - "p90": 158.26964378356934, - "p95": 166.79859161376953, - "p99": 422.8503704071045, - }, - "percentileRows": [ - { - "percentile": "p50", - "value": 129.18686866760254 - }, - { - "percentile": "p90", - "value": 158.26964378356934 - }, - { - "percentile": "p95", - "value": 166.79859161376953 - }, - { - "percentile": "p99", - "value": 422.8503704071045 - } - ] - }, - "throughput": { - "total": 15706, - "mean": 1751.1720673421933, - "median": 318.5950626661603, - "min": 0.0, - "max": 1677721.6, - "std": 6434.120608249914, - "percentiles": { - "p50": 318.5950626661603, - "p90": 4165.147964250248, - "p95": 7194.346483704974, - "p99": 19878.218009478675, + }, + { + requestsPerSecond: 20.752070927855794, + tpot: { + mean: 116.28360712595156, + median: 26.769569941929408, + mode: 10.624987738473076, + variance: 12697.514443947253, + stdDev: 112.68324828450436, + min: 10.624987738473076, + max: 378.4891196659633, + count: 229, + totalSum: 26628.946031842912, + percentiles: { + p001: 10.624987738473076, + p01: 11.029584067208427, + p05: 12.239864894321986, + p10: 12.511866433279854, + p25: 13.14084870474679, + p50: 26.769569941929408, + p75: 
254.47828429085868, + p90: 254.67797688075476, + p95: 254.72869191850936, + p99: 340.54568835667203, + p999: 378.4891196659633, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 26.769569941929408, + }, + { + percentile: 'p90', + value: 254.67797688075476, + }, + { + percentile: 'p95', + value: 254.72869191850936, + }, + { + percentile: 'p99', + value: 340.54568835667203, + }, + ], + }, + ttft: { + mean: 350.13929725213853, + median: 48.3551025390625, + mode: 23.19192886352539, + variance: 171533.22641308085, + stdDev: 414.1656992232467, + min: 23.19192886352539, + max: 974.7560024261475, + count: 229, + totalSum: 80181.89907073975, + percentiles: { + p001: 23.19192886352539, + p01: 23.784875869750977, + p05: 25.762081146240234, + p10: 27.454376220703125, + p25: 30.905961990356445, + p50: 48.3551025390625, + p75: 924.9362945556641, + p90: 960.2079391479492, + p95: 966.1390781402588, + p99: 974.3149280548096, + p999: 974.7560024261475, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 48.3551025390625, + }, + { + percentile: 'p90', + value: 960.2079391479492, + }, + { + percentile: 'p95', + value: 966.1390781402588, + }, + { + percentile: 'p99', + value: 974.3149280548096, + }, + ], + }, + throughput: { + mean: 165.92594702578148, + median: 78.71010358804985, + mode: 2.640067325020095, + variance: 6402491.209634123, + stdDev: 2530.314448766027, + min: 0.0, + max: 1048576.0, + count: 1287, + totalSum: 19225083.512811106, + percentiles: { + p001: 0.0, + p01: 2.640067325020095, + p05: 2.6408868303973385, + p10: 8.261512885765386, + p25: 63.60692133877254, + p50: 78.71010358804985, + p75: 113.60827758064953, + p90: 186.8536552768744, + p95: 244.65142323845077, + p99: 554.5087255420412, + p999: 19878.218009478675, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 78.71010358804985, + }, + { + percentile: 'p90', + value: 
186.8536552768744, + }, + { + percentile: 'p95', + value: 244.65142323845077, + }, + { + percentile: 'p99', + value: 554.5087255420412, + }, + ], + }, + timePerRequest: { + mean: 1.1796869305023459, + median: 0.22142529487609863, + mode: 0.10968017578125, + variance: 1.3985689524884573, + stdDev: 1.1826110740596238, + min: 0.10968017578125, + max: 2.7613768577575684, + count: 229, + totalSum: 270.14830708503723, + percentiles: { + p001: 0.10968017578125, + p01: 0.11065411567687988, + p05: 0.11456704139709473, + p10: 0.11728477478027344, + p25: 0.12422728538513184, + p50: 0.22142529487609863, + p75: 2.7111761569976807, + p90: 2.748539924621582, + p95: 2.7520828247070312, + p99: 2.7607710361480713, + p999: 2.7613768577575684, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 0.22142529487609863, + }, + { + percentile: 'p90', + value: 2.748539924621582, + }, + { + percentile: 'p95', + value: 2.7520828247070312, + }, + { + percentile: 'p99', + value: 2.7607710361480713, + }, + ], }, - "percentileRows": [ - { - "percentile": "p50", - "value": 318.5950626661603 + }, + { + requestsPerSecond: 26.81917480361788, + tpot: { + mean: 299.7306064613554, + median: 372.7384294782366, + mode: 13.360295976911273, + variance: 43881.168589896006, + stdDev: 209.47832486893722, + min: 13.360295976911273, + max: 646.5137345450265, + count: 278, + totalSum: 83325.1085962568, + percentiles: { + p001: 13.360295976911273, + p01: 14.045170375279017, + p05: 17.420836857387, + p10: 18.262999398367747, + p25: 35.025290080479216, + p50: 372.7384294782366, + p75: 484.5614092690604, + p90: 484.67516899108887, + p95: 646.2434019361224, + p99: 646.4788573128836, + p999: 646.5137345450265, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 372.7384294782366, + }, + { + percentile: 'p90', + value: 484.67516899108887, + }, + { + percentile: 'p95', + value: 646.2434019361224, + }, + { + percentile: 'p99', + 
value: 646.4788573128836, + }, + ], + }, + ttft: { + mean: 603.4488720859555, + median: 323.30799102783203, + mode: 23.794889450073242, + variance: 385351.6075362678, + stdDev: 620.7669510663948, + min: 23.794889450073242, + max: 2183.549165725708, + count: 278, + totalSum: 167758.78643989563, + percentiles: { + p001: 23.794889450073242, + p01: 25.097131729125977, + p05: 28.53107452392578, + p10: 31.769990921020508, + p25: 42.52219200134277, + p50: 323.30799102783203, + p75: 1421.4229583740234, + p90: 1454.333782196045, + p95: 1465.749979019165, + p99: 1481.3649654388428, + p999: 2183.549165725708, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 323.30799102783203, + }, + { + percentile: 'p90', + value: 1454.333782196045, + }, + { + percentile: 'p95', + value: 1465.749979019165, + }, + { + percentile: 'p99', + value: 1481.3649654388428, + }, + ], + }, + throughput: { + mean: 214.45692657713144, + median: 59.739410340407346, + mode: 1.5484621717509202, + variance: 10330273.607661681, + stdDev: 3214.0743002708696, + min: 0.0, + max: 1677721.6, + count: 1408, + totalSum: 23048554.08119256, + percentiles: { + p001: 0.0, + p01: 0.0, + p05: 1.5484621717509202, + p10: 1.5484621717509202, + p25: 11.441965016422422, + p50: 59.739410340407346, + p75: 104.92580177115124, + p90: 204.92007035372288, + p95: 328.06445052796244, + p99: 1077.6731757451182, + p999: 27776.847682119205, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 59.739410340407346, + }, + { + percentile: 'p90', + value: 204.92007035372288, + }, + { + percentile: 'p95', + value: 328.06445052796244, + }, + { + percentile: 'p99', + value: 1077.6731757451182, + }, + ], + }, + timePerRequest: { + mean: 2.727286984594606, + median: 2.9081506729125977, + mode: 0.12541699409484863, + variance: 3.887494791582503, + stdDev: 1.9716730945018504, + min: 0.12541699409484863, + max: 4.927099943161011, + count: 278, + totalSum: 
758.1857817173004, + percentiles: { + p001: 0.12541699409484863, + p01: 0.12821292877197266, + p05: 0.15644598007202148, + p10: 0.1615278720855713, + p25: 0.2925078868865967, + p50: 2.9081506729125977, + p75: 4.833041191101074, + p90: 4.866931915283203, + p95: 4.889725923538208, + p99: 4.91511082649231, + p999: 4.927099943161011, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 2.9081506729125977, + }, + { + percentile: 'p90', + value: 4.866931915283203, + }, + { + percentile: 'p95', + value: 4.889725923538208, + }, + { + percentile: 'p99', + value: 4.91511082649231, + }, + ], }, - { - "percentile": "p90", - "value": 4165.147964250248 + }, + { + requestsPerSecond: 26.823988819498975, + tpot: { + mean: 683.8011571339198, + median: 742.2689029148647, + mode: 317.1694278717041, + variance: 28604.497606927893, + stdDev: 169.12864218377646, + min: 317.1694278717041, + max: 1093.381404876709, + count: 282, + totalSum: 192831.9263117654, + percentiles: { + p001: 317.1694278717041, + p01: 321.53899329049244, + p05: 339.3098626817976, + p10: 382.89002009800504, + p25: 576.0242598397391, + p50: 742.2689029148647, + p75: 835.7884543282645, + p90: 835.8725479670933, + p95: 835.9301771436419, + p99: 835.9494209289551, + p999: 1093.381404876709, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 742.2689029148647, + }, + { + percentile: 'p90', + value: 835.8725479670933, + }, + { + percentile: 'p95', + value: 835.9301771436419, + }, + { + percentile: 'p99', + value: 835.9494209289551, + }, + ], + }, + ttft: { + mean: 1175.8291366252492, + median: 1099.5359420776367, + mode: 31.527996063232422, + variance: 656345.7486101, + stdDev: 810.1516824706963, + min: 31.527996063232422, + max: 3620.6698417663574, + count: 282, + totalSum: 331583.8165283203, + percentiles: { + p001: 31.527996063232422, + p01: 45.86386680603027, + p05: 83.20093154907227, + p10: 158.12087059020996, + p25: 
363.6949062347412, + p50: 1099.5359420776367, + p75: 1955.5552005767822, + p90: 2000.9040832519531, + p95: 2015.0668621063232, + p99: 2435.93692779541, + p999: 3620.6698417663574, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 1099.5359420776367, + }, + { + percentile: 'p90', + value: 2000.9040832519531, + }, + { + percentile: 'p95', + value: 2015.0668621063232, + }, + { + percentile: 'p99', + value: 2435.93692779541, + }, + ], + }, + throughput: { + mean: 214.4967900282631, + median: 7.5584893487808, + mode: 0.9142165629474438, + variance: 14926330.042152546, + stdDev: 3863.4608891708153, + min: 0.0, + max: 838860.8, + count: 888, + totalSum: 21536027.789829955, + percentiles: { + p001: 0.0, + p01: 0.0, + p05: 0.9142165629474438, + p10: 0.9142165629474438, + p25: 3.153194765204003, + p50: 7.5584893487808, + p75: 23.973067975925787, + p90: 67.82180683343304, + p95: 126.88864014521252, + p99: 1646.116169544741, + p999: 39568.90566037736, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 7.5584893487808, + }, + { + percentile: 'p90', + value: 67.82180683343304, + }, + { + percentile: 'p95', + value: 126.88864014521252, + }, + { + percentile: 'p99', + value: 1646.116169544741, + }, + ], + }, + timePerRequest: { + mean: 5.992494348938584, + median: 6.383468866348267, + mode: 2.272388219833374, + variance: 3.362285611082358, + stdDev: 1.8336536235293617, + min: 2.272388219833374, + max: 7.87853217124939, + count: 282, + totalSum: 1689.8834064006805, + percentiles: { + p001: 2.272388219833374, + p01: 2.36775279045105, + p05: 2.9363210201263428, + p10: 3.3975088596343994, + p25: 4.363567113876343, + p50: 6.383468866348267, + p75: 7.805588245391846, + p90: 7.848597049713135, + p95: 7.86446475982666, + p99: 7.874046802520752, + p999: 7.87853217124939, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 6.383468866348267, + }, + 
{ + percentile: 'p90', + value: 7.848597049713135, + }, + { + percentile: 'p95', + value: 7.86446475982666, + }, + { + percentile: 'p99', + value: 7.874046802520752, + }, + ], }, - { - "percentile": "p95", - "value": 7194.346483704974 + }, + { + requestsPerSecond: 24.50047903792646, + tpot: { + mean: 742.9258901891964, + median: 773.0941431862967, + mode: 538.750410079956, + variance: 5888.534815943889, + stdDev: 76.73678919490891, + min: 538.750410079956, + max: 1112.7384049551827, + count: 256, + totalSum: 190189.02788843427, + percentiles: { + p001: 538.750410079956, + p01: 559.9275997706821, + p05: 622.4285534449986, + p10: 651.3757365090506, + p25: 691.4628573826382, + p50: 773.0941431862967, + p75: 803.8818495614188, + p90: 804.060867854527, + p95: 804.1924408503941, + p99: 804.2235374450684, + p999: 1112.7384049551827, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 773.0941431862967, + }, + { + percentile: 'p90', + value: 804.060867854527, + }, + { + percentile: 'p95', + value: 804.1924408503941, + }, + { + percentile: 'p99', + value: 804.2235374450684, + }, + ], + }, + ttft: { + mean: 1639.041354879737, + median: 2199.6800899505615, + mode: 40.383100509643555, + variance: 769961.5680314268, + stdDev: 877.4745398194906, + min: 40.383100509643555, + max: 3934.627056121826, + count: 256, + totalSum: 419594.58684921265, + percentiles: { + p001: 40.383100509643555, + p01: 46.3411808013916, + p05: 128.04388999938965, + p10: 259.2899799346924, + p25: 663.4221076965332, + p50: 2199.6800899505615, + p75: 2240.969181060791, + p90: 2276.355028152466, + p95: 2569.640874862671, + p99: 2960.084915161133, + p999: 3934.627056121826, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 2199.6800899505615, + }, + { + percentile: 'p90', + value: 2276.355028152466, + }, + { + percentile: 'p95', + value: 2569.640874862671, + }, + { + percentile: 'p99', + value: 2960.084915161133, + 
}, + ], + }, + throughput: { + mean: 195.90812730716976, + median: 5.651687770251424, + mode: 0.8974143971723011, + variance: 16305893.758862041, + stdDev: 4038.0556904111713, + min: 0.0, + max: 1048576.0, + count: 718, + totalSum: 25179910.958649173, + percentiles: { + p001: 0.0, + p01: 0.0, + p05: 0.8974143971723011, + p10: 0.8974143971723011, + p25: 1.3957095523970422, + p50: 5.651687770251424, + p75: 13.66877299553858, + p90: 32.61435580818488, + p95: 59.516467299533154, + p99: 990.1567516525024, + p999: 40136.88038277512, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 5.651687770251424, + }, + { + percentile: 'p90', + value: 32.61435580818488, + }, + { + percentile: 'p95', + value: 59.516467299533154, + }, + { + percentile: 'p99', + value: 990.1567516525024, + }, + ], + }, + timePerRequest: { + mean: 6.883434834890068, + median: 7.825762987136841, + mode: 4.416188955307007, + variance: 1.3703848800474456, + stdDev: 1.1706343921342162, + min: 4.416188955307007, + max: 7.9228410720825195, + count: 256, + totalSum: 1762.1593177318573, + percentiles: { + p001: 4.416188955307007, + p01: 4.504012584686279, + p05: 4.920926094055176, + p10: 5.141816139221191, + p25: 5.672410011291504, + p50: 7.825762987136841, + p75: 7.86903715133667, + p90: 7.897527694702148, + p95: 7.910752058029175, + p99: 7.920928716659546, + p999: 7.9228410720825195, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 7.825762987136841, + }, + { + percentile: 'p90', + value: 7.897527694702148, + }, + { + percentile: 'p95', + value: 7.910752058029175, + }, + { + percentile: 'p99', + value: 7.920928716659546, + }, + ], }, - { - "percentile": "p99", - "value": 19878.218009478675 - } - ] - }, - "timePerRequest": { - "total": 3285, - "mean": 4076.002237894764, - "median": 3972.564697265625, - "min": 3890.990972518921, - "max": 4623.138666152954, - "std": 197.81266460135544, - "percentiles": { - "p50": 
3972.564697265625, - "p90": 4444.445371627808, - "p95": 4506.659030914307, - "p99": 4553.745985031128, + }, + { + requestsPerSecond: 25.617829792196602, + tpot: { + mean: 663.3098317044122, + median: 613.7458937508719, + mode: 440.9824098859514, + variance: 10479.9469011006, + stdDev: 102.37161179301907, + min: 440.9824098859514, + max: 1060.6839997427803, + count: 256, + totalSum: 169807.31691632952, + percentiles: { + p001: 440.9824098859514, + p01: 440.9982817513602, + p05: 442.1650000980922, + p10: 534.6532889774868, + p25: 612.1257373264858, + p50: 613.7458937508719, + p75: 755.2382605416434, + p90: 755.9503146580288, + p95: 756.0351576123919, + p99: 786.0629899161203, + p999: 1060.6839997427803, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 613.7458937508719, + }, + { + percentile: 'p90', + value: 755.9503146580288, + }, + { + percentile: 'p95', + value: 756.0351576123919, + }, + { + percentile: 'p99', + value: 786.0629899161203, + }, + ], + }, + ttft: { + mean: 1987.0930286124349, + median: 2171.497106552124, + mode: 26.77607536315918, + variance: 755024.7838922634, + stdDev: 868.9216212595146, + min: 26.77607536315918, + max: 4371.719121932983, + count: 256, + totalSum: 508695.8153247833, + percentiles: { + p001: 26.77607536315918, + p01: 55.07302284240723, + p05: 291.4888858795166, + p10: 515.1617527008057, + p25: 1566.1020278930664, + p50: 2171.497106552124, + p75: 2225.597858428955, + p90: 3119.4918155670166, + p95: 3129.302978515625, + p99: 4363.926887512207, + p999: 4371.719121932983, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 2171.497106552124, + }, + { + percentile: 'p90', + value: 3119.4918155670166, + }, + { + percentile: 'p95', + value: 3129.302978515625, + }, + { + percentile: 'p99', + value: 4363.926887512207, + }, + ], + }, + throughput: { + mean: 204.84256868994706, + median: 1.3254838186715343, + mode: 0.9418142869653279, + variance: 
13137897.553754935, + stdDev: 3624.623780995061, + min: 0.0, + max: 1677721.6, + count: 808, + totalSum: 26909147.73473306, + percentiles: { + p001: 0.0, + p01: 0.0, + p05: 0.9418142869653279, + p10: 0.9418142869653279, + p25: 1.2725896353660826, + p50: 1.3254838186715343, + p75: 8.018996417141132, + p90: 20.67656874682652, + p95: 52.4720894738159, + p99: 2868.8809849521203, + p999: 36157.793103448275, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 1.3254838186715343, + }, + { + percentile: 'p90', + value: 20.67656874682652, + }, + { + percentile: 'p95', + value: 52.4720894738159, + }, + { + percentile: 'p99', + value: 2868.8809849521203, + }, + ], + }, + timePerRequest: { + mean: 6.655263062566519, + median: 7.432342052459717, + mode: 3.603327989578247, + variance: 1.427610769055824, + stdDev: 1.194826669042763, + min: 3.603327989578247, + max: 7.537046670913696, + count: 256, + totalSum: 1703.7473440170288, + percentiles: { + p001: 3.603327989578247, + p01: 3.6770501136779785, + p05: 4.052419900894165, + p10: 4.532166004180908, + p25: 5.912662982940674, + p50: 7.432342052459717, + p75: 7.480893135070801, + p90: 7.51776123046875, + p95: 7.526960849761963, + p99: 7.536363124847412, + p999: 7.537046670913696, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 7.432342052459717, + }, + { + percentile: 'p90', + value: 7.51776123046875, + }, + { + percentile: 'p95', + value: 7.526960849761963, + }, + { + percentile: 'p99', + value: 7.536363124847412, + }, + ], }, - "percentileRows": [ - { - "percentile": "p50", - "value": 3972.564697265625 + }, + { + requestsPerSecond: 37.02892550982192, + tpot: { + mean: 606.4144710877113, + median: 543.5235500335693, + mode: 331.6155501774379, + variance: 9907.596850846778, + stdDev: 99.53691200176334, + min: 331.6155501774379, + max: 970.1211452484131, + count: 256, + totalSum: 155242.10459845408, + percentiles: { + p001: 
331.6155501774379, + p01: 401.9838741847447, + p05: 471.85257502964566, + p10: 482.9780033656529, + p25: 542.1572753361294, + p50: 543.5235500335693, + p75: 707.3319980076382, + p90: 708.0604348863874, + p95: 708.2712990897043, + p99: 708.6352961403983, + p999: 970.1211452484131, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 543.5235500335693, + }, + { + percentile: 'p90', + value: 708.0604348863874, + }, + { + percentile: 'p95', + value: 708.2712990897043, + }, + { + percentile: 'p99', + value: 708.6352961403983, + }, + ], + }, + ttft: { + mean: 1941.031264141202, + median: 1882.4608325958252, + mode: 95.6277847290039, + variance: 475070.5414439769, + stdDev: 689.2536118468854, + min: 95.6277847290039, + max: 4049.8838424682617, + count: 256, + totalSum: 496904.0036201477, + percentiles: { + p001: 95.6277847290039, + p01: 381.1471462249756, + p05: 627.6748180389404, + p10: 1059.0367317199707, + p25: 1838.1130695343018, + p50: 1882.4608325958252, + p75: 2040.8010482788086, + p90: 2977.8239727020264, + p95: 2986.7701530456543, + p99: 3983.0429553985596, + p999: 4049.8838424682617, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 1882.4608325958252, + }, + { + percentile: 'p90', + value: 2977.8239727020264, + }, + { + percentile: 'p95', + value: 2986.7701530456543, + }, + { + percentile: 'p99', + value: 3983.0429553985596, + }, + ], + }, + throughput: { + mean: 296.0867598383026, + median: 2.8321597670693794, + mode: 1.0269062248433556, + variance: 26077653.461617615, + stdDev: 5106.628384914808, + min: 0.0, + max: 838860.8, + count: 659, + totalSum: 24515718.391140904, + percentiles: { + p001: 0.0, + p01: 0.0, + p05: 1.0269062248433556, + p10: 1.0269062248433556, + p25: 1.155029909085852, + p50: 2.8321597670693794, + p75: 6.789239657033897, + p90: 12.755506761996577, + p95: 21.059231700030626, + p99: 4969.554502369669, + p999: 56679.78378378379, + }, + 
cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 2.8321597670693794, + }, + { + percentile: 'p90', + value: 12.755506761996577, + }, + { + percentile: 'p95', + value: 21.059231700030626, + }, + { + percentile: 'p99', + value: 4969.554502369669, + }, + ], + }, + timePerRequest: { + mean: 6.242897774092853, + median: 6.808126211166382, + mode: 3.6642260551452637, + variance: 0.919577384180231, + stdDev: 0.958945975631699, + min: 3.6642260551452637, + max: 6.912218809127808, + count: 256, + totalSum: 1598.1818301677704, + percentiles: { + p001: 3.6642260551452637, + p01: 3.728823661804199, + p05: 4.065090894699097, + p10: 4.494028091430664, + p25: 5.758455991744995, + p50: 6.808126211166382, + p75: 6.852805137634277, + p90: 6.882004976272583, + p95: 6.897234916687012, + p99: 6.907586097717285, + p999: 6.912218809127808, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 6.808126211166382, + }, + { + percentile: 'p90', + value: 6.882004976272583, + }, + { + percentile: 'p95', + value: 6.897234916687012, + }, + { + percentile: 'p99', + value: 6.907586097717285, + }, + ], }, - { - "percentile": "p90", - "value": 4444.445371627808 + }, + { + requestsPerSecond: 37.29183354201869, + tpot: { + mean: 603.3237551205925, + median: 528.1183038439069, + mode: 400.96027510506764, + variance: 12393.495352536762, + stdDev: 111.32607669605878, + min: 400.96027510506764, + max: 963.4451525551932, + count: 256, + totalSum: 154450.8813108717, + percentiles: { + p001: 400.96027510506764, + p01: 409.7368376595633, + p05: 410.0832939147949, + p10: 477.33085496085033, + p25: 527.9027053288052, + p50: 528.1183038439069, + p75: 722.0331260136196, + p90: 722.1321378435407, + p95: 722.210134778704, + p99: 722.3572731018066, + p999: 963.4451525551932, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 528.1183038439069, + }, + { + percentile: 'p90', + value: 
722.1321378435407, + }, + { + percentile: 'p95', + value: 722.210134778704, + }, + { + percentile: 'p99', + value: 722.3572731018066, + }, + ], + }, + ttft: { + mean: 2091.1083230748773, + median: 1747.6298809051514, + mode: 90.54684638977051, + variance: 479250.36269232794, + stdDev: 692.2791075081841, + min: 90.54684638977051, + max: 3954.521894454956, + count: 256, + totalSum: 535323.7307071686, + percentiles: { + p001: 90.54684638977051, + p01: 905.7919979095459, + p05: 1236.3860607147217, + p10: 1478.6958694458008, + p25: 1703.301191329956, + p50: 1747.6298809051514, + p75: 2842.387914657593, + p90: 3039.8709774017334, + p95: 3047.684907913208, + p99: 3951.2219429016113, + p999: 3954.521894454956, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 1747.6298809051514, + }, + { + percentile: 'p90', + value: 3039.8709774017334, + }, + { + percentile: 'p95', + value: 3047.684907913208, + }, + { + percentile: 'p99', + value: 3951.2219429016113, + }, + ], + }, + throughput: { + mean: 298.188997111376, + median: 3.797001003045347, + mode: 1.0358592736273142, + variance: 19142664.16642712, + stdDev: 4375.2330413850095, + min: 0.0, + max: 1398101.3333333333, + count: 783, + totalSum: 26051541.418045178, + percentiles: { + p001: 0.0, + p01: 0.0, + p05: 1.0358592736273142, + p10: 1.0358592736273142, + p25: 1.3815590063114291, + p50: 3.797001003045347, + p75: 5.743851552603649, + p90: 11.806858966960643, + p95: 24.753772699641765, + p99: 7781.640074211503, + p999: 43240.24742268041, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 3.797001003045347, + }, + { + percentile: 'p90', + value: 11.806858966960643, + }, + { + percentile: 'p95', + value: 24.753772699641765, + }, + { + percentile: 'p99', + value: 7781.640074211503, + }, + ], + }, + timePerRequest: { + mean: 6.360964580439031, + median: 6.761261701583862, + mode: 4.238183259963989, + variance: 0.5511043357306581, + 
stdDev: 0.7423640183431967, + min: 4.238183259963989, + max: 6.863919734954834, + count: 256, + totalSum: 1628.406932592392, + percentiles: { + p001: 4.238183259963989, + p01: 4.295440912246704, + p05: 4.5983030796051025, + p10: 4.984205961227417, + p25: 6.1305251121521, + p50: 6.761261701583862, + p75: 6.79938006401062, + p90: 6.837599039077759, + p95: 6.842914819717407, + p99: 6.856215000152588, + p999: 6.863919734954834, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 6.761261701583862, + }, + { + percentile: 'p90', + value: 6.837599039077759, + }, + { + percentile: 'p95', + value: 6.842914819717407, + }, + { + percentile: 'p99', + value: 6.856215000152588, + }, + ], }, - { - "percentile": "p95", - "value": 4506.659030914307 + }, + { + requestsPerSecond: 37.45318312972309, + tpot: { + mean: 600.7204526769262, + median: 626.2100083487375, + mode: 398.7384523664202, + variance: 19496.451141682686, + stdDev: 139.62969290835917, + min: 398.7384523664202, + max: 876.9458702632359, + count: 256, + totalSum: 153784.43588529312, + percentiles: { + p001: 398.7384523664202, + p01: 398.79986218043734, + p05: 465.77743121555875, + p10: 465.8282824925014, + p25: 465.9903049468994, + p50: 626.2100083487375, + p75: 626.3504368918283, + p90: 876.4010156903948, + p95: 876.5457017081125, + p99: 876.6791820526123, + p999: 876.9458702632359, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 626.2100083487375, + }, + { + percentile: 'p90', + value: 876.4010156903948, + }, + { + percentile: 'p95', + value: 876.5457017081125, + }, + { + percentile: 'p99', + value: 876.6791820526123, + }, + ], + }, + ttft: { + mean: 2270.3185863792896, + median: 2333.8708877563477, + mode: 624.4189739227295, + variance: 689884.3929942232, + stdDev: 830.5927961367249, + min: 624.4189739227295, + max: 4022.5632190704346, + count: 256, + totalSum: 581201.5581130981, + percentiles: { + p001: 
624.4189739227295, + p01: 627.3941993713379, + p05: 636.2800598144531, + p10: 646.9879150390625, + p25: 2297.8010177612305, + p50: 2333.8708877563477, + p75: 2491.302967071533, + p90: 3417.022943496704, + p95: 3426.0239601135254, + p99: 3947.2179412841797, + p999: 4022.5632190704346, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 2333.8708877563477, + }, + { + percentile: 'p90', + value: 3417.022943496704, + }, + { + percentile: 'p95', + value: 3426.0239601135254, + }, + { + percentile: 'p99', + value: 3947.2179412841797, + }, + ], + }, + throughput: { + mean: 299.4791635411842, + median: 3.030949722688924, + mode: 1.1391714581369894, + variance: 22450634.582333777, + stdDev: 4738.210061018167, + min: 0.0, + max: 1258291.2, + count: 644, + totalSum: 24922318.936492577, + percentiles: { + p001: 0.0, + p01: 0.0, + p05: 0.0, + p10: 1.1391714581369894, + p25: 1.1490267949845356, + p50: 3.030949722688924, + p75: 5.887742339400797, + p90: 8.667779853522244, + p95: 16.13454481108487, + p99: 7358.428070175439, + p999: 49932.19047619047, + }, + cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 3.030949722688924, + }, + { + percentile: 'p90', + value: 8.667779853522244, + }, + { + percentile: 'p95', + value: 16.13454481108487, + }, + { + percentile: 'p99', + value: 7358.428070175439, + }, + ], + }, + timePerRequest: { + mean: 6.510815089568496, + median: 6.725250959396362, + mode: 4.9165239334106445, + variance: 0.2361784686553011, + stdDev: 0.48598196330244714, + min: 4.9165239334106445, + max: 6.835154294967651, + count: 256, + totalSum: 1666.768662929535, + percentiles: { + p001: 4.9165239334106445, + p01: 4.9701738357543945, + p05: 5.246534109115601, + p10: 5.63304591178894, + p25: 6.683944940567017, + p50: 6.725250959396362, + p75: 6.763767957687378, + p90: 6.793000221252441, + p95: 6.804349184036255, + p99: 6.820380926132202, + p999: 6.835154294967651, + }, + 
cumulativeDistributionFunction: null, + percentileRows: [ + { + percentile: 'p50', + value: 6.725250959396362, + }, + { + percentile: 'p90', + value: 6.793000221252441, + }, + { + percentile: 'p95', + value: 6.804349184036255, + }, + { + percentile: 'p99', + value: 6.820380926132202, + }, + ], }, - { - "percentile": "p99", - "value": 4553.745985031128 - } - ] - } - } -];`; + }, + ];`; diff --git a/tests/ui/unit/components/Charts/DashedLine/helpers.test.ts b/tests/ui/unit/components/Charts/DashedLine/helpers.test.ts index e8a75732..1e455159 100644 --- a/tests/ui/unit/components/Charts/DashedLine/helpers.test.ts +++ b/tests/ui/unit/components/Charts/DashedLine/helpers.test.ts @@ -1,4 +1,5 @@ import { + roundDownNice, roundNearestNice, roundUpNice, spacedLogValues, @@ -51,12 +52,57 @@ describe('roundUpNice', () => { expect(roundUpNice(1000)).toBe(1000); expect(roundUpNice(1200)).toBe(1200); }); + it("doesn't round down", () => { + expect(roundUpNice(1.3)).toBeGreaterThanOrEqual(1.5); + expect(roundUpNice(3)).toBeGreaterThanOrEqual(3); + expect(roundUpNice(3.3)).toBeGreaterThanOrEqual(3.5); + expect(roundUpNice(7.3)).toBeGreaterThanOrEqual(7.5); + expect(roundUpNice(11)).toBeGreaterThanOrEqual(11); + expect(roundUpNice(19)).toBeGreaterThanOrEqual(19); + }); +}); + +describe('roundDownNice', () => { + it('rounds down to a nearby nice number', () => { + expect([10]).toContain(roundDownNice(11)); + expect([20, 25]).toContain(roundDownNice(27)); + expect([40, 45, 48]).toContain(roundDownNice(49)); + expect([70, 75]).toContain(roundDownNice(79)); + expect([75, 80]).toContain(roundDownNice(81)); + expect([700, 750, 800]).toContain(roundDownNice(810)); + expect([1200, 1250, 1300]).toContain(roundDownNice(1342)); + }); + it("doesn't round some nice numbers", () => { + expect(roundDownNice(15)).toBe(15); + expect(roundDownNice(20)).toBe(20); + expect(roundDownNice(30)).toBe(30); + expect(roundDownNice(40)).toBe(40); + expect(roundDownNice(75)).toBe(75); + 
expect(roundDownNice(100)).toBe(100); + expect(roundDownNice(150)).toBe(150); + expect(roundDownNice(200)).toBe(200); + expect(roundDownNice(400)).toBe(400); + expect(roundDownNice(1000)).toBe(1000); + expect(roundDownNice(1200)).toBe(1200); + }); + it("doesn't round up", () => { + expect(roundDownNice(1.6)).toBeLessThanOrEqual(1.5); + expect(roundDownNice(3)).toBeLessThanOrEqual(3); + expect(roundDownNice(3.6)).toBeLessThanOrEqual(3.5); + expect(roundDownNice(7.6)).toBeLessThanOrEqual(7.5); + expect(roundDownNice(11)).toBeLessThanOrEqual(11); + expect(roundDownNice(19)).toBeLessThanOrEqual(19); + }); }); describe('spacedLogValues', () => { const checkValuesRoughlyLogSpaced = (values: number[]) => { + let i = 1; + if (values[0] === 0) { + i++; + } const valuesRatios = []; - for (let i = 1; i < values.length; i++) { + for (i; i < values.length; i++) { valuesRatios.push(values[i] / values[i - 1]); } const valuesRatiosAvg = valuesRatios.reduce((a, b) => a + b) / valuesRatios.length; @@ -72,6 +118,10 @@ describe('spacedLogValues', () => { checkValuesRoughlyLogSpaced(spacedLogValues(1, 122, 6)); checkValuesRoughlyLogSpaced(spacedLogValues(1, 122, 9)); }); + it('can handle ticks for small numbers', () => { + checkValuesRoughlyLogSpaced(spacedLogValues(0, 8, 6)); + }); + it('generates an array of nice round numbers', () => { for (const value of spacedLogValues(1, 1000, 4)) { expect([roundUpNice(value), roundNearestNice(value)]).toContain(value);