From a2d755f3267fb8e8e55199af08d45ef0e2c99262 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Mon, 10 Mar 2025 20:12:22 +0300 Subject: [PATCH 01/22] v0.0.1 - rearrange files --- .../docs => docs-superlinear}/README.md | 0 .../docs => docs-superlinear}/make_guide.md | 0 .../docs => docs-superlinear}/model_setup.md | 0 .../docs => docs-superlinear}/run_guide.md | 0 poetry.lock | 352 ++++++++++++++---- pyproject.toml | 4 +- .../run-llama}/config.yaml | 7 +- .../run-llama}/run.py | 22 +- .../run-llama}/run_all_models.sh | 0 .../run-llama}/sample_outputs/gemma_log.txt | 0 .../sample_outputs/gemma_output.txt | 0 .../run-llama}/sample_outputs/log.txt | 0 .../run-llama}/sample_outputs/output.txt | 0 .../run-llama}/sample_outputs/rng_values.png | Bin .../run-llama}/sample_outputs/rng_values.txt | 0 .../config.yaml | 0 .../log.txt | 0 .../output.txt | 0 .../rng_distribution.png | Bin .../rng_values.txt | 0 .../config.yaml | 0 .../log.txt | 0 .../output.txt | 0 .../rng_distribution.png | Bin .../rng_values.txt | 0 25 files changed, 302 insertions(+), 83 deletions(-) rename {dev-superlinear/docs => docs-superlinear}/README.md (100%) rename {dev-superlinear/docs => docs-superlinear}/make_guide.md (100%) rename {dev-superlinear/docs => docs-superlinear}/model_setup.md (100%) rename {dev-superlinear/docs => docs-superlinear}/run_guide.md (100%) rename {dev-superlinear => tools-superlinear/run-llama}/config.yaml (62%) rename {dev-superlinear => tools-superlinear/run-llama}/run.py (91%) rename {dev-superlinear => tools-superlinear/run-llama}/run_all_models.sh (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_outputs/gemma_log.txt (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_outputs/gemma_output.txt (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_outputs/log.txt (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_outputs/output.txt (100%) rename {dev-superlinear => 
tools-superlinear/run-llama}/sample_outputs/rng_values.png (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_outputs/rng_values.txt (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_runs/gemma-2-2b-it_normal_20250306_050002/config.yaml (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_runs/gemma-2-2b-it_normal_20250306_050002/log.txt (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_runs/gemma-2-2b-it_normal_20250306_050002/output.txt (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_distribution.png (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_values.txt (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_runs/gemma-2-2b_uniform_20250306_044755/config.yaml (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_runs/gemma-2-2b_uniform_20250306_044755/log.txt (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_runs/gemma-2-2b_uniform_20250306_044755/output.txt (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_distribution.png (100%) rename {dev-superlinear => tools-superlinear/run-llama}/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_values.txt (100%) diff --git a/dev-superlinear/docs/README.md b/docs-superlinear/README.md similarity index 100% rename from dev-superlinear/docs/README.md rename to docs-superlinear/README.md diff --git a/dev-superlinear/docs/make_guide.md b/docs-superlinear/make_guide.md similarity index 100% rename from dev-superlinear/docs/make_guide.md rename to docs-superlinear/make_guide.md diff --git a/dev-superlinear/docs/model_setup.md b/docs-superlinear/model_setup.md similarity index 100% rename from dev-superlinear/docs/model_setup.md rename to docs-superlinear/model_setup.md diff --git 
a/dev-superlinear/docs/run_guide.md b/docs-superlinear/run_guide.md similarity index 100% rename from dev-superlinear/docs/run_guide.md rename to docs-superlinear/run_guide.md diff --git a/poetry.lock b/poetry.lock index 76a337f00c4a0..f09d1d0512fcb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,37 @@ -# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.8.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +files = [ + {file = "anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a"}, + {file = "anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} + +[package.extras] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "atomicwrites" @@ -6,8 
+39,6 @@ version = "1.4.1" description = "Atomic file writes." optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -groups = ["dev"] -markers = "sys_platform == \"win32\"" files = [ {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"}, ] @@ -18,7 +49,6 @@ version = "25.1.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, @@ -38,7 +68,6 @@ version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, @@ -50,7 +79,6 @@ version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, @@ -146,18 +174,30 @@ files = [ {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] +[[package]] +name = "click" +version = "8.1.8" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -markers = {main = "platform_system == \"Windows\"", dev = "sys_platform == \"win32\""} [[package]] name = "contourpy" @@ -165,7 +205,6 @@ version = "1.3.1" description = "Python library for calculating contours of 2D quadrilateral grids" optional = false python-versions = ">=3.10" -groups = ["main"] files = [ {file = "contourpy-1.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a045f341a77b77e1c5de31e74e966537bba9f3c4099b35bf4c2e3939dd54cdab"}, {file = "contourpy-1.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:500360b77259914f7805af7462e41f9cb7ca92ad38e9f94d6c8641b089338124"}, @@ -239,7 +278,6 @@ version = "0.12.1" description = "Composable style cycles" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, @@ -249,13 +287,46 @@ files = [ docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] tests = ["pytest", "pytest-cov", "pytest-xdist"] +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + 
+[[package]] +name = "fastapi" +version = "0.115.11" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fastapi-0.115.11-py3-none-any.whl", hash = "sha256:32e1541b7b74602e4ef4a0260ecaf3aadf9d4f19590bba3e1bf2ac4666aa2c64"}, + {file = "fastapi-0.115.11.tar.gz", hash = "sha256:cc81f03f688678b92600a65a5e618b93592c65005db37157147204d8924bf94f"}, +] + +[package.dependencies] +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +starlette = ">=0.40.0,<0.47.0" +typing-extensions = ">=4.8.0" + +[package.extras] +all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] + [[package]] name = "filelock" version = "3.17.0" description = "A platform independent file lock." 
optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338"}, {file = "filelock-3.17.0.tar.gz", hash = "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e"}, @@ -272,7 +343,6 @@ version = "4.56.0" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "fonttools-4.56.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:331954d002dbf5e704c7f3756028e21db07097c19722569983ba4d74df014000"}, {file = "fonttools-4.56.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d1613abd5af2f93c05867b3a3759a56e8bf97eb79b1da76b2bc10892f96ff16"}, @@ -346,7 +416,6 @@ version = "2025.2.0" description = "File-system specification" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "fsspec-2025.2.0-py3-none-any.whl", hash = "sha256:9de2ad9ce1f85e1931858535bc882543171d197001a0a5eb2ddc04f1781ab95b"}, {file = "fsspec-2025.2.0.tar.gz", hash = "sha256:1c24b16eaa0a1798afa0337aa0db9b256718ab2a89c425371f5628d22c3b6afd"}, @@ -386,7 +455,6 @@ version = "0.15.0" description = "Read and write ML models in GGUF for GGML" optional = false python-versions = ">=3.8" -groups = ["main"] files = [] develop = false @@ -400,13 +468,23 @@ tqdm = ">=4.27" type = "directory" url = "gguf-py" +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + [[package]] name = "huggingface-hub" version = "0.29.2" description = "Client library to download and publish models, datasets and other repos on 
the huggingface.co hub" optional = false python-versions = ">=3.8.0" -groups = ["main"] files = [ {file = "huggingface_hub-0.29.2-py3-none-any.whl", hash = "sha256:c56f20fca09ef19da84dcde2b76379ecdaddf390b083f59f166715584953307d"}, {file = "huggingface_hub-0.29.2.tar.gz", hash = "sha256:590b29c0dcbd0ee4b7b023714dc1ad8563fe4a68a91463438b74e980d28afaf3"}, @@ -441,7 +519,6 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -456,7 +533,6 @@ version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, @@ -474,7 +550,6 @@ version = "1.4.8" description = "A fast implementation of the Cassowary constraint solver" optional = false python-versions = ">=3.10" -groups = ["main"] files = [ {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db"}, {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b"}, @@ -564,7 +639,6 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -635,7 +709,6 @@ version = "3.10.1" description = "Python plotting package" optional = false python-versions = ">=3.10" -groups = ["main"] files = [ {file = "matplotlib-3.10.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ff2ae14910be903f4a24afdbb6d7d3a6c44da210fc7d42790b87aeac92238a16"}, {file = "matplotlib-3.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0721a3fd3d5756ed593220a8b86808a36c5031fce489adb5b31ee6dbb47dd5b2"}, @@ -693,7 +766,6 @@ version = "10.6.0" description = "More routines for operating on iterables, beyond itertools" optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "more-itertools-10.6.0.tar.gz", hash = "sha256:2cd7fad1009c31cc9fb6a035108509e6547547a7a738374f10bd49a09eb3ee3b"}, {file = "more_itertools-10.6.0-py3-none-any.whl", hash = "sha256:6eb054cb4b6db1473f6e15fcc676a08e4732548acd47c708f0e179c2c7c01e89"}, @@ -705,7 +777,6 @@ version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, @@ -723,7 +794,6 @@ version = "3.4.2" description = "Python package for creating and manipulating graphs and networks" optional = false python-versions = ">=3.10" -groups = ["main"] files = [ {file = "networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f"}, 
{file = "networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1"}, @@ -743,7 +813,6 @@ version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -789,8 +858,6 @@ version = "12.4.5.8" description = "CUBLAS native runtime libraries" optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0f8aa1706812e00b9f19dfe0cdb3999b092ccb8ca168c0db5b8ea712456fd9b3"}, {file = "nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b"}, @@ -803,8 +870,6 @@ version = "12.4.127" description = "CUDA profiling tools runtime libs." 
optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:79279b35cf6f91da114182a5ce1864997fd52294a87a16179ce275773799458a"}, {file = "nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb"}, @@ -817,8 +882,6 @@ version = "12.4.127" description = "NVRTC native runtime libraries" optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0eedf14185e04b76aa05b1fea04133e59f465b6f960c0cbf4e37c3cb6b0ea198"}, {file = "nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338"}, @@ -831,8 +894,6 @@ version = "12.4.127" description = "CUDA Runtime native Libraries" optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:961fe0e2e716a2a1d967aab7caee97512f71767f852f67432d572e36cb3a11f3"}, {file = "nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5"}, @@ -845,8 +906,6 @@ version = "9.1.0.70" description = "cuDNN runtime libraries" optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f"}, {file = 
"nvidia_cudnn_cu12-9.1.0.70-py3-none-win_amd64.whl", hash = "sha256:6278562929433d68365a07a4a1546c237ba2849852c0d4b2262a486e805b977a"}, @@ -861,8 +920,6 @@ version = "11.2.1.3" description = "CUFFT native runtime libraries" optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399"}, {file = "nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9"}, @@ -878,8 +935,6 @@ version = "10.3.5.147" description = "CURAND native runtime libraries" optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1f173f09e3e3c76ab084aba0de819c49e56614feae5c12f69883f4ae9bb5fad9"}, {file = "nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b"}, @@ -892,8 +947,6 @@ version = "11.6.1.9" description = "CUDA solver native runtime libraries" optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e"}, {file = "nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260"}, @@ -911,8 +964,6 @@ version = "12.3.1.170" description = "CUSPARSE native runtime libraries" optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and 
platform_machine == \"x86_64\"" files = [ {file = "nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3"}, {file = "nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1"}, @@ -928,8 +979,6 @@ version = "0.6.2" description = "NVIDIA cuSPARSELt" optional = false python-versions = "*" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_cusparselt_cu12-0.6.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:067a7f6d03ea0d4841c85f0c6f1991c5dda98211f6302cb83a4ab234ee95bef8"}, {file = "nvidia_cusparselt_cu12-0.6.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:df2c24502fd76ebafe7457dbc4716b2fec071aabaed4fb7691a201cde03704d9"}, @@ -942,8 +991,6 @@ version = "2.21.5" description = "NVIDIA Collective Communication Library (NCCL) Runtime" optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:8579076d30a8c24988834445f8d633c697d42397e92ffc3f63fa26766d25e0a0"}, ] @@ -954,8 +1001,6 @@ version = "12.4.127" description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83"}, {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57"}, @@ -968,8 +1013,6 @@ version = "12.4.127" description = "NVIDIA Tools Extension" optional = false python-versions = ">=3" -groups = ["main"] 
-markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7959ad635db13edf4fc65c06a6e9f9e55fc2f92596db928d169c0bb031e88ef3"}, {file = "nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a"}, @@ -982,7 +1025,6 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -994,7 +1036,6 @@ version = "11.1.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "pillow-11.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8"}, {file = "pillow-11.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192"}, @@ -1083,7 +1124,6 @@ version = "0.13.1" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -groups = ["dev"] files = [ {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"}, {file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"}, @@ -1098,7 +1138,6 @@ version = "4.25.6" description = "" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "protobuf-4.25.6-cp310-abi3-win32.whl", hash = "sha256:61df6b5786e2b49fc0055f636c1e8f0aff263808bb724b95b164685ac1bcc13a"}, {file = 
"protobuf-4.25.6-cp310-abi3-win_amd64.whl", hash = "sha256:b8f837bfb77513fe0e2f263250f423217a173b6d85135be4d81e96a4653bcd3c"}, @@ -1119,19 +1158,149 @@ version = "1.11.0" description = "library with cross-python path, ini-parsing, io, code, log facilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -groups = ["dev"] files = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, ] +[[package]] +name = "pydantic" +version = "2.10.6" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, + {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.27.2" +typing-extensions = ">=4.12.2" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, 
+ {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = 
"sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = 
"sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = 
"sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + 
{file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + [[package]] name = "pyparsing" version = "3.2.1" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = 
"pyparsing-3.2.1-py3-none-any.whl", hash = "sha256:506ff4f4386c4cec0590ec19e6302d3aedb992fdc02c761e90416f158dacf8e1"}, {file = "pyparsing-3.2.1.tar.gz", hash = "sha256:61980854fd66de3a90028d679a954d5f2623e83144b5afe5ee86f43d762e5f0a"}, @@ -1146,7 +1315,6 @@ version = "5.4.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.5" -groups = ["dev"] files = [ {file = "pytest-5.4.3-py3-none-any.whl", hash = "sha256:5c0db86b698e8f170ba4582a492248919255fcd4c79b1ee64ace34301fb589a1"}, {file = "pytest-5.4.3.tar.gz", hash = "sha256:7979331bfcba207414f5e1263b5a0f8f521d0f457318836a7355531ed1a4c7d8"}, @@ -1172,7 +1340,6 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -1187,7 +1354,6 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -1250,7 +1416,6 @@ version = "2024.11.6" description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -1354,7 +1519,6 @@ version = "2.32.3" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -1376,7 +1540,6 @@ version = "0.5.3" description = "" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "safetensors-0.5.3-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd20eb133db8ed15b40110b7c00c6df51655a2998132193de2f75f72d99c7073"}, {file = "safetensors-0.5.3-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:21d01c14ff6c415c485616b8b0bf961c46b3b343ca59110d38d744e577f9cce7"}, @@ -1414,7 +1577,6 @@ version = "0.2.0" description = "SentencePiece python wrapper" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "sentencepiece-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:188779e1298a1c8b8253c7d3ad729cb0a9891e5cef5e5d07ce4592c54869e227"}, {file = "sentencepiece-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bed9cf85b296fa2b76fc2547b9cbb691a523864cebaee86304c43a7b4cb1b452"}, @@ -1477,8 +1639,6 @@ version = "75.8.2" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" -groups = ["main"] -markers = "python_version >= \"3.12\"" files = [ {file = "setuptools-75.8.2-py3-none-any.whl", hash = 
"sha256:558e47c15f1811c1fa7adbd0096669bf76c1d3f433f58324df69f3f5ecac4e8f"}, {file = "setuptools-75.8.2.tar.gz", hash = "sha256:4880473a969e5f23f2a2be3646b2dfd84af9028716d398e46192f84bc36900d2"}, @@ -1499,19 +1659,45 @@ version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "starlette" +version = "0.46.1" +description = "The little ASGI library that shines." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "starlette-0.46.1-py3-none-any.whl", hash = "sha256:77c74ed9d2720138b25875133f3a2dae6d854af2ec37dceb56aef370c1d8a227"}, + {file = "starlette-0.46.1.tar.gz", hash = "sha256:3c88d58ee4bd1bb807c0d1acb381838afc7752f9ddaec81bbe4383611d833230"}, +] + +[package.dependencies] +anyio = ">=3.6.2,<5" + +[package.extras] +full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] + [[package]] name = "sympy" version = "1.13.1" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8"}, {file = "sympy-1.13.1.tar.gz", hash = "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f"}, @@ -1529,7 +1715,6 @@ version = "0.21.0" description = "" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "tokenizers-0.21.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3c4c93eae637e7d2aaae3d376f06085164e1660f89304c0ab2b1d08a406636b2"}, {file = "tokenizers-0.21.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:f53ea537c925422a2e0e92a24cce96f6bc5046bbef24a1652a5edc8ba975f62e"}, @@ -1562,7 +1747,6 @@ version = "2.6.0" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = false python-versions = ">=3.9.0" -groups = ["main"] files = [ {file = "torch-2.6.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:6860df13d9911ac158f4c44031609700e1eba07916fff62e21e6ffa0a9e01961"}, {file = "torch-2.6.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c4f103a49830ce4c7561ef4434cc7926e5a5fe4e5eb100c19ab36ea1e2b634ab"}, @@ -1619,7 +1803,6 @@ version = "4.67.1" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = 
"tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, @@ -1641,7 +1824,6 @@ version = "4.49.0" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.9.0" -groups = ["main"] files = [ {file = "transformers-4.49.0-py3-none-any.whl", hash = "sha256:6b4fded1c5fee04d384b1014495b4235a2b53c87503d7d592423c06128cbbe03"}, {file = "transformers-4.49.0.tar.gz", hash = "sha256:7e40e640b5b8dc3f48743f5f5adbdce3660c82baafbd3afdfc04143cdbd2089e"}, @@ -1711,8 +1893,6 @@ version = "3.2.0" description = "A language and compiler for custom Deep Learning operations" optional = false python-versions = "*" -groups = ["main"] -markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ {file = "triton-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3e54983cd51875855da7c68ec05c05cf8bb08df361b1d5b69e05e40b0c9bd62"}, {file = "triton-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8009a1fb093ee8546495e96731336a33fb8856a38e45bb4ab6affd6dbc3ba220"}, @@ -1732,7 +1912,6 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -1744,7 +1923,6 @@ version = "2.3.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, @@ -1756,19 +1934,37 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "uvicorn" +version = "0.34.0" +description = "The lightning-fast ASGI server." +optional = false +python-versions = ">=3.9" +files = [ + {file = "uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4"}, + {file = "uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9"}, +] + +[package.dependencies] +click = ">=7.0" +h11 = ">=0.8" +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} + +[package.extras] +standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] + [[package]] name = "wcwidth" version = "0.2.13" description = "Measures the displayed width of unicode strings in a terminal" optional = false python-versions = "*" -groups = ["dev"] files = [ {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, ] [metadata] -lock-version = "2.1" +lock-version = "2.0" python-versions = ">=3.10" -content-hash = "bb05a8e71ad20277735c7cfbdc4015c58bbdf4ea4aac01a615ad02b5d7184218" +content-hash = "a16f250760ab50c444fef037a812b02b32e23329e02198b0694ac4c192209728" diff --git a/pyproject.toml b/pyproject.toml index 5ff72378ed729..2325a0b98e587 100644 --- a/pyproject.toml +++ b/pyproject.toml 
@@ -1,6 +1,6 @@ [tool.poetry] name = "llama-cpp-scripts" -version = "0.0.0" +version = "0.0.1" description = "Scripts that ship with llama.cpp" authors = ["GGML "] readme = "README.md" @@ -25,6 +25,8 @@ torch = ">=2.0.0" huggingface-hub = "^0.29.1" matplotlib = "^3.10.1" pyyaml = "^6.0.2" +fastapi = "^0.115.11" +uvicorn = "^0.34.0" [tool.poetry.group.dev.dependencies] pytest = "^5.2" diff --git a/dev-superlinear/config.yaml b/tools-superlinear/run-llama/config.yaml similarity index 62% rename from dev-superlinear/config.yaml rename to tools-superlinear/run-llama/config.yaml index 35ebdcb28c56c..bd711d14b6280 100644 --- a/dev-superlinear/config.yaml +++ b/tools-superlinear/run-llama/config.yaml @@ -1,8 +1,11 @@ # RNG Provider Configuration -# rng_provider: uniform # Options: uniform, normal +# Options: uniform, normal, external-api rng_provider: normal -# # Run Directory (where outputs will be saved) +# API URL (required when rng_provider is external-api) +# api_url: http://localhost:8000/random + +# Run Directory (where outputs will be saved) # run_dir: runs/default # Model Configuration diff --git a/dev-superlinear/run.py b/tools-superlinear/run-llama/run.py similarity index 91% rename from dev-superlinear/run.py rename to tools-superlinear/run-llama/run.py index 50ff77b9be552..33ce5d5ff6287 100755 --- a/dev-superlinear/run.py +++ b/tools-superlinear/run-llama/run.py @@ -176,7 +176,17 @@ def run_model(config, config_path): # Set environment variable for RNG provider env = os.environ.copy() - env["LLAMA_RNG_PROVIDER"] = config['rng_provider'] + + # Set different environment variables based on the RNG provider + if config['rng_provider'] == 'external-api': + # When using external API, we need to set the API URL environment variable + env["LLAMA_RNG_PROVIDER"] = "external-api" + env["LLAMA_RNG_API_URL"] = config['api_url'] + logger.info(f"Using external API RNG provider with URL: {config['api_url']}") + else: + # For built-in providers (uniform, normal), use the 
regular environment variable + env["LLAMA_RNG_PROVIDER"] = config['rng_provider'] + env["LLAMA_RNG_OUTPUT"] = rng_file # Save RNG values directly to run dir # Build the command @@ -246,6 +256,7 @@ def main(): parser.add_argument("-r", "--rng", help="Override RNG provider from config") parser.add_argument("-d", "--dir", help="Override run directory from config") parser.add_argument("-n", "--num-tokens", type=int, help="Override number of tokens to generate") + parser.add_argument("-a", "--api-url", help="API URL for external-api RNG provider") parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging") args = parser.parse_args() @@ -281,13 +292,20 @@ def main(): config['run_dir'] = args.dir if args.num_tokens: config['num_tokens'] = args.num_tokens + if args.api_url: + config['api_url'] = args.api_url # Validate RNG provider - valid_providers = ['uniform', 'normal'] + valid_providers = ['uniform', 'normal', 'external-api'] if config['rng_provider'] not in valid_providers: logger.error(f"Error: Invalid RNG provider '{config['rng_provider']}'") logger.info(f"Valid providers: {', '.join(valid_providers)}") return 1 + + # Check if external API URL is provided when using external-api provider + if config['rng_provider'] == 'external-api' and ('api_url' not in config or not config['api_url']): + logger.error(f"Error: api_url must be specified when using external-api RNG provider") + return 1 # Run the model success = run_model(config, args.config) diff --git a/dev-superlinear/run_all_models.sh b/tools-superlinear/run-llama/run_all_models.sh similarity index 100% rename from dev-superlinear/run_all_models.sh rename to tools-superlinear/run-llama/run_all_models.sh diff --git a/dev-superlinear/sample_outputs/gemma_log.txt b/tools-superlinear/run-llama/sample_outputs/gemma_log.txt similarity index 100% rename from dev-superlinear/sample_outputs/gemma_log.txt rename to tools-superlinear/run-llama/sample_outputs/gemma_log.txt diff --git 
a/dev-superlinear/sample_outputs/gemma_output.txt b/tools-superlinear/run-llama/sample_outputs/gemma_output.txt similarity index 100% rename from dev-superlinear/sample_outputs/gemma_output.txt rename to tools-superlinear/run-llama/sample_outputs/gemma_output.txt diff --git a/dev-superlinear/sample_outputs/log.txt b/tools-superlinear/run-llama/sample_outputs/log.txt similarity index 100% rename from dev-superlinear/sample_outputs/log.txt rename to tools-superlinear/run-llama/sample_outputs/log.txt diff --git a/dev-superlinear/sample_outputs/output.txt b/tools-superlinear/run-llama/sample_outputs/output.txt similarity index 100% rename from dev-superlinear/sample_outputs/output.txt rename to tools-superlinear/run-llama/sample_outputs/output.txt diff --git a/dev-superlinear/sample_outputs/rng_values.png b/tools-superlinear/run-llama/sample_outputs/rng_values.png similarity index 100% rename from dev-superlinear/sample_outputs/rng_values.png rename to tools-superlinear/run-llama/sample_outputs/rng_values.png diff --git a/dev-superlinear/sample_outputs/rng_values.txt b/tools-superlinear/run-llama/sample_outputs/rng_values.txt similarity index 100% rename from dev-superlinear/sample_outputs/rng_values.txt rename to tools-superlinear/run-llama/sample_outputs/rng_values.txt diff --git a/dev-superlinear/sample_runs/gemma-2-2b-it_normal_20250306_050002/config.yaml b/tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/config.yaml similarity index 100% rename from dev-superlinear/sample_runs/gemma-2-2b-it_normal_20250306_050002/config.yaml rename to tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/config.yaml diff --git a/dev-superlinear/sample_runs/gemma-2-2b-it_normal_20250306_050002/log.txt b/tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/log.txt similarity index 100% rename from dev-superlinear/sample_runs/gemma-2-2b-it_normal_20250306_050002/log.txt rename to 
tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/log.txt diff --git a/dev-superlinear/sample_runs/gemma-2-2b-it_normal_20250306_050002/output.txt b/tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/output.txt similarity index 100% rename from dev-superlinear/sample_runs/gemma-2-2b-it_normal_20250306_050002/output.txt rename to tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/output.txt diff --git a/dev-superlinear/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_distribution.png b/tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_distribution.png similarity index 100% rename from dev-superlinear/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_distribution.png rename to tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_distribution.png diff --git a/dev-superlinear/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_values.txt b/tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_values.txt similarity index 100% rename from dev-superlinear/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_values.txt rename to tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_values.txt diff --git a/dev-superlinear/sample_runs/gemma-2-2b_uniform_20250306_044755/config.yaml b/tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/config.yaml similarity index 100% rename from dev-superlinear/sample_runs/gemma-2-2b_uniform_20250306_044755/config.yaml rename to tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/config.yaml diff --git a/dev-superlinear/sample_runs/gemma-2-2b_uniform_20250306_044755/log.txt b/tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/log.txt similarity index 100% rename from dev-superlinear/sample_runs/gemma-2-2b_uniform_20250306_044755/log.txt rename to 
tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/log.txt diff --git a/dev-superlinear/sample_runs/gemma-2-2b_uniform_20250306_044755/output.txt b/tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/output.txt similarity index 100% rename from dev-superlinear/sample_runs/gemma-2-2b_uniform_20250306_044755/output.txt rename to tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/output.txt diff --git a/dev-superlinear/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_distribution.png b/tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_distribution.png similarity index 100% rename from dev-superlinear/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_distribution.png rename to tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_distribution.png diff --git a/dev-superlinear/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_values.txt b/tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_values.txt similarity index 100% rename from dev-superlinear/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_values.txt rename to tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_values.txt From a3f278defa46cf72a8a014722f57d304a3c9b581 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Mon, 10 Mar 2025 20:13:20 +0300 Subject: [PATCH 02/22] v0.0.2 - add basic python rng provider service To test the new api rng provider for llama.cpp --- pyproject.toml | 2 +- tools-superlinear/rng-provider/README.md | 99 +++++++++++++ tools-superlinear/rng-provider/rng_service.py | 140 ++++++++++++++++++ .../rng-provider/rng_service_requirements.txt | 2 + 4 files changed, 242 insertions(+), 1 deletion(-) create mode 100644 tools-superlinear/rng-provider/README.md create mode 100644 tools-superlinear/rng-provider/rng_service.py create mode 100644 tools-superlinear/rng-provider/rng_service_requirements.txt diff --git 
a/pyproject.toml b/pyproject.toml index 2325a0b98e587..cb677a436da9b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llama-cpp-scripts" -version = "0.0.1" +version = "0.0.2" description = "Scripts that ship with llama.cpp" authors = ["GGML "] readme = "README.md" diff --git a/tools-superlinear/rng-provider/README.md b/tools-superlinear/rng-provider/README.md new file mode 100644 index 0000000000000..9d9adb4bb357f --- /dev/null +++ b/tools-superlinear/rng-provider/README.md @@ -0,0 +1,99 @@ +# External API RNG Provider + +This directory contains a Python service that provides random numbers to llama.cpp via an HTTP API. + +## Overview + +The external API RNG provider allows: + +1. Serving random numbers from a file over HTTP +2. Controlling randomness in LLM text generation +3. Observing how specific random sequences affect output + +## Setup and Usage + +### 1. Install Requirements + +```bash +pip install -r rng_service_requirements.txt +``` + +### 2. Generate or Prepare Random Numbers + +You can either generate random numbers on the fly or prepare a file with specific numbers: + +```bash +# Generate 10,000 random numbers and save to a file +python rng_service.py --file random_numbers.txt --generate 10000 +``` + +Or create a custom file with specific random values (one value per line): + +```bash +# Example: Create an evenly distributed sequence +python -c "import numpy as np; np.savetxt('even_distribution.txt', np.linspace(0, 1, 1000))" +``` + +### 3. Start the RNG Service + +```bash +python rng_service.py --file random_numbers.txt --host 127.0.0.1 --port 8000 +``` + +### 4. 
Run llama.cpp with the External RNG Provider + +Use the `run.py` script from the `run-llama` directory: + +```bash +cd ../run-llama +python run.py --rng external-api --api-url http://localhost:8000/random --model gemma-2-2b-it --prompt "Tell me about AI" +``` + +Or modify the `config.yaml` file: + +```yaml +rng_provider: external-api +api_url: http://localhost:8000/random +``` + +## API Endpoints + +The RNG service provides the following endpoints: + +- **GET /** - Root endpoint showing service status +- **GET /random** - Returns a random number between 0 and 1 in JSON format: `{"random": 0.123}` +- **GET /status** - Shows current service status: total numbers loaded, current index, and remaining numbers + +## Advanced Usage + +### Creating Predictable Sequences + +For deterministic testing, you can create fixed sequences: + +```python +# Ascending values from 0 to 1 +values = [i/1000 for i in range(1001)] + +# Save to file +with open("predictable_seq.txt", "w") as f: + for v in values: + f.write(f"{v}\n") +``` + +### Repeating Sequences + +To see how a specific random sequence affects generation: + +1. Create a file with a small number of random values +2. The service will automatically wrap around to the beginning when all values have been used + +### Visualizing RNG Distribution + +The random values used during generation are saved to `rng_values.txt` in the run directory. +You can visualize the distribution using the tools in the repository. 
+ +## Troubleshooting + +- **No numbers available**: Make sure the file exists and contains valid numbers +- **Connection refused**: Check that the host/port settings are correct +- **llama.cpp not finding the service**: Verify the API URL is correctly specified \ No newline at end of file diff --git a/tools-superlinear/rng-provider/rng_service.py b/tools-superlinear/rng-provider/rng_service.py new file mode 100644 index 0000000000000..61d8fbda16e36 --- /dev/null +++ b/tools-superlinear/rng-provider/rng_service.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +""" +Random Number Generator Service for llama.cpp + +This service provides random numbers from a file via a REST API. +It's intended to be used with the llama-rng-provider-api in llama.cpp. +""" + +import argparse +import os +import logging +from typing import List, Dict, Any +import uvicorn +from fastapi import FastAPI, HTTPException + +# Setup logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger("rng_service") + +app = FastAPI(title="RNG Service") + +# Global variables +random_numbers: List[float] = [] +current_index: int = 0 + + +@app.get("/") +async def root() -> Dict[str, str]: + """Root endpoint""" + return {"message": "RNG Service is running"} + + +@app.get("/random") +async def get_random() -> Dict[str, float]: + """Get a random number between 0 and 1""" + global current_index + + if not random_numbers: + raise HTTPException(status_code=500, detail="No random numbers available") + + if current_index >= len(random_numbers): + logger.warning("Reached end of random number list, wrapping around") + current_index = 0 + + value = random_numbers[current_index] + current_index += 1 + + logger.info(f"Serving random number: {value} (index: {current_index-1})") + + return {"random": value} + + +@app.get("/status") +async def status() -> Dict[str, Any]: + """Get service status""" + return { + "total_numbers": 
len(random_numbers), + "current_index": current_index, + "remaining": len(random_numbers) - current_index if random_numbers else 0 + } + + +def load_random_numbers(file_path: str) -> List[float]: + """Load random numbers from a file, one per line""" + if not os.path.exists(file_path): + logger.error(f"File not found: {file_path}") + return [] + + numbers = [] + with open(file_path, "r") as f: + for line in f: + line = line.strip() + if not line: + continue + + try: + value = float(line) + if 0.0 <= value <= 1.0: + numbers.append(value) + else: + logger.warning(f"Skipping out-of-range value: {value}") + except ValueError: + logger.warning(f"Skipping invalid line: {line}") + + logger.info(f"Loaded {len(numbers)} random numbers from {file_path}") + return numbers + + +def generate_random_numbers(count: int) -> List[float]: + """Generate random numbers if no file is provided""" + import random + logger.info(f"Generating {count} random numbers") + return [random.random() for _ in range(count)] + + +def save_random_numbers(file_path: str, numbers: List[float]) -> None: + """Save random numbers to a file""" + with open(file_path, "w") as f: + for num in numbers: + f.write(f"{num}\n") + logger.info(f"Saved {len(numbers)} random numbers to {file_path}") + + +def main(): + """Main entry point""" + parser = argparse.ArgumentParser(description="RNG Service") + parser.add_argument("--host", type=str, default="127.0.0.1", help="Host to bind to") + parser.add_argument("--port", type=int, default=8000, help="Port to bind to") + parser.add_argument("--file", type=str, required=True, help="Path to random numbers file") + parser.add_argument("--generate", type=int, default=0, + help="Generate N random numbers if file doesn't exist") + args = parser.parse_args() + + global random_numbers + global current_index + + # If file doesn't exist and --generate is specified, generate random numbers + if not os.path.exists(args.file) and args.generate > 0: + random_numbers = 
generate_random_numbers(args.generate) + save_random_numbers(args.file, random_numbers) + else: + # Load random numbers from file + random_numbers = load_random_numbers(args.file) + + if not random_numbers: + logger.error("No random numbers available. Exiting.") + return + + current_index = 0 + + # Start the server + logger.info(f"Starting server on {args.host}:{args.port}") + uvicorn.run(app, host=args.host, port=args.port) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/tools-superlinear/rng-provider/rng_service_requirements.txt b/tools-superlinear/rng-provider/rng_service_requirements.txt new file mode 100644 index 0000000000000..9201e973e7779 --- /dev/null +++ b/tools-superlinear/rng-provider/rng_service_requirements.txt @@ -0,0 +1,2 @@ +fastapi>=0.103.1 +uvicorn>=0.23.2 \ No newline at end of file From 1932547a92a8cc5387e92852f61f9a26277a5921 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Mon, 10 Mar 2025 20:29:21 +0300 Subject: [PATCH 03/22] v0.0.3 - Add external API RNG provider - Implement ExternalAPIRNGProvider class using libcurl for HTTP requests - Add support in create_rng_provider for external-api provider type - Update run.py to validate required api_url parameter - Enhance config.yaml with better documentation for RNG providers - Update external-api-rng.md with proper implementation details The external API provider fetches random numbers from a REST API that returns JSON with a "random" field (value between 0-1). This enables reproducible token generation by controlling the random number sequence through an external service. 
--- CLAUDE.md | 36 ++++++ docs-superlinear/external-api-rng.md | 145 ++++++++++++++++++++++++ include/llama.h | 1 + pyproject.toml | 2 +- src/CMakeLists.txt | 13 ++- src/llama-rng-provider.h | 82 ++++++++++++++ tools-superlinear/run-llama/config.yaml | 9 +- tools-superlinear/run-llama/run.py | 4 + 8 files changed, 287 insertions(+), 5 deletions(-) create mode 100644 CLAUDE.md create mode 100644 docs-superlinear/external-api-rng.md diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000000000..7ca9f1e117ee8 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,36 @@ +# CLAUDE.md - llama.cpp Reference Guide + +## Build Commands +```bash +# Standard build +cmake -B build && cmake --build build --config Release -j 8 + +# With CUDA +cmake -B build -DGGML_CUDA=ON && cmake --build build --config Release -j 8 + +# With Metal (macOS) +cmake -B build && cmake --build build --config Release -j 8 +``` + +## Test Commands +```bash +# Run all tests +cd build && ctest + +# Run specific test +cd build && ctest -R test-tokenizer-0 + +# Run single test with args +./build/bin/test-tokenizer-0 models/ggml-vocab-llama-spm.gguf +``` + +## Code Style Guidelines +- 4 spaces for indentation, brackets on same line +- Use `snake_case` for functions, variables, types +- Optimize for longest common prefix (`number_small` vs `small_number`) +- Type declarations: prefer `void * ptr` and `int & a` (right-side pointers/references) +- Use sized integer types (`int32_t`) in public API +- Enums are UPPERCASE with prefix (`LLAMA_VOCAB_TYPE_NONE`) +- C/C++ files: lowercase with dashes (.h, .c, .cpp) +- Python files: lowercase with underscores +- Follow existing patterns, keep code simple (avoid templates, fancy STL) \ No newline at end of file diff --git a/docs-superlinear/external-api-rng.md b/docs-superlinear/external-api-rng.md new file mode 100644 index 0000000000000..0a4c39164caa6 --- /dev/null +++ b/docs-superlinear/external-api-rng.md @@ -0,0 +1,145 @@ +# External API RNG Provider + +This 
document explains how to set up and use the external API RNG provider with llama.cpp. + +## Overview + +The external API RNG provider replaces the default random number generator in llama.cpp with one that gets random numbers from an external HTTP API. This is useful for: + +1. Studying how different random sequences affect LLM outputs +2. Ensuring deterministic generation across different machines +3. Debugging sampling issues by using a controlled source of randomness + +## Setup + +### 1. Start the Python RNG Service + +The Python RNG service reads random numbers from a file and serves them via a REST API. + +```bash +# First, install the required dependencies +pip install -r rng_service_requirements.txt + +# Generate a file with 10,000 random numbers and start the service +python rng_service.py --file random_numbers.txt --generate 10000 --host 127.0.0.1 --port 8000 +``` + +The service will: +- If the file doesn't exist, generate N random numbers and save them to the file +- If the file exists, load the random numbers from the file +- Start a FastAPI service that serves these numbers via the `/random` endpoint + +### 2. Using with llama-run (run.py) + +To use the external API RNG with the `run.py` script, you can add a new mode by editing the script to include an option for the external RNG provider. + +```python +# Add to run.py +parser.add_argument("--external-rng", type=str, help="URL for external RNG API", default=None) + +# Add to the environment variables section +if args.external_rng: + # Set environment variable for external RNG + os.environ["LLAMA_EXTERNAL_RNG_URL"] = args.external_rng +``` + +Then, modify the llama-run code to check for this environment variable and initialize the external RNG provider if it's set. 
+ +## Usage + +### Running with the External RNG + +```bash +# Start the RNG service +python rng_service.py --file random_numbers.txt --generate 10000 & + +# Run inference with the external RNG +python run.py --mode llama-run --external-rng http://localhost:8000/random --model models/gemma-1.1-7b-it.Q4_K_M.gguf --prompt "Tell me about artificial intelligence" +``` + +### Verifying It's Working + +When the external RNG provider is active, you should see: + +1. Log messages from the RNG service showing that it's serving random numbers +2. Log messages from llama.cpp showing the random values being used during sampling + +You can check the RNG service logs to confirm that it's receiving requests: + +```bash +curl http://localhost:8000/status +``` + +This will show: +- The total number of random numbers available +- The current index +- How many numbers remain before wrapping around + +## Advanced Usage + +### Creating Custom Random Sequences + +You can create custom random number sequences for specific testing: + +```bash +# Generate an evenly distributed sequence from 0 to 1 +python -c "import numpy as np; np.savetxt('even_distribution.txt', np.linspace(0, 1, 1000))" + +# Use a biased distribution to test sampling behavior +python -c "import numpy as np; np.savetxt('biased_high.txt', np.random.beta(5, 1, 1000))" +``` + +Then start the RNG service with your custom file: + +```bash +python rng_service.py --file even_distribution.txt +``` + +### Comparing Multiple Runs + +To compare how different random sequences affect generation: + +1. Create different random number files +2. Run the same prompt with each random number file +3. 
Compare the outputs to see how randomness influences generation + +```bash +# Run with first sequence +python rng_service.py --file sequence1.txt & +python run.py --mode llama-run --external-rng http://localhost:8000/random --model models/gemma-1.1-7b-it.Q4_K_M.gguf --prompt "Tell me about AI" > output1.txt + +# Kill the service and run with second sequence +kill %1 +python rng_service.py --file sequence2.txt & +python run.py --mode llama-run --external-rng http://localhost:8000/random --model models/gemma-1.1-7b-it.Q4_K_M.gguf --prompt "Tell me about AI" > output2.txt +``` + +## Implementation Details + +The external RNG provider is implemented in: +- `llama-rng-provider.h` - Defines the RNG provider interface +- `llama-rng-provider.cpp` - Contains the implementation for both default and API-based providers + +The provider is initialized by the factory function `create_rng_provider("external-api", seed)` which: +1. Reads the API URL from the LLAMA_RNG_API_URL environment variable +2. Creates a new ExternalAPIRNGProvider instance +3. Sets up the provider to make HTTP requests to the specified API URL +4. Parses the JSON responses to extract the `random` field value + +The Python service (`rng_service.py`) provides a simple API that: +1. Loads random numbers from a file or generates them if needed +2. Serves them one at a time via a REST API +3. Wraps around to the beginning when it reaches the end of the list + +## Troubleshooting + +1. **Service Not Starting**: Make sure you have FastAPI and uvicorn installed +2. **Connection Refused**: Verify the host/port settings and that the service is running +3. **Invalid Response**: Check that the API response contains a `random` field with a valid number +4. 
**Build Errors**: Ensure libcurl and nlohmann_json dependencies are properly installed + +## Performance Considerations + +- The external API adds network latency to each token generation +- For production use, consider running the service on the same machine to minimize latency +- Pre-generate large files of random numbers to avoid wrapping around too quickly \ No newline at end of file diff --git a/include/llama.h b/include/llama.h index b7ccfbc261da5..e9aaa756a44f5 100644 --- a/include/llama.h +++ b/include/llama.h @@ -60,6 +60,7 @@ extern "C" { struct llama_model; struct llama_context; struct llama_sampler; + struct llama_rng_provider; typedef int32_t llama_pos; typedef int32_t llama_token; diff --git a/pyproject.toml b/pyproject.toml index cb677a436da9b..d9151e7ff0c37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llama-cpp-scripts" -version = "0.0.2" +version = "0.0.3" description = "Scripts that ship with llama.cpp" authors = ["GGML "] readme = "README.md" diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index e1b02e4c08f07..ddbd6848a4ad2 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -4,8 +4,17 @@ llama_add_compile_flags() # libraries # -# llama +# Find libcurl +find_package(CURL REQUIRED) +# Include nlohmann/json as a header-only library +include(FetchContent) +FetchContent_Declare( + json + URL https://github.com/nlohmann/json/releases/download/v3.11.2/json.tar.xz +) +FetchContent_MakeAvailable(json) +# llama add_library(llama ../include/llama.h llama.cpp @@ -32,7 +41,7 @@ add_library(llama target_include_directories(llama PUBLIC . 
../include ../common) target_compile_features (llama PUBLIC cxx_std_17) # don't bump -target_link_libraries(llama PUBLIC ggml) +target_link_libraries(llama PUBLIC ggml CURL::libcurl nlohmann_json::nlohmann_json) if (BUILD_SHARED_LIBS) set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON) diff --git a/src/llama-rng-provider.h b/src/llama-rng-provider.h index aee608d008898..6036887548f3b 100644 --- a/src/llama-rng-provider.h +++ b/src/llama-rng-provider.h @@ -4,6 +4,8 @@ #include #include #include +#include +#include // Simple RNG Provider base class class RNGProvider { @@ -87,10 +89,90 @@ class NormalRNGProvider : public RNGProvider { std::mt19937 rng; }; +// External API-based RNG provider +class ExternalAPIRNGProvider : public RNGProvider { +public: + ExternalAPIRNGProvider(const std::string& api_url) + : RNGProvider("external-api"), api_url(api_url) { + // Initialize curl + curl_global_init(CURL_GLOBAL_DEFAULT); + curl = curl_easy_init(); + if (!curl) { + fprintf(stderr, "Failed to initialize curl\n"); + } + } + + ~ExternalAPIRNGProvider() override { + if (curl) { + curl_easy_cleanup(curl); + } + curl_global_cleanup(); + } + + double generate() override { + if (!curl) { + fprintf(stderr, "Curl not initialized, returning default value\n"); + return 0.5; // Default value if curl failed + } + + // Make request to the API + std::string response_data; + curl_easy_setopt(curl, CURLOPT_URL, api_url.c_str()); + curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_callback); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response_data); + curl_easy_setopt(curl, CURLOPT_TIMEOUT, 5L); // 5 second timeout + + CURLcode res = curl_easy_perform(curl); + + if (res != CURLE_OK) { + fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); + return 0.5; // Default value on error + } + + // Parse JSON response + try { + nlohmann::json j = nlohmann::json::parse(response_data); + if 
(j.contains("random") && j["random"].is_number()) { + double value = j["random"]; + // Ensure value is in the range [0, 1] + value = std::max(0.0, std::min(1.0, value)); + log_value(value); + return value; + } else { + fprintf(stderr, "API response missing 'random' field: %s\n", response_data.c_str()); + } + } catch (std::exception& e) { + fprintf(stderr, "JSON parse error: %s\n", e.what()); + } + + return 0.5; // Default value on error + } + +private: + std::string api_url; + CURL* curl; + + // Static callback function for curl + static size_t write_callback(char* ptr, size_t size, size_t nmemb, void* userdata) { + std::string* response = reinterpret_cast(userdata); + response->append(ptr, size * nmemb); + return size * nmemb; + } +}; + // Factory function to create RNG providers inline RNGProvider* create_rng_provider(const std::string& type, uint32_t seed) { if (type == "normal") { return new NormalRNGProvider(seed); + } else if (type == "external-api") { + // Check for API URL environment variable + const char* api_url = std::getenv("LLAMA_RNG_API_URL"); + if (api_url == nullptr) { + fprintf(stderr, "Error: LLAMA_RNG_API_URL environment variable not set for external-api provider\n"); + return new UniformRNGProvider(seed); // Fallback to uniform + } + return new ExternalAPIRNGProvider(api_url); } // Default to uniform return new UniformRNGProvider(seed); diff --git a/tools-superlinear/run-llama/config.yaml b/tools-superlinear/run-llama/config.yaml index bd711d14b6280..6b060ce52426b 100644 --- a/tools-superlinear/run-llama/config.yaml +++ b/tools-superlinear/run-llama/config.yaml @@ -1,8 +1,13 @@ # RNG Provider Configuration -# Options: uniform, normal, external-api +# Options: +# - uniform: Standard uniform distribution (0-1) +# - normal: Normal distribution with mean 0.5, std dev 0.15, clamped to (0-1) +# - external-api: Uses an external HTTP API to provide random numbers rng_provider: normal -# API URL (required when rng_provider is external-api) +# API URL 
(REQUIRED when rng_provider is external-api) +# The API endpoint must return JSON with a 'random' field containing a number between 0-1 +# Example: {"random": 0.42} # api_url: http://localhost:8000/random # Run Directory (where outputs will be saved) diff --git a/tools-superlinear/run-llama/run.py b/tools-superlinear/run-llama/run.py index 33ce5d5ff6287..c90714036bd90 100755 --- a/tools-superlinear/run-llama/run.py +++ b/tools-superlinear/run-llama/run.py @@ -180,6 +180,10 @@ def run_model(config, config_path): # Set different environment variables based on the RNG provider if config['rng_provider'] == 'external-api': # When using external API, we need to set the API URL environment variable + if 'api_url' not in config or not config['api_url']: + logger.error("Error: api_url is required for external-api RNG provider") + return False + env["LLAMA_RNG_PROVIDER"] = "external-api" env["LLAMA_RNG_API_URL"] = config['api_url'] logger.info(f"Using external API RNG provider with URL: {config['api_url']}") From 3321d802926dccd79556f7b1c82076962b0fc554 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Mon, 10 Mar 2025 20:51:05 +0300 Subject: [PATCH 04/22] dump changes --- src/llama-sampling.cpp | 27 + tools-superlinear/README.md | 91 ++++ tools-superlinear/process_json_tokens.py | 171 +++++++ tools-superlinear/run_example.sh | 46 ++ .../token_probability_visualizer.py | 469 ++++++++++++++++++ 5 files changed, 804 insertions(+) create mode 100644 tools-superlinear/README.md create mode 100755 tools-superlinear/process_json_tokens.py create mode 100755 tools-superlinear/run_example.sh create mode 100755 tools-superlinear/token_probability_visualizer.py diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp index 9ed4c625ff1e2..9cdf332e50611 100644 --- a/src/llama-sampling.cpp +++ b/src/llama-sampling.cpp @@ -206,6 +206,7 @@ static int llama_sample_dist(llama_token_data_array * cur_p, std::mt19937 & /*rn cumulative_probs.reserve(cur_p->size); float sum = 0.0f; + // 
Log token probabilities in human-readable format to stderr fprintf(stderr, "- Token probabilities:\n"); for (size_t i = 0; i < cur_p->size; ++i) { sum += cur_p->data[i].p; @@ -234,6 +235,32 @@ static int llama_sample_dist(llama_token_data_array * cur_p, std::mt19937 & /*rn fprintf(stderr, "RNG generated sample: %zu (token id: %d, probability: %f)\n", selected_idx, cur_p->data[selected_idx].id, cur_p->data[selected_idx].p); + // Log sampling data in JSON format to a file if environment variable is set + const char* token_data_file = std::getenv("LLAMA_TOKEN_DATA_FILE"); + if (token_data_file != nullptr) { + FILE* f = fopen(token_data_file, "a"); + if (f != nullptr) { + // Start JSON object + fprintf(f, "{\n"); + fprintf(f, " \"raw_random\": %f,\n", u); + fprintf(f, " \"scaled_random\": %f,\n", scaled); + fprintf(f, " \"selected_index\": %zu,\n", selected_idx); + fprintf(f, " \"selected_token_id\": %d,\n", cur_p->data[selected_idx].id); + fprintf(f, " \"selected_probability\": %f,\n", cur_p->data[selected_idx].p); + + // Token data array + fprintf(f, " \"tokens\": [\n"); + for (size_t i = 0; i < cur_p->size; ++i) { + fprintf(f, " {\"index\": %zu, \"token_id\": %d, \"probability\": %f, \"cumulative\": %f}%s\n", + i, cur_p->data[i].id, cur_p->data[i].p, cumulative_probs[i], + (i < cur_p->size - 1) ? "," : ""); + } + fprintf(f, " ]\n"); + fprintf(f, "}\n"); + fclose(f); + } + } + return selected_idx; } diff --git a/tools-superlinear/README.md b/tools-superlinear/README.md new file mode 100644 index 0000000000000..bccd1bbb5e383 --- /dev/null +++ b/tools-superlinear/README.md @@ -0,0 +1,91 @@ +# LLM Token Probability Visualization Tools + +This directory contains tools for analyzing and visualizing token probabilities during LLM text generation in llama.cpp. + +## Tools + +1. **token_probability_visualizer.py** - Visualizes token probabilities with color coding based on selection probability +2. 
**visualize_rng.py** (in tools/) - Visualizes RNG distribution patterns + +## Usage Instructions + +### Capturing Token Probability Data + +To capture token probability data during inference, you can use one of these approaches: + +#### 1. Using stderr output (simple) + +Run llama.cpp inference with stderr redirected to a file: + +```bash +./build/bin/llama-run models/your-model.gguf "Your prompt" 2> inference_log.txt > output.txt +``` + +#### 2. Using JSON format (advanced) + +Run with the `LLAMA_TOKEN_DATA_FILE` environment variable to output structured JSON: + +```bash +LLAMA_TOKEN_DATA_FILE=token_data.json ./build/bin/llama-run models/your-model.gguf "Your prompt" > output.txt +``` + +### Visualizing Token Probabilities + +Once you have the data, use the visualization tool: + +```bash +# Basic usage +python tools-superlinear/token_probability_visualizer.py inference_log.txt output.txt + +# Advanced options +python tools-superlinear/token_probability_visualizer.py inference_log.txt output.txt --mode relative --html visualization.html --json token_probs.json --plot token_probs_plot.png +``` + +### Visualizing RNG Distribution + +If you're using the RNG capture from the system: + +```bash +# Set the environment variable to output raw RNG values +LLAMA_RNG_OUTPUT=rng_values.txt ./build/bin/llama-run models/your-model.gguf "Your prompt" + +# Visualize the RNG distribution +python tools/visualize_rng.py rng_values.txt +``` + +## Visualization Modes + +### Absolute Mode (default) + +Colors tokens based on their absolute probability value: +- Red: High probability (close to 1.0) +- Yellow: Medium probability +- Green: Low probability (close to 0.0) + +### Relative Mode + +Colors tokens relative to the range of probabilities in the generated text: +- Red: Highest relative probability in the text +- Yellow: Medium relative probability +- Green: Lowest relative probability in the text + +## Output Files + +- **HTML visualization**: Interactive webpage showing the 
generated text with tokens colored by probability +- **JSON data**: Structured data of token probabilities for further analysis +- **Plot**: Visual chart showing the distribution of token probabilities over the sequence + +## Example + +To generate an example visualization: + +```bash +# Run inference with probability capture +./build/bin/llama-run models/gemma/gemma-1.1-7b-it.Q4_K_M.gguf "Tell me about deep learning" 2> inference_log.txt > output.txt + +# Generate visualization +python tools-superlinear/token_probability_visualizer.py inference_log.txt output.txt + +# Open the visualization in your browser +open visualization.html +``` \ No newline at end of file diff --git a/tools-superlinear/process_json_tokens.py b/tools-superlinear/process_json_tokens.py new file mode 100755 index 0000000000000..a22b7aa2f7c24 --- /dev/null +++ b/tools-superlinear/process_json_tokens.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python3 +""" +Process JSON token data from llama.cpp + +This script processes the JSON token data generated by llama.cpp when +the LLAMA_TOKEN_DATA_FILE environment variable is set. + +It converts the JSONL (JSON Lines) format into a single JSON array +that can be used for further analysis and visualization. 
+""" + +import json +import argparse +import sys +from pathlib import Path +import matplotlib.pyplot as plt +import numpy as np + +def convert_jsonl_to_array(input_file, output_file): + """Convert JSONL to a single JSON array""" + tokens = [] + + try: + with open(input_file, 'r', encoding='utf-8') as f: + for line in f: + try: + # Skip empty lines + if line.strip(): + token = json.loads(line) + tokens.append(token) + except json.JSONDecodeError as e: + print(f"Error parsing JSON line: {e}") + print(f"Problematic line: {line}") + except Exception as e: + print(f"Error reading file: {e}") + return False + + try: + with open(output_file, 'w', encoding='utf-8') as f: + json.dump(tokens, f, indent=2) + except Exception as e: + print(f"Error writing file: {e}") + return False + + return len(tokens) + +def create_probability_plot(tokens, output_file): + """Create a plot of token probabilities""" + if not tokens: + print("No tokens to plot") + return False + + probabilities = [token.get("selected_probability", 0) for token in tokens] + indices = range(len(probabilities)) + + # Create plot + plt.figure(figsize=(12, 8)) + plt.plot(indices, probabilities, 'b-', marker='o', markersize=4) + plt.title('Token Selection Probabilities') + plt.xlabel('Token Index') + plt.ylabel('Probability') + plt.grid(True, alpha=0.3) + + # Add rolling average + window_size = min(10, len(probabilities)) + if window_size > 1: + rolling_avg = np.convolve(probabilities, np.ones(window_size)/window_size, mode='valid') + plt.plot(range(window_size-1, len(probabilities)), rolling_avg, 'r-', + linewidth=2, label=f'{window_size}-token Moving Average') + plt.legend() + + # Add statistics + mean_prob = np.mean(probabilities) + median_prob = np.median(probabilities) + min_prob = min(probabilities) + max_prob = max(probabilities) + std_dev = np.std(probabilities) + + stats_text = ( + f"Statistics:\n" + f"Count: {len(probabilities)}\n" + f"Mean: {mean_prob:.6f}\n" + f"Median: {median_prob:.6f}\n" + f"Min: 
{min_prob:.6f}\n" + f"Max: {max_prob:.6f}\n" + f"Std Dev: {std_dev:.6f}" + ) + + plt.figtext(0.15, 0.02, stats_text, fontsize=10, + bbox=dict(facecolor='white', alpha=0.8)) + + plt.tight_layout() + plt.savefig(output_file) + return True + +def analyze_token_data(tokens): + """Analyze token data and print statistics""" + if not tokens: + print("No tokens to analyze") + return + + probabilities = [token.get("selected_probability", 0) for token in tokens] + random_values = [token.get("raw_random", 0) for token in tokens if "raw_random" in token] + + print("\nToken Statistics:") + print(f" Total tokens: {len(tokens)}") + print(f" Mean probability: {np.mean(probabilities):.6f}") + print(f" Median probability: {np.median(probabilities):.6f}") + print(f" Min probability: {min(probabilities):.6f}") + print(f" Max probability: {max(probabilities):.6f}") + print(f" Std Dev: {np.std(probabilities):.6f}") + + if random_values: + print("\nRandom Number Statistics:") + print(f" Total random values: {len(random_values)}") + print(f" Mean value: {np.mean(random_values):.6f}") + print(f" Median value: {np.median(random_values):.6f}") + print(f" Min value: {min(random_values):.6f}") + print(f" Max value: {max(random_values):.6f}") + print(f" Std Dev: {np.std(random_values):.6f}") + +def main(): + parser = argparse.ArgumentParser(description="Process JSON token data from llama.cpp") + parser.add_argument("input_file", help="Input JSONL file with token data") + parser.add_argument("--output", "-o", help="Output JSON file (default: token_data.json)", + default="token_data.json") + parser.add_argument("--plot", "-p", help="Generate probability plot (default: token_probs.png)", + default="token_probs.png") + parser.add_argument("--analyze", "-a", action="store_true", help="Print token statistics") + + args = parser.parse_args() + + input_path = Path(args.input_file) + if not input_path.exists(): + print(f"Error: Input file '{input_path}' does not exist", file=sys.stderr) + return 1 + + # 
Load and convert the token data + tokens = [] + try: + with open(input_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + if line: + token = json.loads(line) + tokens.append(token) + except Exception as e: + print(f"Error reading input file: {e}", file=sys.stderr) + return 1 + + # Write converted data to output file + try: + with open(args.output, 'w', encoding='utf-8') as f: + json.dump(tokens, f, indent=2) + print(f"Processed {len(tokens)} tokens, saved to {args.output}") + except Exception as e: + print(f"Error writing output file: {e}", file=sys.stderr) + return 1 + + # Generate probability plot + if create_probability_plot(tokens, args.plot): + print(f"Probability plot saved to {args.plot}") + + # Print analysis + if args.analyze: + analyze_token_data(tokens) + + return 0 + +if __name__ == "__main__": + sys.exit(main()) \ No newline at end of file diff --git a/tools-superlinear/run_example.sh b/tools-superlinear/run_example.sh new file mode 100755 index 0000000000000..dc8ecbb2f5837 --- /dev/null +++ b/tools-superlinear/run_example.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Example script to run token probability visualization + +# Check if model path is provided +if [ $# -eq 0 ]; then + echo "Usage: $0 [prompt]" + echo "Example: $0 models/gemma/gemma-1.1-7b-it.Q4_K_M.gguf 'Tell me about AI'" + exit 1 +fi + +MODEL=$1 +PROMPT=${2:-"Tell me about artificial intelligence"} +OUTPUT_DIR="token_viz_output" + +# Create output directory +mkdir -p $OUTPUT_DIR + +# Run the model and capture output +echo "Running model with prompt: '$PROMPT'" +echo "Capturing token probability data..." + +# Run using both methods +./build/bin/llama-run --ngl 999 $MODEL "$PROMPT" 2> $OUTPUT_DIR/inference_log.txt > $OUTPUT_DIR/output.txt + +# Also save in JSON format +LLAMA_TOKEN_DATA_FILE=$OUTPUT_DIR/token_data.jsonl ./build/bin/llama-run --ngl 999 $MODEL "$PROMPT" > $OUTPUT_DIR/output_json.txt + +echo "Generating visualizations..." 
+ +# Generate standard visualization +python tools-superlinear/token_probability_visualizer.py $OUTPUT_DIR/inference_log.txt $OUTPUT_DIR/output.txt --html $OUTPUT_DIR/visualization.html + +# Generate relative mode visualization +python tools-superlinear/token_probability_visualizer.py $OUTPUT_DIR/inference_log.txt $OUTPUT_DIR/output.txt --mode relative --html $OUTPUT_DIR/visualization_relative.html + +echo "Visualizations generated in $OUTPUT_DIR directory" +echo "- Standard visualization: $OUTPUT_DIR/visualization.html" +echo "- Relative mode visualization: $OUTPUT_DIR/visualization_relative.html" +echo "- Token probability JSON: $OUTPUT_DIR/token_probs.json" +echo "- Token probability plots: $OUTPUT_DIR/token_probs_plot.png" + +# Open the visualization (macOS specific, adjust for other platforms) +if [[ "$OSTYPE" == "darwin"* ]]; then + echo "Opening visualization in browser..." + open $OUTPUT_DIR/visualization.html +fi \ No newline at end of file diff --git a/tools-superlinear/token_probability_visualizer.py b/tools-superlinear/token_probability_visualizer.py new file mode 100755 index 0000000000000..0fbd9fd813846 --- /dev/null +++ b/tools-superlinear/token_probability_visualizer.py @@ -0,0 +1,469 @@ +#!/usr/bin/env python3 +""" +Token Probability Visualizer + +A tool to visualize token probabilities during LLM text generation. +Reads token probability data from stderr output of llama.cpp inference +and visualizes it, coloring tokens based on their absolute or relative probability. + +Usage: + 1. Run llama.cpp inference with stderr redirected to a file: + ./build/bin/llama-run model.gguf "prompt text" 2> inference_log.txt > output.txt + + 2. 
Run this script to visualize the token probabilities: + python token_probability_visualizer.py inference_log.txt output.txt +""" + +import re +import json +import argparse +import numpy as np +import matplotlib.pyplot as plt +from pathlib import Path +import matplotlib.colors as mcolors +from matplotlib.figure import Figure +from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas +from matplotlib.patches import Rectangle +import sys +import html +from PIL import Image, ImageDraw, ImageFont +from typing import List, Dict, Tuple, Union, Any, Optional + +class TokenProbabilityParser: + """Parse the stderr log to extract token probability information""" + + def __init__(self, log_file: str): + self.log_file = log_file + self.rng_blocks = [] + self.tokens = [] + self.probabilities = [] + self.cumulative_probs = [] + self.token_ids = [] + self.token_text = [] + self.selected_indices = [] + + def parse_log(self) -> List[Dict[str, Any]]: + """Parse the log file and extract token probability information""" + with open(self.log_file, 'r', encoding='utf-8', errors='replace') as f: + content = f.read() + + # Find all RNG blocks + rng_blocks = re.findall(r'RNG internal:.*?RNG generated sample:.*?token id: (\d+), probability: ([\d\.]+)', + content, re.DOTALL) + + probability_blocks = [] + + rng_block_matches = re.finditer(r'RNG internal:(.*?)RNG generated sample: (\d+) \(token id: (\d+), probability: ([\d\.]+)\)', + content, re.DOTALL) + + for match in rng_block_matches: + full_text = match.group(0) + inner_text = match.group(1) + selected_idx = int(match.group(2)) + token_id = int(match.group(3)) + probability = float(match.group(4)) + + # Extract token probabilities + token_probs = re.findall(r'\s+\[(\d+)\] token (\d+) = ([\d\.]+) \(cumulative: ([\d\.]+)\)', inner_text) + + # Build token probability data + token_data = [] + for tp in token_probs: + idx = int(tp[0]) + token_id_inner = int(tp[1]) + prob = float(tp[2]) + cumulative = float(tp[3]) + + 
token_data.append({ + "index": idx, + "token_id": token_id_inner, + "probability": prob, + "cumulative": cumulative, + "selected": idx == selected_idx + }) + + # Extract raw random number + match_raw = re.search(r'- Raw uniform random number: ([\d\.]+)', inner_text) + raw_random = float(match_raw.group(1)) if match_raw else None + + # Extract scaled random number + match_scaled = re.search(r'- Scaled random number: ([\d\.]+)', inner_text) + scaled_random = float(match_scaled.group(1)) if match_scaled else None + + probability_blocks.append({ + "token_id": token_id, + "probability": probability, + "selected_index": selected_idx, + "raw_random": raw_random, + "scaled_random": scaled_random, + "tokens": token_data + }) + + # Store for later processing + self.token_ids.append(token_id) + self.probabilities.append(probability) + self.selected_indices.append(selected_idx) + + return probability_blocks + + def extract_token_text(self, model_vocab_file: Optional[str] = None) -> None: + """ + Extract text representation of tokens. + If model_vocab_file is provided, use it to map token IDs to text. + Otherwise, just use placeholders. 
+ """ + token_map = {} + if model_vocab_file and Path(model_vocab_file).exists(): + # Load token map from model vocab file + # Expected format: one token per line, "token_id token_text" + with open(model_vocab_file, 'r', encoding='utf-8', errors='replace') as f: + for line in f: + parts = line.strip().split(' ', 1) + if len(parts) == 2: + token_map[int(parts[0])] = parts[1] + + # Map token IDs to text + for token_id in self.token_ids: + if token_id in token_map: + self.token_text.append(token_map[token_id]) + else: + # Use placeholder if token ID not found in vocab file + self.token_text.append(f"<{token_id}>") + +def load_output_text(output_file: str) -> str: + """Load the generated text from the output file""" + try: + with open(output_file, 'r', encoding='utf-8', errors='replace') as f: + return f.read() + except Exception as e: + print(f"Error reading output file: {e}") + return "" + +def create_colored_html(output_text: str, probability_blocks: List[Dict[str, Any]], + color_mode: str = "absolute", output_file: str = "visualization.html"): + """Create an HTML visualization with tokens colored based on their probabilities""" + + # Create HTML template + html_content = f""" + + + + Token Probability Visualization + + + +
+
+

Token Probability Visualization

+
+ + +
+
+ +
+ {generate_colored_tokens_html(output_text, probability_blocks, color_mode)} +
+ +
+
+
+ High Probability +
+
+
+ Medium Probability +
+
+
+ Low Probability +
+
+
+ + + + + """ + + # Write to file + with open(output_file, 'w', encoding='utf-8') as f: + f.write(html_content) + + print(f"HTML visualization saved to {output_file}") + +def checked_attr(value: str, current_mode: str) -> str: + """Helper function to set the checked attribute for radio buttons""" + return "checked" if value == current_mode else "" + +def generate_colored_tokens_html(output_text: str, probability_blocks: List[Dict[str, Any]], + color_mode: str) -> str: + """Generate HTML for colored tokens""" + + # Get all probabilities for normalization (relative mode) + all_probs = [block["probability"] for block in probability_blocks] + min_prob = min(all_probs) if all_probs else 0 + max_prob = max(all_probs) if all_probs else 1 + prob_range = max_prob - min_prob if max_prob > min_prob else 1.0 + + # Generate HTML for each token + html_output = "" + chars_processed = 0 + + for i, block in enumerate(probability_blocks): + token_id = block["token_id"] + probability = block["probability"] + + # Get token text from the output - this is an approximation + # In practice, you'd need a proper tokenizer to get the exact token text + # For now, we'll just take the next character as a simple approximation + if chars_processed < len(output_text): + token_text = output_text[chars_processed] + chars_processed += 1 + else: + token_text = "□" # Placeholder for tokens outside the output length + + # Calculate color based on probability + if color_mode == "absolute": + # Absolute mode: green (low) to yellow (medium) to red (high) + # Map probability [0,1] to color + r = min(255, int(probability * 2 * 255)) + g = min(255, int((1 - probability) * 2 * 255)) + b = 50 + else: + # Relative mode: normalize probability within the range of all tokens + normalized_prob = (probability - min_prob) / prob_range if prob_range > 0 else 0.5 + r = min(255, int(normalized_prob * 2 * 255)) + g = min(255, int((1 - normalized_prob) * 2 * 255)) + b = 50 + + color = f"rgb({r},{g},{b})" + + # Special 
handling for whitespace characters (make them visible) + display_text = token_text + if token_text.isspace(): + if token_text == " ": + display_text = " " + elif token_text == "\n": + display_text = "
" + else: + display_text = "⎵" # Unicode symbol for space + else: + display_text = html.escape(display_text) + + # Create token HTML with tooltip + token_html = f""" + + {display_text} + + Token ID: {token_id}
+ Probability: {probability:.4f} +
+
+ """ + + html_output += token_html + + # For any remaining text not processed as tokens + if chars_processed < len(output_text): + html_output += html.escape(output_text[chars_processed:]) + + return html_output + +def export_json(probability_blocks: List[Dict[str, Any]], output_file: str): + """Export probability data to JSON file""" + with open(output_file, 'w', encoding='utf-8') as f: + json.dump(probability_blocks, f, indent=2) + + print(f"JSON data exported to {output_file}") + +def create_visualization_plot(probability_blocks: List[Dict[str, Any]], output_file: str): + """Create matplotlib visualization of token probabilities""" + probabilities = [block["probability"] for block in probability_blocks] + token_ids = [block["token_id"] for block in probability_blocks] + + # Set up plot + fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10)) + + # Plot probabilities + ax1.plot(probabilities, marker='o', linestyle='-', color='blue') + ax1.set_title('Token Probabilities') + ax1.set_xlabel('Token Sequence') + ax1.set_ylabel('Probability') + ax1.grid(True) + + # Plot histogram of probabilities + ax2.hist(probabilities, bins=20, color='green', alpha=0.7) + ax2.set_title('Probability Distribution') + ax2.set_xlabel('Probability') + ax2.set_ylabel('Frequency') + ax2.grid(True) + + # Add statistics + stats_text = f""" + Statistics: + Mean: {np.mean(probabilities):.4f} + Median: {np.median(probabilities):.4f} + Min: {min(probabilities):.4f} + Max: {max(probabilities):.4f} + Std Dev: {np.std(probabilities):.4f} + """ + + fig.text(0.15, 0.02, stats_text, fontsize=10, + bbox=dict(facecolor='white', alpha=0.8)) + + plt.tight_layout() + plt.savefig(output_file) + print(f"Visualization plot saved to {output_file}") + +def main(): + parser = argparse.ArgumentParser(description="Visualize token probabilities from llama.cpp inference") + parser.add_argument("log_file", help="Log file with stderr output from llama.cpp inference") + parser.add_argument("output_file", help="Output 
file with generated text", nargs="?") + parser.add_argument("--mode", choices=["absolute", "relative"], default="absolute", + help="Probability coloring mode: absolute or relative (default: absolute)") + parser.add_argument("--html", help="Output HTML file (default: visualization.html)", + default="visualization.html") + parser.add_argument("--json", help="Output JSON file (default: token_probs.json)", + default="token_probs.json") + parser.add_argument("--plot", help="Output plot file (default: token_probs_plot.png)", + default="token_probs_plot.png") + parser.add_argument("--vocab", help="Model vocabulary file for mapping token IDs to text") + + args = parser.parse_args() + + log_path = Path(args.log_file) + if not log_path.exists(): + print(f"Error: Log file '{log_path}' does not exist", file=sys.stderr) + return 1 + + # Parse log file + parser = TokenProbabilityParser(args.log_file) + probability_blocks = parser.parse_log() + + if len(probability_blocks) == 0: + print("Error: No token probability data found in log file", file=sys.stderr) + return 1 + + # Extract token text if vocab file provided + if args.vocab: + parser.extract_token_text(args.vocab) + + # Export data to JSON + export_json(probability_blocks, args.json) + + # Create visualization plot + create_visualization_plot(probability_blocks, args.plot) + + # If output file is provided, create HTML visualization + if args.output_file: + output_path = Path(args.output_file) + if output_path.exists(): + output_text = load_output_text(args.output_file) + create_colored_html(output_text, probability_blocks, args.mode, args.html) + else: + print(f"Warning: Output file '{output_path}' does not exist. 
Skipping HTML visualization.") + + return 0 + +if __name__ == "__main__": + sys.exit(main()) \ No newline at end of file From 4b5d2c533ada500d8c355777b4cb950e9a7ac061 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 14:22:44 +0300 Subject: [PATCH 05/22] Refactor file locations --- poetry.lock | 36 +++++++++++++++++- pyproject.toml | 3 +- .../{rng-provider => rng_provider}/README.md | 0 .../rng_service.py | 0 .../rng_service_requirements.txt | 0 tools-superlinear/run_llama/README.md | 10 +++++ .../{run-llama => run_llama}/config.yaml | 0 .../{run-llama => run_llama}/run.py | 4 +- .../run_all_models.sh | 2 +- .../sample_outputs/gemma_log.txt | 0 .../sample_outputs/gemma_output.txt | 0 .../sample_outputs/log.txt | 0 .../sample_outputs/output.txt | 0 .../sample_outputs/rng_values.png | Bin .../sample_outputs/rng_values.txt | 0 .../config.yaml | 0 .../log.txt | 0 .../output.txt | 0 .../rng_distribution.png | Bin .../rng_values.txt | 0 .../config.yaml | 0 .../log.txt | 0 .../output.txt | 0 .../rng_distribution.png | Bin .../rng_values.txt | 0 .../run_llama}/visualize_rng.py | 0 .../process_json_tokens.py | 0 .../token_probability_visualizer.py | 0 28 files changed, 49 insertions(+), 6 deletions(-) rename tools-superlinear/{rng-provider => rng_provider}/README.md (100%) rename tools-superlinear/{rng-provider => rng_provider}/rng_service.py (100%) rename tools-superlinear/{rng-provider => rng_provider}/rng_service_requirements.txt (100%) create mode 100644 tools-superlinear/run_llama/README.md rename tools-superlinear/{run-llama => run_llama}/config.yaml (100%) rename tools-superlinear/{run-llama => run_llama}/run.py (99%) rename tools-superlinear/{run-llama => run_llama}/run_all_models.sh (97%) rename tools-superlinear/{run-llama => run_llama}/sample_outputs/gemma_log.txt (100%) rename tools-superlinear/{run-llama => run_llama}/sample_outputs/gemma_output.txt (100%) rename tools-superlinear/{run-llama => run_llama}/sample_outputs/log.txt (100%)
rename tools-superlinear/{run-llama => run_llama}/sample_outputs/output.txt (100%) rename tools-superlinear/{run-llama => run_llama}/sample_outputs/rng_values.png (100%) rename tools-superlinear/{run-llama => run_llama}/sample_outputs/rng_values.txt (100%) rename tools-superlinear/{run-llama => run_llama}/sample_runs/gemma-2-2b-it_normal_20250306_050002/config.yaml (100%) rename tools-superlinear/{run-llama => run_llama}/sample_runs/gemma-2-2b-it_normal_20250306_050002/log.txt (100%) rename tools-superlinear/{run-llama => run_llama}/sample_runs/gemma-2-2b-it_normal_20250306_050002/output.txt (100%) rename tools-superlinear/{run-llama => run_llama}/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_distribution.png (100%) rename tools-superlinear/{run-llama => run_llama}/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_values.txt (100%) rename tools-superlinear/{run-llama => run_llama}/sample_runs/gemma-2-2b_uniform_20250306_044755/config.yaml (100%) rename tools-superlinear/{run-llama => run_llama}/sample_runs/gemma-2-2b_uniform_20250306_044755/log.txt (100%) rename tools-superlinear/{run-llama => run_llama}/sample_runs/gemma-2-2b_uniform_20250306_044755/output.txt (100%) rename tools-superlinear/{run-llama => run_llama}/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_distribution.png (100%) rename tools-superlinear/{run-llama => run_llama}/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_values.txt (100%) rename {tools => tools-superlinear/run_llama}/visualize_rng.py (100%) rename tools-superlinear/{ => visualize_tokens}/process_json_tokens.py (100%) rename tools-superlinear/{ => visualize_tokens}/token_probability_visualizer.py (100%) diff --git a/poetry.lock b/poetry.lock index f09d1d0512fcb..a5de2511b84ec 100644 --- a/poetry.lock +++ b/poetry.lock @@ -633,6 +633,24 @@ files = [ {file = "kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e"}, ] +[[package]] +name = "loguru" +version = "0.7.3" 
+description = "Python logging made (stupidly) simple" +optional = false +python-versions = "<4.0,>=3.5" +files = [ + {file = "loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c"}, + {file = "loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6"}, +] + +[package.dependencies] +colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""} +win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""} + +[package.extras] +dev = ["Sphinx (==8.1.3)", "build (==1.2.2)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.5.0)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.13.0)", "mypy (==v1.4.1)", "myst-parser (==4.0.0)", "pre-commit (==4.0.1)", "pytest (==6.1.2)", "pytest (==8.3.2)", "pytest-cov (==2.12.1)", "pytest-cov (==5.0.0)", "pytest-cov (==6.0.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.1.0)", "sphinx-rtd-theme (==3.0.2)", "tox (==3.27.1)", "tox (==4.23.2)", "twine (==6.0.1)"] + [[package]] name = "markupsafe" version = "3.0.2" @@ -1964,7 +1982,21 @@ files = [ {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, ] +[[package]] +name = "win32-setctime" +version = "1.2.0" +description = "A small Python utility to set file creation time on Windows" +optional = false +python-versions = ">=3.5" +files = [ + {file = "win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390"}, + {file = "win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0"}, +] + +[package.extras] +dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] + [metadata] lock-version = "2.0" -python-versions = ">=3.10" -content-hash = "a16f250760ab50c444fef037a812b02b32e23329e02198b0694ac4c192209728" +python-versions = 
">=3.10,<4" +content-hash = "b2dee6feb1aa16d3e490e501c826161957d05ac8c8978ae98e1020b4c2276e71" diff --git a/pyproject.toml b/pyproject.toml index d9151e7ff0c37..9f5225b6fca6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ classifiers = [ ] [tool.poetry.dependencies] -python = ">=3.10" +python = ">=3.10,<4" numpy = "^1.25.0" sentencepiece = ">=0.1.98,<=0.2.0" transformers = ">=4.35.2,<5.0.0" @@ -27,6 +27,7 @@ matplotlib = "^3.10.1" pyyaml = "^6.0.2" fastapi = "^0.115.11" uvicorn = "^0.34.0" +loguru = "^0.7.3" [tool.poetry.group.dev.dependencies] pytest = "^5.2" diff --git a/tools-superlinear/rng-provider/README.md b/tools-superlinear/rng_provider/README.md similarity index 100% rename from tools-superlinear/rng-provider/README.md rename to tools-superlinear/rng_provider/README.md diff --git a/tools-superlinear/rng-provider/rng_service.py b/tools-superlinear/rng_provider/rng_service.py similarity index 100% rename from tools-superlinear/rng-provider/rng_service.py rename to tools-superlinear/rng_provider/rng_service.py diff --git a/tools-superlinear/rng-provider/rng_service_requirements.txt b/tools-superlinear/rng_provider/rng_service_requirements.txt similarity index 100% rename from tools-superlinear/rng-provider/rng_service_requirements.txt rename to tools-superlinear/rng_provider/rng_service_requirements.txt diff --git a/tools-superlinear/run_llama/README.md b/tools-superlinear/run_llama/README.md new file mode 100644 index 0000000000000..fbd260f0d1468 --- /dev/null +++ b/tools-superlinear/run_llama/README.md @@ -0,0 +1,10 @@ +# Quick reference + +- + +- Run from repo root +``` +poetry run python tools-superlinear/run_llama/run.py +``` + +- default output dir: diff --git a/tools-superlinear/run-llama/config.yaml b/tools-superlinear/run_llama/config.yaml similarity index 100% rename from tools-superlinear/run-llama/config.yaml rename to tools-superlinear/run_llama/config.yaml diff --git a/tools-superlinear/run-llama/run.py 
b/tools-superlinear/run_llama/run.py similarity index 99% rename from tools-superlinear/run-llama/run.py rename to tools-superlinear/run_llama/run.py index c90714036bd90..7d2d05b6c584e 100755 --- a/tools-superlinear/run-llama/run.py +++ b/tools-superlinear/run_llama/run.py @@ -16,8 +16,8 @@ from loguru import logger # Get script directory for relative paths -SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) -PROJECT_ROOT = os.path.dirname(SCRIPT_DIR) +SCRIPT_DIR = Path(__file__).absolute().parent +PROJECT_ROOT = SCRIPT_DIR.parent.parent # Configure loguru logger logger.remove() # Remove default handler diff --git a/tools-superlinear/run-llama/run_all_models.sh b/tools-superlinear/run_llama/run_all_models.sh similarity index 97% rename from tools-superlinear/run-llama/run_all_models.sh rename to tools-superlinear/run_llama/run_all_models.sh index 3ffc770368cf1..f1e1ba2099169 100644 --- a/tools-superlinear/run-llama/run_all_models.sh +++ b/tools-superlinear/run_llama/run_all_models.sh @@ -9,7 +9,7 @@ echo "Using $NUM_TOKENS tokens for each run" # Get the directory where this script is located SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." && pwd )" +PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." 
&& pwd )" # Directory containing models (relative to project root) MODELS_DIR="$PROJECT_ROOT/models-superlinear" diff --git a/tools-superlinear/run-llama/sample_outputs/gemma_log.txt b/tools-superlinear/run_llama/sample_outputs/gemma_log.txt similarity index 100% rename from tools-superlinear/run-llama/sample_outputs/gemma_log.txt rename to tools-superlinear/run_llama/sample_outputs/gemma_log.txt diff --git a/tools-superlinear/run-llama/sample_outputs/gemma_output.txt b/tools-superlinear/run_llama/sample_outputs/gemma_output.txt similarity index 100% rename from tools-superlinear/run-llama/sample_outputs/gemma_output.txt rename to tools-superlinear/run_llama/sample_outputs/gemma_output.txt diff --git a/tools-superlinear/run-llama/sample_outputs/log.txt b/tools-superlinear/run_llama/sample_outputs/log.txt similarity index 100% rename from tools-superlinear/run-llama/sample_outputs/log.txt rename to tools-superlinear/run_llama/sample_outputs/log.txt diff --git a/tools-superlinear/run-llama/sample_outputs/output.txt b/tools-superlinear/run_llama/sample_outputs/output.txt similarity index 100% rename from tools-superlinear/run-llama/sample_outputs/output.txt rename to tools-superlinear/run_llama/sample_outputs/output.txt diff --git a/tools-superlinear/run-llama/sample_outputs/rng_values.png b/tools-superlinear/run_llama/sample_outputs/rng_values.png similarity index 100% rename from tools-superlinear/run-llama/sample_outputs/rng_values.png rename to tools-superlinear/run_llama/sample_outputs/rng_values.png diff --git a/tools-superlinear/run-llama/sample_outputs/rng_values.txt b/tools-superlinear/run_llama/sample_outputs/rng_values.txt similarity index 100% rename from tools-superlinear/run-llama/sample_outputs/rng_values.txt rename to tools-superlinear/run_llama/sample_outputs/rng_values.txt diff --git a/tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/config.yaml 
b/tools-superlinear/run_llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/config.yaml similarity index 100% rename from tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/config.yaml rename to tools-superlinear/run_llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/config.yaml diff --git a/tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/log.txt b/tools-superlinear/run_llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/log.txt similarity index 100% rename from tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/log.txt rename to tools-superlinear/run_llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/log.txt diff --git a/tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/output.txt b/tools-superlinear/run_llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/output.txt similarity index 100% rename from tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/output.txt rename to tools-superlinear/run_llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/output.txt diff --git a/tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_distribution.png b/tools-superlinear/run_llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_distribution.png similarity index 100% rename from tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_distribution.png rename to tools-superlinear/run_llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_distribution.png diff --git a/tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_values.txt b/tools-superlinear/run_llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_values.txt similarity index 100% rename from tools-superlinear/run-llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_values.txt rename to 
tools-superlinear/run_llama/sample_runs/gemma-2-2b-it_normal_20250306_050002/rng_values.txt diff --git a/tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/config.yaml b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_uniform_20250306_044755/config.yaml similarity index 100% rename from tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/config.yaml rename to tools-superlinear/run_llama/sample_runs/gemma-2-2b_uniform_20250306_044755/config.yaml diff --git a/tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/log.txt b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_uniform_20250306_044755/log.txt similarity index 100% rename from tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/log.txt rename to tools-superlinear/run_llama/sample_runs/gemma-2-2b_uniform_20250306_044755/log.txt diff --git a/tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/output.txt b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_uniform_20250306_044755/output.txt similarity index 100% rename from tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/output.txt rename to tools-superlinear/run_llama/sample_runs/gemma-2-2b_uniform_20250306_044755/output.txt diff --git a/tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_distribution.png b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_distribution.png similarity index 100% rename from tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_distribution.png rename to tools-superlinear/run_llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_distribution.png diff --git a/tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_values.txt b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_values.txt similarity index 100% rename from 
tools-superlinear/run-llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_values.txt rename to tools-superlinear/run_llama/sample_runs/gemma-2-2b_uniform_20250306_044755/rng_values.txt diff --git a/tools/visualize_rng.py b/tools-superlinear/run_llama/visualize_rng.py similarity index 100% rename from tools/visualize_rng.py rename to tools-superlinear/run_llama/visualize_rng.py diff --git a/tools-superlinear/process_json_tokens.py b/tools-superlinear/visualize_tokens/process_json_tokens.py similarity index 100% rename from tools-superlinear/process_json_tokens.py rename to tools-superlinear/visualize_tokens/process_json_tokens.py diff --git a/tools-superlinear/token_probability_visualizer.py b/tools-superlinear/visualize_tokens/token_probability_visualizer.py similarity index 100% rename from tools-superlinear/token_probability_visualizer.py rename to tools-superlinear/visualize_tokens/token_probability_visualizer.py From 12eb2a5c2a66764e9c78e0506e671450fc45ea18 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 15:16:31 +0300 Subject: [PATCH 06/22] v0.0.4 - Misc improvements - add gemma-2b to the model_setup.md - Throw an error if the API service is unavailable for ExternalAPIRNGProvider - fix run.py: a) use correct context limit argument for running the llama-run b) handle errors correctly c) fix script paths and convert to pathlib --- docs-superlinear/model_setup.md | 6 ++-- pyproject.toml | 2 +- src/llama-rng-provider.h | 12 ++------ tools-superlinear/run_llama/config.yaml | 6 ++-- tools-superlinear/run_llama/run.py | 38 ++++++++++++++----------- 5 files changed, 33 insertions(+), 31 deletions(-) diff --git a/docs-superlinear/model_setup.md b/docs-superlinear/model_setup.md index 49294130e47ef..545b75d11f6d1 100644 --- a/docs-superlinear/model_setup.md +++ b/docs-superlinear/model_setup.md @@ -66,7 +66,8 @@ You need to request access to the models before downloading them: poetry run huggingface-cli download 
meta-llama/Llama-3.2-1B-Instruct --local-dir ./models-superlinear/llama/llama-3.2-1b-instruct/huggingface && \ poetry run huggingface-cli download meta-llama/Llama-3.1-8B-Instruct --local-dir ./models-superlinear/llama/llama-3.1-8b-instruct/huggingface && \ poetry run huggingface-cli download meta-llama/Llama-3.1-8B --local-dir ./models-superlinear/llama/llama-3.1-8b/huggingface && \ -poetry run huggingface-cli download google/gemma-2-2b-it --local-dir ./models-superlinear/gemma/gemma-2-2b-it/huggingface +poetry run huggingface-cli download google/gemma-2-2b-it --local-dir ./models-superlinear/gemma/gemma-2-2b-it/huggingface && \ +poetry run huggingface-cli download google/gemma-2-2b --local-dir ./models-superlinear/gemma/gemma-2-2b/huggingface ``` ### Converting All Models to GGUF @@ -76,7 +77,8 @@ poetry run huggingface-cli download google/gemma-2-2b-it --local-dir ./models-su poetry run python convert_hf_to_gguf.py --outfile ./models-superlinear/llama-3.2-1b-instruct.gguf ./models-superlinear/llama/llama-3.2-1b-instruct/huggingface && \ poetry run python convert_hf_to_gguf.py --outfile ./models-superlinear/llama-3.1-8b-instruct.gguf ./models-superlinear/llama/llama-3.1-8b-instruct/huggingface && \ poetry run python convert_hf_to_gguf.py --outfile ./models-superlinear/llama-3.1-8b.gguf ./models-superlinear/llama/llama-3.1-8b/huggingface && \ -poetry run python convert_hf_to_gguf.py --outfile ./models-superlinear/gemma-2-2b-it.gguf ./models-superlinear/gemma/gemma-2-2b-it/huggingface +poetry run python convert_hf_to_gguf.py --outfile ./models-superlinear/gemma-2-2b-it.gguf ./models-superlinear/gemma/gemma-2-2b-it/huggingface && \ +poetry run python convert_hf_to_gguf.py --outfile ./models-superlinear/gemma-2-2b.gguf ./models-superlinear/gemma/gemma-2-2b/huggingface ``` ## Verifying Models diff --git a/pyproject.toml b/pyproject.toml index 9f5225b6fca6e..275dbc8bee9fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = 
"llama-cpp-scripts" -version = "0.0.3" +version = "0.0.4" description = "Scripts that ship with llama.cpp" authors = ["GGML "] readme = "README.md" diff --git a/src/llama-rng-provider.h b/src/llama-rng-provider.h index 6036887548f3b..7acf9a284d8da 100644 --- a/src/llama-rng-provider.h +++ b/src/llama-rng-provider.h @@ -124,12 +124,9 @@ class ExternalAPIRNGProvider : public RNGProvider { curl_easy_setopt(curl, CURLOPT_TIMEOUT, 5L); // 5 second timeout CURLcode res = curl_easy_perform(curl); - if (res != CURLE_OK) { - fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); - return 0.5; // Default value on error + throw std::runtime_error(std::string("curl_easy_perform() failed: ") + curl_easy_strerror(res)); } - // Parse JSON response try { nlohmann::json j = nlohmann::json::parse(response_data); @@ -139,14 +136,11 @@ class ExternalAPIRNGProvider : public RNGProvider { value = std::max(0.0, std::min(1.0, value)); log_value(value); return value; - } else { - fprintf(stderr, "API response missing 'random' field: %s\n", response_data.c_str()); } + throw std::runtime_error("API response missing 'random' field: " + response_data); } catch (std::exception& e) { - fprintf(stderr, "JSON parse error: %s\n", e.what()); + throw std::runtime_error(std::string("RNG API error: ") + e.what()); } - - return 0.5; // Default value on error } private: diff --git a/tools-superlinear/run_llama/config.yaml b/tools-superlinear/run_llama/config.yaml index 6b060ce52426b..c33d9f899d675 100644 --- a/tools-superlinear/run_llama/config.yaml +++ b/tools-superlinear/run_llama/config.yaml @@ -4,17 +4,19 @@ # - normal: Normal distribution with mean 0.5, std dev 0.15, clamped to (0-1) # - external-api: Uses an external HTTP API to provide random numbers rng_provider: normal +# rng_provider: external-api # API URL (REQUIRED when rng_provider is external-api) # The API endpoint must return JSON with a 'random' field containing a number between 0-1 # Example: {"random": 0.42} -# 
api_url: http://localhost:8000/random +api_url: http://localhost:8000/random # Run Directory (where outputs will be saved) # run_dir: runs/default # Model Configuration -model: gemma-2-2b-it # See model_paths in run.py for available shortcuts +# model: gemma-2-2b-it # See model_paths in run.py for available shortcuts +model: gemma-2-2b # See model_paths in run.py for available shortcuts # Number of tokens to generate (optional) num_tokens: 100 diff --git a/tools-superlinear/run_llama/run.py b/tools-superlinear/run_llama/run.py index 7d2d05b6c584e..5df2c78553f7d 100755 --- a/tools-superlinear/run_llama/run.py +++ b/tools-superlinear/run_llama/run.py @@ -64,7 +64,7 @@ def read_rng_values(filename): def visualize_distribution(values, output_file, rng_provider): """Visualize the distribution of RNG values using visualize_rng.py""" # Use the visualize_rng.py script from tools directory - visualize_script = os.path.join(PROJECT_ROOT, "tools/visualize_rng.py") + visualize_script = SCRIPT_DIR / "visualize_rng.py" if not os.path.exists(visualize_script): logger.warning(f"Visualization script not found at {visualize_script}") @@ -95,7 +95,7 @@ def visualize_distribution(values, output_file, rng_provider): # Run the visualization script try: - cmd = ["python", visualize_script, input_file, "-o", output_file] + cmd = ["python", str(visualize_script), input_file, "-o", output_file] logger.info(f"Running visualization command: {' '.join(cmd)}") result = subprocess.run(cmd, check=True, capture_output=True, text=True) logger.success(f"Visualization saved to {output_file}") @@ -198,7 +198,7 @@ def run_model(config, config_path): # Add number of tokens parameter if specified if 'num_tokens' in config: - cmd.extend(['-n', str(config['num_tokens'])]) + cmd.extend(['-c', str(config['num_tokens'])]) # Add model path and prompt cmd.extend([model_path, config['prompt']]) @@ -214,13 +214,28 @@ def run_model(config, config_path): stdout=out_f, stderr=log_f, env=env, - check=True + 
check=False # Don't raise exception on non-zero exit code ) + + # Check if it was a context size exceeded error + with open(log_file, 'r') as f: + log_content = f.read() + if "context size exceeded" in log_content: + logger.info("Model reached context size limit - treating as successful completion") + process.returncode = 0 # Override return code for this case + elif process.returncode != 0: + logger.error(f"Error running command: exit code {process.returncode}") + logger.info(f"Check the log file for details: {log_file}") + return False # Stop immediately on real errors + except subprocess.CalledProcessError as e: logger.error(f"Error running command: {e}") logger.info(f"Check the log file for details: {log_file}") - - # Even if the command failed, try to generate the plot if RNG values were collected + return False # Stop immediately + + # Only proceed with visualization if the run was successful + if process.returncode == 0: + # Generate plot if RNG values file exists if os.path.exists(rng_file) and os.path.getsize(rng_file) > 0: if visualize_distribution(rng_file, plot_file, config['rng_provider']): logger.success(f"RNG distribution plot: {plot_file}") @@ -228,17 +243,6 @@ def run_model(config, config_path): logger.warning(f"Failed to generate RNG distribution plot") else: logger.warning(f"No RNG values were collected. RNG file not found or empty: {rng_file}") - - return False - - # Generate plot if RNG values file exists - if os.path.exists(rng_file) and os.path.getsize(rng_file) > 0: - if visualize_distribution(rng_file, plot_file, config['rng_provider']): - logger.success(f"RNG distribution plot: {plot_file}") - else: - logger.warning(f"Failed to generate RNG distribution plot") - else: - logger.warning(f"No RNG values were collected. 
RNG file not found or empty: {rng_file}") logger.success(f"Run completed successfully!") logger.info(f"Output: {output_file}") From 89c8c17f8a35553337b33e4d5c4c91467e0a70cb Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 16:38:39 +0300 Subject: [PATCH 07/22] v0.0.5 - sample run api external rng provider --- pyproject.toml | 2 +- src/llama-rng-provider.h | 3 +- .../rng_values_gemma-2-2b_uniform.txt | 2017 +++++++++++++++++ tools-superlinear/rng_provider/run.sh | 1 + tools-superlinear/run_llama/config.yaml | 4 +- .../README.md | 3 + .../config.yaml | 5 + .../log.txt | 1400 ++++++++++++ .../output.txt | 3 + .../rng_distribution.png | Bin 0 -> 55620 bytes .../rng_values.txt | 98 + 11 files changed, 3531 insertions(+), 5 deletions(-) create mode 100644 tools-superlinear/rng_provider/rng_values_gemma-2-2b_uniform.txt create mode 100644 tools-superlinear/rng_provider/run.sh create mode 100644 tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/README.md create mode 100644 tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/config.yaml create mode 100644 tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/log.txt create mode 100644 tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/output.txt create mode 100644 tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/rng_distribution.png create mode 100644 tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/rng_values.txt diff --git a/pyproject.toml b/pyproject.toml index 275dbc8bee9fc..820c1602bb7d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llama-cpp-scripts" -version = "0.0.4" +version = "0.0.5" description = "Scripts that ship with llama.cpp" authors = ["GGML "] readme = "README.md" diff --git a/src/llama-rng-provider.h b/src/llama-rng-provider.h index 7acf9a284d8da..4f496d6698bbe 100644 --- 
a/src/llama-rng-provider.h +++ b/src/llama-rng-provider.h @@ -111,8 +111,7 @@ class ExternalAPIRNGProvider : public RNGProvider { double generate() override { if (!curl) { - fprintf(stderr, "Curl not initialized, returning default value\n"); - return 0.5; // Default value if curl failed + throw std::runtime_error("Curl not initialized - cannot generate random numbers"); } // Make request to the API diff --git a/tools-superlinear/rng_provider/rng_values_gemma-2-2b_uniform.txt b/tools-superlinear/rng_provider/rng_values_gemma-2-2b_uniform.txt new file mode 100644 index 0000000000000..3b7d05ac4bef5 --- /dev/null +++ b/tools-superlinear/rng_provider/rng_values_gemma-2-2b_uniform.txt @@ -0,0 +1,2017 @@ +0.592845 +0.844266 +0.857946 +0.847252 +0.623564 +0.384382 +0.297535 +0.056713 +0.272656 +0.477665 +0.812169 +0.479977 +0.392785 +0.836079 +0.337396 +0.648172 +0.368242 +0.957155 +0.140351 +0.870087 +0.473608 +0.800911 +0.520477 +0.67888 +0.720633 +0.58202 +0.537373 +0.758616 +0.105908 +0.4736 +0.186332 +0.736918 +0.21655 +0.135218 +0.324141 +0.149675 +0.222321 +0.386489 +0.902598 +0.44995 +0.613063 +0.902349 +0.0992804 +0.969809 +0.65314 +0.17091 +0.358152 +0.750686 +0.607831 +0.325047 +0.0384254 +0.634274 +0.958949 +0.65279 +0.635059 +0.9953 +0.58185 +0.414369 +0.474698 +0.62351 +0.338008 +0.674752 +0.317202 +0.778345 +0.949571 +0.662527 +0.0135716 +0.622846 +0.67366 +0.971945 +0.878193 +0.509624 +0.0557147 +0.451159 +0.0199877 +0.441711 +0.979587 +0.359444 +0.480894 +0.688661 +0.880476 +0.918235 +0.216822 +0.565189 +0.865103 +0.508969 +0.916723 +0.921158 +0.0831125 +0.277719 +0.0093567 +0.842342 +0.647174 +0.841386 +0.26473 +0.397821 +0.552821 +0.16494 +0.369808 +0.146442 +0.569618 +0.703737 +0.288476 +0.433288 +0.756107 +0.396098 +0.896038 +0.638921 +0.891554 +0.680056 +0.449198 +0.978571 +0.116202 +0.767024 +0.41182 +0.675439 +0.249796 +0.313218 +0.965416 +0.588465 +0.659668 +0.533206 +0.230533 +0.394869 +0.618809 +0.474868 +0.470132 +0.716075 +0.287991 +0.383462 
+0.74917 +0.878452 +0.102863 +0.0923739 +0.354047 +0.551816 +0.0336251 +0.968962 +0.320997 +0.221263 +0.141264 +0.0972599 +0.984042 +0.260341 +0.537023 +0.447926 +0.0995691 +0.352312 +0.469249 +0.84114 +0.904648 +0.0375594 +0.508315 +0.166848 +0.779051 +0.864933 +0.411397 +0.139973 +0.0332224 +0.982575 +0.373291 +0.420075 +0.0505881 +0.365496 +0.016628 +0.230742 +0.764912 +0.944124 +0.749999 +0.339404 +0.489549 +0.338985 +0.17949 +0.170987 +0.463451 +0.874573 +0.94412 +0.608253 +0.596655 +0.783644 +0.500026 +0.0503701 +0.699098 +0.992396 +0.267263 +0.679091 +0.864281 +0.750844 +0.96449 +0.554242 +0.212391 +0.222443 +0.218749 +0.569574 +0.452109 +0.970237 +0.680545 +0.0852956 +0.0564183 +0.487838 +0.881005 +0.976404 +0.617658 +0.542499 +0.854614 +0.743835 +0.478596 +0.677082 +0.607045 +0.714697 +0.469497 +0.456015 +0.906418 +0.13722 +0.229219 +0.881585 +0.904425 +0.645785 +0.324683 +0.519711 +5.53505e-05 +0.31186 +0.425452 +0.885338 +0.679879 +0.45613 +0.483409 +0.788739 +0.229442 +0.880298 +0.313692 +0.957451 +0.471752 +0.711584 +0.153694 +0.730442 +0.646264 +0.214881 +0.186458 +0.80758 +0.747079 +0.674847 +0.276894 +0.174909 +0.704474 +0.46315 +0.840429 +0.204866 +0.164959 +0.124833 +0.722081 +0.030453 +0.746994 +0.0925962 +0.21745 +0.749254 +0.731694 +0.0456146 +0.209157 +0.286915 +0.677263 +0.0630383 +0.555649 +0.00924007 +0.833038 +0.984329 +0.703495 +0.181631 +0.512393 +0.580447 +0.787542 +0.606475 +0.218403 +0.455169 +0.87887 +0.492268 +0.715561 +0.48616 +0.708548 +0.49814 +0.84455 +0.194342 +0.773326 +0.974259 +0.86231 +0.780427 +0.985032 +0.75357 +0.00404811 +0.269479 +0.410492 +0.428224 +0.297842 +0.401132 +0.120657 +0.9807 +0.40612 +0.569211 +0.343605 +0.788873 +0.411372 +0.359271 +0.399499 +0.301831 +0.77522 +0.926213 +0.32531 +0.952871 +0.0139484 +0.533466 +0.304582 +0.88286 +0.250623 +0.677412 +0.810424 +0.432148 +0.752135 +0.829602 +0.379034 +0.0965496 +0.25614 +0.591935 +0.476477 +0.487935 +0.458515 +0.524593 +0.442015 +0.852635 +0.433439 +0.82687 
+0.509342 +0.086377 +0.66004 +0.206595 +0.847275 +0.681359 +0.178367 +0.069859 +0.00968817 +0.89205 +0.133465 +0.779192 +0.925163 +0.715179 +0.490819 +0.469498 +0.882709 +0.48841 +0.414567 +0.174112 +0.475289 +0.783648 +0.556429 +0.159867 +0.143829 +0.649463 +0.539224 +0.325685 +0.147014 +0.15987 +0.129412 +0.578922 +0.0922602 +0.910545 +0.0829717 +0.800378 +0.877131 +0.0934595 +0.426306 +0.473221 +0.580197 +0.716244 +0.027069 +0.731397 +0.766964 +0.00976643 +0.308286 +0.232865 +0.503427 +0.953721 +0.557811 +0.0974687 +0.621678 +0.863023 +0.157632 +0.99243 +0.0850011 +0.0944579 +0.633608 +0.949297 +0.936746 +0.447378 +0.727696 +0.742238 +0.306986 +0.119771 +0.443879 +0.391774 +0.531849 +0.845358 +0.536275 +0.68027 +0.609178 +0.098478 +0.0920276 +0.0559658 +0.0865325 +0.237173 +0.839513 +0.522371 +0.513075 +0.649832 +0.544591 +0.0324653 +0.580152 +0.771089 +0.376227 +0.491025 +0.98164 +0.244651 +0.374323 +0.0456596 +0.310068 +0.831519 +0.807023 +0.640024 +0.368102 +0.312753 +0.801836 +0.0704472 +0.683573 +0.380729 +0.633931 +0.926879 +0.85394 +0.497678 +0.427834 +0.822559 +0.738227 +0.447144 +0.968903 +0.363161 +0.397533 +0.738683 +0.449083 +0.937284 +0.257233 +0.38176 +0.697142 +0.89707 +0.12106 +0.241546 +0.228264 +0.489509 +0.891523 +0.358107 +0.385237 +0.916727 +0.113817 +0.631126 +0.132815 +0.374245 +0.324405 +0.680116 +0.795535 +0.503934 +0.296242 +0.885962 +0.351871 +0.73839 +0.555361 +0.201518 +0.548519 +0.519643 +0.348783 +0.02462 +0.148829 +0.131852 +0.707917 +0.709072 +0.611748 +0.671907 +0.447516 +0.696077 +0.378326 +0.100032 +0.960257 +0.0724344 +0.0193006 +0.979573 +0.148478 +0.258702 +0.215529 +0.579912 +0.975752 +0.273009 +0.970075 +0.0254974 +0.888044 +0.708609 +0.277774 +0.894863 +0.0265226 +0.625637 +0.329543 +0.967121 +0.168576 +0.901483 +0.329936 +0.889233 +0.512452 +0.510629 +0.386827 +0.530089 +0.944707 +0.892862 +0.677114 +0.639027 +0.548361 +0.272683 +0.148269 +0.856303 +0.635057 +0.299643 +0.46022 +0.0245274 +0.558065 +0.361152 +0.50271 
+0.153628 +0.425508 +0.906172 +0.00851159 +0.968864 +0.690894 +0.0994164 +0.289776 +0.539074 +0.724148 +0.388867 +0.227084 +0.45486 +0.972083 +0.833818 +0.914791 +0.667285 +0.440666 +0.685092 +0.648598 +0.0291 +0.919531 +0.542457 +0.991142 +0.383103 +0.806669 +0.461007 +0.826824 +0.539118 +0.887265 +0.880107 +0.328303 +0.404425 +0.544647 +0.621693 +0.379356 +0.542129 +0.120919 +0.680697 +0.287119 +0.0429764 +0.116098 +0.106429 +0.316488 +0.246535 +0.562783 +0.906354 +0.580107 +0.597937 +0.238044 +0.580887 +0.14587 +0.335631 +0.625528 +0.361306 +0.910994 +0.878493 +0.434156 +0.517612 +0.474668 +0.184363 +0.434761 +0.187552 +0.715208 +0.523006 +0.320565 +0.00309954 +0.597303 +0.649078 +0.00518962 +0.970222 +0.663866 +0.293144 +0.200364 +0.925702 +0.309251 +0.403218 +0.633062 +0.890653 +0.432606 +0.928219 +0.592081 +0.431785 +0.59278 +0.354506 +0.65702 +0.652309 +0.821719 +0.402767 +0.037565 +0.984632 +0.448143 +0.937635 +0.672041 +0.266697 +0.564064 +0.956933 +0.113486 +0.272229 +0.0820577 +0.765271 +0.0216887 +0.137251 +0.260629 +0.871083 +0.17941 +0.433279 +0.325118 +0.583404 +0.936468 +0.940782 +0.0525082 +0.596211 +0.891801 +0.780931 +0.211534 +0.764994 +0.189063 +0.89876 +0.00893126 +0.090233 +0.619183 +0.38244 +0.0965629 +0.984933 +0.896581 +0.271113 +0.47319 +0.479754 +0.50839 +0.967259 +0.0999585 +0.0396505 +0.483706 +0.458259 +0.235263 +0.0588703 +0.123844 +0.856755 +0.399667 +0.381397 +0.0219736 +0.374753 +0.661607 +0.625358 +0.913991 +0.671794 +0.324083 +0.31622 +0.833316 +0.775803 +0.281153 +0.713927 +0.561577 +0.39966 +0.53086 +0.0852331 +0.665678 +0.0296348 +0.889287 +0.682119 +0.891314 +0.00120757 +0.937219 +0.958307 +0.796319 +0.992074 +0.355225 +0.84834 +0.770674 +0.720528 +0.633415 +0.307376 +0.537279 +0.274575 +0.00868992 +0.0249447 +0.144286 +0.51376 +0.222658 +0.36949 +0.0551073 +0.911197 +0.884952 +0.897277 +0.332191 +0.712927 +0.426943 +0.301692 +0.0704108 +0.421845 +0.38832 +0.160394 +0.350207 +0.0956199 +0.871103 +0.667593 +0.192718 +0.918092 
+0.579681 +0.455898 +0.642352 +0.254025 +0.551079 +0.131903 +0.200516 +0.514054 +0.0974934 +0.851854 +0.395544 +0.233078 +0.585432 +0.746836 +0.608989 +0.579011 +0.1701 +0.686362 +0.976802 +0.608593 +0.328379 +0.802692 +0.836364 +0.41528 +0.415608 +0.936617 +0.722184 +0.0329013 +0.425946 +0.0938945 +0.978548 +0.575212 +0.643678 +0.364463 +0.667715 +0.571844 +0.774683 +0.900102 +0.404307 +0.895345 +0.0650825 +0.685477 +0.23107 +0.538869 +0.773035 +0.746584 +0.72143 +0.482892 +0.194023 +0.205566 +0.535468 +0.437814 +0.97499 +0.300824 +0.0789699 +0.726878 +0.975424 +0.782089 +0.247045 +0.036561 +0.406922 +0.877751 +0.431994 +0.284086 +0.935302 +0.772266 +0.610987 +0.00267707 +0.931505 +0.673837 +0.224467 +0.0247531 +0.00746832 +0.927403 +0.693153 +0.705148 +0.596065 +0.537236 +0.455579 +0.600193 +0.654082 +0.37304 +0.668234 +0.456885 +0.0850668 +0.233174 +0.0813717 +0.322017 +0.611327 +0.367626 +0.726227 +0.678059 +0.144691 +0.168043 +0.0644355 +0.264333 +0.348357 +0.0882482 +0.602229 +0.714376 +0.157222 +0.342138 +0.72725 +0.663318 +0.85446 +0.554603 +0.0257565 +0.0573206 +0.0800611 +0.517137 +0.755106 +0.0322733 +0.456688 +0.824798 +0.668606 +0.545918 +0.588812 +0.89648 +0.575078 +0.421819 +0.702952 +0.357207 +0.972912 +0.872303 +0.156653 +0.684126 +0.0340708 +0.885207 +0.844854 +0.510128 +0.127689 +0.621473 +0.239337 +0.630871 +0.693746 +0.106255 +0.476946 +0.585593 +0.253626 +0.20101 +0.637856 +0.760744 +0.359111 +0.708898 +0.587375 +0.809593 +0.204072 +0.796397 +0.266408 +0.298264 +0.978172 +0.0460136 +0.189754 +0.520875 +0.375994 +0.2421 +0.684553 +0.669237 +0.963741 +0.8786 +0.997436 +0.362888 +0.0452345 +0.325297 +0.496915 +0.172196 +0.0850761 +0.452729 +0.932666 +0.763165 +0.311486 +0.888615 +0.697274 +0.463344 +0.26462 +0.575534 +0.194901 +0.741232 +0.936928 +0.659268 +0.724994 +0.164488 +0.748224 +0.497357 +0.0873827 +0.386447 +0.729448 +0.57821 +0.306383 +0.988749 +0.574032 +0.944949 +0.559774 +0.564667 +0.0511704 +0.104079 +0.614438 +0.731634 +0.244032 
+0.963976 +0.741238 +0.238011 +0.64442 +0.339756 +0.727481 +0.584593 +0.756654 +0.251464 +0.932295 +0.993001 +0.164533 +0.512905 +0.863476 +0.347698 +0.242637 +0.731065 +0.977518 +0.503748 +0.392431 +0.391933 +0.794506 +0.706061 +0.318917 +0.974589 +0.89158 +0.700928 +0.440777 +0.207296 +0.664965 +0.75389 +0.604078 +0.495617 +0.118042 +0.784731 +0.491591 +0.854115 +0.134819 +0.965089 +0.491201 +0.0723504 +0.280114 +0.90258 +0.334018 +0.970078 +0.444121 +0.367046 +0.78977 +0.467119 +0.0755666 +0.828841 +0.790462 +0.514993 +0.00968332 +0.136357 +0.711991 +0.590858 +0.700045 +0.360934 +0.629377 +0.434827 +0.413801 +0.415033 +0.0320556 +0.060995 +0.849319 +0.695526 +0.230412 +0.118127 +0.0407103 +0.479707 +0.587967 +0.897956 +0.920272 +0.366244 +0.930839 +0.545779 +0.574092 +0.178636 +0.115861 +0.244376 +0.124289 +0.223292 +0.539813 +0.868255 +0.794988 +0.0323562 +0.698557 +0.0467918 +0.998025 +0.784114 +0.231382 +0.49011 +0.744101 +0.839807 +0.228857 +0.133949 +0.931077 +0.670762 +0.876285 +0.232383 +0.128697 +0.350325 +0.2527 +0.0727541 +0.644479 +0.251212 +0.450224 +0.288301 +0.921192 +0.450209 +0.879857 +0.0440164 +0.535802 +0.45273 +0.326118 +0.300189 +0.266788 +0.19953 +0.492632 +0.345138 +0.330601 +0.0662072 +0.0138651 +0.911657 +0.413758 +0.677045 +0.452399 +0.200638 +0.929475 +0.122662 +0.481375 +0.0917281 +0.229276 +0.685415 +0.526413 +0.550055 +0.832239 +0.0607782 +0.904827 +0.651228 +0.640907 +0.0302082 +0.910073 +0.027982 +0.185448 +0.895911 +0.126728 +0.397988 +0.760049 +0.562947 +0.730478 +0.192846 +0.412052 +0.498388 +0.243624 +0.997918 +0.671116 +0.883779 +0.392967 +0.92484 +0.251627 +0.157884 +0.78686 +0.973017 +0.0620358 +0.100003 +0.317898 +0.525951 +0.704271 +0.955551 +0.11782 +0.356315 +0.266284 +0.807419 +0.163559 +0.87684 +0.77371 +0.883819 +0.969168 +0.573444 +0.458412 +0.485031 +0.895513 +0.208683 +0.37535 +0.641133 +0.716033 +0.0563722 +0.978154 +0.331452 +0.206064 +0.307173 +0.984772 +0.668577 +0.245278 +0.201096 +0.0748952 +0.363214 
+0.823753 +0.275425 +0.368491 +0.761437 +0.695947 +0.412434 +0.164319 +0.893773 +0.575062 +0.256768 +0.23679 +0.448386 +0.403147 +0.0688805 +0.943142 +0.786954 +0.496548 +0.326903 +0.181476 +0.16629 +0.24049 +0.220528 +0.709701 +0.769634 +0.363482 +0.159394 +0.644734 +0.332831 +0.932249 +0.788661 +0.987758 +0.887363 +0.00531504 +0.0603353 +0.898192 +0.118932 +0.595584 +0.354821 +0.0337716 +0.0828762 +0.765832 +0.842402 +0.363224 +0.246772 +0.493591 +0.152283 +0.616941 +0.739508 +0.703966 +0.93208 +0.601714 +0.595197 +0.736985 +0.725784 +0.279172 +0.797254 +0.90444 +0.779409 +0.849383 +0.848722 +0.247618 +0.827051 +0.718981 +0.323365 +0.674683 +0.673191 +0.802356 +6.057e-05 +0.64721 +0.672889 +0.577413 +0.777378 +0.143529 +0.301309 +0.755868 +0.342921 +0.603649 +0.621393 +0.298614 +0.460909 +0.684799 +0.773659 +0.647218 +0.385739 +0.0315746 +0.901008 +0.00937518 +0.10239 +0.783274 +0.0608922 +0.885796 +0.898111 +0.0559814 +0.950956 +0.514462 +0.000691551 +0.523365 +0.380595 +0.98536 +0.260588 +0.446547 +0.0946972 +0.876648 +0.0375344 +0.771241 +0.278329 +0.139133 +0.701441 +0.624497 +0.0355818 +0.388169 +0.555679 +0.35644 +0.631719 +0.462583 +0.646329 +0.609537 +0.200951 +0.337841 +0.303253 +0.98041 +0.243904 +0.230306 +0.0574706 +0.948443 +0.476003 +0.989032 +0.749701 +0.0342459 +0.795545 +0.908733 +0.73574 +0.26593 +0.145436 +0.194058 +0.992887 +0.500021 +0.996541 +0.658604 +0.930428 +0.643419 +0.246577 +0.315022 +0.902979 +0.922094 +0.63784 +0.097896 +0.260911 +0.17793 +0.620054 +0.15439 +0.440847 +0.55244 +0.227871 +0.227157 +0.266946 +0.897057 +0.564627 +0.125811 +0.205011 +0.936845 +0.373106 +0.350387 +0.469994 +0.394161 +0.338281 +0.36648 +0.866459 +0.694728 +0.0177687 +0.958146 +0.823572 +0.92106 +0.240696 +0.688463 +0.976762 +0.127087 +0.746688 +0.242531 +0.51002 +0.252473 +0.649032 +0.687589 +0.0426159 +0.549237 +0.643199 +0.570423 +0.273678 +0.220824 +0.153583 +0.759873 +0.792718 +0.550338 +0.532326 +0.361106 +0.0758501 +0.98403 +0.869848 +0.205499 
+0.424853 +0.652514 +0.552646 +0.2512 +0.324052 +0.788165 +0.162451 +0.0815811 +0.513185 +0.22133 +0.232316 +0.525917 +0.498534 +0.927958 +0.503449 +0.196374 +0.0270782 +0.0841781 +0.419858 +0.517121 +0.646491 +0.471207 +0.658199 +0.000716961 +0.0147809 +0.802734 +0.613702 +0.755738 +0.378403 +0.600934 +0.46917 +0.725316 +0.0976597 +0.117492 +0.813224 +0.105373 +0.964511 +0.712639 +0.523994 +0.886118 +0.86333 +0.775427 +0.797293 +0.781491 +0.569706 +0.726851 +0.45715 +0.802216 +0.743531 +0.866292 +0.92749 +0.0136377 +0.480421 +0.917152 +0.169935 +0.0392223 +0.430146 +0.431253 +0.531195 +0.470647 +0.697994 +0.541822 +0.54234 +0.667213 +0.52239 +0.0621308 +0.811259 +0.286454 +0.90165 +0.642234 +0.763869 +0.312511 +0.156098 +0.651467 +0.337081 +0.430235 +0.849336 +0.0223316 +0.794306 +0.924292 +0.50625 +0.144179 +0.396234 +0.148975 +0.122745 +0.963913 +0.303421 +0.26668 +0.771092 +0.52832 +0.872991 +0.999854 +0.0802848 +0.143078 +0.943103 +0.211302 +0.0449298 +0.860362 +0.0735739 +0.961841 +0.760279 +0.404422 +0.0651185 +0.711614 +0.569834 +0.151386 +0.627341 +0.657301 +0.945419 +0.457835 +0.512488 +0.908538 +0.246402 +0.263347 +0.930111 +0.373449 +0.771632 +0.339843 +0.218128 +0.308819 +0.506966 +0.221781 +0.730003 +0.0749214 +0.553315 +0.576657 +0.315671 +0.0444678 +0.51231 +0.0638254 +0.848063 +0.94937 +0.264958 +0.101374 +0.286726 +0.196646 +0.008904 +0.891301 +0.789015 +0.302878 +0.644827 +0.97041 +0.785555 +0.748973 +0.607624 +0.369941 +0.58464 +0.157281 +0.0377641 +0.392753 +0.15413 +0.152742 +0.47983 +0.176935 +0.199879 +0.575139 +0.457063 +0.718119 +0.46305 +0.307307 +0.635868 +0.482628 +0.977583 +0.247767 +0.675058 +0.98737 +0.925993 +0.252567 +0.719469 +0.386538 +0.75949 +0.125707 +0.973096 +0.149499 +0.699536 +0.345514 +0.186937 +0.390854 +0.97136 +0.114767 +0.0330937 +0.796117 +0.819996 +0.485795 +0.960932 +0.627717 +0.590998 +0.832943 +0.00111618 +0.1967 +0.287142 +0.446737 +0.123796 +0.859725 +0.715622 +0.770997 +0.0911204 +0.501462 +0.119725 +0.274041 
+0.920548 +0.772628 +0.772057 +0.753086 +0.404643 +0.0869553 +0.930877 +0.694394 +0.778242 +0.791615 +0.823341 +0.382342 +0.934781 +0.469588 +0.21596 +0.514625 +0.643997 +0.31942 +0.106806 +0.456865 +0.487799 +0.771406 +0.0724229 +0.67043 +0.14799 +0.681115 +0.341983 +0.845086 +0.456371 +0.233708 +0.762525 +0.978654 +0.633643 +0.134428 +0.204374 +0.379661 +0.496578 +0.0201413 +0.481704 +0.474885 +0.435883 +0.308658 +0.956942 +0.666005 +0.787628 +0.515595 +0.366147 +0.371213 +0.273383 +0.546963 +0.854137 +0.0392023 +0.751298 +0.783764 +0.394724 +0.743794 +0.389287 +0.315387 +0.235482 +0.803792 +0.0290809 +0.239098 +0.301361 +0.607631 +0.425818 +0.663062 +0.143061 +0.343454 +0.52931 +0.622566 +0.269601 +0.26117 +0.046384 +0.82271 +0.0279463 +0.360635 +0.179906 +0.501768 +0.980649 +0.420985 +0.585628 +0.591171 +0.0569988 +0.721138 +0.776421 +0.733763 +0.146743 +0.45388 +0.39716 +0.38576 +0.0682928 +0.790305 +0.00492913 +0.163264 +0.696827 +0.227431 +0.847017 +0.601626 +0.753261 +0.225809 +0.751381 +0.836053 +0.542017 +0.0274007 +0.329973 +0.591142 +0.938093 +0.217504 +0.0437157 +0.595986 +0.541009 +0.406493 +0.953563 +0.0842882 +0.951102 +0.0349893 +0.538944 +0.945636 +0.461754 +0.110248 +0.968688 +0.451589 +0.815611 +0.681578 +0.405789 +0.982408 +0.657141 +0.757471 +0.0216689 +0.609172 +0.94519 +0.242013 +0.250298 +0.850195 +0.94532 +0.371295 +0.675755 +0.915647 +0.0951234 +0.484942 +0.780789 +0.648497 +0.126512 +0.860027 +0.618073 +0.95066 +0.601803 +0.814585 +0.987383 +0.787452 +0.49138 +0.588473 +0.573247 +0.0494567 +0.385292 +0.422276 +0.129632 +0.0413064 +0.00474751 +0.356805 +0.242532 +0.555385 +0.923935 +0.0390473 +0.435266 +0.667646 +0.0842075 +0.61197 +0.369099 +0.76527 +0.247615 +0.123495 +0.107043 +0.494765 +0.562065 +0.218783 +0.550799 +0.717853 +0.249803 +0.0101649 +0.831067 +0.843678 +0.377681 +0.938244 +0.826797 +0.317374 +0.598193 +0.404122 +0.116867 +0.412481 +0.843658 +0.523109 +0.608402 +0.359874 +0.175803 +0.570039 +0.551778 +0.95613 +0.00935857 
+0.898155 +0.925847 +0.240624 +0.0456771 +0.84886 +0.168976 +0.396884 +0.453198 +0.324999 +0.426528 +0.311247 +0.525223 +0.0400171 +0.590969 +0.152887 +0.976403 +0.645609 +0.00791952 +0.0390692 +0.269731 +0.763936 +0.0922644 +0.662067 +0.551576 +0.436532 +0.177337 +0.0327194 +0.51237 +0.514609 +0.0149 +0.642576 +0.361371 +0.481307 +0.459086 +0.69759 +0.506245 +0.370052 +0.196497 +0.0298811 +0.226181 +0.62181 +0.258141 +0.678695 +0.937591 +0.256567 +0.773833 +0.656158 +0.399585 +0.706328 +0.279674 +0.299379 +0.400492 +0.423701 +0.552287 +0.383381 +0.855484 +0.632939 +0.913909 +0.366275 +0.984668 +0.711309 +0.768243 +0.578081 +0.879193 +0.0979222 +0.779823 +0.652092 +0.840992 +0.879207 +0.464878 +0.962408 +0.813975 +0.869087 +0.0128773 +0.947514 +0.254649 +0.630237 +0.456599 +0.611792 +0.581019 +0.771289 +0.551549 +0.559937 +0.591774 +0.218413 +0.683489 +0.98306 +0.926118 +0.234964 +0.977641 +0.830148 +0.966652 +0.974703 +0.465732 +0.626675 +0.26629 +0.20864 +0.448407 +0.250621 +0.852609 +0.0584639 +0.289153 +0.177652 +0.0985779 +0.509799 +0.854799 +0.435713 +0.265303 +0.882157 +0.921127 +0.505799 +0.658125 +0.0614986 +0.341405 +0.38963 +0.505473 +0.848648 +0.809566 +0.00977752 +0.155371 +0.377161 +0.512017 +0.606989 +0.144398 +0.696291 +0.890779 +0.223718 +0.539886 +0.989765 +0.644129 +0.093254 +0.773355 +0.507874 +0.112015 +0.515153 +0.75524 +0.117142 +0.356256 +0.0823964 +0.151497 +0.0524972 +0.226143 +0.454232 +0.791421 +0.686301 +0.587538 +0.986594 +0.463695 +0.915145 +0.33664 +0.426481 +0.397268 +0.59682 +0.932057 +0.767778 +0.833946 +0.792105 +0.9519 +0.114212 +0.814397 +0.672273 +0.0327379 +0.398848 +0.31845 +0.103193 +0.309853 +0.112391 +0.00386003 +0.8876 +0.156843 +0.487839 +0.0554077 +0.811903 +0.756074 +0.819757 +0.714835 +0.850405 +0.297325 +0.272635 +0.042151 +0.97148 +0.306023 +0.227424 +0.0682532 +0.429823 +0.808608 +0.319593 +0.78037 +0.773888 +0.936517 +0.256813 +0.585046 +0.929442 +0.234846 +0.134832 +0.38045 +0.91041 +0.749537 +0.0374777 
+0.773262 +0.451831 +0.66632 +0.710483 +0.552984 +0.696678 +0.635314 +0.681336 +0.103901 +0.757438 +0.490956 +0.188782 +0.311548 +0.620534 +0.963908 +0.188081 +0.895292 +0.646775 +0.49748 +0.370195 +0.153943 +0.120646 +0.130935 +0.228133 +0.852634 +0.245116 +0.356485 +0.931953 +0.480295 +0.592088 +0.828734 +0.142856 +0.533723 +0.0988058 +0.542783 +0.594379 +0.546089 +0.364285 +0.848439 +0.309634 +0.763928 +0.448482 +0.805421 +0.791537 +0.8209 +0.690871 +0.757962 +0.779748 +0.751346 +0.39208 +0.197441 +0.970131 +0.25631 +0.980375 +0.102184 +0.826974 +0.211518 +0.830948 +0.0691848 +0.866703 +0.418363 +0.924825 +0.642279 +0.516363 +0.567241 +0.536456 +0.13677 +0.671568 +0.178717 +0.838175 +0.062907 +0.954401 +0.148959 +0.155473 +0.0700888 +0.149816 +0.573994 +0.356904 +0.93384 +0.366327 +0.926141 +0.0585674 +0.21994 +0.842678 +0.163673 +0.260253 +0.371217 +0.584198 +0.392311 +0.268173 +0.486944 +0.531859 +0.96241 +0.675955 diff --git a/tools-superlinear/rng_provider/run.sh b/tools-superlinear/rng_provider/run.sh new file mode 100644 index 0000000000000..eabc2a1e0a02b --- /dev/null +++ b/tools-superlinear/rng_provider/run.sh @@ -0,0 +1 @@ +poetry run python rng_service.py --file rng_values_gemma-2-2b_uniform.txt \ No newline at end of file diff --git a/tools-superlinear/run_llama/config.yaml b/tools-superlinear/run_llama/config.yaml index c33d9f899d675..99e4ab9afefed 100644 --- a/tools-superlinear/run_llama/config.yaml +++ b/tools-superlinear/run_llama/config.yaml @@ -3,8 +3,8 @@ # - uniform: Standard uniform distribution (0-1) # - normal: Normal distribution with mean 0.5, std dev 0.15, clamped to (0-1) # - external-api: Uses an external HTTP API to provide random numbers -rng_provider: normal -# rng_provider: external-api +# rng_provider: normal +rng_provider: external-api # API URL (REQUIRED when rng_provider is external-api) # The API endpoint must return JSON with a 'random' field containing a number between 0-1 diff --git 
a/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/README.md b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/README.md new file mode 100644 index 0000000000000..c48c57a042028 --- /dev/null +++ b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/README.md @@ -0,0 +1,3 @@ +I used the [rng_values.txt](../gemma-2-2b_uniform_20250306_044755/rng_values.txt) for RNG-provider service + +And therefore output.txt is the same as [output.txt](../gemma-2-2b_uniform_20250306_044755/output.txt) \ No newline at end of file diff --git a/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/config.yaml b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/config.yaml new file mode 100644 index 0000000000000..e17eca99cb402 --- /dev/null +++ b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/config.yaml @@ -0,0 +1,5 @@ +api_url: http://localhost:8000/random +model: gemma-2-2b +num_tokens: 100 +prompt: Tell me about the history of artificial intelligence +rng_provider: external-api diff --git a/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/log.txt b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/log.txt new file mode 100644 index 0000000000000..fdf6dfb9b585c --- /dev/null +++ b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/log.txt @@ -0,0 +1,1400 @@ +  Loading model  +RNG internal: +- Raw uniform random number: 0.592845 +- Token probabilities: + [0] token 651 = 0.274210 (cumulative: 0.274210) + [1] token 105460 = 0.173214 (cumulative: 0.447424) + [2] token 235285 = 0.099415 (cumulative: 0.546839) + [3] token 886 = 0.089622 (cumulative: 0.636461) + [4] token 1718 = 0.087311 (cumulative: 0.723772) + [5] token 235322 = 0.063815 (cumulative: 0.787587) + [6] token 6179 = 0.028784 (cumulative: 0.816371) + [7] token 
3493 = 0.026248 (cumulative: 0.842619) + [8] token 1734 = 0.020323 (cumulative: 0.862942) + [9] token 2045 = 0.019132 (cumulative: 0.882074) + [10] token 1596 = 0.017878 (cumulative: 0.899952) + [11] token 2339 = 0.013984 (cumulative: 0.913936) + [12] token 235280 = 0.013243 (cumulative: 0.927179) + [13] token 11716 = 0.011606 (cumulative: 0.938784) + [14] token 4521 = 0.010636 (cumulative: 0.949421) + [15] token 2169 = 0.010023 (cumulative: 0.959444) + [16] token 5331 = 0.009342 (cumulative: 0.968786) + [17] token 235274 = 0.008652 (cumulative: 0.977437) + [18] token 1841 = 0.008484 (cumulative: 0.985922) + [19] token 235281 = 0.007568 (cumulative: 0.993490) + [20] token 12362 = 0.006510 (cumulative: 1.000000) +- Scaled random number: 0.592845 +- Selected index: 3 +RNG generated sample: 3 (token id: 886, probability: 0.089622) + +RNG internal: +- Raw uniform random number: 0.844266 +- Token probabilities: + [0] token 573 = 0.704883 (cumulative: 0.704883) + [1] token 235248 = 0.295117 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.844266 +- Selected index: 1 +RNG generated sample: 1 (token id: 235248, probability: 0.295117) + +RNG internal: +- Raw uniform random number: 0.857946 +- Token probabilities: + [0] token 235274 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.857946 +- Selected index: 0 +RNG generated sample: 0 (token id: 235274, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.847252 +- Token probabilities: + [0] token 235315 = 0.956310 (cumulative: 0.956310) + [1] token 235321 = 0.043690 (cumulative: 1.000000) +- Scaled random number: 0.847252 +- Selected index: 0 +RNG generated sample: 0 (token id: 235315, probability: 0.956310) + +RNG internal: +- Raw uniform random number: 0.623564 +- Token probabilities: + [0] token 235308 = 0.798478 (cumulative: 0.798478) + [1] token 235310 = 0.174579 (cumulative: 0.973057) + [2] token 235304 = 0.026943 (cumulative: 1.000000) +- Normalized 
cumulative probabilities +- Scaled random number: 0.623564 +- Selected index: 0 +RNG generated sample: 0 (token id: 235308, probability: 0.798478) + +RNG internal: +- Raw uniform random number: 0.384382 +- Token probabilities: + [0] token 235318 = 0.689007 (cumulative: 0.689007) + [1] token 235276 = 0.221302 (cumulative: 0.910309) + [2] token 235308 = 0.089691 (cumulative: 1.000000) +- Scaled random number: 0.384382 +- Selected index: 0 +RNG generated sample: 0 (token id: 235318, probability: 0.689007) + +RNG internal: +- Raw uniform random number: 0.297535 +- Token probabilities: + [0] token 235269 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.297535 +- Selected index: 0 +RNG generated sample: 0 (token id: 235269, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.056713 +- Token probabilities: + [0] token 573 = 0.460858 (cumulative: 0.460858) + [1] token 21786 = 0.240546 (cumulative: 0.701403) + [2] token 476 = 0.084558 (cumulative: 0.785961) + [3] token 3350 = 0.084262 (cumulative: 0.870223) + [4] token 696 = 0.041919 (cumulative: 0.912142) + [5] token 575 = 0.028854 (cumulative: 0.940996) + [6] token 3725 = 0.016436 (cumulative: 0.957431) + [7] token 6875 = 0.015974 (cumulative: 0.973405) + [8] token 135853 = 0.014047 (cumulative: 0.987452) + [9] token 671 = 0.012548 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.056713 +- Selected index: 0 +RNG generated sample: 0 (token id: 573, probability: 0.460858) + +RNG internal: +- Raw uniform random number: 0.272656 +- Token probabilities: + [0] token 3725 = 0.195866 (cumulative: 0.195866) + [1] token 1370 = 0.184608 (cumulative: 0.380474) + [2] token 10964 = 0.108066 (cumulative: 0.488540) + [3] token 135853 = 0.094987 (cumulative: 0.583527) + [4] token 5168 = 0.057533 (cumulative: 0.641060) + [5] token 2379 = 0.054607 (cumulative: 0.695667) + [6] token 6875 = 0.050389 (cumulative: 0.746056) + [7] token 135348 = 0.039124 (cumulative: 
0.785180) + [8] token 112034 = 0.038773 (cumulative: 0.823953) + [9] token 7149 = 0.034064 (cumulative: 0.858017) + [10] token 38692 = 0.028553 (cumulative: 0.886570) + [11] token 2134 = 0.026986 (cumulative: 0.913556) + [12] token 3520 = 0.024039 (cumulative: 0.937594) + [13] token 17569 = 0.011420 (cumulative: 0.949015) + [14] token 35679 = 0.009318 (cumulative: 0.958333) + [15] token 664 = 0.008199 (cumulative: 0.966532) + [16] token 4645 = 0.007035 (cumulative: 0.973567) + [17] token 752 = 0.005918 (cumulative: 0.979485) + [18] token 11200 = 0.005384 (cumulative: 0.984870) + [19] token 61270 = 0.005166 (cumulative: 0.990035) + [20] token 4268 = 0.004983 (cumulative: 0.995018) + [21] token 1578 = 0.004982 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.272656 +- Selected index: 1 +RNG generated sample: 1 (token id: 1370, probability: 0.184608) + +RNG internal: +- Raw uniform random number: 0.477665 +- Token probabilities: + [0] token 6875 = 0.393525 (cumulative: 0.393525) + [1] token 18225 = 0.321882 (cumulative: 0.715407) + [2] token 16481 = 0.080545 (cumulative: 0.795952) + [3] token 2733 = 0.028537 (cumulative: 0.824489) + [4] token 6479 = 0.028276 (cumulative: 0.852765) + [5] token 3311 = 0.024508 (cumulative: 0.877273) + [6] token 17234 = 0.024271 (cumulative: 0.901544) + [7] token 25175 = 0.018235 (cumulative: 0.919779) + [8] token 8697 = 0.013655 (cumulative: 0.933434) + [9] token 16034 = 0.013360 (cumulative: 0.946794) + [10] token 33436 = 0.012121 (cumulative: 0.958915) + [11] token 11982 = 0.011358 (cumulative: 0.970273) + [12] token 3364 = 0.010219 (cumulative: 0.980492) + [13] token 3434 = 0.009990 (cumulative: 0.990482) + [14] token 4065 = 0.009518 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.477665 +- Selected index: 1 +RNG generated sample: 1 (token id: 18225, probability: 0.321882) + +RNG internal: +- Raw uniform random number: 0.812169 +- Token probabilities: + 
[0] token 17273 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.812169 +- Selected index: 0 +RNG generated sample: 0 (token id: 17273, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.479977 +- Token probabilities: + [0] token 2733 = 0.258680 (cumulative: 0.258680) + [1] token 591 = 0.089152 (cumulative: 0.347832) + [2] token 729 = 0.081580 (cumulative: 0.429411) + [3] token 6875 = 0.078708 (cumulative: 0.508119) + [4] token 1812 = 0.073030 (cumulative: 0.581150) + [5] token 11982 = 0.070477 (cumulative: 0.651626) + [6] token 3679 = 0.051340 (cumulative: 0.702966) + [7] token 2121 = 0.042049 (cumulative: 0.745016) + [8] token 11010 = 0.038663 (cumulative: 0.783679) + [9] token 3542 = 0.033921 (cumulative: 0.817600) + [10] token 7658 = 0.026228 (cumulative: 0.843829) + [11] token 6479 = 0.026172 (cumulative: 0.870001) + [12] token 10844 = 0.025484 (cumulative: 0.895486) + [13] token 17355 = 0.025298 (cumulative: 0.920783) + [14] token 33436 = 0.020719 (cumulative: 0.941502) + [15] token 13865 = 0.013621 (cumulative: 0.955123) + [16] token 235269 = 0.011338 (cumulative: 0.966461) + [17] token 17418 = 0.010205 (cumulative: 0.976666) + [18] token 5188 = 0.008470 (cumulative: 0.985136) + [19] token 19991 = 0.008405 (cumulative: 0.993541) + [20] token 2398 = 0.006459 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.479977 +- Selected index: 3 +RNG generated sample: 3 (token id: 6875, probability: 0.078708) + +RNG internal: +- Raw uniform random number: 0.392785 +- Token probabilities: + [0] token 2733 = 0.639389 (cumulative: 0.639389) + [1] token 729 = 0.206443 (cumulative: 0.845832) + [2] token 2398 = 0.049539 (cumulative: 0.895370) + [3] token 235269 = 0.043503 (cumulative: 0.938873) + [4] token 7658 = 0.033254 (cumulative: 0.972127) + [5] token 1812 = 0.027873 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.392785 +- Selected index: 0 +RNG generated 
sample: 0 (token id: 2733, probability: 0.639389) + +RNG internal: +- Raw uniform random number: 0.836079 +- Token probabilities: + [0] token 729 = 0.832782 (cumulative: 0.832782) + [1] token 235269 = 0.106967 (cumulative: 0.939749) + [2] token 9223 = 0.060251 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.836079 +- Selected index: 1 +RNG generated sample: 1 (token id: 235269, probability: 0.106967) + +RNG internal: +- Raw uniform random number: 0.337396 +- Token probabilities: + [0] token 664 = 0.203086 (cumulative: 0.203086) + [1] token 573 = 0.174274 (cumulative: 0.377360) + [2] token 3151 = 0.156138 (cumulative: 0.533498) + [3] token 39045 = 0.131843 (cumulative: 0.665342) + [4] token 1080 = 0.068438 (cumulative: 0.733779) + [5] token 3836 = 0.063212 (cumulative: 0.796991) + [6] token 948 = 0.035247 (cumulative: 0.832238) + [7] token 476 = 0.026184 (cumulative: 0.858423) + [8] token 6990 = 0.023780 (cumulative: 0.882202) + [9] token 8602 = 0.021782 (cumulative: 0.903984) + [10] token 5483 = 0.019500 (cumulative: 0.923483) + [11] token 21786 = 0.017962 (cumulative: 0.941446) + [12] token 235248 = 0.014519 (cumulative: 0.955964) + [13] token 89826 = 0.011084 (cumulative: 0.967049) + [14] token 6135 = 0.008905 (cumulative: 0.975954) + [15] token 3482 = 0.007170 (cumulative: 0.983124) + [16] token 777 = 0.006334 (cumulative: 0.989457) + [17] token 629 = 0.005424 (cumulative: 0.994881) + [18] token 4082 = 0.005119 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.337396 +- Selected index: 1 +RNG generated sample: 1 (token id: 573, probability: 0.174274) + +RNG internal: +- Raw uniform random number: 0.648172 +- Token probabilities: + [0] token 664 = 0.278440 (cumulative: 0.278440) + [1] token 2398 = 0.155195 (cumulative: 0.433635) + [2] token 39045 = 0.148617 (cumulative: 0.582252) + [3] token 1080 = 0.119049 (cumulative: 0.701301) + [4] token 40559 = 0.059905 (cumulative: 0.761206) + 
[5] token 14855 = 0.054982 (cumulative: 0.816188) + [6] token 2733 = 0.045274 (cumulative: 0.861461) + [7] token 5105 = 0.028696 (cumulative: 0.890157) + [8] token 135348 = 0.023370 (cumulative: 0.913527) + [9] token 6978 = 0.016393 (cumulative: 0.929920) + [10] token 712 = 0.014749 (cumulative: 0.944669) + [11] token 1370 = 0.012059 (cumulative: 0.956728) + [12] token 235248 = 0.011658 (cumulative: 0.968387) + [13] token 24742 = 0.010132 (cumulative: 0.978518) + [14] token 3311 = 0.007708 (cumulative: 0.986226) + [15] token 777 = 0.006966 (cumulative: 0.993192) + [16] token 6875 = 0.006808 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.648172 +- Selected index: 3 +RNG generated sample: 3 (token id: 1080, probability: 0.119049) + +RNG internal: +- Raw uniform random number: 0.368242 +- Token probabilities: + [0] token 31616 = 0.823982 (cumulative: 0.823982) + [1] token 17112 = 0.050531 (cumulative: 0.874512) + [2] token 92690 = 0.047421 (cumulative: 0.921934) + [3] token 8723 = 0.029621 (cumulative: 0.951554) + [4] token 37441 = 0.024447 (cumulative: 0.976002) + [5] token 96815 = 0.023998 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.368242 +- Selected index: 0 +RNG generated sample: 0 (token id: 31616, probability: 0.823982) + +RNG internal: +- Raw uniform random number: 0.957155 +- Token probabilities: + [0] token 140950 = 0.976798 (cumulative: 0.976798) + [1] token 13403 = 0.023202 (cumulative: 1.000000) +- Scaled random number: 0.957155 +- Selected index: 0 +RNG generated sample: 0 (token id: 140950, probability: 0.976798) + +RNG internal: +- Raw uniform random number: 0.140351 +- Token probabilities: + [0] token 694 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.140351 +- Selected index: 0 +RNG generated sample: 0 (token id: 694, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.870087 +- Token probabilities: + [0] token 5763 = 0.388286 
(cumulative: 0.388286) + [1] token 2316 = 0.348432 (cumulative: 0.736718) + [2] token 235369 = 0.263282 (cumulative: 1.000000) +- Scaled random number: 0.870087 +- Selected index: 2 +RNG generated sample: 2 (token id: 235369, probability: 0.263282) + +RNG internal: +- Raw uniform random number: 0.473608 +- Token probabilities: + [0] token 729 = 0.759880 (cumulative: 0.759880) + [1] token 2733 = 0.074685 (cumulative: 0.834566) + [2] token 731 = 0.069062 (cumulative: 0.903627) + [3] token 591 = 0.048328 (cumulative: 0.951955) + [4] token 6990 = 0.026050 (cumulative: 0.978005) + [5] token 576 = 0.021995 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.473608 +- Selected index: 0 +RNG generated sample: 0 (token id: 729, probability: 0.759880) + +RNG internal: +- Raw uniform random number: 0.800911 +- Token probabilities: + [0] token 5483 = 0.468313 (cumulative: 0.468313) + [1] token 6990 = 0.347191 (cumulative: 0.815503) + [2] token 5952 = 0.123011 (cumulative: 0.938515) + [3] token 42020 = 0.023079 (cumulative: 0.961594) + [4] token 7018 = 0.012982 (cumulative: 0.974576) + [5] token 11261 = 0.012837 (cumulative: 0.987413) + [6] token 6869 = 0.012587 (cumulative: 1.000000) +- Scaled random number: 0.800911 +- Selected index: 1 +RNG generated sample: 1 (token id: 6990, probability: 0.347191) + +RNG internal: +- Raw uniform random number: 0.520477 +- Token probabilities: + [0] token 731 = 0.423427 (cumulative: 0.423427) + [1] token 235265 = 0.406066 (cumulative: 0.829493) + [2] token 696 = 0.065436 (cumulative: 0.894929) + [3] token 235269 = 0.038590 (cumulative: 0.933519) + [4] token 575 = 0.023753 (cumulative: 0.957272) + [5] token 578 = 0.019943 (cumulative: 0.977214) + [6] token 35606 = 0.011805 (cumulative: 0.989020) + [7] token 235322 = 0.010980 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.520477 +- Selected index: 1 +RNG generated sample: 1 (token id: 235265, probability: 0.406066) 
+ +RNG internal: +- Raw uniform random number: 0.678880 +- Token probabilities: + [0] token 1165 = 0.348602 (cumulative: 0.348602) + [1] token 714 = 0.231325 (cumulative: 0.579928) + [2] token 1417 = 0.157429 (cumulative: 0.737357) + [3] token 108 = 0.098313 (cumulative: 0.835670) + [4] token 878 = 0.083861 (cumulative: 0.919531) + [5] token 8533 = 0.022440 (cumulative: 0.941971) + [6] token 9707 = 0.021040 (cumulative: 0.963012) + [7] token 4560 = 0.013822 (cumulative: 0.976834) + [8] token 586 = 0.013079 (cumulative: 0.989913) + [9] token 109 = 0.010087 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.678880 +- Selected index: 2 +RNG generated sample: 2 (token id: 1417, probability: 0.157429) + +RNG internal: +- Raw uniform random number: 0.720633 +- Token probabilities: + [0] token 2733 = 0.787793 (cumulative: 0.787793) + [1] token 729 = 0.139156 (cumulative: 0.926949) + [2] token 6875 = 0.073051 (cumulative: 1.000000) +- Scaled random number: 0.720633 +- Selected index: 0 +RNG generated sample: 0 (token id: 2733, probability: 0.787793) + +RNG internal: +- Raw uniform random number: 0.582020 +- Token probabilities: + [0] token 729 = 0.793150 (cumulative: 0.793150) + [1] token 1538 = 0.111584 (cumulative: 0.904735) + [2] token 235269 = 0.048275 (cumulative: 0.953010) + [3] token 1671 = 0.026261 (cumulative: 0.979271) + [4] token 603 = 0.020729 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.582020 +- Selected index: 0 +RNG generated sample: 0 (token id: 729, probability: 0.793150) + +RNG internal: +- Raw uniform random number: 0.537373 +- Token probabilities: + [0] token 3326 = 0.385277 (cumulative: 0.385277) + [1] token 6869 = 0.249536 (cumulative: 0.634813) + [2] token 5483 = 0.060900 (cumulative: 0.695713) + [3] token 13920 = 0.056935 (cumulative: 0.752648) + [4] token 476 = 0.053681 (cumulative: 0.806329) + [5] token 5952 = 0.045474 (cumulative: 0.851803) + [6] token 6990 = 
0.038663 (cumulative: 0.890466) + [7] token 3482 = 0.029451 (cumulative: 0.919917) + [8] token 1671 = 0.025921 (cumulative: 0.945838) + [9] token 73517 = 0.021834 (cumulative: 0.967671) + [10] token 573 = 0.018142 (cumulative: 0.985814) + [11] token 671 = 0.014186 (cumulative: 1.000000) +- Scaled random number: 0.537373 +- Selected index: 1 +RNG generated sample: 1 (token id: 6869, probability: 0.249536) + +RNG internal: +- Raw uniform random number: 0.758616 +- Token probabilities: + [0] token 577 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.758616 +- Selected index: 0 +RNG generated sample: 0 (token id: 577, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.105908 +- Token probabilities: + [0] token 2121 = 0.246434 (cumulative: 0.246434) + [1] token 10244 = 0.234150 (cumulative: 0.480585) + [2] token 11560 = 0.196989 (cumulative: 0.677574) + [3] token 8363 = 0.073299 (cumulative: 0.750872) + [4] token 22101 = 0.038947 (cumulative: 0.789820) + [5] token 614 = 0.032567 (cumulative: 0.822387) + [6] token 3448 = 0.028042 (cumulative: 0.850429) + [7] token 1707 = 0.018386 (cumulative: 0.868815) + [8] token 11650 = 0.016934 (cumulative: 0.885750) + [9] token 3114 = 0.013968 (cumulative: 0.899718) + [10] token 1717 = 0.013778 (cumulative: 0.913496) + [11] token 27205 = 0.012402 (cumulative: 0.925898) + [12] token 1500 = 0.011485 (cumulative: 0.937383) + [13] token 58142 = 0.009571 (cumulative: 0.946953) + [14] token 4971 = 0.009086 (cumulative: 0.956039) + [15] token 3418 = 0.008351 (cumulative: 0.964391) + [16] token 1160 = 0.007878 (cumulative: 0.972269) + [17] token 3519 = 0.007504 (cumulative: 0.979773) + [18] token 1281 = 0.007264 (cumulative: 0.987036) + [19] token 1554 = 0.006595 (cumulative: 0.993631) + [20] token 2701 = 0.006369 (cumulative: 1.000000) +- Scaled random number: 0.105908 +- Selected index: 0 +RNG generated sample: 0 (token id: 2121, probability: 0.246434) + +RNG internal: +- Raw uniform random number: 0.473600 
+- Token probabilities: + [0] token 573 = 0.525415 (cumulative: 0.525415) + [1] token 1277 = 0.119517 (cumulative: 0.644932) + [2] token 4270 = 0.108215 (cumulative: 0.753147) + [3] token 24742 = 0.063718 (cumulative: 0.816866) + [4] token 476 = 0.050223 (cumulative: 0.867089) + [5] token 30765 = 0.042634 (cumulative: 0.909723) + [6] token 578 = 0.028530 (cumulative: 0.938253) + [7] token 4282 = 0.026559 (cumulative: 0.964812) + [8] token 14855 = 0.020887 (cumulative: 0.985699) + [9] token 1013 = 0.014301 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.473600 +- Selected index: 0 +RNG generated sample: 0 (token id: 573, probability: 0.525415) + +RNG internal: +- Raw uniform random number: 0.186332 +- Token probabilities: + [0] token 24742 = 0.289172 (cumulative: 0.289172) + [1] token 14855 = 0.211699 (cumulative: 0.500870) + [2] token 32413 = 0.146700 (cumulative: 0.647571) + [3] token 7374 = 0.112929 (cumulative: 0.760500) + [4] token 22466 = 0.039964 (cumulative: 0.800463) + [5] token 30765 = 0.035226 (cumulative: 0.835689) + [6] token 31724 = 0.026153 (cumulative: 0.861842) + [7] token 7873 = 0.022129 (cumulative: 0.883971) + [8] token 13121 = 0.017530 (cumulative: 0.901502) + [9] token 7900 = 0.017200 (cumulative: 0.918702) + [10] token 24743 = 0.013117 (cumulative: 0.931819) + [11] token 24995 = 0.012537 (cumulative: 0.944355) + [12] token 6364 = 0.011312 (cumulative: 0.955667) + [13] token 25087 = 0.009704 (cumulative: 0.965372) + [14] token 147272 = 0.009148 (cumulative: 0.974520) + [15] token 6859 = 0.008592 (cumulative: 0.983112) + [16] token 32346 = 0.008540 (cumulative: 0.991652) + [17] token 12555 = 0.008348 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.186332 +- Selected index: 0 +RNG generated sample: 0 (token id: 24742, probability: 0.289172) + +RNG internal: +- Raw uniform random number: 0.736918 +- Token probabilities: + [0] token 32346 = 0.239266 (cumulative: 
0.239266) + [1] token 31724 = 0.174619 (cumulative: 0.413885) + [2] token 32413 = 0.150410 (cumulative: 0.564295) + [3] token 22466 = 0.101935 (cumulative: 0.666230) + [4] token 24743 = 0.072473 (cumulative: 0.738702) + [5] token 7374 = 0.036904 (cumulative: 0.775606) + [6] token 7841 = 0.036750 (cumulative: 0.812356) + [7] token 6945 = 0.033726 (cumulative: 0.846083) + [8] token 5449 = 0.032540 (cumulative: 0.878622) + [9] token 12558 = 0.018514 (cumulative: 0.897136) + [10] token 188216 = 0.015791 (cumulative: 0.912926) + [11] token 12555 = 0.015351 (cumulative: 0.928277) + [12] token 25200 = 0.012549 (cumulative: 0.940826) + [13] token 7268 = 0.010211 (cumulative: 0.951038) + [14] token 578 = 0.008944 (cumulative: 0.959982) + [15] token 7166 = 0.008019 (cumulative: 0.968001) + [16] token 89330 = 0.007199 (cumulative: 0.975200) + [17] token 12225 = 0.006734 (cumulative: 0.981934) + [18] token 26387 = 0.006362 (cumulative: 0.988296) + [19] token 18255 = 0.006031 (cumulative: 0.994327) + [20] token 24097 = 0.005673 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.736918 +- Selected index: 4 +RNG generated sample: 4 (token id: 24743, probability: 0.072473) + +RNG internal: +- Raw uniform random number: 0.216550 +- Token probabilities: + [0] token 576 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.216550 +- Selected index: 0 +RNG generated sample: 0 (token id: 576, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.135218 +- Token probabilities: + [0] token 476 = 0.258376 (cumulative: 0.258376) + [1] token 573 = 0.247880 (cumulative: 0.506256) + [2] token 25175 = 0.158203 (cumulative: 0.664459) + [3] token 17611 = 0.126207 (cumulative: 0.790666) + [4] token 1461 = 0.047754 (cumulative: 0.838420) + [5] token 1277 = 0.046543 (cumulative: 0.884963) + [6] token 3515 = 0.037474 (cumulative: 0.922437) + [7] token 671 = 0.025727 (cumulative: 0.948163) + [8] token 6875 = 0.022853 (cumulative: 
0.971016) + [9] token 15051 = 0.011521 (cumulative: 0.982537) + [10] token 181591 = 0.009854 (cumulative: 0.992391) + [11] token 18225 = 0.007609 (cumulative: 1.000000) +- Scaled random number: 0.135218 +- Selected index: 0 +RNG generated sample: 0 (token id: 476, probability: 0.258376) + +RNG internal: +- Raw uniform random number: 0.324141 +- Token probabilities: + [0] token 6875 = 0.819789 (cumulative: 0.819789) + [1] token 3515 = 0.073029 (cumulative: 0.892819) + [2] token 6479 = 0.053858 (cumulative: 0.946677) + [3] token 1552 = 0.053323 (cumulative: 1.000000) +- Scaled random number: 0.324141 +- Selected index: 0 +RNG generated sample: 0 (token id: 6875, probability: 0.819789) + +RNG internal: +- Raw uniform random number: 0.149675 +- Token probabilities: + [0] token 235265 = 0.595366 (cumulative: 0.595366) + [1] token 731 = 0.164509 (cumulative: 0.759874) + [2] token 578 = 0.132724 (cumulative: 0.892598) + [3] token 235269 = 0.091434 (cumulative: 0.984032) + [4] token 2733 = 0.015968 (cumulative: 1.000000) +- Scaled random number: 0.149675 +- Selected index: 0 +RNG generated sample: 0 (token id: 235265, probability: 0.595366) + +RNG internal: +- Raw uniform random number: 0.222321 +- Token probabilities: + [0] token 1165 = 0.432864 (cumulative: 0.432864) + [1] token 714 = 0.342775 (cumulative: 0.775639) + [2] token 878 = 0.077039 (cumulative: 0.852678) + [3] token 108 = 0.063075 (cumulative: 0.915753) + [4] token 4560 = 0.022963 (cumulative: 0.938716) + [5] token 1417 = 0.022739 (cumulative: 0.961455) + [6] token 109 = 0.016129 (cumulative: 0.977584) + [7] token 8533 = 0.011547 (cumulative: 0.989130) + [8] token 9693 = 0.010870 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.222321 +- Selected index: 0 +RNG generated sample: 0 (token id: 1165, probability: 0.432864) + +RNG internal: +- Raw uniform random number: 0.386489 +- Token probabilities: + [0] token 729 = 0.845230 (cumulative: 0.845230) + [1] token 1538 = 
0.056396 (cumulative: 0.901626) + [2] token 1134 = 0.030819 (cumulative: 0.932445) + [3] token 1671 = 0.023895 (cumulative: 0.956340) + [4] token 14621 = 0.022608 (cumulative: 0.978949) + [5] token 603 = 0.021051 (cumulative: 1.000000) +- Scaled random number: 0.386489 +- Selected index: 0 +RNG generated sample: 0 (token id: 729, probability: 0.845230) + +RNG internal: +- Raw uniform random number: 0.902598 +- Token probabilities: + [0] token 3326 = 0.605022 (cumulative: 0.605022) + [1] token 476 = 0.084080 (cumulative: 0.689102) + [2] token 573 = 0.058482 (cumulative: 0.747584) + [3] token 780 = 0.043943 (cumulative: 0.791527) + [4] token 13920 = 0.039771 (cumulative: 0.831298) + [5] token 3482 = 0.035118 (cumulative: 0.866416) + [6] token 73517 = 0.033590 (cumulative: 0.900006) + [7] token 6869 = 0.032135 (cumulative: 0.932141) + [8] token 5483 = 0.025636 (cumulative: 0.957777) + [9] token 8385 = 0.025507 (cumulative: 0.983283) + [10] token 6990 = 0.016716 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.902598 +- Selected index: 7 +RNG generated sample: 7 (token id: 6869, probability: 0.032135) + +RNG internal: +- Raw uniform random number: 0.449950 +- Token probabilities: + [0] token 577 = 0.899845 (cumulative: 0.899845) + [1] token 731 = 0.100155 (cumulative: 1.000000) +- Scaled random number: 0.449950 +- Selected index: 0 +RNG generated sample: 0 (token id: 577, probability: 0.899845) + +RNG internal: +- Raw uniform random number: 0.613063 +- Token probabilities: + [0] token 614 = 0.174297 (cumulative: 0.174297) + [1] token 2121 = 0.161849 (cumulative: 0.336147) + [2] token 10244 = 0.132728 (cumulative: 0.468875) + [3] token 11560 = 0.105713 (cumulative: 0.574588) + [4] token 8363 = 0.056397 (cumulative: 0.630985) + [5] token 3448 = 0.038994 (cumulative: 0.669979) + [6] token 1160 = 0.026719 (cumulative: 0.696698) + [7] token 3114 = 0.021097 (cumulative: 0.717795) + [8] token 749 = 0.019989 (cumulative: 0.737784) + [9] 
token 1717 = 0.019048 (cumulative: 0.756831) + [10] token 1281 = 0.018521 (cumulative: 0.775353) + [11] token 22101 = 0.017793 (cumulative: 0.793146) + [12] token 27205 = 0.017180 (cumulative: 0.810326) + [13] token 1500 = 0.016377 (cumulative: 0.826703) + [14] token 1554 = 0.015237 (cumulative: 0.841940) + [15] token 1987 = 0.010818 (cumulative: 0.852757) + [16] token 2701 = 0.010722 (cumulative: 0.863480) + [17] token 3519 = 0.010620 (cumulative: 0.874100) + [18] token 58142 = 0.010394 (cumulative: 0.884495) + [19] token 1501 = 0.009888 (cumulative: 0.894382) + [20] token 5291 = 0.009849 (cumulative: 0.904231) + [21] token 3418 = 0.009486 (cumulative: 0.913717) + [22] token 18739 = 0.008534 (cumulative: 0.922252) + [23] token 11441 = 0.007726 (cumulative: 0.929977) + [24] token 1611 = 0.007329 (cumulative: 0.937307) + [25] token 3104 = 0.006918 (cumulative: 0.944225) + [26] token 11941 = 0.006757 (cumulative: 0.950982) + [27] token 1443 = 0.006543 (cumulative: 0.957525) + [28] token 4544 = 0.006309 (cumulative: 0.963833) + [29] token 11650 = 0.005806 (cumulative: 0.969639) + [30] token 1707 = 0.005656 (cumulative: 0.975295) + [31] token 6502 = 0.005270 (cumulative: 0.980565) + [32] token 1612 = 0.005202 (cumulative: 0.985768) + [33] token 3508 = 0.005142 (cumulative: 0.990910) + [34] token 1080 = 0.004577 (cumulative: 0.995487) + [35] token 3918 = 0.004513 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.613063 +- Selected index: 4 +RNG generated sample: 4 (token id: 8363, probability: 0.056397) + +RNG internal: +- Raw uniform random number: 0.902349 +- Token probabilities: + [0] token 4270 = 0.615477 (cumulative: 0.615477) + [1] token 1013 = 0.335167 (cumulative: 0.950644) + [2] token 573 = 0.049355 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.902349 +- Selected index: 1 +RNG generated sample: 1 (token id: 1013, probability: 0.335167) + +RNG internal: +- Raw uniform random 
number: 0.099280 +- Token probabilities: + [0] token 476 = 0.716391 (cumulative: 0.716391) + [1] token 573 = 0.185919 (cumulative: 0.902310) + [2] token 665 = 0.097690 (cumulative: 1.000000) +- Scaled random number: 0.099280 +- Selected index: 0 +RNG generated sample: 0 (token id: 476, probability: 0.716391) + +RNG internal: +- Raw uniform random number: 0.969809 +- Token probabilities: + [0] token 6875 = 0.850031 (cumulative: 0.850031) + [1] token 2733 = 0.070531 (cumulative: 0.920563) + [2] token 2764 = 0.058698 (cumulative: 0.979261) + [3] token 6218 = 0.020739 (cumulative: 1.000000) +- Scaled random number: 0.969809 +- Selected index: 2 +RNG generated sample: 2 (token id: 2764, probability: 0.058698) + +RNG internal: +- Raw uniform random number: 0.653140 +- Token probabilities: + [0] token 6218 = 0.368522 (cumulative: 0.368522) + [1] token 1142 = 0.230611 (cumulative: 0.599133) + [2] token 30765 = 0.093099 (cumulative: 0.692232) + [3] token 30673 = 0.054071 (cumulative: 0.746303) + [4] token 2733 = 0.050353 (cumulative: 0.796655) + [5] token 10513 = 0.045775 (cumulative: 0.842431) + [6] token 24742 = 0.039840 (cumulative: 0.882271) + [7] token 3772 = 0.034020 (cumulative: 0.916290) + [8] token 3210 = 0.023679 (cumulative: 0.939969) + [9] token 9916 = 0.022963 (cumulative: 0.962932) + [10] token 6875 = 0.014606 (cumulative: 0.977538) + [11] token 6431 = 0.011682 (cumulative: 0.989220) + [12] token 13060 = 0.010780 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.653140 +- Selected index: 2 +RNG generated sample: 2 (token id: 30765, probability: 0.093099) + +RNG internal: +- Raw uniform random number: 0.170910 +- Token probabilities: + [0] token 6218 = 0.665578 (cumulative: 0.665578) + [1] token 3210 = 0.079803 (cumulative: 0.745381) + [2] token 10513 = 0.075736 (cumulative: 0.821117) + [3] token 8738 = 0.059956 (cumulative: 0.881072) + [4] token 30673 = 0.041297 (cumulative: 0.922369) + [5] token 6887 = 0.038001 
(cumulative: 0.960370) + [6] token 27321 = 0.023678 (cumulative: 0.984048) + [7] token 9102 = 0.015952 (cumulative: 1.000000) +- Scaled random number: 0.170910 +- Selected index: 0 +RNG generated sample: 0 (token id: 6218, probability: 0.665578) + +RNG internal: +- Raw uniform random number: 0.358152 +- Token probabilities: + [0] token 729 = 0.862046 (cumulative: 0.862046) + [1] token 603 = 0.109111 (cumulative: 0.971157) + [2] token 1538 = 0.028843 (cumulative: 1.000000) +- Scaled random number: 0.358152 +- Selected index: 0 +RNG generated sample: 0 (token id: 729, probability: 0.862046) + +RNG internal: +- Raw uniform random number: 0.750686 +- Token probabilities: + [0] token 1382 = 0.890231 (cumulative: 0.890231) + [1] token 4819 = 0.065597 (cumulative: 0.955829) + [2] token 5112 = 0.022657 (cumulative: 0.978485) + [3] token 476 = 0.021515 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.750686 +- Selected index: 0 +RNG generated sample: 0 (token id: 1382, probability: 0.890231) + +RNG internal: +- Raw uniform random number: 0.607831 +- Token probabilities: + [0] token 689 = 0.937401 (cumulative: 0.937401) + [1] token 235265 = 0.062599 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.607831 +- Selected index: 0 +RNG generated sample: 0 (token id: 689, probability: 0.937401) + +RNG internal: +- Raw uniform random number: 0.325047 +- Token probabilities: + [0] token 1566 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.325047 +- Selected index: 0 +RNG generated sample: 0 (token id: 1566, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.038425 +- Token probabilities: + [0] token 235265 = 0.826957 (cumulative: 0.826957) + [1] token 731 = 0.090979 (cumulative: 0.917936) + [2] token 235269 = 0.060614 (cumulative: 0.978550) + [3] token 578 = 0.021450 (cumulative: 1.000000) +- Scaled random number: 0.038425 +- Selected index: 0 +RNG generated sample: 0 
(token id: 235265, probability: 0.826957) + +RNG internal: +- Raw uniform random number: 0.634274 +- Token probabilities: + [0] token 714 = 0.577312 (cumulative: 0.577312) + [1] token 1165 = 0.168872 (cumulative: 0.746184) + [2] token 1417 = 0.097621 (cumulative: 0.843805) + [3] token 108 = 0.089722 (cumulative: 0.933527) + [4] token 878 = 0.043604 (cumulative: 0.977131) + [5] token 109 = 0.022869 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.634274 +- Selected index: 1 +RNG generated sample: 1 (token id: 1165, probability: 0.168872) + +RNG internal: +- Raw uniform random number: 0.958949 +- Token probabilities: + [0] token 729 = 0.796294 (cumulative: 0.796294) + [1] token 1671 = 0.055330 (cumulative: 0.851623) + [2] token 1498 = 0.053206 (cumulative: 0.904830) + [3] token 1538 = 0.028023 (cumulative: 0.932852) + [4] token 603 = 0.026168 (cumulative: 0.959021) + [5] token 1170 = 0.020723 (cumulative: 0.979743) + [6] token 1134 = 0.020257 (cumulative: 1.000000) +- Scaled random number: 0.958949 +- Selected index: 4 +RNG generated sample: 4 (token id: 603, probability: 0.026168) + +RNG internal: +- Raw uniform random number: 0.652790 +- Token probabilities: + [0] token 5604 = 0.175121 (cumulative: 0.175121) + [1] token 13277 = 0.153589 (cumulative: 0.328711) + [2] token 2845 = 0.100296 (cumulative: 0.429006) + [3] token 573 = 0.086324 (cumulative: 0.515330) + [4] token 1180 = 0.073832 (cumulative: 0.589162) + [5] token 476 = 0.050402 (cumulative: 0.639564) + [6] token 2076 = 0.049879 (cumulative: 0.689443) + [7] token 1170 = 0.039003 (cumulative: 0.728445) + [8] token 3421 = 0.033634 (cumulative: 0.762079) + [9] token 7103 = 0.029498 (cumulative: 0.791577) + [10] token 974 = 0.028961 (cumulative: 0.820538) + [11] token 1490 = 0.027479 (cumulative: 0.848017) + [12] token 3482 = 0.023851 (cumulative: 0.871869) + [13] token 780 = 0.021746 (cumulative: 0.893614) + [14] token 671 = 0.019661 (cumulative: 0.913275) + [15] token 
16714 = 0.017120 (cumulative: 0.930394) + [16] token 3695 = 0.014283 (cumulative: 0.944677) + [17] token 3836 = 0.010992 (cumulative: 0.955669) + [18] token 5123 = 0.009935 (cumulative: 0.965604) + [19] token 11995 = 0.009515 (cumulative: 0.975120) + [20] token 3077 = 0.007914 (cumulative: 0.983034) + [21] token 7094 = 0.007153 (cumulative: 0.990186) + [22] token 736 = 0.005465 (cumulative: 0.995652) + [23] token 3151 = 0.004349 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.652790 +- Selected index: 6 +RNG generated sample: 6 (token id: 2076, probability: 0.049879) + +RNG internal: +- Raw uniform random number: 0.635059 +- Token probabilities: + [0] token 1671 = 0.406296 (cumulative: 0.406296) + [1] token 575 = 0.192803 (cumulative: 0.599099) + [2] token 5604 = 0.168411 (cumulative: 0.767511) + [3] token 974 = 0.059525 (cumulative: 0.827035) + [4] token 1855 = 0.049729 (cumulative: 0.876765) + [5] token 573 = 0.049257 (cumulative: 0.926021) + [6] token 16714 = 0.021290 (cumulative: 0.947311) + [7] token 5327 = 0.021017 (cumulative: 0.968328) + [8] token 476 = 0.017716 (cumulative: 0.986044) + [9] token 3077 = 0.013956 (cumulative: 1.000000) +- Scaled random number: 0.635059 +- Selected index: 2 +RNG generated sample: 2 (token id: 5604, probability: 0.168411) + +RNG internal: +- Raw uniform random number: 0.995300 +- Token probabilities: + [0] token 577 = 0.451206 (cumulative: 0.451206) + [1] token 974 = 0.244825 (cumulative: 0.696031) + [2] token 573 = 0.198692 (cumulative: 0.894723) + [3] token 476 = 0.065625 (cumulative: 0.960347) + [4] token 671 = 0.014540 (cumulative: 0.974887) + [5] token 731 = 0.012775 (cumulative: 0.987662) + [6] token 685 = 0.012338 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.995300 +- Selected index: 6 +RNG generated sample: 6 (token id: 685, probability: 0.012338) + +RNG internal: +- Raw uniform random number: 0.581850 +- Token probabilities: + [0] 
token 974 = 0.458660 (cumulative: 0.458660) + [1] token 573 = 0.456690 (cumulative: 0.915350) + [2] token 476 = 0.061865 (cumulative: 0.977215) + [3] token 671 = 0.022785 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.581850 +- Selected index: 1 +RNG generated sample: 1 (token id: 573, probability: 0.456690) + +RNG internal: +- Raw uniform random number: 0.414369 +- Token probabilities: + [0] token 1370 = 0.925890 (cumulative: 0.925890) + [1] token 1546 = 0.050608 (cumulative: 0.976498) + [2] token 2134 = 0.023502 (cumulative: 1.000000) +- Scaled random number: 0.414369 +- Selected index: 0 +RNG generated sample: 0 (token id: 1370, probability: 0.925890) + +RNG internal: +- Raw uniform random number: 0.474698 +- Token probabilities: + [0] token 18225 = 0.322165 (cumulative: 0.322165) + [1] token 16481 = 0.198979 (cumulative: 0.521144) + [2] token 6875 = 0.132834 (cumulative: 0.653979) + [3] token 2733 = 0.079117 (cumulative: 0.733096) + [4] token 8385 = 0.061359 (cumulative: 0.794455) + [5] token 4065 = 0.052413 (cumulative: 0.846867) + [6] token 3364 = 0.043254 (cumulative: 0.890121) + [7] token 1382 = 0.038182 (cumulative: 0.928303) + [8] token 3287 = 0.017877 (cumulative: 0.946180) + [9] token 8697 = 0.015535 (cumulative: 0.961715) + [10] token 3885 = 0.010694 (cumulative: 0.972409) + [11] token 1879 = 0.009350 (cumulative: 0.981759) + [12] token 3836 = 0.009169 (cumulative: 0.990928) + [13] token 1080 = 0.009072 (cumulative: 1.000000) +- Scaled random number: 0.474698 +- Selected index: 1 +RNG generated sample: 1 (token id: 16481, probability: 0.198979) + +RNG internal: +- Raw uniform random number: 0.623510 +- Token probabilities: + [0] token 2733 = 0.814418 (cumulative: 0.814418) + [1] token 6875 = 0.152324 (cumulative: 0.966742) + [2] token 1812 = 0.033258 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.623510 +- Selected index: 0 +RNG generated sample: 0 (token id: 2733, 
probability: 0.814418) + +RNG internal: +- Raw uniform random number: 0.338008 +- Token probabilities: + [0] token 235265 = 0.523983 (cumulative: 0.523983) + [1] token 577 = 0.141812 (cumulative: 0.665796) + [2] token 674 = 0.064771 (cumulative: 0.730567) + [3] token 575 = 0.063529 (cumulative: 0.794095) + [4] token 3364 = 0.048300 (cumulative: 0.842396) + [5] token 35606 = 0.041633 (cumulative: 0.884028) + [6] token 235269 = 0.039151 (cumulative: 0.923179) + [7] token 578 = 0.033886 (cumulative: 0.957065) + [8] token 3646 = 0.030427 (cumulative: 0.987492) + [9] token 6990 = 0.012508 (cumulative: 1.000000) +- Scaled random number: 0.338008 +- Selected index: 0 +RNG generated sample: 0 (token id: 235265, probability: 0.523983) + +RNG internal: +- Raw uniform random number: 0.674752 +- Token probabilities: + [0] token 108 = 0.530066 (cumulative: 0.530066) + [1] token 878 = 0.126280 (cumulative: 0.656346) + [2] token 109 = 0.122080 (cumulative: 0.778426) + [3] token 714 = 0.105097 (cumulative: 0.883522) + [4] token 1165 = 0.043589 (cumulative: 0.927112) + [5] token 968 = 0.031959 (cumulative: 0.959071) + [6] token 4560 = 0.014985 (cumulative: 0.974056) + [7] token 8533 = 0.013229 (cumulative: 0.987284) + [8] token 1417 = 0.012716 (cumulative: 1.000000) +- Scaled random number: 0.674752 +- Selected index: 2 +RNG generated sample: 2 (token id: 109, probability: 0.122080) + +RNG internal: +- Raw uniform random number: 0.317202 +- Token probabilities: + [0] token 886 = 0.668943 (cumulative: 0.668943) + [1] token 651 = 0.229445 (cumulative: 0.898389) + [2] token 235322 = 0.101611 (cumulative: 1.000000) +- Scaled random number: 0.317202 +- Selected index: 0 +RNG generated sample: 0 (token id: 886, probability: 0.668943) + +RNG internal: +- Raw uniform random number: 0.778345 +- Token probabilities: + [0] token 235248 = 0.862096 (cumulative: 0.862096) + [1] token 573 = 0.137904 (cumulative: 1.000000) +- Scaled random number: 0.778345 +- Selected index: 0 +RNG generated 
sample: 0 (token id: 235248, probability: 0.862096) + +RNG internal: +- Raw uniform random number: 0.949571 +- Token probabilities: + [0] token 235274 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.949571 +- Selected index: 0 +RNG generated sample: 0 (token id: 235274, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.662527 +- Token probabilities: + [0] token 235315 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.662527 +- Selected index: 0 +RNG generated sample: 0 (token id: 235315, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.013572 +- Token probabilities: + [0] token 235308 = 0.639661 (cumulative: 0.639661) + [1] token 235318 = 0.275343 (cumulative: 0.915004) + [2] token 235315 = 0.030109 (cumulative: 0.945113) + [3] token 235324 = 0.027641 (cumulative: 0.972754) + [4] token 235321 = 0.027246 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.013572 +- Selected index: 0 +RNG generated sample: 0 (token id: 235308, probability: 0.639661) + +RNG internal: +- Raw uniform random number: 0.622846 +- Token probabilities: + [0] token 235318 = 0.396667 (cumulative: 0.396667) + [1] token 235324 = 0.285154 (cumulative: 0.681821) + [2] token 235321 = 0.161404 (cumulative: 0.843225) + [3] token 235315 = 0.067127 (cumulative: 0.910352) + [4] token 235308 = 0.048712 (cumulative: 0.959064) + [5] token 235276 = 0.040936 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.622846 +- Selected index: 1 +RNG generated sample: 1 (token id: 235324, probability: 0.285154) + +RNG internal: +- Raw uniform random number: 0.673660 +- Token probabilities: + [0] token 235269 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.673660 +- Selected index: 0 +RNG generated sample: 0 (token id: 235269, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.971945 +- Token probabilities: + [0] token 573 = 0.743643 
(cumulative: 0.743643) + [1] token 21786 = 0.116489 (cumulative: 0.860132) + [2] token 476 = 0.053962 (cumulative: 0.914093) + [3] token 3350 = 0.043848 (cumulative: 0.957941) + [4] token 2550 = 0.024190 (cumulative: 0.982131) + [5] token 1080 = 0.017869 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.971945 +- Selected index: 4 +RNG generated sample: 4 (token id: 2550, probability: 0.024190) + +RNG internal: +- Raw uniform random number: 0.878193 +- Token probabilities: + [0] token 2733 = 0.356259 (cumulative: 0.356259) + [1] token 16481 = 0.350001 (cumulative: 0.706261) + [2] token 6875 = 0.157458 (cumulative: 0.863719) + [3] token 18225 = 0.112145 (cumulative: 0.975864) + [4] token 10964 = 0.013665 (cumulative: 0.989529) + [5] token 2845 = 0.010471 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.878193 +- Selected index: 3 +RNG generated sample: 3 (token id: 18225, probability: 0.112145) + +RNG internal: +- Raw uniform random number: 0.509624 +- Token probabilities: + [0] token 17273 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.509624 +- Selected index: 0 +RNG generated sample: 0 (token id: 17273, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.055715 +- Token probabilities: + [0] token 2733 = 0.615609 (cumulative: 0.615609) + [1] token 6875 = 0.384391 (cumulative: 1.000000) +- Scaled random number: 0.055715 +- Selected index: 0 +RNG generated sample: 0 (token id: 2733, probability: 0.615609) + +RNG internal: +- Raw uniform random number: 0.451159 +- Token probabilities: + [0] token 729 = 0.488738 (cumulative: 0.488738) + [1] token 235269 = 0.431861 (cumulative: 0.920599) + [2] token 3151 = 0.079401 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.451159 +- Selected index: 0 +RNG generated sample: 0 (token id: 729, probability: 0.488738) + +RNG internal: +- Raw uniform random number: 0.019988 +- Token 
probabilities: + [0] token 6990 = 0.828484 (cumulative: 0.828484) + [1] token 5483 = 0.171516 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.019988 +- Selected index: 0 +RNG generated sample: 0 (token id: 6990, probability: 0.828484) + +RNG internal: +- Raw uniform random number: 0.441711 +- Token probabilities: + [0] token 235269 = 0.500042 (cumulative: 0.500042) + [1] token 235265 = 0.337281 (cumulative: 0.837323) + [2] token 731 = 0.092573 (cumulative: 0.929895) + [3] token 3151 = 0.070105 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.441711 +- Selected index: 0 +RNG generated sample: 0 (token id: 235269, probability: 0.500042) + +RNG internal: +- Raw uniform random number: 0.979587 +- Token probabilities: + [0] token 573 = 0.556143 (cumulative: 0.556143) + [1] token 3151 = 0.155399 (cumulative: 0.711542) + [2] token 1080 = 0.124633 (cumulative: 0.836174) + [3] token 736 = 0.093069 (cumulative: 0.929243) + [4] token 3836 = 0.029464 (cumulative: 0.958707) + [5] token 8602 = 0.027943 (cumulative: 0.986650) + [6] token 948 = 0.013350 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.979587 +- Selected index: 5 +RNG generated sample: 5 (token id: 8602, probability: 0.027943) + +RNG internal: +- Raw uniform random number: 0.359444 +- Token probabilities: + [0] token 1080 = 0.858274 (cumulative: 0.858274) + [1] token 573 = 0.141727 (cumulative: 1.000000) +- Scaled random number: 0.359444 +- Selected index: 0 +RNG generated sample: 0 (token id: 1080, probability: 0.858274) + +RNG internal: +- Raw uniform random number: 0.480894 +- Token probabilities: + [0] token 651 = 0.299088 (cumulative: 0.299088) + [1] token 31616 = 0.295837 (cumulative: 0.594925) + [2] token 198351 = 0.053919 (cumulative: 0.648844) + [3] token 8723 = 0.052521 (cumulative: 0.701365) + [4] token 235301 = 0.049580 (cumulative: 0.750945) + [5] token 96815 = 0.040190 (cumulative: 
0.791135) + [6] token 2923 = 0.036209 (cumulative: 0.827343) + [7] token 92690 = 0.030223 (cumulative: 0.857566) + [8] token 105460 = 0.021409 (cumulative: 0.878975) + [9] token 83814 = 0.018627 (cumulative: 0.897603) + [10] token 946 = 0.013549 (cumulative: 0.911151) + [11] token 56755 = 0.012601 (cumulative: 0.923752) + [12] token 29564 = 0.011980 (cumulative: 0.935732) + [13] token 235279 = 0.011841 (cumulative: 0.947573) + [14] token 3275 = 0.010808 (cumulative: 0.958381) + [15] token 235291 = 0.009598 (cumulative: 0.967979) + [16] token 4122 = 0.009351 (cumulative: 0.977329) + [17] token 1905 = 0.008279 (cumulative: 0.985609) + [18] token 235296 = 0.007225 (cumulative: 0.992833) + [19] token 235280 = 0.007167 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.480894 +- Selected index: 1 +RNG generated sample: 1 (token id: 31616, probability: 0.295837) + +RNG internal: +- Raw uniform random number: 0.688661 +- Token probabilities: + [0] token 140950 = 0.791303 (cumulative: 0.791303) + [1] token 13403 = 0.146731 (cumulative: 0.938034) + [2] token 694 = 0.032903 (cumulative: 0.970937) + [3] token 116628 = 0.029063 (cumulative: 1.000000) +- Scaled random number: 0.688661 +- Selected index: 0 +RNG generated sample: 0 (token id: 140950, probability: 0.791303) + +RNG internal: +- Raw uniform random number: 0.880476 +- Token probabilities: + [0] token 694 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.880476 +- Selected index: 0 +RNG generated sample: 0 (token id: 694, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.918235 +- Token probabilities: + [0] token 4474 = 0.322602 (cumulative: 0.322602) + [1] token 2515 = 0.258222 (cumulative: 0.580824) + [2] token 235248 = 0.233247 (cumulative: 0.814071) + [3] token 235369 = 0.075908 (cumulative: 0.889979) + [4] token 1816 = 0.057074 (cumulative: 0.947053) + [5] token 5763 = 0.023316 (cumulative: 0.970368) + [6] token 235290 = 0.017462 (cumulative: 
0.987830) + [7] token 12623 = 0.012170 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.918235 +- Selected index: 4 +RNG generated sample: 4 (token id: 1816, probability: 0.057074) + +RNG internal: +- Raw uniform random number: 0.216822 +- Token probabilities: + [0] token 1417 = 0.804431 (cumulative: 0.804431) + [1] token 1165 = 0.163081 (cumulative: 0.967512) + [2] token 714 = 0.032488 (cumulative: 1.000000) +- Scaled random number: 0.216822 +- Selected index: 0 +RNG generated sample: 0 (token id: 1417, probability: 0.804431) + +RNG internal: +- Raw uniform random number: 0.565189 +- Token probabilities: + [0] token 2733 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.565189 +- Selected index: 0 +RNG generated sample: 0 (token id: 2733, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.865103 +- Token probabilities: + [0] token 729 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.865103 +- Selected index: 0 +RNG generated sample: 0 (token id: 729, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.508969 +- Token probabilities: + [0] token 6869 = 0.842892 (cumulative: 0.842892) + [1] token 1170 = 0.072457 (cumulative: 0.915349) + [2] token 3326 = 0.035130 (cumulative: 0.950479) + [3] token 6990 = 0.027835 (cumulative: 0.978314) + [4] token 1671 = 0.021686 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.508969 +- Selected index: 0 +RNG generated sample: 0 (token id: 6869, probability: 0.842892) + +RNG internal: +- Raw uniform random number: 0.916723 +- Token probabilities: + [0] token 577 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.916723 +- Selected index: 0 +RNG generated sample: 0 (token id: 577, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.921158 +- Token probabilities: + [0] token 2121 = 0.608715 (cumulative: 0.608715) + [1] token 8363 = 0.247062 (cumulative: 0.855778) + 
[2] token 11560 = 0.073957 (cumulative: 0.929735) + [3] token 614 = 0.021288 (cumulative: 0.951023) + [4] token 1707 = 0.018550 (cumulative: 0.969573) + [5] token 3114 = 0.015869 (cumulative: 0.985442) + [6] token 27205 = 0.014558 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.921158 +- Selected index: 2 +RNG generated sample: 2 (token id: 11560, probability: 0.073957) + +RNG internal: +- Raw uniform random number: 0.083113 +- Token probabilities: + [0] token 30765 = 0.401319 (cumulative: 0.401319) + [1] token 4552 = 0.163355 (cumulative: 0.564674) + [2] token 24742 = 0.127465 (cumulative: 0.692139) + [3] token 14855 = 0.097238 (cumulative: 0.789378) + [4] token 476 = 0.056025 (cumulative: 0.845403) + [5] token 573 = 0.050174 (cumulative: 0.895577) + [6] token 5766 = 0.027732 (cumulative: 0.923309) + [7] token 3890 = 0.027200 (cumulative: 0.950509) + [8] token 10849 = 0.024459 (cumulative: 0.974968) + [9] token 56983 = 0.014469 (cumulative: 0.989437) + [10] token 40559 = 0.010563 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.083113 +- Selected index: 0 +RNG generated sample: 0 (token id: 30765, probability: 0.401319) + +RNG internal: +- Raw uniform random number: 0.277719 +- Token probabilities: + [0] token 4552 = 0.898408 (cumulative: 0.898408) + [1] token 14845 = 0.073071 (cumulative: 0.971479) + [2] token 56983 = 0.028521 (cumulative: 1.000000) +- Scaled random number: 0.277719 +- Selected index: 0 +RNG generated sample: 0 (token id: 4552, probability: 0.898408) + +RNG internal: +- Raw uniform random number: 0.009357 +- Token probabilities: + [0] token 235265 = 0.723210 (cumulative: 0.723210) + [1] token 578 = 0.103099 (cumulative: 0.826310) + [2] token 731 = 0.068051 (cumulative: 0.894361) + [3] token 235269 = 0.055781 (cumulative: 0.950142) + [4] token 2177 = 0.049858 (cumulative: 1.000000) +- Scaled random number: 0.009357 +- Selected index: 0 +RNG generated sample: 0 (token 
id: 235265, probability: 0.723210) + +RNG internal: +- Raw uniform random number: 0.842342 +- Token probabilities: + [0] token 1165 = 0.878120 (cumulative: 0.878120) + [1] token 714 = 0.052768 (cumulative: 0.930887) + [2] token 109 = 0.042566 (cumulative: 0.973453) + [3] token 1417 = 0.026547 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.842342 +- Selected index: 0 +RNG generated sample: 0 (token id: 1165, probability: 0.878120) + +RNG internal: +- Raw uniform random number: 0.647174 +- Token probabilities: + [0] token 729 = 0.878956 (cumulative: 0.878956) + [1] token 603 = 0.071867 (cumulative: 0.950822) + [2] token 1671 = 0.026227 (cumulative: 0.977049) + [3] token 1538 = 0.022951 (cumulative: 1.000000) +- Normalized cumulative probabilities +- Scaled random number: 0.647174 +- Selected index: 0 +RNG generated sample: 0 (token id: 729, probability: 0.878956) + +RNG internal: +- Raw uniform random number: 0.841386 +- Token probabilities: + [0] token 3326 = 0.377647 (cumulative: 0.377647) + [1] token 6869 = 0.217934 (cumulative: 0.595581) + [2] token 1170 = 0.113663 (cumulative: 0.709244) + [3] token 573 = 0.075229 (cumulative: 0.784473) + [4] token 13920 = 0.033449 (cumulative: 0.817923) + [5] token 3482 = 0.032880 (cumulative: 0.850803) + [6] token 73517 = 0.028770 (cumulative: 0.879573) + [7] token 6990 = 0.024767 (cumulative: 0.904340) + [8] token 1671 = 0.023332 (cumulative: 0.927672) + [9] token 5604 = 0.018626 (cumulative: 0.946298) + [10] token 476 = 0.016679 (cumulative: 0.962977) + [11] token 780 = 0.012900 (cumulative: 0.975878) + [12] token 5483 = 0.012330 (cumulative: 0.988208) + [13] token 974 = 0.011792 (cumulative: 1.000000) +- Scaled random number: 0.841386 +- Selected index: 5 +RNG generated sample: 5 (token id: 3482, probability: 0.032880) + +RNG internal: +- Raw uniform random number: 0.264730 +- Token probabilities: + [0] token 611 = 1.000000 (cumulative: 1.000000) +- Scaled random number: 0.264730 +- 
Selected index: 0 +RNG generated sample: 0 (token id: 611, probability: 1.000000) + +RNG internal: +- Raw uniform random number: 0.397821 +- Token probabilities: + [0] token 573 = 0.820007 (cumulative: 0.820007) + [1] token 476 = 0.179993 (cumulative: 1.000000) +- Scaled random number: 0.397821 +- Selected index: 0 +RNG generated sample: 0 (token id: 573, probability: 0.820007) + +RNG internal: +- Raw uniform random number: 0.552821 +- Token probabilities: + [0] token 4268 = 0.208142 (cumulative: 0.208142) + [1] token 1160 = 0.167784 (cumulative: 0.375926) + [2] token 1809 = 0.143926 (cumulative: 0.519852) + [3] token 14855 = 0.132786 (cumulative: 0.652638) + [4] token 7819 = 0.068712 (cumulative: 0.721350) + [5] token 7900 = 0.067013 (cumulative: 0.788363) + [6] token 1080 = 0.048936 (cumulative: 0.837299) + [7] token 12555 = 0.037587 (cumulative: 0.874886) + [8] token 12854 = 0.031054 (cumulative: 0.905940) + [9] token 5793 = 0.020485 (cumulative: 0.926425) + [10] token 135348 = 0.019021 (cumulative: 0.945446) + [11] token 30765 = 0.018448 (cumulative: 0.963894) + [12] token 24742 = 0.013698 (cumulative: 0.977592) + [13] token 39045 = 0.011269 (cumulative: 0.988861) + [14] token 23342 = 0.005627 (cumulative: 0.994488) + [15] token 61023 = 0.005512 (cumulative: 1.000000) +- Scaled random number: 0.552821 +- Selected index: 3 +RNG generated sample: 3 (token id: 14855, probability: 0.132786) +context size exceeded +llama_decode: failed to decode, ret = 1 +failed to decode +failed to generate response diff --git a/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/output.txt b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/output.txt new file mode 100644 index 0000000000000..9c0df4f09cda3 --- /dev/null +++ b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/output.txt @@ -0,0 +1,3 @@ +In 1956, the first artificial intelligence computer program, the “Logic Theorist” was 
developed. This program was designed to test the logical abilities of a computer. It was designed to determine if a given mathematical statement was true or false. It is still considered as the first AI program. + +In 1957, another artificial intelligence program was developed, named “Logic Theorist.” This program was designed to solve mathematical problems. It was based on the logic diff --git a/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/rng_distribution.png b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/rng_distribution.png new file mode 100644 index 0000000000000000000000000000000000000000..12ab6a870f44e4eca7d7b76fa6e93f826dd58c8a GIT binary patch literal 55620 zcmeFZcQ{=C+b)Vk3P~hFBm_xBbdqS%C8C6H^j;I93`Xw^QbZ7>A$lEsbTb$wg-D3r zjTX@v-3-RCKTE#f-}}D%efQb>TxXwioj>+mm&-6~X3hGP=eeKzzMmDQsjhsQhM9(f zg5vZ8mHXNh6i4R3&y^F$z<)|Es>py}k{*hN9y-o89^Q}LtSKHo_Hc1@_Hcw*Uh%Sa zbB8%Q2@Br3DR_(jik*jti@T(d5d2?92s*pj3b{M)PlHc6>7rufPC-HU82X{imCJ!q zPz;tlxPMpICuwQ)cz{kHsd+7@kP2DxJvQ32&P~?e8PU^f7>Bx z^a&!ccTG)A)d#J^qu%5A6uu8d7M^{2`fTPwWKUv%S}Z?L-0TsGv@GcB3@SdFHOtp4 zuxU#WE#K&n8`m!~t?nU5ORuYVH^?G(mpl@mDU1^4GNYG0a0#LgOEZbiXar%Xd7(GY zO4Omh!V5n*|QYd@iptV#T!ar`IufXK_vC~VP3Urkw&UJmU0wy+Z*FPPh8&qaT zJPDkdMN?8}GPpjKzoMDtPeq}5@`wlZ58{Y>bS13k+wI%89l=LC`7idP@R;#H_s<@7 z!TaD@O{(qBkB>)dJX_D-cVGwK@73u`x37DXHs^crS?|@$dV6)H77NRlzf0B(rFtYD`9I@xaL!2Ya(}%NH5+^z~!0FWC4h=fQy$wkf{Z-$^_$Tv>U!fUs~6 z>%bnz4h6;bgGxJbFq;ihYmMje!AH)x$XR%FDLQ%VriZeXX~^z0w%_^vTABzSDnrtu zL(i}-I0?OeHm9Q@(---(BSi*B7;*2A#*BN9BM4*uD@G{e%JANHGI4yoU4}R!jf)jD zvu|egPC9-0PA6g5L1=NfvH(|pHA}K-bKkZN+)MJcX^YV_QK$NPO8(x1QFNI{u?uGqq(H)hly^u%rsJ@_}KS{ zIR-UBbJFgS+r*Xeaj*bvW&8<0-A(SS)NVw1E)90cq~1PLPg=%GV;l}z^uG%o$kIBP z+;#6v^{vD0IQdXknLA}PzDu=J=DdcB$`+gPO%;rD1z4n{7Hkt|? 
zA~%MH<7^Bp5S6suq8V?K{SssV-km3gRv9y1FtT;pv-W+^xSpT1Az%3N?((bfh|S zD7|*+p}L`wvNW?6Y9pCz=9l{6*4pRe?1K-*N2lzMabu~#H9*ne5tGQXq<8NoXVNqh z#oDO7Xt=@pm?|L_n_!B}yhlB~2F$#xwLFk1p&QNYK~-J9J0EF&xK=DblzGKGZxWdv zOavE&Zc7x$5~g#`S+@BH9gvBm(oP-8-ed8k^6pt7?9y!q92&2lx59%ZwUW>vXqY4uGy9rP5(wxa9Fy(}ja7*WYfl6MPYOW2VOgNpJ)IPv!P~y#5<) z;`rK)cJ~3H+SPk;@oB!~qjTUQt~G&)Fze)Y0Q(4~7;!<+N3W#ttt&E^>3-%M^J9aX z7H6qjc^zX@sqV_8dUx5uVZI%i6LOa$-?YYA1j0Lq?b&Dnxo!k7vJ)uXDE5ARR8g#~=z0E*FO$`Ub=6_qK(%s4*5PD{ zrdf@LYe$Y+7I$*3R;JS99-K_H4B=y2yl^urx!!Y8UtHW&u~)y)P-|+I$G&MNgEzOz z)F`TC)k;YHQszm&Q}L(QJ94whQAmNePO z?Qdrm+$okAz-D~01z{6gki8E=2lk$8zSv6Gl;mVSbLx0%(EdCxlCN@{C9mDW$ZS2@ zjVTA7ASWm(L9Ys1NeEpTJwqYy%K_GB+Q*8`Sf##7=>(c)MkUG`rLGusN@{+nxYNj> zKx_7=3VtO!#_#skw~)>UD?tZ*9E-M36B~5FB|BnSp*7e^Qc7LuN}i#i}sF*{|rmx+D)(@fnKX$;@ym z3JT7&M+R!)ut@wDnoJmjS>E!m?h$VsT%5^{FyGUxCuhLuj`A(aU#8L5(;DSV(PyCX z(t9e+#nnumZ+v?RL@p1GR^yj{@F6YQM=dOXggw1Qq;_?sc+w)tT?Ls1iroc z%L(t&Cb2SBhmCoWO=DN&KX==K-mlQ(FWu|K77c;RfeEQa)1AHF_7;CKRl=E*8>VAT zMmFqrDsg3Im&gW)@A_s}tNS$20WvuM}*?#72YEsscaC|^~! 
z>P!NAQJQw&*Gj=EswI$ajX=!DH!)dMd_5yor~>yRj(U$GjVtY)imJm)=Bqua9vj@P zS3k>b*gw3a>R^yijfPz+drl{NhZty|(|^f;xwd=b_?gRwlN97v98oUC7Ejm9tXm6p za@3b!D9?qx*xg#hzQ4%($oqGscuks#&Z!SbQzCHB_Dtbu^DXSC5@5HExhO zDD$SE_z+h`!Ki+SMqWwIq#Yuci6daMLyv6=Xc2;ktUS0#@Cc?m`=1V3OiQfZvN1pz zTZe^(U0`Bj!p}g5Qt)0uS*9c;*kf1fv3hyhduuLK9>PpX7`v`(!)=F0GzL$%#+iV$ z5<$KLUK4j)W^JtTTvYPB9)usR><0mgJ{eBga)itfsLUQR-leQ{u2bWew}dP{Gf~{x z>v@CuYa`R6b2im5JWYEhYnttoCsktGdG9IH=WLp=6hiW zb@YF}+SUI@NX+tvyhXJeGTDE*TE8I>QJ_~4i!bkx6|?&G+D@DnIz#ZaJeymOK#=Uu zRHj?(D~O9^5pQP}bF`zqz+d9GwV;Rv*c3c;Qz*y~Ot>ImJm^qd7mGzbOcc|CpFeyt zMo|7>>b=Gi$Yhvgtj`eYN{y?aL@=4UPRqa02%i@Y*S)Vy6ftbEP{j1cy6Dxs?z&2@un6NK`i zSLZk!S0_GuEut{P8gk?(Q1s_qc9Rk%mN%7Uo&=WoQ5lY zcvkT0S!PR9Se^p2#1w(hEuQUCRyMS^x5*>C|7bY+@>KN*_zu_l#TYF330jS8yF7b> z%{d4WQPE1>=xEwGMm-mrM!Gu_6p6R3RhPx6=V*F^_STxOMW*p7n!68))|_oq&whV2 zL186F{n(3))GsTRrO^xbH{)&3ptrXdQTnJ0oWsris8%=*-3;%|{nZbW{wvJ67;N-4 z!avI;TtJWZAs3x0y~(%66Mj9}4^d#h_@eQZ1LlNZq3kc~f&v0+u~RwXV#Cf(rlv_k z!;}m!p%JW<%^%3zm=|?GR}5DYxw~Ykp5AHweqxG^<-S4TmR92p=yKXa5fv5iwGKWW zYCV@!xuC!lc+mb%P9;dB_HZg_F4MZH=LRf->K-Ob_Oba)P?8pl8yu&;za>mY2o1*= zm|wzYAC?H3zEqn>=%hQpMX@@p)RT(vb;L2Rl{(_^c@62uvq|pi6^T&UL?J&N|6GtO zHJ}U&{Y9a9uBq|FpPkCc`oFMc|IZ=+btwC1T+W`S+yylh+|||9R}k!S+loYLXq@HW z>Xl#Ys&=BM$4n{&#FkThFd)eMU{|2;w^itjJ-B z*(9shSb+$y_s}O%nf{9ag(=j@>B!4s86q5?O$ER7uJZ#@)4*d|RN%#6ExMK~MGGQu&Z3(8ixf!b#=x3Elp2>f| zlg=vt7z&w&i<##3epk{C)CdXBqEBa6H@CRvEg^arnf?h8t_|P6r&*-A1;kz>H13o{ z$D#%HeL0agpL{<{BH*0iKi+YQS+_7O4iu+Koev-k*((1l7o6K<;d)`b``6Jq5DCmH5)c2D&uhF7p(XJ6KU*Feh5rhg{Ij3`FVM?>I;8PQ z@czVeI%ALpppvZ131Y4&C}M==4?ror6vAX)B4}1~AIo$2Z{2nZ83BQ|c8S@`{`1JV zLi0MmE~i7N1ihnSy@l_PrLc;UxH0_jFjS-&<{Q2C0W;~?UuY!QgfIZX;eVLJ!`EY> z1TJC5u#lRUm$#hZcinTbzw+2=W<$m7fvf>~rW~*mu}lGumm3;xZAvib1-?zL8;#{R znj{#|<1o$`W>I@XSmT!!SmD~&anfYq{;YDt`T*(NJn?$-afC?HpZpJS7!Kq@hu_W7 z7{K-{ukQowQV!zLas>iFvDmk7A9n=^i^GSfB#mQuZ^euRU%V%$=&Nnvzu9#zHX*@i zs=2W#UKBpusb6gF5H3!Cz5w^;YMcouw)J!+hdS#Kv}!6Ul?kE)AMDsKUCN718T*2F zXiIp9KeE4jIr4skikgYFkoTQ&MbL#04>pjg2%2e#^UAvdSD?LfYDa7zy_aM$J)i 
zMs9AQmU4l=zrn8fi)gOxi+sdgKl6mx73h6nLmDqaU)wNGf16QdM)K_Bad1>dqY z)*9(oJMhwQTP9MmYtP@}UB`juCSOn;yY*H|+P*)|Dzg0zV`?OK^3tl4>#3)Gw?Sc7>$a} z>y`lMMZbOxfc4=Sf?F=7btH(k)_BZYK0i)}zVR?&5E*A~-;<*e?NxsG?p#yKT`>6$ zfLh4{WLk?kVoT4Y&aZN7X{ZN$X=fJMfGU(3R&4m;QRw(9gd-jr0IDrj^r)8;$AiY% z{N}D6;uJ&gr(z4uq6ihpEcvY|T>Q8n;e58S?YY15Fy^%?LcPy%0(j(dB#v+S| zc%1m(pPKelx13w&=Kiol@zCDR2FiP7i~uOqnu7w}lPOYmJ=u|U`@4(%pxUt$XN0~+ z@ZjlA<<}Pj1NFn;G{U|&_wkYAXZYg? z&|4`qd1ll2>dZoZt@ohC$9y<={|$6P+ZaTP7Y`kRCY~P8=)Q6I241t^ddH#L1}6j? z^ijsuxWj+up2}cLbI1y4Pm+*=9{`dBnpiE^ziJUV(o#7-!J(+sxqjyx;6O*G0f}wZ2b+1 z3GEK}!=+n-wc^3}TqiiN`vKPwI5Py=x}5&hc`-urK~SXKoa^exdau}tAELP67VOW2 z@J7Ia3H}4t+>XzQkGMRS*_nEO;l$C5QHDRmyT?%Z_~o1|9As zKp$uHi~Tw1VQ@-Pk|TL{E5hqfs?Hsu+y%pO0sm*P&w6U5J{L4HG7@|I<*5I!5GH<6 z(LT6IrF{$%iTqa~7ctMZgMf;}6VNH; zm!i`w01N8%iSqd1%hN1{hjS<@gqUm$(}J@ceybRpqh!)b1BOQ@yK1Xbq0(t|7%=>o zD!mTZv1Xo9C#Vz4h!bkqvjo$S z0MBH)k&1{Cm+{SDNCPl{oLbb{WsA>xV z&eg{_nR!c0y-{!+sdBX*^E$j-P{-5dAV>rP$oI=4HRgut5~D#NjetV_l+;3QW})+V zJpqhjoUh5B?H|TK{6f-8lAigV%OlmVjsHy0DeAe@fb|C{ zi$E3v2@&2Qz43Z$t}Anp)Y{&ThJs9YPmfu?{~a6}?#|Q!#%T0FLPBDhUck?(ZvV^h z8<6yIWAC~}C{V(5_sded#mSudV{}`URaJK)B6Ciw%9!{AHestP8I-{g6K%wLkBY5@lKlrdC5w)0TD6Wet?wA|Ls^S{9s+N}U z=qEquF~#9~@zbFV^MSikQ*pA!ZvZM-pxn*NNlBQkAvUYR765r^_?EZ+UhBA{QFz^fr?;encsl zzrAze?VL&AoWWbo1gmpQw*3__1wi!LiH~TmTb&CFeEZ=8Trc0FL=28L9IbI^A+OQ% zfh=odFs{~W3?q3P5A(LRgkxq?SggTDE-pzm3fxItj%!c|T*##ES}f?K^vi1Z@c_z& zO-BlTEQ?Nd`xD?l>>z4<@CrHB#HsZ2^W(GT1DEmuWm9$jh$NPw&FWWj+KGW5y7JEd z4m{lNDD_-)15~_g0az}g_PyLxC?)IjMTc`4J4RO*OHCVV;^vmYd7S~p`uXj~9h1^~ zl${rFRj$>b_S2sW)O+{)jfJ0|abC0{j1+6Jt9SjwjG-Mvp5IoJnD=ryQW6XW6AzCb zygf-yfGYN6SEUuLZqw;|^?rIH9FQ^d}6-1Iv0;oEapDQjX?dZb@~EczA5S+i*PV993?)t-d1 zdVL&q`Fa{V5*mK$)Ww^h^nwh2JqLwA)x_SQRkT2;!QltUpE^31-@D&uU@?7cHY0Tf zyCN&KT=hGgAq^@8tVdK2k808Hwe2Z*Rue1Us)18A$zX&28ypVuo?k;C*;EorFOE&& zjLYkQn;Hfchh0vxmvKenpKv*?;tKG=EF6L-4M9N$-0h z1>1pOy97o64T>toO8DW>=_W9EQ%~~Ji-Qo2t4Cl*eSSwmAsTyb^T&HGWvuU?qd+zV z%oz@v{>7p4oGmo{M!QO@!4vtYQ|GTgo 
zJLe%#EN0uDWG8kQ{3$G6`Gu3JXHy)CYu8mP5)c0=KP^Zf7ZYP$T)&(D@Z-(T0MFT+ zJbBXBiCs3pr+4@dR|lYc#07z}ev}E$xq)m0zHI|8+tM|?V(QPTi@DoNwMoF^a9~LB z;No2eK)(3Pe<3fY(DnDXF8jZ+jPn`4D`&sEBR~Rk3KKoNPTtI1rZ7J4M}H>oc9?N+ z(a!YF>__3*%h2nZMtTu5uXc83Q=Qk=)|UO0lwO*74OvU+xv~q}{Pp4QWW{-+6X*$5 zJ$`>Ue_eeo+D-^w*vb4kN&WA{Yp#EW)#m?>F7Tf!>;Ehr{Lc@O2doE#VIfkK{``3q z(9*oXzzhRSojJ)$V8;o+TtK~HmunUam803v#4-QS zx(-mYk2ID0Zp@4{FMy=I3_J~wSUgMsXVBC%fAFqSczdwwUTO}vwzjrgw8?{Bl*s_I zh@IwSvvS7iw{~DtYmVVh3}}#h#nt#OjP(HIXRNO7l{X&!cS0tn++Q^OkmWodI7TLS z7w@g7#1afY5WGhGL;EXl@>!Jpii+1#q?20SkSY zRXSxca7TD|k#x?<9<*jSD@{5-hV3dA2J9;@`1GHiH^7D`XCMI~SPk%XR}cv=P6E4) zMu1n`W3Dq94FX6%ux0QkIA_bA>g(Ek`Yu)W3ZN7opFEkTUp|vSFQcral#k58xR8v? zv!9SOS=bRCFObQS{ybbUWa+tB#6xUu?w$?$kNg31Wk;I|YeYrG zUk<|+=_vEMepPxk#n}zHGK4UF5UmMm*?kUXxHECBjH5Obg!QYmER|S7h{YojTuO5CKy2=;jnqLYj@czTp#`wDt14U z_1A8m#jXvksl~s>^|wf(T+l~roBoPpr&ar3e)upB(!FAT>2K@Uq>-!n%^6h)zB`ha zZ29y>Dk>@jv{noPeKk0?b}SOcJ>~XNlvQC-A-i_k-Nj-K{gw{n``z>hQHnF(b@^@a z8#&Wcy@Fk`s-$hh-96%1bQG&laKB}S8w4^xBb9a=6(40@2{L#u63P}tGH^6|30ok?gpDff2>Eq zB$k{=(5oRTkaD#$Ljb8ExB#xZxCz_9z8jM|)}w;hkE>t}^p(4ImA*Nn0_=)CGa>xq zE3fg*ud@xr!22Y3;djl~Wb32Advga@;39Gd3b={Th7cs4sdRm3r7}~b`P$iR3&sml zifS;Nwt#;3uyWL87<zO?{7G2x31nH!|;jD(h?Y0z#}+3>VLoU3w-YXHx{%1-<`$?iEe8ja{`7sR}x>?-?2Lk2R5${zH^O#=_At({jumtwg?xp zh<0;^$Vw;|CnqP-Thh4s_V7v`n*mZaM!O+OWr>5i`5tj&nLSMG``>|^GqBcTtfzZF zDh9-Qp%vwdah+o4-DzTl2x+y|Hsqf?lF_WRuwuBx8WB!1>!533e>)fo>VvF$ju?Vr zW5^yWPH%}p014MXp6dJ`>b4@DV|$=TL?K8QfQQUp)S-x{%RLK;Dl07X0g{)NJ}_a? z<2i(dzrK5CZ?rM9hyt}9W7+9Fbg%M`UWc~(<$QlB2_P?jk~o9i21Bej4U$49lA0? zL>&h^lU-k5@VuSOs4exyv#g>*#H8d=H76IB@x6PQAj_$?tNU>6A2=6=xIb%nRGI5H!2K`LUijKqmFZT^YJ8ji%z5Lb-FrpKWg7P< z$*=nT^JNo0=r#rJOl!2ZM-f(F^Mqe<9pNaGIIwP`vF8Q5NQrDF%^Bu(lKSN~tVAQ` z6AVJzznmXWf#U)h<5yIW{sQwm`%zGq`#XHc+?J1yj`oh*+x(_PYSZK5%VjlJRfa^a zesvlBRH=;$_v5c6DVLotT}@G1@cv1cdJs4S7I%-`%2%OO#$jcpmvZ~r&HbgTvT6|+ z_WcV=PV9m625!^JyI)Z+!(KLX7{Yng#nEmo$iK)pEQ9y-X3qEe{n+YI^gBusYaoL!(Ug z&np-I=Wz6Y#SioU(x>P~cpcF!wSKY(H!@<0*!l=&+(^A$3|He>ibU!Ob!{kFvYVj! 
z%U{11F#!diwT;aM(pqaQHG67GeJ9n^NxAwAwg`Y^O)yT$C{TFi>*i@MS!tn+%lZn3 zvhL%dRAqUIQc~5CL|`D1iLHRBZ!e30V9}EXRmLsDC$_dGfYej-;4dq)H=b>0Kbr-y*gI#_ zG8@avbXOoG>8#D0mwJMR;?}pYuyeP@nbBI~3R%?isf0Sr#Ta3OSkYF4T5KzkA z0LO`$)CW}Rp@w@WC&Lf6!HS}K8ch40@jGBC85a!;4NC`?t7oHv$^{SY*1?)b=eR|w zXZ@E-ZQrh>##9cWolvk!`$|Cd+sDmGaFo(TO555FpwVy>LtuiAAaB2ZeX78`4h>H6 z1z_s^-+T+DCz#TdCk>(H5yPrD3H|=BUEIn)FAC29Wlo@4mt;3uQz-+yz~>wesdENK zO39Yq-7N=v5mpex5oHHUmTJbOhl-U(2rb)|+a4@At#n>({}z3aJ4lk5`G^3%5X`#c z;wb4a(n5h;#O0stJ`?SFjX}HfT-d31YT=`YB|f)6epTjQW}bcXvGLWTK(javf*A^# zwP*F7E665M3>J_2o^Mnhi~XPk>L-r=KW{pt)Tw17<~(L6Y!l#oKpI!@TK!yCXjHD> zU%7bK>j$U!;5p^ZGOMVe*3p#~K2r7DP4hZq>>^;KD_VtC96~R54-!T(QORDzPsM!K zUyP7b{FmRN?fY(nVGaeJnxn))7oO#f8M5VU@<1yAC)7p26&GzS4zvZ5JzoM+@P|{q z?;B*cQ(1emlp$DQOCW!rNt~>A{jG%E>{dg8g4b!M6sp~2$(uUfxNf8xKAcam^7$Ei zdo%0}!--!9ot*?HJHZmU$NA;}rLGS;2qYx6qUO_fP% zt5*lVRDPVPia>mGb#~C)A8+eDF7pNNKUO;g$aYUq$18xilTyN!TH7N__DBNj3JdtR zM>);2Q&WRsTq$-ytt9}tAIIju^w@p2y$wFQHDIw)c4W-Yp_cRdk_|0WyJzu0=J_+^ z-DKwh)AOhHAwnVTCRnw?5Ep)&Jz&(Sfo$nd9GykE&yw+QUtXLPL+pA0Z(WM#T<2#va1z6p@vU(} z-tiQyQpvd^~jUB-hfkc7M!Fy~{6YpvW1OrucWJefA>ZnT9?+Z62f*lAhC^ zjj$pt4fWUhZj4MwH}DlzeE~e=i)4hCkpKD*>{7Jc%v3eGO`NOd{Gl$d$p8k-eQiw? 
zr5tUAZ{b@h(aOvo%*!EQ0DoEZi6pn#c~;y~)@SvInA;SQu-04-Xf1@Pq^~P?UR0#uaYb`C%8%5zcFUJ#P#V2V16OuV+>L%IHH%PmwO70 z97t=;VE{~wqhUSAFetqO0-?{q%5iyQsED3)T71)+8g~!$a&!mo`@SS^bnu!q$Xe&j zwtpYAR|qwq=0z8qR`=SpCpiMb~Yps?5JZw#X*Kh82W;S%QCZ0H@+))&-W1LWIJ$Dj;R@$MIBsI!`crTw9)(E>Th)RD zxw8e_^r5#i!)JhJ8uCMpv>mj!v{c-9XgnBnAO+cRPI;#$ItTs)T9;#{a>Em##g#kP z8(CsTZjU(CZBc;+5CyZhmB11Sif^&mA!47wxQ+LiZwOeFV`v0P3{0VX3Qq67)9-Jd zUcV!cj0`UhM=Ny}SKH)b5eH;CPmXaQ|0SEKf*8HrVrDPU3)))dsMbK)fCEYmP>a1F zji?4ELKQTQP{0F6b*ZH1LX89gGq&rqY!xSDVaFo26tE7ihJxbDRe9z3E_#^4%Tw-g znnp&6@WuW|pD~$H3%>=^6U3e2K-lt3fzPJ+Tc<3VVcG0}HkJ+?N@AFBZ1<$+Nm=jH z{(EcQwD9p!Z#ir4ku^E65fz;niJWTd+1)Du?+_LWieDo3Kr?`ofrZRqzCQd{e${Nw z`$Ghzshd@Gs25Khxy6_UskQ(`{QMt8oBVSTh1ci?PI?2WzVLjjz%NY z)nDQ*dDd@_h7Plxm;&|&rLZyYEv)>NiQzQzhx>{$7xEujfIM%>PP@e@V_-aNN_5Xf znHU(`%;*DTGj3)f6b3nuZETdnf|RfAkj}Lsw$o2{jVPNY8QI(C!^diC?lnYx$2=Xs z)v%<1sPNljWZd`MMxK55HI!menpMTM{rpACcF9ykPCsC;JpS%LT_ zcu13a*Ccst9_#v`ky>?Eh203~yFChGW4>W$})k)xwfmGOfUgdiE#?aK?0So z@VphC?g5CxM^HP=HJD2gaq>HS30nbje%ly7?=f2ygKh5k8v=elU|kgpL(mj z^qnJ`pI`;}N%%^`K|P?ihMC2k91pz&9^DVBF&2LFNGE&6KpDTy60Q`Vj9z#&WB$7! zAbns1p^0qF|J#YpH+#a*6_#bT(I+EGDhnk06<9RXH_cagF?66S?7F_x#RHTh+5k3` z9R8h}U}+s;xD{$(D*0?UUXKuvkQk!QgL-%X{2T_2H`c)B|3RwUWNa7>cOc<07Nu;N z^Yk^qYDjyiUIS`vips4|Vy=g)>i!6mMZhnmw=$pwtQ_ipqF!%8$TsxM?IcOKLmIBH zQddLK?0wsq+dE#Msfb>t-nj7fB(LT+ z=m=SN9M}R*ktL4<&?4r25_zRtsQ2?;?++U79Cg+>^$aU^K~3!LLc!=sI=vnZ6_xL! 
zMW2>4A_Q9yKS58dyVW01!k9Q1xSZ7^7u( zbCdlR@|;WnQrnPL%EaeCbpg($3sSF8+%$gU-dh*znir%P)1xyj3%FB}xwUhdxhEhu zVvX!l=7R?hI0FlV-fgMf{cH#ll4&rLByZ7TI?oEXT#HI)?mFPb9@WX-25!?o0ilivDWooQGD+?x&|m%xKDH#Q2bsGBTrjp2|CrhsM-T zxvH0_DH*m7{urKYiZBZguicqR><1oNruz+m6+2T-P3R`5B8Stc@O;8)Hfb~cd4JP&h6sx{+NUi}>OVK$4 z#;e$kb}2mA258V(HV`@C2kT~h+P%#9Xhx%<_*J;EzYCyo1#c3cxp&?6rOEO%aXqrB zNs}}HROUG=UDMTQxqa?USs)&SF6safR^o5Apd)!HOKaDKk%RH$3P=gl=0}Tbxqr*M z+h-URQ|9ugB40h+)Y3h=sk3bs@S4!|QN@*L3OX;*9`o99?+Qg`mq)QCiW%)liJsEV zA0p3C6ix=8owc}6w6m{O}usNG@ay%BK|wRwFHubpy=L^Z7S$=3*_Cskxxewb z0~#Wo22^UePxRNWY4V;mFr}H$ds>^d=_3%6cF&gP3)F{}{7jcIwC7|7uwh^tJD?Pj z=5`IFGA);)V8gsI_Q36Q0mG6{NR&gEXtB@ut>(+WyFYgT=OZSnYQ`D(G(JV$nLJ^j zAs;2Vz@2!@>SbbL5rR8L(Y-c8Epsg4eUZl&F!NTy`3#CYHm0^}Lj<^p03u^&`^ma{ z4?7M(3+Ic601JBx3>^wuO?LszF>^P)>kaw9Ua~m3ET$DaN_;kTjzeiGJ00p00OkrA zslC;&W}k|hfj5uWTaGzwKzbDlt!)nmYM#+dMl6Cg<*qj>IzDQWXqvYUtRdd>i_~2Y zj>+gRrWBAbUAhG9xF1H4IO*XIVD>rNGmnRoUyHhFT5ex$aDYgIY2FC4*_d9Xm;9VC zct!s6#khA~R!l||_ALB?2Pd>c&vN+yYq5rhRfAq+ZWU3-fp;XYHvIV~b^)s&So zT3(iUAaD0E}t2bG< z^`e8XF=MBm(j-(l+@^h6whBDyMyaw0Hv>AL&qjOgd<0Z`we{OAiQytMDSz_T04n3E zypXaguHO%zpn&RiM*hWzz@{NTeNY0;bhg+JFlyP|dwFUPW-N{F0Fz)z2P>7HpK^KaQPzPzgN5(u=c`c81gY26Xo z{0_B;>DugM0F6e%+T+aqeh5(0Y$H>nE=zPf0rQS`P;#xoC@cPVzlpN|5uNF#qM_kU z*5-N_Zn-wur8{{8un zGX7qVKsBZ4SGz3dKJ)Vv=w0!yiIjbA7Z9teudhD=tW~FL_tsmVK0R{$*e)=ksG}6E z!Ieo`HolSv(B%lwyM&tcGs^g!0W=G7I!|G)u5&%Jk7V_eq7N3`1fP>Onf?#DS3#0ZeJb-8}&&> z^)j!pAsPBmCYbNA#o3xTe{;QB&{GzU69CiTU&!u@}L zi!{^HLosM4An!g^A@(TH$@I#6Q+sYvVfQxiWf$t_+~)6FpiynotznsHmd%%+ z1(DlJEsA$y2RwmVk6d&jKB%ACPERF(o?n?FU{hbdUt2G13Y6uns;}bvf>*qTYkkY% zqcwdGafK%?-c^p50OI=ST!C!uMi2K>ryj(fN|4WT?S{HRp>o4dNtPVXPFi=+CxIPc z@Z-tVt|kNl``JqYAJ;a~fy0#M8UbRtcTO{}X1}XWmwco)lkSr0h9}exJrLMNHz2v5 z9(X|ZmLhNVn7Ll8R0y$9bpSG=(-{&dzkjti9_h+vJ~^B0os!sr15N9|OUiw7L*)`1 zC{r>>dvY5;g&`KP93TbR;fF`o8OF73nn4ttohgjIB>YX5v?PoLx^L#F$d&Ay&&<^~ zn@T{ZGy(WbriO|ZLEb+}x(F9)CzDkeW%WdN0E+IAaX?b&geV!K9bti60KVZszqhq> z?b=JI*&X$@h#|h(eYQq5hk71#K?CQ&RifWr*ci=AMh2#-XS9MPke;NvurvP}6NRMO 
z0deW(((lUlJps(fGqKGb+a`XZC^)NEQYmq7`*Jv|r}^_O-TL zYy~{ErhkDI&*W#3Rx7KZL9^9h$z#DQ{X%c^ z#qty2c8~IRGRN@DjDhPvxHgs)I9i#t0)c!p%9H~2Fm~-e<9)*Q-rZsIvxuo{%l$thk$kZxrdQ0_P*gz&v>%$Wf8kKSj4AbN~hi) zchH133?biV5^cw|X|`Tp5?+E-=kWWPp8@zcUiPHU9&n1#N2}BXU%{gfjfLG2Vx$*l0rTx31=?+&G?(m|pC7jF*E-x>{ ztEj5FEk0k)|Nf$TqEb5knPnTNDcW&JJ!nKERBe9NI{suSu%0F>?Rsh}aLi*Rjnb<2_!}yaBSvOvV`HH4ZRy{5EYFBOzI@BhJ;_bWG*jRrz zv)+}>aw6H7tV8b2gjMFxyFxvzgAaOcF>v|BXE0LpEKjRs#?u5a81ho|?pab#nBO|$ ziS87BpC6m2>k>J|d$w;bOUY#mS$0k94ay=7Xmd4-@3}O**q0Mp8JIN}T3>a4tt&UB zanl=_hdMbpqV9Y_9-L;C6d~SXeqiva(4tnj{SJpWw!13TcjgkY`Cg{2F`brekH{oH zjR>t8Y%6?~5hI^U<&%7(^Lza>{Yt$>0f$k)V%{3rE1UK9PwM%3SRwDU*U-@xOU*t# zGlPl3PbQhSsk;(pc=s}1Q;u59FD&?*hx_kuwe{tA&X{s3-kZN4A}C)Wr$*U8)UtN7 zOExD*YWBzW%Ndje_B<3uacb~hSrm^gjuuau# zOy_&m+b$FG#AK6kjKcA+3@md;b7a+GtP7sXAP)M7li@EY>XKla*tB!2oxy#XJD+QG zpX^c?yeYJ}PVKfU5ww8OCW+JpER{CT3>7`PDZ*&Z(ZT$5Kqynm+tYvlo_)e`HoqGN zpL4ZMBi7kIybQ1R-K18d^e@myjhsA+oN>6ssy9ytP9s#|{A=|7AX~ud=jD%!(J0xU zBA9yarxD?d^Yl^b@-Mm_sQ31bn0ZAqkFBb)_x+g9)5(cdXbpb1rStH2Pd-&4fAG2+ zC}}T9BCd^$yUS!t*E_^lcbI0V~AM(6m+6fOHlpJucP9_s7Df}2@inN+4M>gxYa>< zXlAbuGSmTXsIgzMprYyb`k4o^$)B=1JZt`b@Qf{EjJjklBHxD>(1q?!(>l?-Y589e zb|LdB-9KoML?5m@>asgew1c3sI~Bm|YR#5vnm0ajvpH|gBBLDIUnybz2}>}j?e1=M ztdgc5dggAytE8EEo>%(F_0+)KA7sM*beM+VPVrT`SU>{VRmADC~)zRN5!Su|nJv!ME$NMkj3 z{iQ*ts^YrG7X`%-UDr`;wD-NciZ6A~s>28@VvZ%hzJ_9}S9y>A?)qKHzGBzU6u z^-?7p2URlM2~`xdOc^gK0-o|#fc{Eck+Eb6-zKC9rak&g>98;0zjINPs2Q8+Q;Sei z$PYpH?zr*|7IIR+`%*iwgRIavx5o?TjNrig*1;TxWZVDVwaimpTnElb?7|8~Fe_-M zya(~2Ch%(w1Lpp1XF!MZB2Y7QAyc5&6;M_|pvY`BV8}WS?|}nF`aA$e*B+>qz)r8) zbtX)KZr>|HeCD~=F#V8 z+LuKsrhkE6b1ya7h4SkkWHZNt4iJzGGJ5+h5XambaDYaPt^=zqWLYPb6CyrR;JZt- zh$yIY?2>KLFEqqU0=nFS7!L;J3kSGBd!ViZCC48PW^x1$6o8Pi-I%KJ6iGE` zr|D88ZfnHe1|8of{;Qvlqd+W*}-QHG?&siauBUfXZrCL@Qw&{x%yF<}p4|IpanH@ zPz2m^193}w_0tivD=ey+Er99^Hgow*y&RD>40VlPSoGvgBZDVPAlpE-kO{c2gxRBl z_vb-K=G`o1Mrn60nTb%X43#{7J_^17)P7`quumS%MJsh{03?%4&;cPrnv@6Zwaak7 zQRv=E$)5d27uea^(X2Yzf4Md_kI$%4&;FQSBlvU+T4Ci{>cS(GJO- 
zvVyl_6kRzTcMvzgIWWH6xEQm{_i#XcmqPSQ@=u6XGrIzK$2oerap2z?nXQp!5nRe;fd!qpynVT^81MpIuyls($yIo~e+6B-pU$25fYJ zDY~0qy4L`2pzLiaPg=EmQmq=p3oM@%_i)=lWiSMHuW|&z3k@|3Mtb>+Fc%9|K`_U{ z2{6{n-HM+4>pGo9OO(l%x4~X1tU4pJsp*|m%R|+$w;lnJv|gZYsaXVdqx%70+gRp6 zPOt^>$b>Vn)8vq325d|Az;!r1G3jv?2`}Oipvj5}FFKz9s@`e!)R?C9jH+pa zHFo1I8QozZ50#YLKSooYLtZg?ItyaQs3%gw{)vRgQN_=xN~NMTi0@ZJtKiH{S`)-C zv_e3-z5GkbLG)SSBV&Ues$eKc}-3oyx#H zhDPdlW>|=qGqkqAeVs0BOG7slQqS8`<>`yl8r44=8SXHp>RzLF*Qf)Ka^~NjcK={Z zTO5=IeNv#EdrGMn>I?Bc08OSdQ zOpsN39Jw5UUc56`*X#cd+`KC4eYiRgk}ddv=iFZbELUeMuI z8Cb!@>hke3nz?r>^i0=P5IWCisU{9Q_`WB?4rJ}6*A%SQ>K2V3#PDs1-VWwlXOLPK zA}{l;o`iNklYh(GT5qAfEJO`XY%?ID)JDp4c$v&EAi}@)dOJ)z=o%xjd z;VFdrYsTFXMLZ@dslJk~`8>Abs3T4E0Y(x4$b(#c5Vz;32rl24e-67f#1J(XMo z@)!-YPt@FrN`<0s?^6x%rcW#KPlWQ?wR>pUsUy1^p}_1gP?VgAEvoL@Q4YB`>kv_2 zvOsIEHBC3C$aO?_s4gwT@s|(R6^}3XC~L&+*>^dOEW#N-Fva@iljl5{n!$}m6(}99ISmmPUs_*HLD7WKoJ5>ya+j_(5YXv*S~gs zkJFw~fPIi238O&{^W7PEH3SN+5hfQ3#^+^^bc3~1n_AwSdzjqy3WLs@c`3^$s+WB; z!8o&H7Tl)i=fi=nlUj^*n{&1(oAkU}`Ro}TE;5*zG~omMKBK``a+)CsR4x(D)J z%M$YDJ(Uxz8PYDIT0mi9F(vY>VC(Z%h$crJ@7bJ3cMcNj{~Kk085iXmwvWPqjsX_R zfC~jwbkSfCN{Im|NHg?+MGiAaOCyQ_0-}_3#{h$L3?V9_G(!w944_DtlG6LUbv^(0 zd4GHF5BtmdvY6XDuj@RI^N3Yz-5V(q@K;*)xeM6XQ2USwKFXJBr8ghjIW*#)w7s?# zbjUcu+HkbZs%Qme;6D&nO)8>1J|O)iEnlcvcVQ6o2@ZzS z1U0|yOXTR|?0~AuU!RW=L@T7w3VHg9iMp41Q{M0*EQC!%yw;(-cJa8KtX#a!m#C4} zX=T~BJTRP{#C9vqnLSczc_Bn}EMt|%+<<;ERc%yp!?9kDxliKoY;(J=W^VFB!G=`l zPZEdI>a9)-g)zBTSv1qB-k7$Vsulj}uP1v1oOS&t*eY=Y8QQCI(Qmjba^keNoZG7_ z#pu%M(vOnvx~Wj9k#nkIRh@t|we;6pS7(iqg_bb9i_-Q<^-%XQ@K=>Fbj4|2g$?1E z5dR_qMJFEWL9gwD|EGEO|C<+5=Aqf@bpj36mlQhkdj|ds2$KW8#bTu8&o$%b;jsgx zC?9d91FN`Lj|9;2tpkdyvT5>%NjfhIXg@L+Ap;bQuk}N*?>90tnJSwqE(L8CcsMR$ z2Wmi&Nbvdv;D+nftGMiE(!gX}s8pXzmY~ReBTz>|mASuq#rw@JK)o|Et1NWZCU+5> zHOi=MIffU=pn_6e;zarmwj86ArHp9~YZDnXm_f||eEAOXD1plG-Vr|aA}H9xdmYt5 z(1IF;Jvtw>w_WidXhggRHwU*j6S#n9t7pnCW;*RR+9*nHLiM`SIj~NlEY;4hPsM`h z)Bv!WX=IN9V?P>9zidGN3zYxUGO%;dy*|JEXE-=Zxn4UDy4X#-*%FxbjP=fe7u_^$ zWKMI>j=PU1$Y}O?lyBe6Q~GJq%-Y 
z(?p=Q&2Dc%jde(K1>7*TAB7<1CeX>WJGX*FkqIUQ%$->}d{9Pl>?Bl7q$^NKSeAY& z%%$k^-cV`C(h3U;cK|AsN^?cP(PbFT@xKG{9|~kjJD^b5+YeREBd7pfX)t5q0%OMu z^V6096+HsTYWDz$kxp^Nz`Ay>4d6XlT8r#L(|agbdH92&LfH*vjqcD44459CICt*c ztB44_Auj-(k6i?@S+R3#07}zi7j|<$d=G`^H>LYvvRHn&I0b4P!s&+-lr%}l z(Oac6umijDcJ#@+&YB=%WD}c`$Z9W;@l>A8`1v$QLFm{6w9MZZ7cxhC6tB!zVdk22%%$2 zzx%P$FItiHJ%G1eQihXw$3xwNknh6}R*ruG${Y1FO)KT&<1-H7mYarkqZsBUKL#@3 zPR#nyNvjhBOE9ngfC{TXM42!Lke`Al_<~jdxOl}j+Muy5>Dmn9O^R%`Cl*19R}8Z< zDr(a7RhF;obWeWy`q4`csc{Ys0tA8wY*1LT4LtNAN5m#iR8207WCxtC5(3>Q)n)3s zzq-+rT`fuJ8VNwXT8?3fWl0{+lSFi5{<~J)n*nTVrI&zznXC3N{Ja!8{En(f>Dpfj z+Z%kzd8}|*?++lKee^;lct1&9^?;G_4|X;h<|NXH7_Q9H7i?ufgsjqk04;MQ$Q_o3 zjk}=H)0pM25d{n||5zQBzg%537N#8Ks6L>c%plDQ?^N<#Yh;z;a%#Jxr-zhm98onu zmm{cqU3pYI`jn$P7%7{J$>1+{$IdMOu7Bf!FPA^S2xRy?4_hpcOV1tB#khElnP{kj z1_?K&7A3p z>_k1qRLOUnw)?sBPG%5P?qVqt@=z|kC!@D{`@2q-NM zyc5LNq!RXUr497ZGtSH28=@B&!^{&K#t+IZzQQ_uJ%bw1$QtAPCJ#3rj6#(v$<9-w z#7d4Z!$bxwALD$Dq}t6d4GjhUEF7Z*gy=xu2BM;17Y<>59pVP&*OP!?F6T}ygx^PY z8#8*XdSwJCP}ALO%bCBU{A8Og2cYMorGLk(H0%A$Ow{C^1+2~^G(mrza@;1ZJNxlN^oaLbg2@U(wc&7bpO`6d7*UbR!>)@wkKooh zMyULy6nP-;lxwt(Hw^s72F9Sx{aUyfL#ao{nmIO05ZrxDpNS^<_sY7W8&1lI~ z0a??y{g-tZHSH?23_CiHW3OuXPL^N(J@lA)-E;@TT^{6Kj5_1E%PB=ri#(vg^TtM# zN8N?aF3AGXP?VwDs#xlS7jPLW)&+ld?0FXuj)iVV(cKgkq0-vjZ;STpu&Pu)v^d!% zv?4F~Jbm(q(eOHA#lyW2G;xFvek;OF3comdTX0 zO0(g#=9v{(kjqvy_zpM;7Y}}~R7w;cUs2+Fsry5#Lt8xeYE24E#qZzZ;O}f;k9HX5 z7!=Z{@3-Or$TuFg6a?0}ijZF`Z=?lRvTUz+%Y1;_*YuMX_64m>GY7b@6v%g!Gc zDb0G5)j*{D5)xcu$*|L!QeZfwd7xDprr_;Ek-Ok4gvWiq(`((%$RoJs6K-5z;#4>O5+6GJxQ zo1}@0%uQAGqY00>qwN#d$E$7ub(W8HzQ(kf>!6eoPem)XSfu~BQ`IxXD}+UdT@mJ7QsNs11Tz88Khe9b+2sqw+9X~3<_pw4g_Pgd3K<97r$jhX zxnLciwZw`atVyIM***r`OrX^T)o8?ejj?PDjE`J(P+(zfhhSz?BytOSR`(ql2-VtE zB%s8!Zsxi3ptO?I<22idZ0?)Vt4Y3Y^FO}Mx^z{yxwJR158(9IuLeH0VFQZeiu0Vl zRdeWr#b{?+yNZc_^}}@d_UP86mMyUaP)anJKP>g>E?jSPnS3auusyiFWeHw90~1@( zCH8e_{jwERz6?-~nlNfrnSxshVj4W;Vf0o(i2n(RiPd^Hm(!~%V1Sh~Mk-yF=*#*L z#5{W4p98Z;O-PN6?KwJ9X|{^(LHn01JuOX!45pyuP_NwB#3O9}B6~p$ 
zpgl@0v%kmTiCfwqwDrtmCVGX0mSUE^6Plp+S+mdU?%iZE>0a)=d5Wr7xo=e_a;4EB zd7VC7bMRt`xY|5b|2%#=XkZ1PRNj>>n-u17cbe1pE2v9w!?3Pq0RDl_uR5vdO3(m< zLKy?LGLb0!MD$YTCAtcVnrS&6uvg_{cVFfhE?U9 zn4tN-C(FF(mmOg*U_tM1c)gtG9lm|qSDk6Xi*hqSle6pK{vM32#vK0iI``k%hK2aH zdWr<+(pQa?T}h7oRbqhv)Y^TbCAA+Gd7CaKxpqENj22yQbTsWSF(~>7Q7lSDXaix= z^;-gfOK?(kJ?{{wfO;qfO-KRY64^f!E)H?~N-? zgszU2KPCUruty0BZf>chM$9tTvzW+nZeogmZSa!&ND9*0MbjxvmU5A!zPPP*fNxC_ zO|?}Vsg;wU!fc0Pq-4|@N4A8%Nc61kS<=|PDyu~ofTq*D@oPCE)2RV{DQD)L9Io?H zRK~hr_-WIDDrwO$D~rXvD!B4Idze?~_QP?zC$1iMHnl)*&(U7Jo8c#q zcE47-?jB{FC9Wk-N+os36|6aByjI&dItmhpJfUh74cBfQuu4^&$hNt51u;{XQr2qx zHQrc8*&Dt*(Hy87dJ;9pX&rsuJb+fCWUgDR?|4Je>>Z}r!1|5Kg*bVy3gg|TX78Xp zdb1uKKACx@ei5Z(*4ZVG+DMvuA#{RzUOv9f+(Vp4zm-R&?yY!;xX(HM#8bP+nF$E2 zdWv3?IP!UsZ2am$cpD2bm|OZh_Vw7jCxCSX(*nv!xHc)C7wrRrU4Cbr%3r8>7@ zPnYGBtvv0t0M(3T{ylm$35R2|&XYgxTZ2;OL)?AMpT>tav2C8-LETdq^cCC0DS4+H zqsgXWlZ#3J>3Q2s&KDNJjoDw46=QTR%h*9hp*QcrGhb)doo!@b8I>ca4weuZ`RB z>=}3?ZcQK8dCNygyH;MhgFyk9YZ+p3NhvjgB;C$cKjL_{)(~L*y_mu(pc~=0Hdfu3 z`X8u%d$_1*Ist9b*R}6iVo$)BmSz2D+9R7(xkc3ir7~x~_bmGsx1n+w);=${c9`f^ zKvWSXs^o|btSY;Uzb0*tWr)4tTzaPbtRRou4(CGc_k#R$r}r!d!NZiDj5YjT-8cRWuCvPObfTk9Vwpg zz=^4wJI5W@RmeON2Jp6O{GyAN-m7>Y#O5ccVba~2=r8oX`=%;tyWy^&Ewi?_^u&bM zQ};Y$6=TdmN|sX6m4b*O9roAi!B46)q)$%8JwTgZ95D;dy@;QplS@4kV7~rVmKJ^| zUH;J82Ka^rvbly`!jxc$rBd^W;`Twe=WOt|=U9T69JSEqUZL%o0ZbUJw&Mi3A8nJ> z%(mC8_wuGiOqKX?r)XK{Jq%T@En7Ld$-^1zQR-3Z#z?L{ngEnq(KiZ@YDhvC^-unr zN9W9^9#`@Drwd-dLq^>K;#F9))FnlI#^*%AslZtE!%tKnb&l7>8>7P>86+p(v>s#@ z4|h(sK4ZjdbS3P(iMY{m_YdliDkE-nY)jwFU@$^=@TyJe;ED_r9GNdJttPjYZAb`w zXxIHG==ju1rO);jVW9N9eHgnun25-(U?f|g>^;)IDRG$8y3>3YdU-W#npcD+qm{Xf z^2J9Mqh&JFeUs5kZ(_?R84r!C=&*7K9i8?C$7YbH&!^57i&F_zdgA=y?DmTWO{4T% zbAiE+XY;6etqNY=0>fW%8S){REbNSqW)3x%YIfM&r^qhuldeaxcua6<`U$sAOSZBp zFD+I%uVuk|f<;!w_Ox{C-Na8MYc=Ts;un&wu(_;&hwyeaBSZKRsOw{y4@@*tVTdrR zWWMNFHsbU_tD;nycc03)10p72HRI6n88G(LsHIUkwLdwMhg@*yP4aUEgJP;33;_F(x zh_Ys$7T>SLXtKqCeN^r@3VdLCZf|bun-5G*sL#@-HZkY@K2*(utxB)7)A*3Ox6Ns+ 
zxX47=aIj;D2s4*u`wKNx&04H>TQ2L0)65oXWL@WcqPixCE^>V=)$@7o9at^cKC;JL+yJi1WC^X)oF|6 zc}Li#O`{zW*9CG-y)JD7)jr&Sfjm9`n2!QPRG!VLmfO0PAaLf!Ub_MXriN}N4(Idc8b^)$- zWceH*c)BD@#C_bYq!eua&U*fSu(ALHyl;JnVJ53MYY%#lhC(ntn|TD|+ua2P1xx_T zoBRQ#;^MHkV<7b5!+ZOBF8%Q-o3_L&L%{18v|a}{p(9qnJ@IG>=&?i~goc-w#`!r^ zj_7g{DP&j7S^n1tF8w$pC8aqr!kuPDw9>x9tD(Z%mHAw$P%fH-WZqSD*{%k}M7a2N z&on2S0UV4v=BG8*wJXxKZ-p-tKd1pdw3>w<=x=-=u0kqLTGd&XP|0KcCoftS`S$|+ zVU1uxWe_uGH;)T2D@phmX9aw-iYKZc;Ge-b?sWE3P)!%W_h(yFLeCueJ=1CR4`_}N zl*43=s!(;?K;flNPVCRN$ywAueMcC9s)qW4WQuyROGW9<=+O$|iCFcWR&=8@6`L3Q zY1iC432Ewr=GZH62YhA)d4%x#Vap_4jb<*4d(G$-qGrUX>geV6q$3BTWr*c(;3OFY zxq+;53{?@Dt~K*PDMfKX3>_x4hw6o%mWO$Id8v>Ub;bo8%k-AG3uaIp+KK@5%I7=4Uzy zV5B`pva z*zG2keJ>i+(QrG3#90(ozh+ed`=t;-`LLVD43~WfOx$b#ZUj{Ct!cDH5oGUVru6B$(94wOq{-a67?Qfxr{RD=DsB@wLq-My%p~LJo2(D@a_p-^7sigjVr${y5AB-Z7v@i5{@Df&fUX3;bG9-;>H0E|5s$UC#Y zrzC9DTu_^#>D0VHQ{_916$JeRt-)UlbDVdd4a!R~9`rimtvL9^Y()26wIL;|W6Q@p zvu%0M$A=>oD7pW-#y}Zb4Z!?J`5c_vo&OqqM649&IS;$Tw-5a^3C_It1ZWt_X{566 z%u?<(=8X}jij}XV`gI|@1R(EIN)g3oYL0E=4=<_=B4E)BVmF4!8DW(jQyN;TDPXvc zj|O)~GQ=YxVw8j>gSeM_(tIls=6z(FLNSmhTmXZjG44Ko8xXw$q-L!83}X%X6?x_g z=et)@h8s-FCW+b82#exa`^xwaU-g~)z|a>2gg>}b#zf|9|FDk~m64iJcLf%Fk(CO+ z>!{$>w2TZI1lHhTStsZkSbl~X!&pni3(7YMW={mMcbWuHtI5=R)m$ndXGn+bBFKL# z-j`S{8VU>|i2ZB{OkAWI5S`#tub1(vb-ma;srB9eyajEmUmj;-BNN^Yne2)kDN-g4riNs1^!2(qD#2kEE zvop+|D}dTF(UONuK(iKWNhW!ej%2Lzf=KQ;9RNkq+j{RvVERV33AAD}Gh}2g3lAop zFw9y6-_kEtA&UqJeHn30TfsDVmff>QbPjJ0s2EfCkkzk0sebxkhU&=?)RU2#S|-Up zCV{Lv&*euGIUf7PH#Ie7%GD^nUfPKE`1Z_=gHuAUp{#Et$l+N`WR8<_kSC4}OI=zZ zkY^%1$SVeEFR~BOAa@M=Y67o6VwYwOjBv54#n*8Flab13P$NW*m4_c7sg^B2FO~C& z=?ZS6Ep&J;1&UUDK{Z7y=XP!uwn#SuMnA%xf(IPvk*W|C*WnywkR3Nrt>{=Mhmfog zTVWXUhF@O%1V%REOP!ecvRyr(TtfiXgf8}nACZ$|TMX25rEgFI}? 
zN0sevtNh6CTn#LK&M`_a(D&VIGfMmiLcE=JY zb8@G<*157a26{{aDb%f42mKAxq~4l6Lp;i(LY1J9iaheH%*xis^Wl6MHeN7P?Wr6x z>BB3$&fiE25W&YOui27T8vr*q1!W;|(JK zjaPgbp^5>BT-I@~QgDJ8MJgZS+$;741{(Bj=1P|DL6aU($`Q6R~LXCa~q63(A&!sMH5APpRWo{6D*Il$u5gcH`n4<@8~y<`6h&?IuyBp0<+B zFbK;vK1DgiMR7s4L9Z>JGG3*AJ=38|Y5OA@vH=0YJ=mV`CAJmzlQAV@^jU$2LRJ&m zns5G9b8z%SpC!IGuvgM`t&4G-#q3r7iY{rX^y`O3qc!NF5XR)u6o~0xkI6GXERO#T zhJCMOo*7@p1? z9JnV8?=OF6NlSFb>yQY|0X`*={Tbs46mSUy21Cdo5bvB7n^-kFL zobUITKg{obCHN(MOj9$==WT9-yx$g*)>b}3R3Z5uky#k+nfejR!@RCYwxd`c3L2~h z-EWz2rok51^}1`Yr?pFc)4zmHwPR;D*89TdimB>O%ltF#k;SoTiBD#()(KwM>tv`p zF9YL!`krjwnsSe%ra1;mj??c7p?HK2z5b zVt_9*p-V7(pQn_c>1*bx)v>P03Z`Nh-9Fvl#cSPM0E{pJ2SON?p_cLi9IQqJ0`*zrYB_IVd$D7{@SxhF{pOGmZxFQLtKyqIQG6@L_2R0JpZx#r?# z)2g#?#6~>SiLO*yalWg}`*Y2Z@1avpvDs$ccYpV0BW0}hqRlgd$WvlHG< z60#ZW^hjHucxZEgRkM51W6Vow)vcOSkmpZ5_~M4vW%tFe$|BA>Et z^({Af@EC>rJl@E;`NmdleYtuz#>kDRy?flK!>Cc)pW=+Vg-VmcZQ6=l8)-M>LKzyJ z5;6(B$aNoGLpc+&xafB*J(S*-?BGw==MpZ;MbTFVwZ64AeDnVm!MD(tOWlOIHkkoi zhlWj0avfGj`f;Z@c_S8+#w3LOD|!?YDxYgiI#bJ5B>0{}!5T5C@rmp1$JQ1y-LdQJ zV`+17K74X}CMje=)b!jQ)WkrnBZ@Y#)V4k)r+ZE^E=z~;ydAkOEJ%O~i2 z>C|KDuQoQhP1Wr%znO4y4ck5DVg4IFuPc8(FIEB0oiP%w5p_K5cEyt%@-yP1e7CV2 zgrXG7Exbq0twNz)JhzRhmOXxRrM;g6MGM@6I1O9=u`K&d3~gPvux1I=|IM)Z>e>N& zobofuybef$P`4|7a1i9|um|@aUAS>exa>}qM~r`tDqiKIm=&Q|DT7a~&mLQA`Q@-# z{QDZ7DDBuq99_TW1gMgT+3_>Yy*?miF;SSTl0+pKC97`>UF68E%^mqvgyo&6s3~zn zmrESZ)54?PuU+0=VDF5_R_&UTjH|-rU&>1n%It8k_~z#!_WKCxdvd!`+>JI_XN(S8 z(EF*>mrQP`!GY?ctqND35T)L#dr7_~wN>%TK1J6{t+zRyTesJ$mOW+`u8nLK9F{(l z$UlcG8+o&nZ48^Xbx@{Z4};^M?Zih6z+lLO*i^zXkEwLDbAb| z+JZ17=%kUZco5T+mMoz-Om4%U6(e~=B89se3rVHgpFpe_Pgbih0_(_fD(vj~9BnS+ zZ6{mrg)fs>HQHyGJDp#mzf4_!rae|XS1>u0WSJy>bt+fQjLn8DfAs2O%Nd=N&DD$x zPqLS}qy4|d?c75jHRrzWv4$**np#dVRiAeS#aia-P2bq9k&UH$j}LTK&-6C27pX}a z8!vun&;R7gSV4Taf()T6A_L?B%#sSoZr4FQmDpfZKAhnk;?DI19Vr_u#?^^Ntee6KO&5Q>g(Q7vVol zD}Ka$+$Y0{@2d&dQrc8FsNWKvde~K3ysh*)jqs2ohfgddrF~>YUQ|wx!Sc+ChB^p zu8uABna9gf?MU(~m2V>sgW9@``m7Q`@(@%i;_BsqdM_9}iuQ%RY11Sz>(U_jVH@;p 
z-inv)g*FNYWE_&}ie6$)vO`~K9D^QUbIl<0+1l;>QvE}-8BES98PrNQ1Q0g4 zBaJ2>QFCkC{cNQ_T0d{%U{XlCJdgrdB7>6Let_3>z_DM5a+sdEg7h9wpPoN=b;sO; zykX!DmH^L>P9IERa)6Kq{n`Gl4S{9Ok(GefIze-0e#a|bIQ_Rw%q@V!IevCq}Je?_{;PEL7?n$95GKh-r}seXNYDZC^( zCBdz0+4(zPgw86IF@ihx`wCnPx$H?r_gs@(`Mr8yhi1xMrc*~qJ+%7fsh*f&4#UXomEv7^PB!xX ziS1O{=lo+coBD&U9nL~3gTlfk!pus4jmQ-Rm%APNY-Hx2g?GDcxtHQ^HdaV_B;1zE z)XFF+lxm$G52#Aph`-CY3mq8r*Fg4=UeHXh%TSI_zx#+0cdon3Sw*cH{4bb}J1;9{GT=bA>_Q}B2yNoJOo zmtd&$wD*I*n@c_bB2s6c&nx?y?qq%nf<7ArKx}F&dgYKlgoL~y;h`L<9|yy_TfSU^ z<2E#pHe1liF`K`hb#1wJq}yQdo*(b8SGE(D8;PszWkWUO{^pWgn(ZoaB%(+zt10GX zCBOk!YW2i-h(d7YnoW1k%|8!F*F`|U;(|p~+=Ht>-RTwJNgr{9$L`2sT<`A`&R((I zlEpbTwLM9h-NN+R5ZH^jTNNzIm67w0X*z`(r-_mX+g-TdU0uM)Cdv#pPiB}(?vIcWu-_Y#a9zR3UYpAjK+o*OBVcsrqLUJuBw z`uzOc@*hW!(;MiO-lay8Kg;ilWH76W)Jy+OjeWigH3FLQ3uTqrPn)TU>fLs=S`-q=7c)16S?d$!$V`0~L=JBIf zkQUUp;P9TP=GX}eb>{IwzspjD?ZeRahb8>g>oxSF$Em6{JWktJp22pp2D}E zDs&y#yK5B0WB15@i$Zz(wmjD+YFU1Y{db08gFdBA!04R3VDW>{@FJvi5%S85s=e@m zN=+>PoH46#5@<}p-2H3kuc0aRBp)rJ z6Juc)vd5G;A3p^)oHpfW@EU>{91NEcVI*9@_qIJNpI+21F6G~AUhb`*uIHhv#Srz1 z*H>)tEWx>!#zwmzPjhaY2HgGVw?9HcGEi$#CE)MH1ZYpISx-glZw+`@Y;P0}mq_56 zB<&=#jj7npk}CygO4SRVeBN8<9i1y+%-d_D3kHeE?R_T{X+~K7z#>DEvbyau`nHyN zdN+?Pd%g4szg+jCTQc%r&wA#Q(-;#7I`=ME@LgH&ti#%k;3-O7y;cCsgHr@se2=AD2&p^Cg&Wfi*VP%Fjm z^RTjuXqMfh@$tMh|Ng&EIsfJac(|dcLSP9~zkJEDslsLd*2eGQJ3PdpSaXWzz~67_ zQ^eOd&9gOw)7g1`bpxp?=)&P(mf)~b9LEeh(;bAN25F={Vd~Y+Q^Cwr#AsTp$lIfk$N#5+pxbf){c7d0TakPCIX0 zdSKwPh{(fU6!9Hz^xqTEJjx+8rnIT3(&pdkJ>(2#ndBFL z??l}L%1w&|pXy=vnLdlnHIz!S^mKo7i?mSG2k)Ur77#jS%cV*Bd&OZPHQNV*Ut3zn?B_n-56{ssLmhHI5{##t ziLB{~4#|J8Hh!6`EQ@>fyCM~1&ejU^)#rG76=vs2W}C_xFw9cTT-p3~2%h_U=R;ST zr7Xm%j|wlBNTi;xP-+UVCYd*}C92+dKq_vMvKuf-&M^7&Q72tRg4_WpetXxh;$iJF zhglSahW;Eoz~whFwYk2Vr3S4~>$ZJmlWsJ6=6`?A?Zc0({{B?c!DoZ@mKo0_3P@<1 z)Md|=dA(AEyj6 zH(9py=VNJ=3{{Snz;xtqpGC*=iL3K350|7rNu}&Szp@iL2lMPd?u0OxN_U>7mRv&r5#t5Vmwo_Tf7u(na*5{!cGp<2oje0^ZbQo?`^v6px0ySROdP4VnKr!fC<1|w2WmnAs<(;-8Ifk z1P7VB#}GFUlENl3xMwV5`n7?0{2@+zTY}PeMwJF&NMnnb{FRp?BiGC}-En*aTcPXV 
zev%={?T1l~?8%cCLpJ<5nxe(e(e8yYBxx@VA)axBg!+b;G&M1j)FxT z*l_2j?}^)NA>?!M-^hF$hOPGz9WhYJ-R{iQfK#&TuZN8nfBg7S7HF1*hYT_*{`$!E z|By%M`OABZAG;3Z3#vwr5)OzHY#He#gWk3KIWI+?AFo-f(qzwB$tzntT6f@#mnRsu z&(zmzAA>ot!(}0%hB4^($H)wFAg+T9K*X=ttz>GAXYKZ$YrJFbCLltl$D|X3koh0o zeJc0P9Fiyupq*8ZIoF1)9oB9cgr@yCUCZ4YelYb+w)>Db9`iHC2sYH4QUl}OdLd0W zZ@j~Up0~S0y%3nYyT;z;8S0GA+)z_1UdHzsU(MZ_ZCGrP&yx@qb{`|HpD*d$XFw-9 zt9Cx6%<}eygoI1p?`(LtC^WlV&$-QBze_Ekp%!7&uVxt7aN+ z2+j(`Xxp~TISP+GKaP7vPrQ!jpOH`A{B2-;ZLQmhR`xIjTzM;`@1sW?brf6f2`bpN zB|d_@;x2eW6J%-5RWC*>o+OdSP;56jCRUVpwkd@pPNBcBY}hhF)oJP8pc_-YSx2sV2ML&shaUptmg z1a&Q4t6UXb1x1H>#*U;DW{c^_ye*va1t~%0{=>l0h_L*$0pFSH#DO)R zk%n-Bsh{5h=%;Er@yHV4seyZgGb~yelv>|#ETp@z5NdvZAF?X6FJ!>xu86%p==OHfW06ba!{yysv6yQZSKctWy#(Zm*E@+^RFD z)1jG%Is%iIlI_hopJjMW$qmy+ksF983#RDG^8(1C-4pTVUWs>AgbcH3o(|tdhxj;r z3Dp!T#;wifz9NCH4y$)F{Vc7;bk6C~2x%cgzB#3XF#$Dh#Egg&AMlP#KVAlvRXt*> zVGQ~vxy80?NIEuHr01Pl4MQ%%Hd;n1s#@M5)sR+FEAFC&O}9*tkQqNw=(Q4LkS`^s zSoMoFh%Wu34$B$1==7eCaSa&O(~x>x2r4WJOyln(sjhJ#nuF*b&}Sl^YRi;={xE62 z<(m8>yu92ZV^L^1*Hdku(wnX(6LA$R%g^Jk27Tg;(3DE*VFU`KuLhHoh>uIRp(XUX zZ@{$s;64u?^h+`*=1%=W5=#}P%U3N5pYaT6P3O8h7aq~rm;P>L;+|gieSa#0ZUF<< z|FnxlX(bT8bPbVx{60;x(0;h-fc(WW`Eq`mZf$vlh@R%w;C6IZ9?!^ArpyP7?m%2; zp#U8_%yJ!QW&I_#=y&i-20@SfQXcO;>3O)S`TfV5xV<{TM6tga7jp4K$*t)7hRrc^ zH*nOn=ipsIKmrQw7j&-)$^_2MDEJ=)mhsOrP#9`^-9Qe79bjknQ=-W!7aAHIurI1Lf(%6eu=JW-yMDeGuox3-ieIeQ2WVJa%{R}(VfEw zLrRU70xwfd(4D2CbQ{b;==7Ui*la{+_F`TUHPGmzbC2o7a zf3DO5)T5aaq{2O?F)_~LZ6)cL-s{+k0`K!(JLVS`Z)<}2m_NIR&V%^2i%C5iGZ@W0 z!#s-@+&mQr8PMViW|a@~*K73iObJ=VkSL)@#KNHz`~{Xvz-T*XP2bEg9Hcy>=9_DV z>D{7{N!MI+Fx57gM+-HEmyau4A5HXuN+Yxf@nzOLx3QMp+o7>LunOOA^=q53mF=Q- zjG7tmxihC65k$*3 z3KX`r?>7eT+O>Rpwp-}k_Sofi+kbpj65*PxGvf4qtP?-Tz=;eAUAknUlM&73@H|)T zradlH>Xuq@1<*{EmuEzZ?aWlCI_e9eC0GW0K0X~f!z1`^$8&ez_^|8#H{r*+ZOqel zHTdOOvITh1Jk6^v*wT=eGhK z`4O3bDglL+(`R~@%N?-~xS_+s6M1r~)KM-B?eC8x-|DB5iH_z9*FpO#rJ&nrmu4Zj zPpZb{6x*g_f7V&doeU-!-^|_P$ey4E7%?acED@LEq{b|eCS<2-Aa|P$3wV%a!rQcr 
znEgi&^>^pS^v;rFitTDrdcXu9x=V)Zq6%}g(!4sH+ujQ@KoV4~Ktg;I7wZO!jz?3d z#j<_rY8GlSyB_}{Vjf4XE;Pdi?V`$FvR`_cL`y3$6Vl>4_oIYFD&HzhE zZ?E|1ZO+v=mRd?!-_7z`>v;;OfWp-q4KQPRLO+P2TA^q%t8yAKDV zs=A4mBV{vuTh%RFldQ)#40|uu67paoxGMmT5P9)+bu+);--2)~#5kzf zrWQLXoatRXz>!_^1chW8fkiW(LvLMf!v#moO>D2 z=M)f|&QmlbN9U6;+JijmZymQ*+kOu|xgsDp_SM|LXYy5yF|C2yYAHTbR>8Srsel*EwT z6brAD7xQ`Xm=0;=JNx7U`Iuu;bf-7S020_|93|1nGIUp?<0Zb^ZsV}bY-Dk_gSpY{ za_5uD5|U}N?8=#oyL}z@wAq2|b{$F@+fUC~Azfpv+-4S>fsl~gTEyz^hAhJ2^Bm6~ zkX0E?JZ_iAkZ$GGux(u5E*Z0+MlRjra`g2O{7Bnd!3# zvl3jo8sN}eGGP66>X$fjlOl4Bm(p5o7p)4ZrWaIiImKMIGifc+4Jw`UC@vD%E#cgl z!pM+(G!eL5h{bx0%%BN14IEh!N}}?2B*|2BKwRxtj9n90j!%C)*sQJdn6|zwAu}ht zGE8gc?9IsN5?YoXXYf7G+=h-71MVhbS&A4I#RPh{UD;h8x&(dX-^*)2>~rzJdvYtm zY_8j>dM{zJ7bY_F@XgWvGjGXr1Z!AhM(M7lIsPnosJJ5(b5An-)b}M7ErZ8V9byhm z26M_>h-+Jh$XEji=|uYN!y3qzaemML;6jGQvBw*U+I4ul*ZFk$ge@Rc=@b(stM3#9 z;fi9KNinw{xpg$pzMUn|N}OXv{%o1MBei!h{63+8M0+~=$d5+dvxOB0G7p8N4pxC3 z@ZV4Ox$ZglMl4JN3|2*%pC}Lv30XOw`Q4ft~XFLUd72Q6Maj0+mkKlf=yRF|I8kpS% zob$u1;2DD|P(2nhAQaeT7~hfG9eW`Zej1^E@lL`qem;7}BMF1F{=0Ve7R$<4Mb<~h z>G(J=*_^@(72ey9-YVRG%*9q! 
zg?ZOlt`T(H*&<+#zvfEo7?|s#bkFGtMdzXzuG{R~gvR$l%Ge(Rs1h8h{4*GGeIp=N zBfNj!w4QLqz$T^OeWF3j^*k1>viC;4IAj)X6}H3y)YX}zf^IY^JKKjG%YCO8FqjED z=Yd0yqJUi9?(KbY`X|{~_D0r%8Y0t>aW{lW*EIZX+VmP6s z+y>5LY{z6=a-dh(ZYzg>wr1 zDuaUI?s24(nfub0|>y9Quja(UI z*WFft!t5;nPg#j(S4%jBP2w#IfyU7Tzq{6Sjs^(~P ztj-ox7(Uxv{ob?XMl{La3*+q;#r*TT7{Vq0)Q90`Fg`v=2m_FQOAu!_7|P;7Y0)-$XyFhQSvOd!#m&}00*4F;?&*J6rAW*~ zgeZ1W=_g!;VCq~o9Qp{_V~QpPQ02xi3oz?yMvQ5|&LZ-+z@nFckdTnJ$S#K8XB3cf zmHvFi55>~Igc!#p+e2e)n!h3e5)v|E%?E0b*$Sp5gm;|QIOw_1EJHV=N!ZAnH?To= zRt2rQPjwYGx}O)&t`JOfo7p4J)WUQ#UPnVh25QXhEh=O_$3fwBaqeqUBzwIVNkb*EEPDpg`sX5h@Z~t$&7`WIU(T`Q1 z%FTB01pP}ch_GD1Le&tEMBbmwa63-%3pLt6^J5Rre#`C3wJOsEP7}cla)7#bXt4DA z{|9XXZ75Q>&g=iQ(G1Vi7_bW;m0upc14cGq3Kt>JEN%u`7*Rc6lCQjs4DCPe@^;da z>_-EjW_kfd)dfh}Js=z*0IMYu3)1Ka2%CmUJym^W5Eg6l!^+NgkjUoxOQ={)XU|5z zlCJuH2osy@`V9NZWOfM+Jf;aO;Gfc6Q-fMt(XSVwcz2dcq`h<5?1Le6(slgT8($yn zIBCDVB_t}BE*=u{N#1G){&}(Frw_R2x_@B1?O^?|nu#>L*>$ExXy+k&jJu-41>b=m zu|Fify-~+u=EOS+4Xa%>TyA6}mb=_*fG!%fEkwKJ*zU#Q(dBD}zt zM2vozYD7!5+Q3NG{0twh1TsK|7du22e0R98c^Ma`-&j}s|HtiK^nr2Q|HkbaH-8n? 
z`rtGBvWOB{U7rqT|Eo7Se5m=)`G1l>Z)0xY9^oRDUuW|Jp;z}K6ZTZ zH__4Ha!qftx*==di-u=^@Ydx1)Qma)<>iq`i{9S=is@6&$s(CzTfNmUDxlwpTF*_ z`^@)wyQ)`Dd|LY+p`ieDzo1wz2Pzw{&i0``4Lzj@ z<+sBKKOPTeWFQxk#sPmyP)0@uTY#IY31f}wo-ddPLpP& z^iu~Cu2HsE?m3rW)|xoQpy?uIMr0zV0IlZiF?SYoBrVQ_WgYxdq|UwQvZ96pvc*HvbLF{Vf5IZ)M11qMMpxR8A0h3s;+*e+SyuB_Bb+t(FfSgeRaI{NC} zE&jy1l9q?GR>j8lysz)Mh^gf#OLQ0M6`pAvgX&E&95)rtUknvBS598$R<}m-8UJ^$ z=?P;2UVn?1>Z!h^DY%OJSw}x`xQzXomrSl6iX=nd`9aq8mZEJg2M z|2tr3wy^c6Bu~}Oh+C^w`_Y4w)@io-4bi1g$NYtyU@jLtp- z+BWWFD}VZxCWjgAa=Ts_axUP-{(R0a4oIV&+HGa!vYwuv<1(H|Nt#`~0`P=Z0V)6v zPf-B9*iu@6lXVY9C%MIj%{ z308=My*ElKV_SK-eYgM1$KaiCq(SL6paQ_31=4OuM?mD%^A2QHz!*G*@C}aU`d7ebK4!&7;j{bxLp)S}%n)85eI=gzs$pXQ)QS^(40D)(H4SIe1*x z{E>(eB!%j!)_-*nje&Q$n4BR-d29Uvd<|r#-ylX!1?Ad9 zUJ|49J^eYtM_VI$HPtAe#<5xnqE-$H2i&!i4r3Qv`g?O$#g542aG8zi=ivIkaZ81QKx1Y^4qZHH7T zua`aDaB0FXTsybX@cPbfz&e_#@L?`D zl{Lix?8iT>dcu{;d%9h58}=orI*^#NWd}rW1Gkygem^$OW)$NO6OAsL6Bm=o~*yVb}*rbNcIlMuEEFuNm| zz>D5%2;Of~pL&*Ih3nxyp!_;fUTo|&BI-wenJ$vPO)8(?8^)d3$SWw1{JD4GL5Tcu zE+#InWUfpB`iEd(861pzI3BKF99K|*#_&|4ZCIA{_yogM;}mp@Si;#e&;4Ew|C;G= z?)n45;j zL(Dc(;6NWRst5@;23ciMZb#Zq;Qo@FOQf@)gwGF&`Eb64JI9H3TK`Y<_ryzJniz(G zSmfMr3U6*oCd`E&Pe3GL#yLU5bFFD-ZW&{B*F+t&7VEJXiJ2k$Y#j}TB3q6S-r3#N zRm_mBO4}hJ7CA4~G6C5CYL91tyQ&Uy#mlFv9pMQ3SY#&rf^)Hre~)a;E2=-++hX!p z>m55k-x}+1e4L0v>Z&^`m@V{|NngB9xw$jc$sI6UncEK|j4bc84l2XWbZ2Lt!;U_5 zKGHVt5)^D&nwH7C|2+AggVP{>?*>BbRal<@3*3d_L$65x-Jb_YAOrrR5#X+1&E%`f zTdhvi6qA~&m1fGCn^Md9jEBDOcxSV6X;7J>@(}ml514tXBh>qCNz=%9?a3b}GodN7 zs|nzPw(H|3#V^*au1d`sQi6YDx5N3fp!lsfDih;BU_h0Z0dB6}uFFP!@4FrZyw+YQ zGjnZ_RJvocNQ>Jv5ym-^;Xfii1G0!ppA~8&geC?bGYQUCI}l zCXCmjDKeGy1<$M-o6LZ9+c@a@lLlxWEuIp|Y7zF*8MqOQJk9&6JXY5;`4QT!Qy6BF z2x&px*%YLtns+%+sMC(AuEzAO5IZ(9J`#>M@qWV|YvvxsoVjkvmq&vd|AE?p!=VQT zhq*vGmRRY8BT=HIJzS; z$w)9gAKA)2+L)H?I7}U>_Il#rRF0pQ7r!-Yiq18tTo?~bBT7YN?$+4UBt_u-2Iuf; z5^Q6ra__PpY^mD#QOw}bFZ}@Uq0A=Fe;Nctjj0W_113tnFh$qJqD_dDPvC8;b^reT zcYuh?PhvcaW8=6}18Z=E+G;6tbmN$V;nie=z%~i(yLYSnpnv~(CWFl|$>cBrJ|Nfp 
z2rr&eB2@|&h1v0f+1#q6Zgjq#9(H3G?8uNuvSj4s2tMG!din`I zsBW*$xI^&#jham>+b#ER#F*KhC{NO%(|$v?2Z$)t&9YYqrU5_hQm6FnaSaX*wpqXr z^>^T&-^}}#EUz(_M{9k>*-e}Y`o+<8TPg*HS4L!@W{J{m93`$uxV18p;o|d%#EGa= zp$5#RTf6!lcdN1s*poHTECa;@$za_|%)!7XRLQ+fwFUR&#DS?wgr-rT&Fw6~{|9)` z^MSh%Kl9E36xZdQ<%p|+wd{$)cg8CRw+KO$WXcXL+byIdAAmf}@G!*DKkP<$nru}}59u$YG7-22!h zjrh|jRNCQ=Fb4K1{|b4;*F;4{gmq*D&&1YW0D}s5l10rO#u_hiUicC`;1%6@>dprT3 z_9m;bRS$jj;H}M(>Mksy=KO@{Whm&;e7IIYrxEdA8jc@)ToqmO+gnDX=ls zHX?K#=|3}u{Z{}lQ-?FN5^<^g?#K`LpyL~4CQy67^PCtDr6_zmBA5x<`?e`&;*)JDT8kMg!ZfQL%is6G%6 z$BVf2FGTs>m!F$ffK=`}=H})+Y1F9TWkIC-Tc}-Dj>$5Uv+;zgF0TX49;!u!3Qkb! z5XO-%BWZDM>tJrMxCUto`3e!NHGr&o#I&{?`3Q(p%H*)aOeI*I+n4Ay$s9cK*KN() zx4-X-3{j!P3(%ukZf#MffrAWCkH#+4n;E^6-*jM9~hLawJ-_g8tqNO9$p2kAZnsIeeuRclz6V*0Wfx<&UErM7Z0Iu5V zXlF$e0>E3x7j}0>T+c9azUap80fzwdiJpQ&-K%u+BE}ih<6b;Xju+(K3;SAmVdw_q zrF6KsH?4wqLHKccr3?;)rAwUl_Id5eVjAGuH%PE*sX)Y*ZF$>2w%1C7JQVvO;@9dT z5b<sm5k+idFK|v$ zeqJeZMPI6{cc1Ax-u@bLb&M&sZrq&{?=3Xa5Sg&kolUXH21+~YGNg`bvOW`NQ)GtM_NvYM@ z_3Ez8^S{TC{^XnID8sV;T=UzzI+7^_;+Wc5QqSsS=YbLab z{J}G?^cv?#)SI3ZUBQj}A@n#_#b5RrjGcpFjdB9|-D7GMg;-nlvX3IB}# ziqHG2u30#Ab-JF#;;_M`{=0Ag^s^A~ul7ukn1Uz)!Ow18Pu6T6k&EdgzE1|0E}MvxsfiswAPkX_50!S^ZhJV=YqhCXXj4u;R$xWt6tWKCf_eO1%A9D6X}MeT^RWpRtPU7+19 z3baxmOYcT}HjI-QMxn$rW&E<*QnSq>@DdI+U3v!9=claGiy;=8$eMD|Q%$#bNbgb2 z7c$Fmh10UEBGi0Oub(~Ku}2KrB?bp1e+Sb%-<>(Ly)KA2`!gI$*PAhp!&wq;S!4Lo z9lKr!8P>vy8D>AXhpv7_j~}LWHauMU^}NgAo>;Z9>y0tciZLlrZ9&MKS3_MZBX)UR zf4VjPDim(j)m>qnHM}I9+#25bx&Gm;q=lct^#X&gN%gE-@I&M|TtA*++z6eYiU8Tl z(oyNY%Hq`);SdyFukf7*TuH83B4UhMU1pJO?8HDa4#9@dj53oCnb4OD z=nWtPFjTXQJOKy6uiigskqnq2jBQLgSAYvxzBuv#kzm|XZcpF^`^8k}hd80?>Q!yq zY0dRQ+DULDFR|r#(`*!)N>75;XM|=IMy-axnk_4-T~>jx9%Zmze^VuZYM84;$APbb z&VY5B`*6`jjhuX`j*=0!%>*k=A6r2$#WDc3bTu8YhBn^hsHYZyhrku)1R9h#xRNM? 
zFItI)sU{R^VdbMZ)qWEqYw$|Yow+yTwE{CUjXFKucDb~A|NRhP84ro7=b00zQQlE% zCVXj0vR2QpIwn!Tg$up4)oY;2z|LpX-Oa!YEkVNy)fP}G3c)8y9?Tl_VF$b6q70Uz zgFG8v1ZAf>UbPgS)-*vd;FKlf2+B}b9-O+En$`R&4Sh?C4$3vg4k=I4G;=tFxxWS@ zr<{$A4H^yt#3UitPV<<<%x!+M)k<@Mz+ni(s$6Z7t06#Ff%B18fWnqnO3D~SxqX0K zHOKz<73u!?GXv=Uvk)z%si=5RVFJZ6H*u*i!{3G z_=crW-)1olk2Gh&(~KR~tx4Gs+k@oCzo|M6Z7FH+Jk-2B@3V1crD??xGpt@x>vzR? zAgza2M8sUD$AbCouYzwX_6a$T{FPB@uqra;)tpN?a=Zj?UsR)t+P<$j_8ynis$!D)P8 zYJxX1r`Jwtn%ZL-ddh&}0;Lk&%qEeB!uouDj+pb#YsPhazutLDLLs@ojE$Bc8>_I4 zFm}W%F+xWV;a;neLARil;vvVsVp@Q@43!~Gy$W2~P^g51??vDPeuqev`VU?G7M{Uy zhEHZIkK(;J*7fDZ!f7`sq;7fz?*g5UV`&Mx+b0uHThX!kmHSLC;xEpRo$>x2URz;v zYyexsMb~=}3S{}%gVUopFS)GZ4WsGr*XV+o%C%gRmFg_QaD=R9$#v*qNL-?adY9988N``EWF-=JDM0k=3=q@@!t65s&5*C*anNdtXk|I2Io8H?(ZQ zDyK4Xb8Q2$T(V0vnXR!OaGwR_YbQa_V_Eq6#GB5aiZ$2WnuYq9PAY zhs8uiy#a5gIHC?p-u?9zC128|nYWQ6S3NCSG6pZli)xS6cvSfvf=3I}O&?(sl76tW z0s#>A8^3?p!RCwp(ZGxAzN5j=s^|1&CMMO_zTk+{Sx;_ zuEX)djF_OUe0Gd+6t+ryMk7cY^KV3_z;3?an#upfbhzx`^aM$)3y%;DsL_laVR*Op3k|{isH9A$}yI4ZzeqW-9QR?G}M~aVuO* z-rdcZ`k`vKTo5p4LUla{Vr3zFIVvt!0GU?4mR5Z{b8Nj-2b%CUIRZnoFz$AN){Qvs zeOLw+?0U3A>E-Es&>&2{8Xu`&9dg8u=0N^7NMW7^#(VAzT6ZNo)tsMX%?et6#{Mdq zvJe9me}REU+@=4%0-L~{jNF&+jyi!G%_=ZIVkdb~4LFCf4KYF$0wZ_jsD(5oiNwHa zqG)M`j^1(RxE&5;TKJ(RPiMeY&{|qr3aj^y<5M@ffV!EzSQjJdK~KufRo$WBcuBeG z9LH9gVUaU&Q~3g^1Y${wpz+y#H}|$bGFt0ADlR2pK-?uWxn{S425n3sFF;OLS4F=H z+g+|RKW?bP+hmMTuL4euEOu=O8$W8OEhr{t0KJ$Ftgn`3W)zbw>+?MdVd1?EXTSv( zRuz4>pCz~plvL~UzbezJS%M-a=t+%*@8I$`0C5XKDb|4YmJf?sU1a@)y7&iMklf`% z{g(algvG{%di_F0fd?cyJ~#H+3qf)Hq{-?d%61L%w(H1<0#;OC$Dg z5Z_Yn4#Yxx06LcqKU^5h(0=YqCe0j}@%#hjowT#?zLnDf)|QkVI!Vk6)|yeJu4PdJ zI`1RwS>sEnQAm@!1GQE!7|*Z4rIEab3_o>&Qs1%4glcXHXF+qJhrzJdJ_fLhI>%)8 zSg(cCB8_7Yi~sc^OjJ}g>9oIc4Yw|$ya$2*Q6_eznb^jFF;0dpx%0a3kI;7>NB~Bh z&Q}1*e`7}O&MdcNYsen{el{|~jlXpG=_Yr=V_X@bvxAF#*RNASJ}EksNG49pqi>Z~iG<$CI54KRi4t zxCvQ_{@`L6jt7pmO{q>(Ux51&{Oc=ihm6@^O$5IMjUv;aX^=_H(wBz73Oo4Zs`usg z{CQFmieyP#Vf(peu_*Y&V^+c)$i58-oftA9>jz;+i4t&eq^ljz2(DZ 
z!!?@Jitms9bL;Pk^g_9WO1l|+v|*T@J`2J+y88viv34YVpHa>XYb&Dz?1|PKp$@-B z9YdDdn$m0c0g=sBNU8>xk`cp_m`H?HP3bs!w?IC$I!OYLNWtD*g2Zaz}UbF?j@ zd|X=M?dl*E6lN{ia`5;=f^c=mR>vQ8v6)p7`iuEZ0xAF*!*o-JAPQn81-P`269gZL z-hYVOp%6kmz6GtY=WCt!GTrncD(h`{0L&0P{l%;n^-+i9xcmVAJvUuNK>>}+E+_cp zQt-4>l?sRVm)jAhFv2}-7P@QK@FHc@kgj)Ow+>9iW*L24{%Ui+7<;(`c#I4c~PW z>}9`}c(kUhbB6o&Qn(c|mO;sXJ~$07raj;|`WCjVLWr(91t~7DFniDwFq z+dd#{6~YkmHb~L}Lf6>yzMG4o-Dz9_o;xD_&Nc?)QK*aa%-AsIR?!g40)Q69b&h6r zd)ptlHvd2ctnBQCP!O1vTer*R*n=OL5-lxlf#q8d?yoH^ZlZE~U+?_y&OVF7=cWfo z(A`V*W683GRby|%{Nq0cE;VXb*o9noK4t|(l%Yz$uwcMNi_vB4)r^yaDEGTDlr`tw?>rbtnCle9Etf6$gmnB+d=1)#;F$Y^x3mP&b^x| z0=j*>K0@+@_m&a#M~=k(FteL6sI2elvWolwhF%%qWg&hoKpBaG+L;s%;&sV#SRBJI zFUtWH-P2;I_2^@Tz`Ens-!-o#i}Dqi5o}fR%*-mn!($0=9+6F5LS2PxZq=>2uWyXv zMg^|}FSbyjn^gs!Sd^8d4CkN02O&L~{DLHXr-In3Snc%mq@;p@81qk_fm!QkWkgM6 zH)ErdwDW_Gu72nMc&{2HK zzro5|*73u<<0(T2U^FajY`HH)(<}ZQ2Ac2Kv34YTB?Ywu1PwhvLmB<<0?DNO%=g_l z`0YqXQqJ?of#*~J|KlN7+UaHBO=TLvMPaYAit_CnpWV<^zD2blySTle5ARk|FhyLY z#}-!3;~7JO@Xb4J09o{h*?2Roei#nmnfJ(CBQn_%m2^1$WeK|*7q!jbyeGH1QTblK z{;>RZ5{oy&*GLtmgjMWA6XaUR1T#XCf)HC)56o-_s`*PFuiX4q-ixp?n3~jVyL#DT z+}h(D(tIBCsIqtl0WBZS=~|y7mWdE{rXjogp+4s2_UgMg2CENzKmrvyLR>flm%Ta zF9J5Kbj%)3YG`Q`rshD~&gKJPIQyy_ydFvmB+rM=vhS?5pM?GTtrCtfeAyO&jgU>= z+{gZy+M=0B+D%%cgbMZ7FRtiuf0bDLXOGf`L$uRwRMK|aTPzfBtufP|#4y`=qh`kw zJ3cHyyyfDOrY_uWl3G|L{@mLs#W*Iz{22|CC)F41 z(6iUr#~GJlhblge>cAsWlfkGnhrG(LrS(t;TvUs$bV}&z&IZb_QcLk~Fv!tfo4sZd zxH8q+X7uYrb9=kN_3IzCB3Cy3o7>v@fQ!$f>{oLxduWgm2MHMfplq3~lJZ+c1c{TiJhJ$kc<4{M)GR7iobpf}~ zWy2`VwMLq$ghnS@l|Ks>2l3mp@PQ!f#d3xTop2*M?26&w;{C%?Mi~#&_@Cq zSA1-@RZP3TWFatJHZAXeJ=DfQy*j&qNTeGQ;KGU;a}-;(zRJy`!0wD-LLsA?$XpeM zXRv-<6ciQ?`MAD!AF@RohaH6)81M%kZ1v;|_5}NXwLtQdH-e8Qh8_XX^UI5vz)gjk zjfsZKHmym3_bUq;opvk3zR$^vLEsUTuIz#7yxP8^!_0ohwc;IVNwfO*u|89~!L^nx z8EMeH-qY?;)VEBM7y?Bx_&1nbM(#iq9}|^iCSeTU70bI|vzd4) zBood0-1uH#thp2Nzcr5?>NplB>NJwKts$%Rf>Bm;8DksX(&jlNmg*Y*zO-&gCeKijPM3T+1r`;KLtzbw6;vikLWSBXhT zWu;@beM!{8KL$8ckR>RebxuBv=rD#+_5+<(MX9L_3#95{dU<#>M@?|Ln)P*_fXI28 
zxjtLg5I_4jUBG1X4sm}XkCMLtPxUG?EgrS_!b9AQvU@M3bUsO>Bbnh!N?5@>ziwe= zlYO9_$7cL)`h(&7|13A;NlF_Lc(tSpFsoecH7F30FgCsW{Bn|>mQlJq%UZgwano_v z|GtCLS1@Kb#D%xB$URT2l@?hTF?7LsNm1$&|D3n!S8um;>f26ER#EF>zRlqA&u#Ve z=7H%$KF+ev9rBK?W4`i!MeVsSr99_iQ*mpNf32-!0$jV?+gO#Bz>Wcp0W8QyLuQ)F2Xa7Rq=8EX-$R>MS-4%j_RiDUXu5LvD&wj zA-~o(%!$$aw@Su)xFBCE1}l{6Hwk)e>zhkuNq#C%>cp?;5HO{01C~PePY{$mic0cf zLNfmjQ4UQ@?$$_n@`p>3b)R}Csjot0z36u9tge|~0So7OU-mOP{vSLRqM+IKtF+&rZz z`MFuCWZ}B4K?Pq@XGw|d+^aW@YC4k<9-LUq&Ptt7WZywOydM0LCX1ES&8_OzWAVeP z!nDrBOU^T1U)0UuLb|J=YJi*mjaGXz14_YXNyi0qTdIFwdz&O zbjMx3|4=rq{MM2z7SE$7`J?F!g=RIKJy#qsRycWA4|VW&o?ec=+VZW}o*-zj_AztE zv4#DDz18~9RfU9uL;8hw5^Jo;V>M4x`7*bkm3!Qt%UVy;Pfhhn@^G!`#=QMo!KWoi z+ta@&=c;lqp`WBVamF!rtS10D96?ZfsN?M`T4G~@c*{WITXcC}KBeEj*Jc#$sHWPJ z>Go2mv$Sz^!H*>1s9stiDCf}IRd%FD=y?G0Lj^a7qhwUFNzMI+dpPycI@393zm=AV z$X?JE^Gf8(4~iU5sciK)Dzt{wET}viG%uPF$f2>2E1LK<@AKvya=u>{`8+AwEJ z`3WhfZ-Gx&a2e;j>^izTYkpB&_<7V+dP+$YwxZiZ*d!x&k@QYZe1DyIKf0*YaZx=b z49Jmd~BeCS@Uz3v($poMuq?W>%LsLuZo{V!TFP}gx@DM!p^s7~k{+E& zNR2Hv@$>q2(d^EarfL7vC;J^F`BdX#Yx6cVJY#c@QrYUS$Yz?+1Uwy+B@Nan;mD^q z(o9bV9j$c=t7aT_O&$0WA5CtuB?my`b0v6AuTSY`c@gp6hP2iEPz71VAAVHHj2%e- zTY*_r{%1D`7Dn_`QW=h*@u>s(e!TZK$S+ce_W(uy@1M?7Y*A5B#>D;h7a6&^x%@&x z>g<9ZxcxTI^cnGvpY2P|%MYIFJZ@{3eK82)5=gl%PW;o$(YZA6R5vuWmX?;h07#FF zQjvd-JvMN0`dA2*0lksr-vuVeT%Cr#`gnR)p1Nwo)U%m(q(jgWxQIu$04h#VA?>1u z$aIVxcPAtuZtuy;2Czu+_4AFl3w)MYi!~IT}HsTML zPQW{5-)z9YPD(npS`DxJ;su%j8m(C{_d((h1j^$fqE^>K3~~6meC0~2W$$+rIErBk zG6RxnJutSqdjI2r08k}u5#{zyP8lF&APj`*rgtdo{ebMRL1=?i=-7Yey9W@R2n{_A zkS7`qDgGd0+3HT;%>pXqG&mba?usD#bZC@5#Zc9hxsyL-W7-oh{d`r4cy0`4zRsRx zWEUm9TJ`4|`PbFqTTt_6fv?0@7Cn-DNuO6}Bo%o9RL1+21!|eYfS+11*!br7}bRcAgz`Yme<_}oJRagP^bNqY>T_edBU!Hfu zH!g;mo8&Mwf*9I7+^(@?Sn?eZ=JN*9Afyyt!FKVD zA}Q_SG!)$gl>CPU`o#r+GtV#vZ9F0`j2hqHQJDM<$kl9Sgb)(PlCo}X95-iEsW^0SFmwO6&Wv<`~kX(LeKf3eAtj+^a^+$q!g+ml+IQ$LJ^FG zq=Fs{ReCQ?&@W158rv?aQ7moOr{H6kz<^K6X$strI)LiUU%U7eosAdUfpBeWB+Q|W zPC1xup%nlz6R-$q!*8}pE!u!tOj}qg%>G%%yl!IQ8&<|Hfp!1%>G}uXj^jb1*e2p& 
zYrz!@jeZKHQ&wJ*74}=$@&epJUo{PgTsN%9V#4;6*cN!AGIth*OVz#sw7X7fp~_+} z@c#WbrGV+x0e9v7=bseF(3tDaZKrYsIyT7eh8-uz40MIyLo$L7hO8M_H2MQ?)K`Lh zJWlKH^_l=51uBMaL>sa!b|4ep*KfnSjlSHDyJz=erapW zksheFQc33<_}8cCIQ=saUmIl?WL^No^CCzWy&f7G$}-?~6L)W+Rmh}9oVd|F9<()c z(Rx+^a6A4Yg1lvTXu)F~jWgK7H~O5Y6q}rI_+Oi?Cs)e))CUd_=UB^jeyG105?&_; zab`1F)<%4%$Fu#qAEtpN&s>syiOa)un6hitQ$`n47{vrD{yfx=mjEkeVl5JSaoGuM z=4E%t*m~~9GxxDS{x0i)xn*o}@?6jDr#GKx(C`>QY^hhciyBx8^D5FjR)y^dVy+~u zO`UvweXDRZr!(^MzJ4QZz&?-<(!@OAA#Sat?-c<)9EP*c`&Cr(+|q3uWTjqHMfH$L zfMhX(<$tHM7sFGP=NW#Fcgp@mz%h0)jt(>hVO=hrwij+)l2;f_4s4fRd43`TE={f- zz+@8Z%yDYAi>;uClccRGy6&))@pYdp)Z7a{6aBPArpezxGII6v_pfzJBfvm>`Z<%K zk?ZyA*Q?jrJB1VdR*p#1zYjeR#_ z`HJb;blF$LJro7Y?k2^IhYppR}gddX5i#s#J~sT(#sJpv>t1Yw=;Y zdD%dHmxZNZAt=K;XwAUb5p}44u@#9rP_|zWvUz~S>K<4)Ks-dm+j=}Wdi3KC=Y{Hu z+Th>f=Z!4hI2+{OXaK%7oR-h(fV|SW4zSq<8o#D*W@MD)1hcQUpZz9Ysf$2*8wuKm z9eTbH(j*sQQ7;KXkdqkrdYQomaDqg!`~ECX72y^FbcBn&U3b-)8VcT?3ceZG`2QfC*dlqo_xa}qiEX-pGS%=o@LX2Ag`q#`@d6KuoD;5Qo1yq_3s=4F)>Z zZOsOx5Wu=nPhAnU2qLCM)FxrlWO!5o z3`3BcmXu=f|0jI=^)^@Q#%eOnvhY0#^hmFTnaza~#5SZY=$b z^j2f{2!aie(FQ!xq{by6b*`t>OfDA>R!23zqE%O6DiXHQONC;R`RB#;3UoU`aI$!y zT{hh*2zqtc%xb{?qW8@WIyD&1M6e0>6=TN=AnI5hN^yoQ3HS+mfHjK1;>vH4uyZ}U zugAq+Svfhmo27ri-0M2>?YW&n5o`-YKp>?GAHC&NKM~H~1G&J+`2=(VJRHF}guUH$ zb*>%o{-WFQNAmPwCWGWGS>Hs&8}4aNuzE-wsV6|42I0JePX9=>5D%K$K$cu?b`#@lwLVS139hc_lIgj^Sm*fP6K=2K{ z!(PxG$Tz;bH=@|21WWk272@p(>$Dppx3b}UHAQru4_6_~+FH5de0JvW>D7su`9r^H!DgEGPzgz<+TKYKLiT{Z28|yz6p- zts?zA5NUUKTQCkLz6tSv_3!wSH` zq&c611A|3|)8ZYxm#tMXn}h4%Hj#}GppmS3^(b*abn>INr%;)3M=YI@ z4nc;?P_&8v%Y6Gc3K4a|pEO{%`~Shn1;kzWYoI|EBK-Y-dx+-7j@)^+ADQ=3ECi4@ OR#s5IS#tf}!~X+5t~>q! 
literal 0 HcmV?d00001 diff --git a/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/rng_values.txt b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/rng_values.txt new file mode 100644 index 0000000000000..5e95a585d362f --- /dev/null +++ b/tools-superlinear/run_llama/sample_runs/gemma-2-2b_external-api_20250311_152948/rng_values.txt @@ -0,0 +1,98 @@ +# RNG values from external-api provider +0.592845 +0.844266 +0.857946 +0.847252 +0.623564 +0.384382 +0.297535 +0.056713 +0.272656 +0.477665 +0.812169 +0.479977 +0.392785 +0.836079 +0.337396 +0.648172 +0.368242 +0.957155 +0.140351 +0.870087 +0.473608 +0.800911 +0.520477 +0.67888 +0.720633 +0.58202 +0.537373 +0.758616 +0.105908 +0.4736 +0.186332 +0.736918 +0.21655 +0.135218 +0.324141 +0.149675 +0.222321 +0.386489 +0.902598 +0.44995 +0.613063 +0.902349 +0.0992804 +0.969809 +0.65314 +0.17091 +0.358152 +0.750686 +0.607831 +0.325047 +0.0384254 +0.634274 +0.958949 +0.65279 +0.635059 +0.9953 +0.58185 +0.414369 +0.474698 +0.62351 +0.338008 +0.674752 +0.317202 +0.778345 +0.949571 +0.662527 +0.0135716 +0.622846 +0.67366 +0.971945 +0.878193 +0.509624 +0.0557147 +0.451159 +0.0199877 +0.441711 +0.979587 +0.359444 +0.480894 +0.688661 +0.880476 +0.918235 +0.216822 +0.565189 +0.865103 +0.508969 +0.916723 +0.921158 +0.0831125 +0.277719 +0.0093567 +0.842342 +0.647174 +0.841386 +0.26473 +0.397821 +0.552821 From 3f9c78f10c6c225de3adbeda76b4ab1f853ff030 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 20:23:57 +0300 Subject: [PATCH 08/22] v0.0.6 - draft run.py --visualize-probabilities --- pyproject.toml | 2 +- tools-superlinear/run_llama/run.py | 92 ++++++++++++++++++++++++++++-- 2 files changed, 89 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 820c1602bb7d0..6ae1399d1730c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llama-cpp-scripts" -version = "0.0.5" +version = "0.0.6" 
description = "Scripts that ship with llama.cpp" authors = ["GGML "] readme = "README.md" diff --git a/tools-superlinear/run_llama/run.py b/tools-superlinear/run_llama/run.py index 5df2c78553f7d..cc08e2b849b1f 100755 --- a/tools-superlinear/run_llama/run.py +++ b/tools-superlinear/run_llama/run.py @@ -159,7 +159,7 @@ def run_model(config, config_path): output_file = os.path.join(run_dir, "output.txt") log_file = os.path.join(run_dir, "log.txt") rng_file = os.path.join(run_dir, "rng_values.txt") - plot_file = os.path.join(run_dir, "rng_distribution.png") + token_data_file = os.path.join(run_dir, "token_data.jsonl") # Save config to run directory with open(os.path.join(run_dir, "config.yaml"), 'w') as f: @@ -193,6 +193,11 @@ def run_model(config, config_path): env["LLAMA_RNG_OUTPUT"] = rng_file # Save RNG values directly to run dir + # Set token data file path if visualization is enabled + if config.get('visualize_tokens') or config.get('visualize_probabilities'): + env["LLAMA_TOKEN_DATA_FILE"] = token_data_file + logger.info(f"Token data will be saved to: {token_data_file}") + # Build the command cmd = [llama_run_path] @@ -235,10 +240,17 @@ def run_model(config, config_path): # Only proceed with visualization if the run was successful if process.returncode == 0: - # Generate plot if RNG values file exists + # Generate visualizations if enabled + if config.get('visualize_probabilities') and os.path.exists(token_data_file): + visualize_probabilities(token_data_file, os.path.join(run_dir, "probabilities.png")) + + if config.get('visualize_tokens') and os.path.exists(token_data_file): + visualize_tokens(token_data_file, os.path.join(run_dir, "tokens.png")) + + # Generate RNG plot if values file exists if os.path.exists(rng_file) and os.path.getsize(rng_file) > 0: - if visualize_distribution(rng_file, plot_file, config['rng_provider']): - logger.success(f"RNG distribution plot: {plot_file}") + if visualize_distribution(rng_file, os.path.join(run_dir, 
"rng_distribution.png"), config['rng_provider']): + logger.success(f"RNG distribution plot: {os.path.join(run_dir, 'rng_distribution.png')}") else: logger.warning(f"Failed to generate RNG distribution plot") else: @@ -249,12 +261,82 @@ def run_model(config, config_path): logger.info(f"Log: {log_file}") if os.path.exists(rng_file): logger.info(f"RNG values: {rng_file}") + if os.path.exists(token_data_file): + logger.info(f"Token data: {token_data_file}") return True except Exception as e: logger.error(f"Error in run_model: {e}") return False +def visualize_probabilities(token_data_file: str, output_file: str) -> bool: + """Visualize token probabilities from the token data file""" + try: + import matplotlib.pyplot as plt + import json + import numpy as np + + # Read token data + probabilities = [] + with open(token_data_file, 'r') as f: + for line in f: + data = json.loads(line) + probabilities.append([t['probability'] for t in data['tokens']]) + + if not probabilities: + logger.warning("No probability data found to visualize") + return False + + # Create heatmap + plt.figure(figsize=(15, 10)) + plt.imshow(probabilities, aspect='auto', cmap='viridis') + plt.colorbar(label='Probability') + plt.xlabel('Token Index') + plt.ylabel('Generation Step') + plt.title('Token Probabilities During Generation') + plt.savefig(output_file) + plt.close() + + logger.success(f"Probability visualization saved to: {output_file}") + return True + except Exception as e: + logger.error(f"Error generating probability visualization: {e}") + return False + +def visualize_tokens(token_data_file: str, output_file: str) -> bool: + """Visualize token sequences from the token data file""" + try: + import matplotlib.pyplot as plt + import json + import numpy as np + + # Read token data + tokens = [] + with open(token_data_file, 'r') as f: + for line in f: + data = json.loads(line) + tokens.append([t['token_id'] for t in data['tokens']]) + + if not tokens: + logger.warning("No token data found to 
visualize") + return False + + # Create token sequence visualization + plt.figure(figsize=(15, 10)) + plt.imshow(tokens, aspect='auto', cmap='tab20') + plt.colorbar(label='Token ID') + plt.xlabel('Token Position') + plt.ylabel('Generation Step') + plt.title('Token IDs During Generation') + plt.savefig(output_file) + plt.close() + + logger.success(f"Token visualization saved to: {output_file}") + return True + except Exception as e: + logger.error(f"Error generating token visualization: {e}") + return False + def main(): parser = argparse.ArgumentParser(description="Run llama.cpp with configurable RNG provider") default_config = os.path.join(SCRIPT_DIR, "config.yaml") @@ -266,6 +348,8 @@ def main(): parser.add_argument("-n", "--num-tokens", type=int, help="Override number of tokens to generate") parser.add_argument("-a", "--api-url", help="API URL for external-api RNG provider") parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging") + parser.add_argument("--visualize-probabilities", action="store_true", help="Visualize token probabilities during generation") + parser.add_argument("--visualize-tokens", action="store_true", help="Visualize token sequences during generation") args = parser.parse_args() From 4418e2d1a1e1c49e90401f4f369316f050c026a7 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 20:35:01 +0300 Subject: [PATCH 09/22] snapshot --- tools-superlinear/run_llama/run.py | 95 ++++++++++++------------------ 1 file changed, 37 insertions(+), 58 deletions(-) diff --git a/tools-superlinear/run_llama/run.py b/tools-superlinear/run_llama/run.py index cc08e2b849b1f..c40725d5884e3 100755 --- a/tools-superlinear/run_llama/run.py +++ b/tools-superlinear/run_llama/run.py @@ -242,10 +242,16 @@ def run_model(config, config_path): if process.returncode == 0: # Generate visualizations if enabled if config.get('visualize_probabilities') and os.path.exists(token_data_file): - visualize_probabilities(token_data_file, 
os.path.join(run_dir, "probabilities.png")) + prob_output = os.path.join(run_dir, "probabilities.png") + if visualize_probabilities(token_data_file, prob_output): + logger.success(f"Token probability visualization saved to: {prob_output}") + logger.success(f"Token probability data saved to: {prob_output}.json") if config.get('visualize_tokens') and os.path.exists(token_data_file): - visualize_tokens(token_data_file, os.path.join(run_dir, "tokens.png")) + token_output = os.path.join(run_dir, "tokens.png") + if visualize_tokens(token_data_file, token_output): + logger.success(f"Token visualization saved to: {token_output}") + logger.success(f"Token HTML visualization saved to: {token_output}.html") # Generate RNG plot if values file exists if os.path.exists(rng_file) and os.path.getsize(rng_file) > 0: @@ -270,71 +276,38 @@ def run_model(config, config_path): return False def visualize_probabilities(token_data_file: str, output_file: str) -> bool: - """Visualize token probabilities from the token data file""" + """Visualize token probabilities using process_json_tokens.py""" try: - import matplotlib.pyplot as plt - import json - import numpy as np - - # Read token data - probabilities = [] - with open(token_data_file, 'r') as f: - for line in f: - data = json.loads(line) - probabilities.append([t['probability'] for t in data['tokens']]) - - if not probabilities: - logger.warning("No probability data found to visualize") + script_path = SCRIPT_DIR.parent / "visualize_tokens" / "process_json_tokens.py" + if not script_path.exists(): + logger.warning(f"Token probability visualization script not found at {script_path}") return False - - # Create heatmap - plt.figure(figsize=(15, 10)) - plt.imshow(probabilities, aspect='auto', cmap='viridis') - plt.colorbar(label='Probability') - plt.xlabel('Token Index') - plt.ylabel('Generation Step') - plt.title('Token Probabilities During Generation') - plt.savefig(output_file) - plt.close() - - logger.success(f"Probability 
visualization saved to: {output_file}") + + cmd = ["python", str(script_path), token_data_file, "-o", output_file + ".json", "-p", output_file] + logger.info(f"Running token probability visualization: {' '.join(cmd)}") + result = subprocess.run(cmd, check=True, capture_output=True, text=True) + logger.success(f"Token probability visualization saved to {output_file}") return True except Exception as e: - logger.error(f"Error generating probability visualization: {e}") + logger.error(f"Failed to generate token probability visualization: {e}") return False def visualize_tokens(token_data_file: str, output_file: str) -> bool: - """Visualize token sequences from the token data file""" + """Visualize tokens using token_probability_visualizer.py""" try: - import matplotlib.pyplot as plt - import json - import numpy as np - - # Read token data - tokens = [] - with open(token_data_file, 'r') as f: - for line in f: - data = json.loads(line) - tokens.append([t['token_id'] for t in data['tokens']]) - - if not tokens: - logger.warning("No token data found to visualize") + script_path = SCRIPT_DIR.parent / "visualize_tokens" / "token_probability_visualizer.py" + if not script_path.exists(): + logger.warning(f"Token visualization script not found at {script_path}") return False - - # Create token sequence visualization - plt.figure(figsize=(15, 10)) - plt.imshow(tokens, aspect='auto', cmap='tab20') - plt.colorbar(label='Token ID') - plt.xlabel('Token Position') - plt.ylabel('Generation Step') - plt.title('Token IDs During Generation') - plt.savefig(output_file) - plt.close() - - logger.success(f"Token visualization saved to: {output_file}") + + cmd = ["python", str(script_path), token_data_file, "--html", output_file + ".html", "--plot", output_file] + logger.info(f"Running token visualization: {' '.join(cmd)}") + result = subprocess.run(cmd, check=True, capture_output=True, text=True) + logger.success(f"Token visualization saved to {output_file}") + logger.success(f"Token HTML 
visualization saved to {output_file}.html") return True except Exception as e: - logger.error(f"Error generating token visualization: {e}") + logger.error(f"Failed to generate token visualization: {e}") return False def main(): @@ -348,8 +321,10 @@ def main(): parser.add_argument("-n", "--num-tokens", type=int, help="Override number of tokens to generate") parser.add_argument("-a", "--api-url", help="API URL for external-api RNG provider") parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging") - parser.add_argument("--visualize-probabilities", action="store_true", help="Visualize token probabilities during generation") - parser.add_argument("--visualize-tokens", action="store_true", help="Visualize token sequences during generation") + parser.add_argument("--visualize-probabilities", action="store_true", + help="Visualize token probabilities during generation") + parser.add_argument("--visualize-tokens", action="store_true", + help="Visualize token sequences during generation") args = parser.parse_args() @@ -399,6 +374,10 @@ def main(): logger.error(f"Error: api_url must be specified when using external-api RNG provider") return 1 + # Add visualization flags to config + config['visualize_probabilities'] = args.visualize_probabilities + config['visualize_tokens'] = args.visualize_tokens + # Run the model success = run_model(config, args.config) return 0 if success else 1 From ad2ad8502a786bde8d7d334d42a7e8a3591d1a34 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 21:01:17 +0300 Subject: [PATCH 10/22] snapshot 2 --- pseudo-colored-output.txt | 1 + token_probs.json | 584 ++++++++++++++++++ tools-superlinear/run_llama/run.py | 31 +- .../visualize_tokens/process_json_tokens.py | 115 +++- .../visualize_tokens/simple_token_viz.py | 132 ++++ .../token_probability_visualizer.py | 119 ++-- 6 files changed, 880 insertions(+), 102 deletions(-) create mode 100644 pseudo-colored-output.txt create mode 100644 
token_probs.json create mode 100644 tools-superlinear/visualize_tokens/simple_token_viz.py diff --git a/pseudo-colored-output.txt b/pseudo-colored-output.txt new file mode 100644 index 0000000000000..70ad0b7a847c5 --- /dev/null +++ b/pseudo-colored-output.txt @@ -0,0 +1 @@ +235315(1.0)235308(1.3)235276(4.5)235269(1.0)573(2.1)5168(8.8)664(1.7)112819(1.0)17273(1.0)235281(1.0)729(1.1)124133(2.4)731(1.5)3350(1.8)55900(1.0)235269(1.7)476(1.9)45865(37.3)696(1.2)26732(4.5)235265(1.4)1165(11.4)729(1.9)12080(19.1)6908(8.9)685(1.0)573(1.6)2725(42.7)576(1.1)3320(2.3)674(1.6)16722(3.1)675(1.0)25309(15.5)8409(1.4)575(1.6)15051(1.2)235265(1.2)714(3.7)6789(4.2)576(1.9)16481(2.9)729(9.8)577(1.0)3104(1.2)15051(1.3)674(1.0)1538(1.0)3114(3.2)5766(31.5)13333(1.0)685(9.0)17611(7.2)749(1.3)235269(2.4)1582(1.0)685(1.0)32346(7.2)235269(1.0)3210(2.3)235290(1.7)60495(1.0)235269(1.0)6044(6.7)235269(1.0)578(1.0)85985(65.5)577(1.1)888(1.3)15212(1.1)235265(1.0)108(1.7)651(6.2)2725(8.5)576(1.0)16481(1.1)919(1.1)36186(5.9)12879(1.2)2754(1.7)1277(1.8)72173(1.1)235265(1.4)878(2.5)573(1.1)4061(2.1)2705(1.1)235269(1.3)16481(1.1)3679(3.3)14779(1.3)611(1.1)11400(2.8)28514(2.2) \ No newline at end of file diff --git a/token_probs.json b/token_probs.json new file mode 100644 index 0000000000000..532aeb244ecf3 --- /dev/null +++ b/token_probs.json @@ -0,0 +1,584 @@ +[ + { + "index": 20, + "token_id": 12362, + "probability": 0.00651, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 235248, + "probability": 0.295117, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 235274, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 235321, + "probability": 0.04369, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 235304, + "probability": 0.026943, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 235308, + "probability": 0.089691, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 235256, + "probability": 0.027245, + "cumulative": 1.0 + }, + { + 
"index": 9, + "token_id": 671, + "probability": 0.012151, + "cumulative": 1.0 + }, + { + "index": 25, + "token_id": 5842, + "probability": 0.004854, + "cumulative": 1.0 + }, + { + "index": 4, + "token_id": 16481, + "probability": 0.014562, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 105460, + "probability": 0.036299, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 17273, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 235281, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 9223, + "probability": 0.037737, + "cumulative": 1.0 + }, + { + "index": 5, + "token_id": 42020, + "probability": 0.019484, + "cumulative": 1.0 + }, + { + "index": 6, + "token_id": 696, + "probability": 0.017278, + "cumulative": 1.0 + }, + { + "index": 6, + "token_id": 135853, + "probability": 0.013522, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 55900, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 4, + "token_id": 575, + "probability": 0.019894, + "cumulative": 1.0 + }, + { + "index": 5, + "token_id": 578, + "probability": 0.023639, + "cumulative": 1.0 + }, + { + "index": 12, + "token_id": 7149, + "probability": 0.011458, + "cumulative": 1.0 + }, + { + "index": 3, + "token_id": 578, + "probability": 0.026286, + "cumulative": 1.0 + }, + { + "index": 5, + "token_id": 595, + "probability": 0.014781, + "cumulative": 1.0 + }, + { + "index": 3, + "token_id": 591, + "probability": 0.034936, + "cumulative": 1.0 + }, + { + "index": 15, + "token_id": 968, + "probability": 0.00609, + "cumulative": 1.0 + }, + { + "index": 6, + "token_id": 13059, + "probability": 0.01801, + "cumulative": 1.0 + }, + { + "index": 24, + "token_id": 6869, + "probability": 0.006793, + "cumulative": 1.0 + }, + { + "index": 6, + "token_id": 6990, + "probability": 0.016133, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 685, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 476, + 
"probability": 0.125677, + "cumulative": 1.0 + }, + { + "index": 7, + "token_id": 2725, + "probability": 0.023423, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 674, + "probability": 0.061914, + "cumulative": 1.0 + }, + { + "index": 3, + "token_id": 8042, + "probability": 0.122625, + "cumulative": 1.0 + }, + { + "index": 7, + "token_id": 575, + "probability": 0.015417, + "cumulative": 1.0 + }, + { + "index": 19, + "token_id": 18460, + "probability": 0.008059, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 675, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 11, + "token_id": 25175, + "probability": 0.013, + "cumulative": 1.0 + }, + { + "index": 4, + "token_id": 16612, + "probability": 0.02641, + "cumulative": 1.0 + }, + { + "index": 4, + "token_id": 235265, + "probability": 0.058654, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 6875, + "probability": 0.025434, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 578, + "probability": 0.059038, + "cumulative": 1.0 + }, + { + "index": 12, + "token_id": 16481, + "probability": 0.00734, + "cumulative": 1.0 + }, + { + "index": 9, + "token_id": 10165, + "probability": 0.007557, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 603, + "probability": 0.021314, + "cumulative": 1.0 + }, + { + "index": 3, + "token_id": 573, + "probability": 0.015347, + "cumulative": 1.0 + }, + { + "index": 3, + "token_id": 20010, + "probability": 0.04449, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 577, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 2500, + "probability": 0.050125, + "cumulative": 1.0 + }, + { + "index": 4, + "token_id": 25175, + "probability": 0.02994, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 674, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 798, + "probability": 0.047492, + "cumulative": 1.0 + }, + { + "index": 7, + "token_id": 3508, + "probability": 0.013672, + 
"cumulative": 1.0 + }, + { + "index": 1, + "token_id": 5766, + "probability": 0.031732, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 13333, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 10, + "token_id": 696, + "probability": 0.016575, + "cumulative": 1.0 + }, + { + "index": 3, + "token_id": 34790, + "probability": 0.061788, + "cumulative": 1.0 + }, + { + "index": 4, + "token_id": 1538, + "probability": 0.035059, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 235269, + "probability": 0.420136, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 1582, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 685, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 9, + "token_id": 5255, + "probability": 0.011036, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 578, + "probability": 0.039092, + "cumulative": 1.0 + }, + { + "index": 4, + "token_id": 8377, + "probability": 0.013009, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 26149, + "probability": 0.422272, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 60495, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 235269, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 6044, + "probability": 0.149887, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 578, + "probability": 0.033882, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 578, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 18, + "token_id": 3210, + "probability": 0.005813, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 235265, + "probability": 0.076596, + "cumulative": 1.0 + }, + { + "index": 3, + "token_id": 4559, + "probability": 0.020364, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 25411, + "probability": 0.09184, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 235265, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 6, + 
"token_id": 4560, + "probability": 0.022317, + "cumulative": 1.0 + }, + { + "index": 6, + "token_id": 17983, + "probability": 0.017229, + "cumulative": 1.0 + }, + { + "index": 5, + "token_id": 5168, + "probability": 0.015439, + "cumulative": 1.0 + }, + { + "index": 0, + "token_id": 576, + "probability": 1.0, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 18225, + "probability": 0.091378, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 729, + "probability": 0.048832, + "cumulative": 1.0 + }, + { + "index": 11, + "token_id": 77885, + "probability": 0.008413, + "cumulative": 1.0 + }, + { + "index": 3, + "token_id": 476, + "probability": 0.024573, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 575, + "probability": 0.0163, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 573, + "probability": 0.025395, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 4061, + "probability": 0.072068, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 575, + "probability": 0.043732, + "cumulative": 1.0 + }, + { + "index": 11, + "token_id": 3428, + "probability": 0.013968, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 6077, + "probability": 0.051516, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 3433, + "probability": 0.049357, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 1658, + "probability": 0.073556, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 576, + "probability": 0.20191, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 573, + "probability": 0.037629, + "cumulative": 1.0 + }, + { + "index": 4, + "token_id": 5188, + "probability": 0.03369, + "cumulative": 1.0 + }, + { + "index": 1, + "token_id": 729, + "probability": 0.209916, + "cumulative": 1.0 + }, + { + "index": 2, + "token_id": 13791, + "probability": 0.029944, + "cumulative": 1.0 + }, + { + "index": 8, + "token_id": 573, + "probability": 0.011521, + "cumulative": 1.0 + }, + { + "index": 12, + "token_id": 6859, + 
"probability": 0.015218, + "cumulative": 1.0 + } +] \ No newline at end of file diff --git a/tools-superlinear/run_llama/run.py b/tools-superlinear/run_llama/run.py index c40725d5884e3..6e684cef9557c 100755 --- a/tools-superlinear/run_llama/run.py +++ b/tools-superlinear/run_llama/run.py @@ -283,10 +283,23 @@ def visualize_probabilities(token_data_file: str, output_file: str) -> bool: logger.warning(f"Token probability visualization script not found at {script_path}") return False - cmd = ["python", str(script_path), token_data_file, "-o", output_file + ".json", "-p", output_file] + cmd = ["python", str(script_path), token_data_file, "-o", output_file + ".json", "-p", output_file, "--analyze"] logger.info(f"Running token probability visualization: {' '.join(cmd)}") - result = subprocess.run(cmd, check=True, capture_output=True, text=True) + result = subprocess.run(cmd, capture_output=True, text=True) + + if result.returncode != 0: + logger.error(f"Token probability visualization failed with error:") + logger.error(f"stdout: {result.stdout}") + logger.error(f"stderr: {result.stderr}") + return False + + # Log the analysis output + if result.stdout: + for line in result.stdout.splitlines(): + logger.info(f"Analysis: {line}") + logger.success(f"Token probability visualization saved to {output_file}") + logger.success(f"Token probability data saved to {output_file}.json") return True except Exception as e: logger.error(f"Failed to generate token probability visualization: {e}") @@ -302,7 +315,19 @@ def visualize_tokens(token_data_file: str, output_file: str) -> bool: cmd = ["python", str(script_path), token_data_file, "--html", output_file + ".html", "--plot", output_file] logger.info(f"Running token visualization: {' '.join(cmd)}") - result = subprocess.run(cmd, check=True, capture_output=True, text=True) + result = subprocess.run(cmd, capture_output=True, text=True) + + if result.returncode != 0: + logger.error(f"Token visualization failed with error:") + 
logger.error(f"stdout: {result.stdout}") + logger.error(f"stderr: {result.stderr}") + return False + + # Log any output + if result.stdout: + for line in result.stdout.splitlines(): + logger.info(f"Visualization: {line}") + logger.success(f"Token visualization saved to {output_file}") logger.success(f"Token HTML visualization saved to {output_file}.html") return True diff --git a/tools-superlinear/visualize_tokens/process_json_tokens.py b/tools-superlinear/visualize_tokens/process_json_tokens.py index a22b7aa2f7c24..9b3b2bce83777 100755 --- a/tools-superlinear/visualize_tokens/process_json_tokens.py +++ b/tools-superlinear/visualize_tokens/process_json_tokens.py @@ -50,35 +50,57 @@ def create_probability_plot(tokens, output_file): print("No tokens to plot") return False - probabilities = [token.get("selected_probability", 0) for token in tokens] - indices = range(len(probabilities)) + # Extract probabilities from the new format + selected_probs = [] + top_probs = [] # For the highest probability candidate + + for token_data in tokens: + if 'selected_probability' in token_data: + selected_probs.append(token_data['selected_probability']) + + # Get the highest probability among candidates + if 'tokens' in token_data and token_data['tokens']: + max_prob = max(t['probability'] for t in token_data['tokens']) + top_probs.append(max_prob) + + if not selected_probs: + print("No probability data found in tokens") + return False # Create plot plt.figure(figsize=(12, 8)) - plt.plot(indices, probabilities, 'b-', marker='o', markersize=4) + + # Plot both selected and top probabilities + plt.plot(selected_probs, 'b-', marker='o', markersize=4, alpha=0.7, + label='Selected Token Probability') + if top_probs: + plt.plot(top_probs, 'r-', marker='o', markersize=4, alpha=0.4, + label='Highest Candidate Probability') + plt.title('Token Selection Probabilities') plt.xlabel('Token Index') plt.ylabel('Probability') plt.grid(True, alpha=0.3) + plt.legend() - # Add rolling average - 
window_size = min(10, len(probabilities)) + # Add rolling average for selected probabilities + window_size = min(10, len(selected_probs)) if window_size > 1: - rolling_avg = np.convolve(probabilities, np.ones(window_size)/window_size, mode='valid') - plt.plot(range(window_size-1, len(probabilities)), rolling_avg, 'r-', + rolling_avg = np.convolve(selected_probs, np.ones(window_size)/window_size, mode='valid') + plt.plot(range(window_size-1, len(selected_probs)), rolling_avg, 'g-', linewidth=2, label=f'{window_size}-token Moving Average') plt.legend() # Add statistics - mean_prob = np.mean(probabilities) - median_prob = np.median(probabilities) - min_prob = min(probabilities) - max_prob = max(probabilities) - std_dev = np.std(probabilities) + mean_prob = np.mean(selected_probs) + median_prob = np.median(selected_probs) + min_prob = min(selected_probs) + max_prob = max(selected_probs) + std_dev = np.std(selected_probs) stats_text = ( - f"Statistics:\n" - f"Count: {len(probabilities)}\n" + f"Selected Token Statistics:\n" + f"Count: {len(selected_probs)}\n" f"Mean: {mean_prob:.6f}\n" f"Median: {median_prob:.6f}\n" f"Min: {min_prob:.6f}\n" @@ -91,6 +113,7 @@ def create_probability_plot(tokens, output_file): plt.tight_layout() plt.savefig(output_file) + print(f"Probability plot saved to {output_file}") return True def analyze_token_data(tokens): @@ -99,16 +122,33 @@ def analyze_token_data(tokens): print("No tokens to analyze") return - probabilities = [token.get("selected_probability", 0) for token in tokens] - random_values = [token.get("raw_random", 0) for token in tokens if "raw_random" in token] - - print("\nToken Statistics:") - print(f" Total tokens: {len(tokens)}") - print(f" Mean probability: {np.mean(probabilities):.6f}") - print(f" Median probability: {np.median(probabilities):.6f}") - print(f" Min probability: {min(probabilities):.6f}") - print(f" Max probability: {max(probabilities):.6f}") - print(f" Std Dev: {np.std(probabilities):.6f}") + # Extract data 
from the new format + selected_probs = [] + random_values = [] + selected_token_ids = [] + candidate_counts = [] + + for token_data in tokens: + if 'selected_probability' in token_data: + selected_probs.append(token_data['selected_probability']) + + if 'raw_random' in token_data: + random_values.append(token_data['raw_random']) + + if 'selected_token_id' in token_data: + selected_token_ids.append(token_data['selected_token_id']) + + if 'tokens' in token_data: + candidate_counts.append(len(token_data['tokens'])) + + if selected_probs: + print("\nSelected Token Statistics:") + print(f" Total tokens: {len(selected_probs)}") + print(f" Mean probability: {np.mean(selected_probs):.6f}") + print(f" Median probability: {np.median(selected_probs):.6f}") + print(f" Min probability: {min(selected_probs):.6f}") + print(f" Max probability: {max(selected_probs):.6f}") + print(f" Std Dev: {np.std(selected_probs):.6f}") if random_values: print("\nRandom Number Statistics:") @@ -118,6 +158,19 @@ def analyze_token_data(tokens): print(f" Min value: {min(random_values):.6f}") print(f" Max value: {max(random_values):.6f}") print(f" Std Dev: {np.std(random_values):.6f}") + + if selected_token_ids: + print("\nToken ID Statistics:") + print(f" Total token IDs: {len(selected_token_ids)}") + print(f" Unique token IDs: {len(set(selected_token_ids))}") + print(f" Min token ID: {min(selected_token_ids)}") + print(f" Max token ID: {max(selected_token_ids)}") + + if candidate_counts: + print("\nCandidate Statistics:") + print(f" Average candidates per token: {np.mean(candidate_counts):.1f}") + print(f" Min candidates: {min(candidate_counts)}") + print(f" Max candidates: {max(candidate_counts)}") def main(): parser = argparse.ArgumentParser(description="Process JSON token data from llama.cpp") @@ -142,8 +195,13 @@ def main(): for line in f: line = line.strip() if line: - token = json.loads(line) - tokens.append(token) + try: + token = json.loads(line) + tokens.append(token) + except 
json.JSONDecodeError as e: + print(f"Error parsing JSON line: {e}", file=sys.stderr) + print(f"Problematic line: {line}", file=sys.stderr) + continue except Exception as e: print(f"Error reading input file: {e}", file=sys.stderr) return 1 @@ -158,8 +216,9 @@ def main(): return 1 # Generate probability plot - if create_probability_plot(tokens, args.plot): - print(f"Probability plot saved to {args.plot}") + if not create_probability_plot(tokens, args.plot): + print("Failed to create probability plot", file=sys.stderr) + return 1 # Print analysis if args.analyze: diff --git a/tools-superlinear/visualize_tokens/simple_token_viz.py b/tools-superlinear/visualize_tokens/simple_token_viz.py new file mode 100644 index 0000000000000..64273bd1223d5 --- /dev/null +++ b/tools-superlinear/visualize_tokens/simple_token_viz.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python3 +""" +Simple token visualization script that outputs tokens and their probabilities +in a basic text format: Token1(score1)Token2(score2)... 
+ +Two modes available: +- absolute: shows 1/p where p is the absolute probability +- relative: shows 1/(p/max(candidates)) where p is the token's probability +""" + +import json +import argparse +from pathlib import Path +from typing import List, Dict, Any +from loguru import logger + +def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = False) -> str: + """Process token data and return formatted string.""" + output = [] + + # Read the entire file content + with open(jsonl_file, 'r') as f: + content = f.read() + + # Split the content at each new JSON object start + json_objects = [] + current_obj = "" + for line in content.splitlines(): + line = line.strip() + if not line: + continue + + if line == "{": # Start of a new object + if current_obj: # If we have collected a previous object + json_objects.append(current_obj) + current_obj = line + else: + current_obj += line + + # Add the last object if it exists + if current_obj: + json_objects.append(current_obj) + + # Process each JSON object + for json_str in json_objects: + try: + # Try to parse the JSON + data = json.loads(json_str) + + # Extract token info + token_id = data.get('selected_token_id') + prob = data.get('selected_probability') + + if token_id is None or prob is None: + if debug: + logger.debug(f"Missing token_id or probability in: {json_str[:100]}...") + continue + + # Calculate score + if mode == 'relative': + # Get max probability from candidates + candidates = data.get('tokens', []) + if candidates: + max_prob = max(t.get('probability', 0) for t in candidates) + score = 1.0 / (prob / max_prob) if max_prob > 0 else float('inf') + else: + score = 1.0 + else: # absolute mode + score = 1.0 / prob if prob > 0 else float('inf') + + # Format output + output.append(f"{token_id}({score:.1f})") + + except json.JSONDecodeError as e: + if debug: + logger.error(f"Failed to parse JSON: {e}") + logger.debug(f"Object start: {json_str[:100]}...") + continue + + if not output: + 
logger.warning("No valid tokens were processed!") + return "" + + result = "".join(output) + logger.info(f"Processed {len(output)} tokens successfully") + return result + +def main(): + parser = argparse.ArgumentParser(description="Simple token probability visualizer") + parser.add_argument("input_file", help="Input file with token data") + parser.add_argument("--mode", choices=['absolute', 'relative'], default='absolute', + help="Probability mode: absolute (1/p) or relative (1/(p/max_p))") + parser.add_argument("--output", "-o", help="Output file (default: pseudo-colored-output.txt)", + default="pseudo-colored-output.txt") + parser.add_argument("--debug", "-d", action="store_true", help="Enable debug logging") + + args = parser.parse_args() + + # Configure logging + logger.remove() + log_level = "DEBUG" if args.debug else "INFO" + logger.add(sys.stderr, level=log_level, + format="{level: <8} | {time:HH:mm:ss} | {message}") + + try: + # Process tokens + result = process_token_data(args.input_file, args.mode, args.debug) + + if not result: + logger.error("No output generated!") + return 1 + + # Write output + with open(args.output, 'w') as f: + f.write(result) + + logger.success(f"Output written to {args.output}") + preview = result[:100] + "..." 
if len(result) > 100 else result + logger.info(f"Preview: {preview}") + + except Exception as e: + logger.error(f"Failed to process file: {e}") + if args.debug: + import traceback + logger.debug(traceback.format_exc()) + return 1 + + return 0 + +if __name__ == "__main__": + import sys + sys.exit(main()) \ No newline at end of file diff --git a/tools-superlinear/visualize_tokens/token_probability_visualizer.py b/tools-superlinear/visualize_tokens/token_probability_visualizer.py index 0fbd9fd813846..b0545402d8cb4 100755 --- a/tools-superlinear/visualize_tokens/token_probability_visualizer.py +++ b/tools-superlinear/visualize_tokens/token_probability_visualizer.py @@ -34,77 +34,44 @@ class TokenProbabilityParser: def __init__(self, log_file: str): self.log_file = log_file - self.rng_blocks = [] self.tokens = [] self.probabilities = [] - self.cumulative_probs = [] self.token_ids = [] - self.token_text = [] self.selected_indices = [] def parse_log(self) -> List[Dict[str, Any]]: """Parse the log file and extract token probability information""" - with open(self.log_file, 'r', encoding='utf-8', errors='replace') as f: - content = f.read() + tokens = [] - # Find all RNG blocks - rng_blocks = re.findall(r'RNG internal:.*?RNG generated sample:.*?token id: (\d+), probability: ([\d\.]+)', - content, re.DOTALL) - - probability_blocks = [] - - rng_block_matches = re.finditer(r'RNG internal:(.*?)RNG generated sample: (\d+) \(token id: (\d+), probability: ([\d\.]+)\)', - content, re.DOTALL) - - for match in rng_block_matches: - full_text = match.group(0) - inner_text = match.group(1) - selected_idx = int(match.group(2)) - token_id = int(match.group(3)) - probability = float(match.group(4)) - - # Extract token probabilities - token_probs = re.findall(r'\s+\[(\d+)\] token (\d+) = ([\d\.]+) \(cumulative: ([\d\.]+)\)', inner_text) - - # Build token probability data - token_data = [] - for tp in token_probs: - idx = int(tp[0]) - token_id_inner = int(tp[1]) - prob = float(tp[2]) - 
cumulative = float(tp[3]) - - token_data.append({ - "index": idx, - "token_id": token_id_inner, - "probability": prob, - "cumulative": cumulative, - "selected": idx == selected_idx - }) - - # Extract raw random number - match_raw = re.search(r'- Raw uniform random number: ([\d\.]+)', inner_text) - raw_random = float(match_raw.group(1)) if match_raw else None - - # Extract scaled random number - match_scaled = re.search(r'- Scaled random number: ([\d\.]+)', inner_text) - scaled_random = float(match_scaled.group(1)) if match_scaled else None - - probability_blocks.append({ - "token_id": token_id, - "probability": probability, - "selected_index": selected_idx, - "raw_random": raw_random, - "scaled_random": scaled_random, - "tokens": token_data - }) + try: + with open(self.log_file, 'r', encoding='utf-8') as f: + for line in f: + if line.strip(): + try: + token = json.loads(line) + # Extract relevant information + if 'selected_probability' in token: + token['probability'] = token['selected_probability'] + if 'selected_token_id' in token: + token['token_id'] = token['selected_token_id'] + tokens.append(token) + + # Store for later processing + if 'token_id' in token: + self.token_ids.append(token['token_id']) + if 'probability' in token: + self.probabilities.append(token['probability']) + if 'selected_index' in token: + self.selected_indices.append(token['selected_index']) + except json.JSONDecodeError as e: + print(f"Error parsing JSON line: {e}", file=sys.stderr) + print(f"Problematic line: {line}", file=sys.stderr) + continue + except Exception as e: + print(f"Error reading file: {e}", file=sys.stderr) + return [] - # Store for later processing - self.token_ids.append(token_id) - self.probabilities.append(probability) - self.selected_indices.append(selected_idx) - - return probability_blocks + return tokens def extract_token_text(self, model_vocab_file: Optional[str] = None) -> None: """ @@ -125,10 +92,10 @@ def extract_token_text(self, model_vocab_file: 
Optional[str] = None) -> None: # Map token IDs to text for token_id in self.token_ids: if token_id in token_map: - self.token_text.append(token_map[token_id]) + self.tokens.append(token_map[token_id]) else: # Use placeholder if token ID not found in vocab file - self.token_text.append(f"<{token_id}>") + self.tokens.append(f"<{token_id}>") def load_output_text(output_file: str) -> str: """Load the generated text from the output file""" @@ -194,7 +161,7 @@ def create_colored_html(output_text: str, probability_blocks: List[Dict[str, Any padding: 4px 8px; border-radius: 3px; font-size: 12px; - white-space: nowrap; + white-space: pre; z-index: 100; }} @@ -300,7 +267,7 @@ def generate_colored_tokens_html(output_text: str, probability_blocks: List[Dict """Generate HTML for colored tokens""" # Get all probabilities for normalization (relative mode) - all_probs = [block["probability"] for block in probability_blocks] + all_probs = [block["selected_probability"] for block in probability_blocks if "selected_probability" in block] min_prob = min(all_probs) if all_probs else 0 max_prob = max(all_probs) if all_probs else 1 prob_range = max_prob - min_prob if max_prob > min_prob else 1.0 @@ -309,9 +276,12 @@ def generate_colored_tokens_html(output_text: str, probability_blocks: List[Dict html_output = "" chars_processed = 0 - for i, block in enumerate(probability_blocks): - token_id = block["token_id"] - probability = block["probability"] + for block in probability_blocks: + if "selected_token_id" not in block or "selected_probability" not in block: + continue + + token_id = block["selected_token_id"] + probability = block["selected_probability"] # Get token text from the output - this is an approximation # In practice, you'd need a proper tokenizer to get the exact token text @@ -350,13 +320,20 @@ def generate_colored_tokens_html(output_text: str, probability_blocks: List[Dict else: display_text = html.escape(display_text) + # Create tooltip with token info and candidates + 
tooltip_text = f"Token ID: {token_id}\\nProbability: {probability:.4f}" + if "tokens" in block: + tooltip_text += "\\n\\nTop candidates:" + sorted_candidates = sorted(block["tokens"], key=lambda x: x["probability"], reverse=True) + for i, candidate in enumerate(sorted_candidates[:5]): # Show top 5 candidates + tooltip_text += f"\\n{i+1}. ID {candidate['token_id']}: {candidate['probability']:.4f}" + # Create token HTML with tooltip token_html = f""" {display_text} - Token ID: {token_id}
- Probability: {probability:.4f} + {tooltip_text}
""" From d4b2a9746fd4166c45538e5a7a1dcce6a67e96b1 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 21:15:08 +0300 Subject: [PATCH 11/22] snapshot 3 --- CMakeLists.txt | 10 +- Makefile | 414 ++++----- examples/CMakeLists.txt | 52 +- src/llama-sampling.cpp | 41 +- token_probs.json | 813 ++++++++++-------- .../visualize_tokens/simple_token_viz.py | 79 +- 6 files changed, 793 insertions(+), 616 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7b2a1845e5c7c..520d194750dbe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -164,14 +164,14 @@ if (LLAMA_BUILD_COMMON) add_subdirectory(common) endif() -if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION) - include(CTest) - add_subdirectory(tests) -endif() +#if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION) +# include(CTest) +# add_subdirectory(tests) +#endif() if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_EXAMPLES) add_subdirectory(examples) - add_subdirectory(pocs) +# add_subdirectory(pocs) endif() # diff --git a/Makefile b/Makefile index 5339d490b4e68..e089efa147aec 100644 --- a/Makefile +++ b/Makefile @@ -5,69 +5,69 @@ endif # Define the default target now so that it is always the first target BUILD_TARGETS = \ libllava.a \ - llama-batched \ - llama-batched-bench \ - llama-bench \ - llama-cli \ - llama-convert-llama2c-to-ggml \ - llama-embedding \ - llama-eval-callback \ - llama-export-lora \ - llama-gbnf-validator \ - llama-gguf \ - llama-gguf-hash \ - llama-gguf-split \ - llama-gritlm \ - llama-imatrix \ - llama-infill \ - llama-llava-cli \ - llama-minicpmv-cli\ - llama-qwen2vl-cli\ - llama-lookahead \ - llama-lookup \ - llama-lookup-create \ - llama-lookup-merge \ - llama-lookup-stats \ - llama-parallel \ - llama-passkey \ - llama-perplexity \ - llama-q8dot \ - llama-quantize \ - llama-quantize-stats \ - llama-retrieval \ - llama-save-load-state \ - llama-server \ - llama-simple \ - llama-simple-chat \ + # llama-batched \ + # llama-batched-bench \ + 
# llama-bench \ + # llama-cli \ + # llama-convert-llama2c-to-ggml \ + # llama-embedding \ + # llama-eval-callback \ + # llama-export-lora \ + # llama-gbnf-validator \ + # llama-gguf \ + # llama-gguf-hash \ + # llama-gguf-split \ + # llama-gritlm \ + # llama-imatrix \ + # llama-infill \ + # llama-llava-cli \ + # llama-minicpmv-cli\ + # llama-qwen2vl-cli\ + # llama-lookahead \ + # llama-lookup \ + # llama-lookup-create \ + # llama-lookup-merge \ + # llama-lookup-stats \ + # llama-parallel \ + # llama-passkey \ + # llama-perplexity \ + # llama-q8dot \ + # llama-quantize \ + # llama-quantize-stats \ + # llama-retrieval \ + # llama-save-load-state \ + # llama-server \ + # llama-simple \ + # llama-simple-chat \ llama-run \ - llama-speculative \ - llama-tokenize \ - llama-vdot \ - llama-cvector-generator \ - llama-gen-docs \ + # llama-speculative \ + # llama-tokenize \ + # llama-vdot \ + # llama-cvector-generator \ + # llama-gen-docs \ tests/test-c.o # Binaries only useful for tests -TEST_TARGETS = \ - tests/test-arg-parser \ - tests/test-autorelease \ - tests/test-backend-ops \ - tests/test-chat \ - tests/test-chat-template \ - tests/test-double-float \ - tests/test-grammar-integration \ - tests/test-grammar-parser \ - tests/test-json-schema-to-grammar \ - tests/test-llama-grammar \ - tests/test-log \ - tests/test-model-load-cancel \ - tests/test-quantize-fns \ - tests/test-quantize-perf \ - tests/test-rope \ - tests/test-sampling \ - tests/test-tokenizer-0 \ - tests/test-tokenizer-1-bpe \ - tests/test-tokenizer-1-spm +# TEST_TARGETS = \ +# tests/test-arg-parser \ +# tests/test-autorelease \ +# tests/test-backend-ops \ +# tests/test-chat \ +# tests/test-chat-template \ +# tests/test-double-float \ +# tests/test-grammar-integration \ +# tests/test-grammar-parser \ +# tests/test-json-schema-to-grammar \ +# tests/test-llama-grammar \ +# tests/test-log \ +# tests/test-model-load-cancel \ +# tests/test-quantize-fns \ +# tests/test-quantize-perf \ +# tests/test-rope \ +# 
tests/test-sampling \ +# tests/test-tokenizer-0 \ +# tests/test-tokenizer-1-bpe \ +# tests/test-tokenizer-1-spm # tests/test-opt \ # Legacy build targets that were renamed in #7809, but should still be removed when the project is cleaned @@ -1393,43 +1393,43 @@ examples/server/%.hpp: examples/server/public/% FORCE Makefile echo "unsigned int $${NAME}_len = $(shell cat $< | wc -c );" \ ) > $@ -llama-gen-docs: examples/gen-docs/gen-docs.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -libllava.a: examples/llava/llava.cpp \ - examples/llava/llava.h \ - examples/llava/clip.cpp \ - examples/llava/clip.h \ - common/stb_image.h \ - common/base64.hpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ -Wno-cast-qual - -llama-llava-cli: examples/llava/llava-cli.cpp \ - examples/llava/llava.cpp \ - examples/llava/llava.h \ - examples/llava/clip.cpp \ - examples/llava/clip.h \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual - -llama-minicpmv-cli: examples/llava/minicpmv-cli.cpp \ - examples/llava/llava.cpp \ - examples/llava/llava.h \ - examples/llava/clip.cpp \ - examples/llava/clip.h \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual - -llama-qwen2vl-cli: examples/llava/qwen2vl-cli.cpp \ - examples/llava/llava.cpp \ - examples/llava/llava.h \ - examples/llava/clip.cpp \ - examples/llava/clip.h \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual +# llama-gen-docs: examples/gen-docs/gen-docs.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# libllava.a: examples/llava/llava.cpp \ +# examples/llava/llava.h \ +# examples/llava/clip.cpp \ +# examples/llava/clip.h \ +# common/stb_image.h \ +# common/base64.hpp \ 
+# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ -Wno-cast-qual + +# llama-llava-cli: examples/llava/llava-cli.cpp \ +# examples/llava/llava.cpp \ +# examples/llava/llava.h \ +# examples/llava/clip.cpp \ +# examples/llava/clip.h \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual + +# llama-minicpmv-cli: examples/llava/minicpmv-cli.cpp \ +# examples/llava/llava.cpp \ +# examples/llava/llava.h \ +# examples/llava/clip.cpp \ +# examples/llava/clip.h \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual + +# llama-qwen2vl-cli: examples/llava/qwen2vl-cli.cpp \ +# examples/llava/llava.cpp \ +# examples/llava/llava.h \ +# examples/llava/clip.cpp \ +# examples/llava/clip.h \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual ifeq ($(UNAME_S),Darwin) swift: examples/batched.swift @@ -1451,123 +1451,123 @@ common/build-info.o: common/build-info.cpp # Tests # -tests: $(TEST_TARGETS) - -tests/test-arg-parser: tests/test-arg-parser.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-llama-grammar: tests/test-llama-grammar.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-log: tests/test-log.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-grammar-parser: tests/test-grammar-parser.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-grammar-integration: tests/test-grammar-integration.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call 
GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-double-float: tests/test-double-float.cpp - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-json-schema-to-grammar: tests/test-json-schema-to-grammar.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-chat: tests/test-chat.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-opt: tests/test-opt.cpp \ - $(OBJ_GGML) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-quantize-fns: tests/test-quantize-fns.cpp \ - $(OBJ_GGML) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-quantize-perf: tests/test-quantize-perf.cpp \ - $(OBJ_GGML) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-sampling: tests/test-sampling.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-tokenizer-0: tests/test-tokenizer-0.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-tokenizer-1-bpe: tests/test-tokenizer-1-bpe.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call 
GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-tokenizer-1-spm: tests/test-tokenizer-1-spm.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-rope: tests/test-rope.cpp ggml/src/ggml.o \ - $(OBJ_GGML) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-c.o: tests/test-c.c include/llama.h - $(CC) $(CFLAGS) -c $(filter-out %.h,$^) -o $@ - -tests/test-backend-ops: tests/test-backend-ops.cpp \ - $(OBJ_GGML) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-model-load-cancel: tests/test-model-load-cancel.cpp tests/get-model.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-autorelease: tests/test-autorelease.cpp tests/get-model.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) - -tests/test-chat-template: tests/test-chat-template.cpp \ - $(OBJ_ALL) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) +# tests: $(TEST_TARGETS) + +# tests/test-arg-parser: tests/test-arg-parser.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-llama-grammar: tests/test-llama-grammar.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-log: tests/test-log.cpp \ +# $(OBJ_ALL) +# $(CXX) 
$(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-grammar-parser: tests/test-grammar-parser.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-grammar-integration: tests/test-grammar-integration.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-double-float: tests/test-double-float.cpp +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-json-schema-to-grammar: tests/test-json-schema-to-grammar.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-chat: tests/test-chat.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-opt: tests/test-opt.cpp \ +# $(OBJ_GGML) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-quantize-fns: tests/test-quantize-fns.cpp \ +# $(OBJ_GGML) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-quantize-perf: tests/test-quantize-perf.cpp \ +# $(OBJ_GGML) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-sampling: tests/test-sampling.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c 
$< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-tokenizer-0: tests/test-tokenizer-0.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-tokenizer-1-bpe: tests/test-tokenizer-1-bpe.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-tokenizer-1-spm: tests/test-tokenizer-1-spm.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-rope: tests/test-rope.cpp ggml/src/ggml.o \ +# $(OBJ_GGML) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-c.o: tests/test-c.c include/llama.h +# $(CC) $(CFLAGS) -c $(filter-out %.h,$^) -o $@ + +# tests/test-backend-ops: tests/test-backend-ops.cpp \ +# $(OBJ_GGML) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-model-load-cancel: tests/test-model-load-cancel.cpp tests/get-model.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-autorelease: tests/test-autorelease.cpp tests/get-model.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +# tests/test-chat-template: tests/test-chat-template.cpp \ +# $(OBJ_ALL) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call 
GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) # # PoCs # -llama-vdot: pocs/vdot/vdot.cpp ggml/src/ggml.o \ - $(OBJ_GGML) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) +# llama-vdot: pocs/vdot/vdot.cpp ggml/src/ggml.o \ +# $(OBJ_GGML) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -llama-q8dot: pocs/vdot/q8dot.cpp ggml/src/ggml.o \ - $(OBJ_GGML) - $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) - $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) +# llama-q8dot: pocs/vdot/q8dot.cpp ggml/src/ggml.o \ +# $(OBJ_GGML) +# $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) +# $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) # # Deprecated binaries that we want to keep around long enough for people to migrate to the new filenames, then these can be removed. 
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 66cfab2c3b796..3a38f06546e2a 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -26,33 +26,33 @@ else() add_subdirectory(gbnf-validator) endif() - add_subdirectory(gguf-hash) - add_subdirectory(gguf-split) - add_subdirectory(gguf) - add_subdirectory(gritlm) - add_subdirectory(imatrix) - add_subdirectory(infill) - add_subdirectory(llama-bench) - add_subdirectory(lookahead) - add_subdirectory(lookup) - add_subdirectory(main) - add_subdirectory(parallel) - add_subdirectory(passkey) - add_subdirectory(perplexity) - add_subdirectory(quantize) - add_subdirectory(retrieval) - if (LLAMA_BUILD_SERVER) - add_subdirectory(server) - endif() - add_subdirectory(save-load-state) + #add_subdirectory(gguf-hash) + #add_subdirectory(gguf-split) + #add_subdirectory(gguf) + #add_subdirectory(gritlm) + #add_subdirectory(imatrix) + #add_subdirectory(infill) + #add_subdirectory(llama-bench) + #add_subdirectory(lookahead) + #add_subdirectory(lookup) + #add_subdirectory(main) + #add_subdirectory(parallel) + #add_subdirectory(passkey) + #add_subdirectory(perplexity) + #add_subdirectory(quantize) + #add_subdirectory(retrieval) + #if (LLAMA_BUILD_SERVER) + # add_subdirectory(server) + #endif() + #add_subdirectory(save-load-state) add_subdirectory(run) - add_subdirectory(simple) - add_subdirectory(simple-chat) - add_subdirectory(speculative) - add_subdirectory(speculative-simple) - add_subdirectory(tokenize) - add_subdirectory(tts) - add_subdirectory(gen-docs) + #add_subdirectory(simple) + #add_subdirectory(simple-chat) + #add_subdirectory(speculative) + #add_subdirectory(speculative-simple) + #add_subdirectory(tokenize) + #add_subdirectory(tts) + #add_subdirectory(gen-docs) if (NOT GGML_BACKEND_DL) # these examples use the backends directly and cannot be built with dynamic loading add_subdirectory(convert-llama2c-to-ggml) diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp index 
9cdf332e50611..2e104f41dba02 100644 --- a/src/llama-sampling.cpp +++ b/src/llama-sampling.cpp @@ -18,6 +18,34 @@ #include #include +// Helper function to escape whitespace and special characters for JSON +static std::string llama_escape_whitespace(const std::string& text) { + std::string result; + result.reserve(text.size() * 2); // Reserve space to avoid reallocations + + for (char c : text) { + switch (c) { + case '\\': result += "\\\\"; break; + case '\"': result += "\\\""; break; + case '\n': result += "\\n"; break; + case '\r': result += "\\r"; break; + case '\t': result += "\\t"; break; + case '\b': result += "\\b"; break; + case '\f': result += "\\f"; break; + default: + if (static_cast(c) < 32) { + char buf[8]; + snprintf(buf, sizeof(buf), "\\u%04x", c); + result += buf; + } else { + result += c; + } + } + } + + return result; +} + // Global RNG provider instance static RNGProvider* g_rng_provider = nullptr; @@ -248,12 +276,19 @@ static int llama_sample_dist(llama_token_data_array * cur_p, std::mt19937 & /*rn fprintf(f, " \"selected_token_id\": %d,\n", cur_p->data[selected_idx].id); fprintf(f, " \"selected_probability\": %f,\n", cur_p->data[selected_idx].p); + // Add a placeholder for token text that can be filled in by post-processing + fprintf(f, " \"selected_token_text\": \"\",\n", cur_p->data[selected_idx].id); + // Token data array fprintf(f, " \"tokens\": [\n"); for (size_t i = 0; i < cur_p->size; ++i) { - fprintf(f, " {\"index\": %zu, \"token_id\": %d, \"probability\": %f, \"cumulative\": %f}%s\n", - i, cur_p->data[i].id, cur_p->data[i].p, cumulative_probs[i], - (i < cur_p->size - 1) ? "," : ""); + fprintf(f, " {\"index\": %zu, \"token_id\": %d, \"probability\": %f, \"cumulative\": %f", + i, cur_p->data[i].id, cur_p->data[i].p, cumulative_probs[i]); + + // Add placeholder for token text + fprintf(f, ", \"text\": \"\"", cur_p->data[i].id); + + fprintf(f, "}%s\n", (i < cur_p->size - 1) ? 
"," : ""); } fprintf(f, " ]\n"); fprintf(f, "}\n"); diff --git a/token_probs.json b/token_probs.json index 532aeb244ecf3..d8bdbb9c2c2d2 100644 --- a/token_probs.json +++ b/token_probs.json @@ -3,582 +3,679 @@ "index": 20, "token_id": 12362, "probability": 0.00651, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 235248, - "probability": 0.295117, - "cumulative": 1.0 + "index": 7, + "token_id": 4268, + "probability": 0.014331, + "cumulative": 1.0, + "text": "" + }, + { + "index": 3, + "token_id": 16481, + "probability": 0.019699, + "cumulative": 1.0, + "text": "" + }, + { + "index": 3, + "token_id": 105460, + "probability": 0.023908, + "cumulative": 1.0, + "text": "" }, { "index": 0, - "token_id": 235274, + "token_id": 17273, "probability": 1.0, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 235321, - "probability": 0.04369, - "cumulative": 1.0 + "index": 0, + "token_id": 235281, + "probability": 1.0, + "cumulative": 1.0, + "text": "" }, { - "index": 2, - "token_id": 235304, - "probability": 0.026943, - "cumulative": 1.0 + "index": 7, + "token_id": 4549, + "probability": 0.016155, + "cumulative": 1.0, + "text": "" }, { - "index": 2, - "token_id": 235308, - "probability": 0.089691, - "cumulative": 1.0 + "index": 5, + "token_id": 8725, + "probability": 0.015647, + "cumulative": 1.0, + "text": "" + }, + { + "index": 4, + "token_id": 8725, + "probability": 0.033011, + "cumulative": 1.0, + "text": "" }, { "index": 1, - "token_id": 235256, - "probability": 0.027245, - "cumulative": 1.0 + "token_id": 731, + "probability": 0.317356, + "cumulative": 1.0, + "text": "" }, { - "index": 9, - "token_id": 671, - "probability": 0.012151, - "cumulative": 1.0 + "index": 4, + "token_id": 7149, + "probability": 0.014362, + "cumulative": 1.0, + "text": "" }, { - "index": 25, - "token_id": 5842, - "probability": 0.004854, - "cumulative": 1.0 + "index": 0, + "token_id": 55900, + "probability": 1.0, + 
"cumulative": 1.0, + "text": "" }, { - "index": 4, - "token_id": 16481, - "probability": 0.014562, - "cumulative": 1.0 + "index": 2, + "token_id": 696, + "probability": 0.143155, + "cumulative": 1.0, + "text": "" }, { "index": 1, - "token_id": 105460, - "probability": 0.036299, - "cumulative": 1.0 + "token_id": 573, + "probability": 0.066425, + "cumulative": 1.0, + "text": "" }, { "index": 0, - "token_id": 17273, + "token_id": 235274, "probability": 1.0, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { "index": 0, - "token_id": 235281, + "token_id": 235315, "probability": 1.0, - "cumulative": 1.0 - }, - { - "index": 2, - "token_id": 9223, - "probability": 0.037737, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { - "index": 5, - "token_id": 42020, - "probability": 0.019484, - "cumulative": 1.0 + "index": 0, + "token_id": 235308, + "probability": 1.0, + "cumulative": 1.0, + "text": "" }, { - "index": 6, - "token_id": 696, - "probability": 0.017278, - "cumulative": 1.0 + "index": 1, + "token_id": 235308, + "probability": 0.354027, + "cumulative": 1.0, + "text": "" }, { - "index": 6, - "token_id": 135853, - "probability": 0.013522, - "cumulative": 1.0 + "index": 7, + "token_id": 685, + "probability": 0.014932, + "cumulative": 1.0, + "text": "" }, { - "index": 0, - "token_id": 55900, - "probability": 1.0, - "cumulative": 1.0 + "index": 17, + "token_id": 665, + "probability": 0.005431, + "cumulative": 1.0, + "text": "" }, { - "index": 4, - "token_id": 575, - "probability": 0.019894, - "cumulative": 1.0 + "index": 1, + "token_id": 573, + "probability": 0.027479, + "cumulative": 1.0, + "text": "" }, { - "index": 5, - "token_id": 578, - "probability": 0.023639, - "cumulative": 1.0 + "index": 32, + "token_id": 8602, + "probability": 0.003418, + "cumulative": 1.0, + "text": "" }, { - "index": 12, - "token_id": 7149, - "probability": 0.011458, - "cumulative": 1.0 + "index": 8, + "token_id": 235248, + "probability": 0.00913, + "cumulative": 1.0, + "text": 
"" }, { - "index": 3, - "token_id": 578, - "probability": 0.026286, - "cumulative": 1.0 + "index": 34, + "token_id": 33622, + "probability": 0.003533, + "cumulative": 1.0, + "text": "" }, { - "index": 5, - "token_id": 595, - "probability": 0.014781, - "cumulative": 1.0 + "index": 2, + "token_id": 671, + "probability": 0.02937, + "cumulative": 1.0, + "text": "" }, { - "index": 3, - "token_id": 591, - "probability": 0.034936, - "cumulative": 1.0 + "index": 20, + "token_id": 3186, + "probability": 0.008291, + "cumulative": 1.0, + "text": "" }, { - "index": 15, - "token_id": 968, - "probability": 0.00609, - "cumulative": 1.0 + "index": 10, + "token_id": 675, + "probability": 0.007638, + "cumulative": 1.0, + "text": "" }, { - "index": 6, - "token_id": 13059, - "probability": 0.01801, - "cumulative": 1.0 + "index": 8, + "token_id": 36701, + "probability": 0.015806, + "cumulative": 1.0, + "text": "" }, { - "index": 24, - "token_id": 6869, - "probability": 0.006793, - "cumulative": 1.0 + "index": 16, + "token_id": 774, + "probability": 0.008269, + "cumulative": 1.0, + "text": "" }, { - "index": 6, - "token_id": 6990, - "probability": 0.016133, - "cumulative": 1.0 + "index": 9, + "token_id": 3320, + "probability": 0.010558, + "cumulative": 1.0, + "text": "" }, { "index": 0, - "token_id": 685, + "token_id": 577, "probability": 1.0, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { - "index": 2, - "token_id": 476, - "probability": 0.125677, - "cumulative": 1.0 + "index": 13, + "token_id": 3679, + "probability": 0.011651, + "cumulative": 1.0, + "text": "" }, { - "index": 7, - "token_id": 2725, - "probability": 0.023423, - "cumulative": 1.0 + "index": 11, + "token_id": 5188, + "probability": 0.008934, + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 674, - "probability": 0.061914, - "cumulative": 1.0 + "index": 3, + "token_id": 5188, + "probability": 0.03112, + "cumulative": 1.0, + "text": "" }, { "index": 3, - "token_id": 8042, - "probability": 
0.122625, - "cumulative": 1.0 + "token_id": 35606, + "probability": 0.026485, + "cumulative": 1.0, + "text": "" }, { - "index": 7, - "token_id": 575, - "probability": 0.015417, - "cumulative": 1.0 + "index": 18, + "token_id": 586, + "probability": 0.007712, + "cumulative": 1.0, + "text": "" }, { - "index": 19, - "token_id": 18460, - "probability": 0.008059, - "cumulative": 1.0 + "index": 10, + "token_id": 1872, + "probability": 0.011012, + "cumulative": 1.0, + "text": "" }, { - "index": 0, - "token_id": 675, - "probability": 1.0, - "cumulative": 1.0 + "index": 17, + "token_id": 7697, + "probability": 0.009879, + "cumulative": 1.0, + "text": "" }, { - "index": 11, - "token_id": 25175, - "probability": 0.013, - "cumulative": 1.0 + "index": 7, + "token_id": 1170, + "probability": 0.007703, + "cumulative": 1.0, + "text": "" }, { - "index": 4, - "token_id": 16612, - "probability": 0.02641, - "cumulative": 1.0 + "index": 1, + "token_id": 664, + "probability": 0.076052, + "cumulative": 1.0, + "text": "" }, { "index": 4, - "token_id": 235265, - "probability": 0.058654, - "cumulative": 1.0 - }, - { - "index": 2, - "token_id": 6875, - "probability": 0.025434, - "cumulative": 1.0 + "token_id": 1080, + "probability": 0.024216, + "cumulative": 1.0, + "text": "" }, { - "index": 2, - "token_id": 578, - "probability": 0.059038, - "cumulative": 1.0 + "index": 4, + "token_id": 235296, + "probability": 0.019969, + "cumulative": 1.0, + "text": "" }, { - "index": 12, - "token_id": 16481, - "probability": 0.00734, - "cumulative": 1.0 + "index": 0, + "token_id": 17848, + "probability": 1.0, + "cumulative": 1.0, + "text": "" }, { - "index": 9, - "token_id": 10165, - "probability": 0.007557, - "cumulative": 1.0 + "index": 18, + "token_id": 7756, + "probability": 0.008604, + "cumulative": 1.0, + "text": "" }, { - "index": 2, - "token_id": 603, - "probability": 0.021314, - "cumulative": 1.0 + "index": 3, + "token_id": 235281, + "probability": 0.07585, + "cumulative": 1.0, + "text": "" }, { - 
"index": 3, - "token_id": 573, - "probability": 0.015347, - "cumulative": 1.0 + "index": 12, + "token_id": 6990, + "probability": 0.013089, + "cumulative": 1.0, + "text": "" }, { - "index": 3, - "token_id": 20010, - "probability": 0.04449, - "cumulative": 1.0 + "index": 19, + "token_id": 5140, + "probability": 0.007553, + "cumulative": 1.0, + "text": "" }, { "index": 0, "token_id": 577, "probability": 1.0, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { - "index": 2, - "token_id": 2500, - "probability": 0.050125, - "cumulative": 1.0 + "index": 7, + "token_id": 10488, + "probability": 0.018444, + "cumulative": 1.0, + "text": "" }, { "index": 4, - "token_id": 25175, - "probability": 0.02994, - "cumulative": 1.0 + "token_id": 674, + "probability": 0.011787, + "cumulative": 1.0, + "text": "" + }, + { + "index": 7, + "token_id": 736, + "probability": 0.018635, + "cumulative": 1.0, + "text": "" }, { "index": 0, - "token_id": 674, + "token_id": 17273, "probability": 1.0, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 798, - "probability": 0.047492, - "cumulative": 1.0 + "index": 3, + "token_id": 591, + "probability": 0.015331, + "cumulative": 1.0, + "text": "" }, { - "index": 7, - "token_id": 3508, - "probability": 0.013672, - "cumulative": 1.0 + "index": 18, + "token_id": 577, + "probability": 0.009123, + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 5766, - "probability": 0.031732, - "cumulative": 1.0 + "index": 19, + "token_id": 23477, + "probability": 0.005807, + "cumulative": 1.0, + "text": "" }, { - "index": 0, - "token_id": 13333, - "probability": 1.0, - "cumulative": 1.0 + "index": 4, + "token_id": 23477, + "probability": 0.020922, + "cumulative": 1.0, + "text": "" }, { - "index": 10, - "token_id": 696, - "probability": 0.016575, - "cumulative": 1.0 + "index": 1, + "token_id": 3646, + "probability": 0.033722, + "cumulative": 1.0, + "text": "" }, { - "index": 3, - "token_id": 34790, - 
"probability": 0.061788, - "cumulative": 1.0 + "index": 6, + "token_id": 93098, + "probability": 0.00982, + "cumulative": 1.0, + "text": "" }, { "index": 4, - "token_id": 1538, - "probability": 0.035059, - "cumulative": 1.0 + "token_id": 235290, + "probability": 0.024017, + "cumulative": 1.0, + "text": "" + }, + { + "index": 1, + "token_id": 576, + "probability": 0.085512, + "cumulative": 1.0, + "text": "" + }, + { + "index": 2, + "token_id": 1277, + "probability": 0.027586, + "cumulative": 1.0, + "text": "" + }, + { + "index": 1, + "token_id": 1744, + "probability": 0.156334, + "cumulative": 1.0, + "text": "" }, { "index": 1, "token_id": 235269, - "probability": 0.420136, - "cumulative": 1.0 + "probability": 0.399165, + "cumulative": 1.0, + "text": "" }, { - "index": 0, + "index": 1, "token_id": 1582, - "probability": 1.0, - "cumulative": 1.0 + "probability": 0.415986, + "cumulative": 1.0, + "text": "" }, { "index": 0, "token_id": 685, "probability": 1.0, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { - "index": 9, - "token_id": 5255, - "probability": 0.011036, - "cumulative": 1.0 + "index": 3, + "token_id": 17289, + "probability": 0.029156, + "cumulative": 1.0, + "text": "" }, { "index": 1, - "token_id": 578, - "probability": 0.039092, - "cumulative": 1.0 + "token_id": 2232, + "probability": 0.032934, + "cumulative": 1.0, + "text": "" }, { - "index": 4, - "token_id": 8377, - "probability": 0.013009, - "cumulative": 1.0 + "index": 2, + "token_id": 578, + "probability": 0.04672, + "cumulative": 1.0, + "text": "" }, { "index": 1, - "token_id": 26149, - "probability": 0.422272, - "cumulative": 1.0 + "token_id": 21786, + "probability": 0.150738, + "cumulative": 1.0, + "text": "" }, { "index": 0, - "token_id": 60495, + "token_id": 595, "probability": 1.0, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { "index": 0, - "token_id": 235269, + "token_id": 43193, "probability": 1.0, - "cumulative": 1.0 - }, - { - "index": 1, - "token_id": 6044, - 
"probability": 0.149887, - "cumulative": 1.0 - }, - { - "index": 1, - "token_id": 578, - "probability": 0.033882, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { "index": 0, - "token_id": 578, + "token_id": 235265, "probability": 1.0, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { - "index": 18, - "token_id": 3210, - "probability": 0.005813, - "cumulative": 1.0 - }, - { - "index": 1, - "token_id": 235265, - "probability": 0.076596, - "cumulative": 1.0 + "index": 8, + "token_id": 968, + "probability": 0.009675, + "cumulative": 1.0, + "text": "" }, { - "index": 3, - "token_id": 4559, - "probability": 0.020364, - "cumulative": 1.0 + "index": 5, + "token_id": 105460, + "probability": 0.019214, + "cumulative": 1.0, + "text": "" }, { "index": 1, - "token_id": 25411, - "probability": 0.09184, - "cumulative": 1.0 + "token_id": 235248, + "probability": 0.266307, + "cumulative": 1.0, + "text": "" }, { "index": 0, - "token_id": 235265, + "token_id": 235274, "probability": 1.0, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { - "index": 6, - "token_id": 4560, - "probability": 0.022317, - "cumulative": 1.0 + "index": 0, + "token_id": 235315, + "probability": 1.0, + "cumulative": 1.0, + "text": "" }, { - "index": 6, - "token_id": 17983, - "probability": 0.017229, - "cumulative": 1.0 + "index": 2, + "token_id": 235315, + "probability": 0.02164, + "cumulative": 1.0, + "text": "" }, { "index": 5, - "token_id": 5168, - "probability": 0.015439, - "cumulative": 1.0 + "token_id": 235276, + "probability": 0.026099, + "cumulative": 1.0, + "text": "" }, { "index": 0, - "token_id": 576, + "token_id": 235269, "probability": 1.0, - "cumulative": 1.0 - }, - { - "index": 1, - "token_id": 18225, - "probability": 0.091378, - "cumulative": 1.0 + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 729, - "probability": 0.048832, - "cumulative": 1.0 + "index": 5, + "token_id": 20010, + "probability": 0.018032, + "cumulative": 1.0, + "text": "" }, { - 
"index": 11, - "token_id": 77885, - "probability": 0.008413, - "cumulative": 1.0 + "index": 9, + "token_id": 2379, + "probability": 0.009043, + "cumulative": 1.0, + "text": "" }, { - "index": 3, - "token_id": 476, - "probability": 0.024573, - "cumulative": 1.0 + "index": 1, + "token_id": 11982, + "probability": 0.081726, + "cumulative": 1.0, + "text": "" }, { - "index": 2, - "token_id": 575, - "probability": 0.0163, - "cumulative": 1.0 + "index": 7, + "token_id": 6343, + "probability": 0.017314, + "cumulative": 1.0, + "text": "" }, { - "index": 2, + "index": 1, "token_id": 573, - "probability": 0.025395, - "cumulative": 1.0 + "probability": 0.040077, + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 4061, - "probability": 0.072068, - "cumulative": 1.0 + "index": 5, + "token_id": 577, + "probability": 0.033616, + "cumulative": 1.0, + "text": "" }, { - "index": 2, - "token_id": 575, - "probability": 0.043732, - "cumulative": 1.0 + "index": 3, + "token_id": 112034, + "probability": 0.066646, + "cumulative": 1.0, + "text": "" }, { - "index": 11, - "token_id": 3428, - "probability": 0.013968, - "cumulative": 1.0 + "index": 0, + "token_id": 235269, + "probability": 1.0, + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 6077, - "probability": 0.051516, - "cumulative": 1.0 + "index": 0, + "token_id": 1622, + "probability": 1.0, + "cumulative": 1.0, + "text": "" }, { - "index": 2, - "token_id": 3433, - "probability": 0.049357, - "cumulative": 1.0 + "index": 0, + "token_id": 39068, + "probability": 1.0, + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 1658, - "probability": 0.073556, - "cumulative": 1.0 + "index": 2, + "token_id": 578, + "probability": 0.015656, + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 576, - "probability": 0.20191, - "cumulative": 1.0 + "index": 7, + "token_id": 948, + "probability": 0.016128, + "cumulative": 1.0, + "text": "" }, { - "index": 2, - "token_id": 573, - "probability": 
0.037629, - "cumulative": 1.0 + "index": 7, + "token_id": 1767, + "probability": 0.011887, + "cumulative": 1.0, + "text": "" }, { "index": 4, - "token_id": 5188, - "probability": 0.03369, - "cumulative": 1.0 + "token_id": 974, + "probability": 0.017961, + "cumulative": 1.0, + "text": "" }, { - "index": 1, - "token_id": 729, - "probability": 0.209916, - "cumulative": 1.0 - }, - { - "index": 2, - "token_id": 13791, - "probability": 0.029944, - "cumulative": 1.0 + "index": 0, + "token_id": 731, + "probability": 1.0, + "cumulative": 1.0, + "text": "" }, { - "index": 8, - "token_id": 573, - "probability": 0.011521, - "cumulative": 1.0 + "index": 9, + "token_id": 3757, + "probability": 0.013048, + "cumulative": 1.0, + "text": "" }, { - "index": 12, - "token_id": 6859, - "probability": 0.015218, - "cumulative": 1.0 + "index": 1, + "token_id": 8133, + "probability": 0.034466, + "cumulative": 1.0, + "text": "" } ] \ No newline at end of file diff --git a/tools-superlinear/visualize_tokens/simple_token_viz.py b/tools-superlinear/visualize_tokens/simple_token_viz.py index 64273bd1223d5..19273d8f3da5e 100644 --- a/tools-superlinear/visualize_tokens/simple_token_viz.py +++ b/tools-superlinear/visualize_tokens/simple_token_viz.py @@ -10,16 +10,21 @@ import json import argparse +import os +import sys from pathlib import Path -from typing import List, Dict, Any +from typing import List, Dict, Any, Optional from loguru import logger -def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = False) -> str: - """Process token data and return formatted string.""" - output = [] +def parse_token_data_file(file_path: str, debug: bool = False) -> List[Dict]: + """ + Parse a file containing multiple JSON objects, each spanning multiple lines. + Returns a list of parsed JSON objects. + This is a reusable function that can be imported by other scripts. 
+ """ # Read the entire file content - with open(jsonl_file, 'r') as f: + with open(file_path, 'r') as f: content = f.read() # Split the content at each new JSON object start @@ -32,28 +37,49 @@ def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = Fa if line == "{": # Start of a new object if current_obj: # If we have collected a previous object - json_objects.append(current_obj) + try: + # Try to parse it to validate + json_obj = json.loads(current_obj) + json_objects.append(json_obj) + except json.JSONDecodeError as e: + if debug: + logger.error(f"Failed to parse JSON: {e}") + logger.debug(f"Object: {current_obj[:100]}...") current_obj = line else: current_obj += line # Add the last object if it exists if current_obj: - json_objects.append(current_obj) + try: + json_obj = json.loads(current_obj) + json_objects.append(json_obj) + except json.JSONDecodeError as e: + if debug: + logger.error(f"Failed to parse last JSON object: {e}") + + return json_objects + +def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = False) -> str: + """Process token data and return formatted string.""" + output = [] + + # Parse the token data file + json_objects = parse_token_data_file(jsonl_file, debug) + + if debug: + logger.debug(f"Parsed {len(json_objects)} token objects") # Process each JSON object - for json_str in json_objects: + for data in json_objects: try: - # Try to parse the JSON - data = json.loads(json_str) - # Extract token info token_id = data.get('selected_token_id') prob = data.get('selected_probability') if token_id is None or prob is None: if debug: - logger.debug(f"Missing token_id or probability in: {json_str[:100]}...") + logger.debug(f"Missing token_id or probability in object") continue # Calculate score @@ -68,13 +94,27 @@ def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = Fa else: # absolute mode score = 1.0 / prob if prob > 0 else float('inf') - # Format output - 
output.append(f"{token_id}({score:.1f})") + # Format output - use token ID directly + # We'll display it in a more readable format + if token_id < 256: + # ASCII characters - show them directly if printable + try: + char = chr(token_id) + if char.isprintable(): + token_display = char + else: + token_display = f"\\{token_id:03d}" + except: + token_display = f"\\{token_id:03d}" + else: + # Non-ASCII token IDs + token_display = f"T{token_id}" - except json.JSONDecodeError as e: + output.append(f"{token_display}({score:.1f})") + + except Exception as e: if debug: - logger.error(f"Failed to parse JSON: {e}") - logger.debug(f"Object start: {json_str[:100]}...") + logger.error(f"Error processing token: {e}") continue if not output: @@ -103,6 +143,11 @@ def main(): format="{level: <8} | {time:HH:mm:ss} | {message}") try: + # Check if input file exists + if not os.path.exists(args.input_file): + logger.error(f"Input file not found: {args.input_file}") + return 1 + # Process tokens result = process_token_data(args.input_file, args.mode, args.debug) From 7797ffcf03e3f05238ec433c96f11b61dfd846ff Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 21:31:38 +0300 Subject: [PATCH 12/22] proper jsonl reader util --- token_probs.json | 686 +++++++++--------- .../visualize_tokens/simple_token_viz.py | 88 +-- tools-superlinear/visualize_tokens/utils.py | 168 +++++ 3 files changed, 522 insertions(+), 420 deletions(-) create mode 100644 tools-superlinear/visualize_tokens/utils.py diff --git a/token_probs.json b/token_probs.json index d8bdbb9c2c2d2..bd78ca95db0b1 100644 --- a/token_probs.json +++ b/token_probs.json @@ -6,530 +6,509 @@ "cumulative": 1.0, "text": "" }, - { - "index": 7, - "token_id": 4268, - "probability": 0.014331, - "cumulative": 1.0, - "text": "" - }, - { - "index": 3, - "token_id": 16481, - "probability": 0.019699, - "cumulative": 1.0, - "text": "" - }, - { - "index": 3, - "token_id": 105460, - "probability": 0.023908, - "cumulative": 1.0, - 
"text": "" - }, { "index": 0, - "token_id": 17273, + "token_id": 235371, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 235281, + "token_id": 571, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 7, - "token_id": 4549, - "probability": 0.016155, + "index": 0, + "token_id": 235298, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 5, - "token_id": 8725, - "probability": 0.015647, + "index": 1, + "token_id": 2997, + "probability": 0.023167, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 4, - "token_id": 8725, - "probability": 0.033011, + "index": 0, + "token_id": 73786, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 1, - "token_id": 731, - "probability": 0.317356, + "token_id": 109, + "probability": 0.087381, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 4, - "token_id": 7149, - "probability": 0.014362, + "index": 2, + "token_id": 105460, + "probability": 0.022329, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 55900, + "token_id": 235371, "probability": 1.0, "cumulative": 1.0, - "text": "" - }, - { - "index": 2, - "token_id": 696, - "probability": 0.143155, - "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 573, - "probability": 0.066425, + "index": 0, + "token_id": 571, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 235274, + "token_id": 235298, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 235315, + "token_id": 2997, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 235308, + "token_id": 73786, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 1, - "token_id": 235308, - "probability": 0.354027, + "token_id": 105776, + "probability": 0.210727, "cumulative": 1.0, 
- "text": "" + "text": "" }, { - "index": 7, - "token_id": 685, - "probability": 0.014932, + "index": 0, + "token_id": 108, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 17, - "token_id": 665, - "probability": 0.005431, + "index": 9, + "token_id": 2299, + "probability": 0.014749, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 1, - "token_id": 573, - "probability": 0.027479, + "token_id": 30515, + "probability": 0.044432, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 32, - "token_id": 8602, - "probability": 0.003418, + "index": 6, + "token_id": 235322, + "probability": 0.012961, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 8, - "token_id": 235248, - "probability": 0.00913, + "index": 3, + "token_id": 974, + "probability": 0.02161, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 34, - "token_id": 33622, - "probability": 0.003533, + "index": 13, + "token_id": 12132, + "probability": 0.008043, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 2, - "token_id": 671, - "probability": 0.02937, + "token_id": 604, + "probability": 0.021917, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 20, - "token_id": 3186, - "probability": 0.008291, + "index": 3, + "token_id": 6875, + "probability": 0.020412, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 10, - "token_id": 675, - "probability": 0.007638, + "index": 2, + "token_id": 6403, + "probability": 0.018184, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 8, - "token_id": 36701, - "probability": 0.015806, + "index": 4, + "token_id": 235269, + "probability": 0.016629, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 16, - "token_id": 774, - "probability": 0.008269, + "index": 7, + "token_id": 6815, + "probability": 0.014198, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 9, - "token_id": 3320, - "probability": 0.010558, + "index": 4, + "token_id": 87491, + 
"probability": 0.012042, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 0, - "token_id": 577, - "probability": 1.0, + "index": 1, + "token_id": 15081, + "probability": 0.18399, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 13, - "token_id": 3679, - "probability": 0.011651, + "index": 6, + "token_id": 6383, + "probability": 0.007913, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 11, - "token_id": 5188, - "probability": 0.008934, + "index": 1, + "token_id": 476, + "probability": 0.050319, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 3, - "token_id": 5188, - "probability": 0.03112, + "token_id": 17203, + "probability": 0.023984, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 3, - "token_id": 35606, - "probability": 0.026485, + "index": 7, + "token_id": 17611, + "probability": 0.008355, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 18, - "token_id": 586, - "probability": 0.007712, + "index": 0, + "token_id": 2817, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 10, - "token_id": 1872, - "probability": 0.011012, + "index": 1, + "token_id": 17273, + "probability": 0.04496, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 17, - "token_id": 7697, - "probability": 0.009879, + "index": 0, + "token_id": 17273, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 7, - "token_id": 1170, - "probability": 0.007703, + "index": 4, + "token_id": 108, + "probability": 0.018042, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 1, - "token_id": 664, - "probability": 0.076052, + "token_id": 1154, + "probability": 0.02734, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 4, - "token_id": 1080, - "probability": 0.024216, + "index": 0, + "token_id": 685, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 4, - "token_id": 235296, - "probability": 0.019969, + "index": 0, + 
"token_id": 9095, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 17848, + "token_id": 20908, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 18, - "token_id": 7756, - "probability": 0.008604, + "index": 0, + "token_id": 235269, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 3, - "token_id": 235281, - "probability": 0.07585, + "index": 0, + "token_id": 11360, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 12, - "token_id": 6990, - "probability": 0.013089, + "index": 0, + "token_id": 16398, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 19, - "token_id": 5140, - "probability": 0.007553, + "index": 0, + "token_id": 235269, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 577, + "token_id": 4530, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 7, - "token_id": 10488, - "probability": 0.018444, + "index": 1, + "token_id": 3547, + "probability": 0.247556, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 4, - "token_id": 674, - "probability": 0.011787, + "index": 0, + "token_id": 14577, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 7, - "token_id": 736, - "probability": 0.018635, + "index": 1, + "token_id": 578, + "probability": 0.08638, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 17273, + "token_id": 578, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 3, - "token_id": 591, - "probability": 0.015331, + "index": 5, + "token_id": 3210, + "probability": 0.019112, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 18, + "index": 6, + "token_id": 1281, + "probability": 0.021082, + "cumulative": 1.0, + "text": "" + }, + { + "index": 0, "token_id": 577, - "probability": 0.009123, + 
"probability": 1.0, "cumulative": 1.0, "text": "" }, { - "index": 19, - "token_id": 23477, - "probability": 0.005807, + "index": 12, + "token_id": 2582, + "probability": 0.013377, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 4, - "token_id": 23477, - "probability": 0.020922, + "token_id": 2652, + "probability": 0.016881, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 3646, - "probability": 0.033722, + "index": 2, + "token_id": 35606, + "probability": 0.029259, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 6, - "token_id": 93098, - "probability": 0.00982, + "index": 9, + "token_id": 3766, + "probability": 0.009102, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 4, - "token_id": 235290, - "probability": 0.024017, + "index": 2, + "token_id": 651, + "probability": 0.026992, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 576, - "probability": 0.085512, + "index": 0, + "token_id": 235371, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 2, - "token_id": 1277, - "probability": 0.027586, + "index": 0, + "token_id": 571, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 1744, - "probability": 0.156334, + "index": 0, + "token_id": 235298, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 235269, - "probability": 0.399165, + "index": 0, + "token_id": 615, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 1582, - "probability": 0.415986, + "index": 0, + "token_id": 73786, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 685, + "token_id": 108, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 3, - "token_id": 17289, - "probability": 0.029156, + "index": 0, + "token_id": 235322, + "probability": 1.0, "cumulative": 
1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 2232, - "probability": 0.032934, + "index": 0, + "token_id": 235371, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 2, - "token_id": 578, - "probability": 0.04672, + "index": 0, + "token_id": 571, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 21786, - "probability": 0.150738, + "index": 0, + "token_id": 235298, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 595, + "token_id": 2997, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 43193, + "token_id": 73786, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 0, - "token_id": 235265, - "probability": 1.0, + "index": 1, + "token_id": 1645, + "probability": 0.236807, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 8, - "token_id": 968, - "probability": 0.009675, + "index": 0, + "token_id": 108, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 5, - "token_id": 105460, - "probability": 0.019214, + "index": 9, + "token_id": 4038, + "probability": 0.011994, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 235248, - "probability": 0.266307, + "index": 3, + "token_id": 3311, + "probability": 0.032169, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 0, - "token_id": 235274, - "probability": 1.0, + "index": 1, + "token_id": 235284, + "probability": 0.053946, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, @@ -539,18 +518,18 @@ "text": "" }, { - "index": 2, - "token_id": 235315, - "probability": 0.02164, + "index": 1, + "token_id": 235310, + "probability": 0.070341, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 5, - "token_id": 235276, - "probability": 0.026099, + "token_id": 235308, + "probability": 0.031117, "cumulative": 1.0, - 
"text": "" + "text": "" }, { "index": 0, @@ -561,121 +540,142 @@ }, { "index": 5, - "token_id": 20010, - "probability": 0.018032, + "token_id": 3725, + "probability": 0.018343, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 9, - "token_id": 2379, - "probability": 0.009043, + "index": 11, + "token_id": 2204, + "probability": 0.012064, "cumulative": 1.0, - "text": "" + "text": "" + }, + { + "index": 5, + "token_id": 777, + "probability": 0.019518, + "cumulative": 1.0, + "text": "" + }, + { + "index": 0, + "token_id": 30515, + "probability": 1.0, + "cumulative": 1.0, + "text": "" }, { "index": 1, - "token_id": 11982, - "probability": 0.081726, + "token_id": 591, + "probability": 0.113661, "cumulative": 1.0, - "text": "" + "text": "" + }, + { + "index": 3, + "token_id": 1671, + "probability": 0.024076, + "cumulative": 1.0, + "text": "" + }, + { + "index": 0, + "token_id": 731, + "probability": 1.0, + "cumulative": 1.0, + "text": "" }, { "index": 7, - "token_id": 6343, - "probability": 0.017314, + "token_id": 7149, + "probability": 0.017957, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 573, - "probability": 0.040077, + "index": 0, + "token_id": 55900, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 5, "token_id": 577, - "probability": 0.033616, + "probability": 0.015476, "cumulative": 1.0, "text": "" }, { - "index": 3, - "token_id": 112034, - "probability": 0.066646, + "index": 12, + "token_id": 4560, + "probability": 0.008069, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 235269, + "token_id": 235322, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 1622, + "token_id": 235371, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 39068, + "token_id": 571, "probability": 1.0, "cumulative": 1.0, - "text": "" - }, - { - "index": 2, - "token_id": 578, - "probability": 0.015656, 
- "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 7, - "token_id": 948, - "probability": 0.016128, + "index": 0, + "token_id": 235298, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 7, - "token_id": 1767, - "probability": 0.011887, + "index": 0, + "token_id": 615, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 4, - "token_id": 974, - "probability": 0.017961, + "index": 0, + "token_id": 73786, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { "index": 0, - "token_id": 731, + "token_id": 108, "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 9, - "token_id": 3757, - "probability": 0.013048, + "index": 0, + "token_id": 235322, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" }, { - "index": 1, - "token_id": 8133, - "probability": 0.034466, + "index": 0, + "token_id": 235371, + "probability": 1.0, "cumulative": 1.0, - "text": "" + "text": "" } ] \ No newline at end of file diff --git a/tools-superlinear/visualize_tokens/simple_token_viz.py b/tools-superlinear/visualize_tokens/simple_token_viz.py index 19273d8f3da5e..6bab7d10e73a1 100644 --- a/tools-superlinear/visualize_tokens/simple_token_viz.py +++ b/tools-superlinear/visualize_tokens/simple_token_viz.py @@ -10,65 +10,17 @@ import json import argparse -import os -import sys from pathlib import Path -from typing import List, Dict, Any, Optional +from typing import List, Dict, Any from loguru import logger -def parse_token_data_file(file_path: str, debug: bool = False) -> List[Dict]: - """ - Parse a file containing multiple JSON objects, each spanning multiple lines. - Returns a list of parsed JSON objects. - - This is a reusable function that can be imported by other scripts. 
- """ - # Read the entire file content - with open(file_path, 'r') as f: - content = f.read() - - # Split the content at each new JSON object start - json_objects = [] - current_obj = "" - for line in content.splitlines(): - line = line.strip() - if not line: - continue - - if line == "{": # Start of a new object - if current_obj: # If we have collected a previous object - try: - # Try to parse it to validate - json_obj = json.loads(current_obj) - json_objects.append(json_obj) - except json.JSONDecodeError as e: - if debug: - logger.error(f"Failed to parse JSON: {e}") - logger.debug(f"Object: {current_obj[:100]}...") - current_obj = line - else: - current_obj += line - - # Add the last object if it exists - if current_obj: - try: - json_obj = json.loads(current_obj) - json_objects.append(json_obj) - except json.JSONDecodeError as e: - if debug: - logger.error(f"Failed to parse last JSON object: {e}") - - return json_objects +from utils import load_jsonl def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = False) -> str: """Process token data and return formatted string.""" output = [] - - # Parse the token data file - json_objects = parse_token_data_file(jsonl_file, debug) - - if debug: - logger.debug(f"Parsed {len(json_objects)} token objects") + + json_objects = load_jsonl(jsonl_file, debug) # Process each JSON object for data in json_objects: @@ -79,7 +31,7 @@ def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = Fa if token_id is None or prob is None: if debug: - logger.debug(f"Missing token_id or probability in object") + logger.debug(f"Missing token_id or probability in: {json_str[:100]}...") continue # Calculate score @@ -94,27 +46,13 @@ def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = Fa else: # absolute mode score = 1.0 / prob if prob > 0 else float('inf') - # Format output - use token ID directly - # We'll display it in a more readable format - if token_id < 256: - # ASCII 
characters - show them directly if printable - try: - char = chr(token_id) - if char.isprintable(): - token_display = char - else: - token_display = f"\\{token_id:03d}" - except: - token_display = f"\\{token_id:03d}" - else: - # Non-ASCII token IDs - token_display = f"T{token_id}" - - output.append(f"{token_display}({score:.1f})") + # Format output + output.append(f"{token_id}({score:.1f})") - except Exception as e: + except json.JSONDecodeError as e: if debug: - logger.error(f"Error processing token: {e}") + logger.error(f"Failed to parse JSON: {e}") + logger.debug(f"Object start: {json_str[:100]}...") continue if not output: @@ -125,6 +63,7 @@ def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = Fa logger.info(f"Processed {len(output)} tokens successfully") return result + def main(): parser = argparse.ArgumentParser(description="Simple token probability visualizer") parser.add_argument("input_file", help="Input file with token data") @@ -143,11 +82,6 @@ def main(): format="{level: <8} | {time:HH:mm:ss} | {message}") try: - # Check if input file exists - if not os.path.exists(args.input_file): - logger.error(f"Input file not found: {args.input_file}") - return 1 - # Process tokens result = process_token_data(args.input_file, args.mode, args.debug) diff --git a/tools-superlinear/visualize_tokens/utils.py b/tools-superlinear/visualize_tokens/utils.py new file mode 100644 index 0000000000000..0078c67d60894 --- /dev/null +++ b/tools-superlinear/visualize_tokens/utils.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python3 +""" +Utility functions for token visualization. +""" + +import json +from typing import List, Dict, Any +from loguru import logger + +# def parse_token_data_file(file_path: str, debug: bool = False) -> List[Dict]: +# """ +# Parse a file containing multiple JSON objects, each spanning multiple lines. +# Returns a list of parsed JSON objects. + +# This is a reusable function that can be imported by other scripts. 
+# """ +# # Read the entire file content +# with open(file_path, 'r') as f: +# content = f.read() + +# # Split the content at each new JSON object start +# json_objects = [] +# current_obj = "" +# for line in content.splitlines(): +# line = line.strip() +# if not line: +# continue + +# if line == "{": # Start of a new object +# if current_obj: # If we have collected a previous object +# json_objects.append(current_obj) +# current_obj = line +# else: +# current_obj += line + +# # Add the last object if it exists +# if current_obj: +# json_objects.append(current_obj) + +# # Parse each JSON object +# parsed_objects = [] +# for json_str in json_objects: +# try: +# data = json.loads(json_str) +# parsed_objects.append(data) +# except json.JSONDecodeError as e: +# if debug: +# logger.error(f"Failed to parse JSON: {e}") +# logger.debug(f"Object start: {json_str[:100]}...") + +# return parsed_objects + +# def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = False) -> str: +# """Process token data and return formatted string.""" +# output = [] + +# # Read the entire file content +# with open(jsonl_file, 'r') as f: +# content = f.read() + +# # Split the content at each new JSON object start +# json_objects = [] +# current_obj = "" +# for line in content.splitlines(): +# line = line.strip() +# if not line: +# continue + +# if line == "{": # Start of a new object +# if current_obj: # If we have collected a previous object +# json_objects.append(current_obj) +# current_obj = line +# else: +# current_obj += line + +# # Add the last object if it exists +# if current_obj: +# json_objects.append(current_obj) + +# # Process each JSON object +# for json_str in json_objects: +# try: +# # Try to parse the JSON +# data = json.loads(json_str) + +# # Extract token info +# token_id = data.get('selected_token_id') +# prob = data.get('selected_probability') + +# if token_id is None or prob is None: +# if debug: +# logger.debug(f"Missing token_id or probability in: 
{json_str[:100]}...") +# continue + +# # Calculate score +# if mode == 'relative': +# # Get max probability from candidates +# candidates = data.get('tokens', []) +# if candidates: +# max_prob = max(t.get('probability', 0) for t in candidates) +# score = 1.0 / (prob / max_prob) if max_prob > 0 else float('inf') +# else: +# score = 1.0 +# else: # absolute mode +# score = 1.0 / prob if prob > 0 else float('inf') + +# # Format output +# output.append(f"{token_id}({score:.1f})") + +# except json.JSONDecodeError as e: +# if debug: +# logger.error(f"Failed to parse JSON: {e}") +# logger.debug(f"Object start: {json_str[:100]}...") +# continue + +# if not output: +# logger.warning("No valid tokens were processed!") +# return "" + +# result = "".join(output) +# logger.info(f"Processed {len(output)} tokens successfully") +# return result + + + + +def load_jsonl(jsonl_file: str, debug: bool = False) -> List[Dict]: + """Process token data and return formatted string.""" + output = [] + + # Read the entire file content + with open(jsonl_file, 'r') as f: + content = f.read() + + # Split the content at each new JSON object start + json_objects = [] + current_obj = "" + for line in content.splitlines(): + line = line.strip() + if not line: + continue + + if line == "{": # Start of a new object + if current_obj: # If we have collected a previous object + json_objects.append(current_obj) + current_obj = line + else: + current_obj += line + + # Add the last object if it exists + if current_obj: + json_objects.append(current_obj) + + result = [] + # Process each JSON object + for json_str in json_objects: + try: + # Try to parse the JSON + data = json.loads(json_str) + + result.append(data) + except json.JSONDecodeError as e: + if debug: + logger.error(f"Failed to parse JSON: {e}") + logger.debug(f"Object start: {json_str[:100]}...") + continue + + return result \ No newline at end of file From b3cd1c05d5f8b8ae3e50cf42a64fd358f4a07195 Mon Sep 17 00:00:00 2001 From: Reliable Magician 
Date: Tue, 11 Mar 2025 21:41:23 +0300 Subject: [PATCH 13/22] fix simple token viz --- .../visualize_tokens/simple_token_viz.py | 32 +++-- tools-superlinear/visualize_tokens/utils.py | 135 ++---------------- 2 files changed, 28 insertions(+), 139 deletions(-) diff --git a/tools-superlinear/visualize_tokens/simple_token_viz.py b/tools-superlinear/visualize_tokens/simple_token_viz.py index 6bab7d10e73a1..3caaf3576ffd8 100644 --- a/tools-superlinear/visualize_tokens/simple_token_viz.py +++ b/tools-superlinear/visualize_tokens/simple_token_viz.py @@ -8,21 +8,26 @@ - relative: shows 1/(p/max(candidates)) where p is the token's probability """ -import json import argparse -from pathlib import Path -from typing import List, Dict, Any +import os + from loguru import logger +# Import the token loading function from utils import load_jsonl + def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = False) -> str: """Process token data and return formatted string.""" output = [] - + + # Load the token data json_objects = load_jsonl(jsonl_file, debug) - # Process each JSON object + if debug: + logger.debug(f"Loaded {len(json_objects)} token objects") + + # Process each token object for data in json_objects: try: # Extract token info @@ -31,10 +36,10 @@ def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = Fa if token_id is None or prob is None: if debug: - logger.debug(f"Missing token_id or probability in: {json_str[:100]}...") + logger.debug(f"Missing token_id or probability in object") continue - # Calculate score + # Calculate score based on mode if mode == 'relative': # Get max probability from candidates candidates = data.get('tokens', []) @@ -46,13 +51,12 @@ def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = Fa else: # absolute mode score = 1.0 / prob if prob > 0 else float('inf') - # Format output + # Format output with token ID output.append(f"{token_id}({score:.1f})") - except json.JSONDecodeError 
as e: + except Exception as e: if debug: - logger.error(f"Failed to parse JSON: {e}") - logger.debug(f"Object start: {json_str[:100]}...") + logger.error(f"Error processing token: {e}") continue if not output: @@ -63,7 +67,6 @@ def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = Fa logger.info(f"Processed {len(output)} tokens successfully") return result - def main(): parser = argparse.ArgumentParser(description="Simple token probability visualizer") parser.add_argument("input_file", help="Input file with token data") @@ -82,6 +85,11 @@ def main(): format="{level: <8} | {time:HH:mm:ss} | {message}") try: + # Check if input file exists + if not os.path.exists(args.input_file): + logger.error(f"Input file not found: {args.input_file}") + return 1 + # Process tokens result = process_token_data(args.input_file, args.mode, args.debug) diff --git a/tools-superlinear/visualize_tokens/utils.py b/tools-superlinear/visualize_tokens/utils.py index 0078c67d60894..9c53602810004 100644 --- a/tools-superlinear/visualize_tokens/utils.py +++ b/tools-superlinear/visualize_tokens/utils.py @@ -7,127 +7,11 @@ from typing import List, Dict, Any from loguru import logger -# def parse_token_data_file(file_path: str, debug: bool = False) -> List[Dict]: -# """ -# Parse a file containing multiple JSON objects, each spanning multiple lines. -# Returns a list of parsed JSON objects. - -# This is a reusable function that can be imported by other scripts. 
-# """ -# # Read the entire file content -# with open(file_path, 'r') as f: -# content = f.read() - -# # Split the content at each new JSON object start -# json_objects = [] -# current_obj = "" -# for line in content.splitlines(): -# line = line.strip() -# if not line: -# continue - -# if line == "{": # Start of a new object -# if current_obj: # If we have collected a previous object -# json_objects.append(current_obj) -# current_obj = line -# else: -# current_obj += line - -# # Add the last object if it exists -# if current_obj: -# json_objects.append(current_obj) - -# # Parse each JSON object -# parsed_objects = [] -# for json_str in json_objects: -# try: -# data = json.loads(json_str) -# parsed_objects.append(data) -# except json.JSONDecodeError as e: -# if debug: -# logger.error(f"Failed to parse JSON: {e}") -# logger.debug(f"Object start: {json_str[:100]}...") - -# return parsed_objects - -# def process_token_data(jsonl_file: str, mode: str = 'absolute', debug: bool = False) -> str: -# """Process token data and return formatted string.""" -# output = [] - -# # Read the entire file content -# with open(jsonl_file, 'r') as f: -# content = f.read() - -# # Split the content at each new JSON object start -# json_objects = [] -# current_obj = "" -# for line in content.splitlines(): -# line = line.strip() -# if not line: -# continue - -# if line == "{": # Start of a new object -# if current_obj: # If we have collected a previous object -# json_objects.append(current_obj) -# current_obj = line -# else: -# current_obj += line - -# # Add the last object if it exists -# if current_obj: -# json_objects.append(current_obj) - -# # Process each JSON object -# for json_str in json_objects: -# try: -# # Try to parse the JSON -# data = json.loads(json_str) - -# # Extract token info -# token_id = data.get('selected_token_id') -# prob = data.get('selected_probability') - -# if token_id is None or prob is None: -# if debug: -# logger.debug(f"Missing token_id or probability in: 
{json_str[:100]}...") -# continue - -# # Calculate score -# if mode == 'relative': -# # Get max probability from candidates -# candidates = data.get('tokens', []) -# if candidates: -# max_prob = max(t.get('probability', 0) for t in candidates) -# score = 1.0 / (prob / max_prob) if max_prob > 0 else float('inf') -# else: -# score = 1.0 -# else: # absolute mode -# score = 1.0 / prob if prob > 0 else float('inf') - -# # Format output -# output.append(f"{token_id}({score:.1f})") - -# except json.JSONDecodeError as e: -# if debug: -# logger.error(f"Failed to parse JSON: {e}") -# logger.debug(f"Object start: {json_str[:100]}...") -# continue - -# if not output: -# logger.warning("No valid tokens were processed!") -# return "" - -# result = "".join(output) -# logger.info(f"Processed {len(output)} tokens successfully") -# return result - - - - def load_jsonl(jsonl_file: str, debug: bool = False) -> List[Dict]: - """Process token data and return formatted string.""" - output = [] - + """ + Load a JSONL file containing token data. + Returns a list of parsed JSON objects. 
+ """ # Read the entire file content with open(jsonl_file, 'r') as f: content = f.read() @@ -151,18 +35,15 @@ def load_jsonl(jsonl_file: str, debug: bool = False) -> List[Dict]: if current_obj: json_objects.append(current_obj) - result = [] - # Process each JSON object + # Parse each JSON object + parsed_objects = [] for json_str in json_objects: try: - # Try to parse the JSON data = json.loads(json_str) - - result.append(data) + parsed_objects.append(data) except json.JSONDecodeError as e: if debug: logger.error(f"Failed to parse JSON: {e}") logger.debug(f"Object start: {json_str[:100]}...") - continue - return result \ No newline at end of file + return parsed_objects \ No newline at end of file From b256e25611de3ff8b628940348d261881ad339fc Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 21:44:18 +0300 Subject: [PATCH 14/22] plot token probabilities --- .../visualize_tokens/token_plot.py | 136 ++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 tools-superlinear/visualize_tokens/token_plot.py diff --git a/tools-superlinear/visualize_tokens/token_plot.py b/tools-superlinear/visualize_tokens/token_plot.py new file mode 100644 index 0000000000000..cc3010ccd6f39 --- /dev/null +++ b/tools-superlinear/visualize_tokens/token_plot.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 +""" +Generate a plot of token probability distributions. 
+ +Two modes available: +- absolute: shows 1/p where p is the absolute probability +- relative: shows 1/(p/max(candidates)) where p is the token's probability +""" + +import argparse +import os +import sys +from pathlib import Path +import matplotlib.pyplot as plt +import numpy as np +from loguru import logger + +# Import the token loading function +from utils import load_jsonl + +def create_probability_plot(tokens, output_file, mode='absolute'): + """Create a plot of token probabilities""" + if not tokens: + logger.error("No tokens to plot") + return False + + # Extract probabilities + selected_probs = [] + scores = [] + token_ids = [] + + for token_data in tokens: + if 'selected_probability' in token_data and 'selected_token_id' in token_data: + prob = token_data['selected_probability'] + token_id = token_data['selected_token_id'] + + # Calculate score based on mode + if mode == 'relative': + # Get max probability from candidates + candidates = token_data.get('tokens', []) + if candidates: + max_prob = max(t.get('probability', 0) for t in candidates) + score = 1.0 / (prob / max_prob) if max_prob > 0 else float('inf') + else: + score = 1.0 + else: # absolute mode + score = 1.0 / prob if prob > 0 else float('inf') + + selected_probs.append(prob) + scores.append(min(score, 10.0)) # Cap at 10 for better visualization + token_ids.append(token_id) + + if not selected_probs: + logger.error("No valid probabilities found") + return False + + # Create the plot + plt.figure(figsize=(12, 6)) + + # Plot 1: Token probabilities + plt.subplot(1, 2, 1) + plt.plot(selected_probs, marker='o', linestyle='-', alpha=0.7) + plt.title(f'Token Probabilities ({mode} mode)') + plt.xlabel('Token Position') + plt.ylabel('Probability') + plt.grid(True, alpha=0.3) + + # Plot 2: Token scores (1/probability) + plt.subplot(1, 2, 2) + plt.plot(scores, marker='o', linestyle='-', alpha=0.7, color='orange') + plt.title(f'Token Scores (1/probability) ({mode} mode)') + plt.xlabel('Token Position') + 
plt.ylabel('Score (1/probability)') + plt.grid(True, alpha=0.3) + + plt.tight_layout() + plt.savefig(output_file) + logger.success(f"Plot saved to {output_file}") + + # Also save the data as CSV for further analysis + csv_file = output_file.replace('.png', '.csv') + with open(csv_file, 'w') as f: + f.write("position,token_id,probability,score\n") + for i, (token_id, prob, score) in enumerate(zip(token_ids, selected_probs, scores)): + f.write(f"{i},{token_id},{prob},{score}\n") + logger.success(f"Data saved to {csv_file}") + + return True + +def main(): + parser = argparse.ArgumentParser(description="Token probability plot generator") + parser.add_argument("input_file", help="Input file with token data") + parser.add_argument("--mode", choices=['absolute', 'relative'], default='absolute', + help="Probability mode: absolute (1/p) or relative (1/(p/max_p))") + parser.add_argument("--output", "-o", help="Output file (default: token_probabilities.png)", + default="token_probabilities.png") + parser.add_argument("--debug", "-d", action="store_true", help="Enable debug logging") + + args = parser.parse_args() + + # Configure logging + logger.remove() + log_level = "DEBUG" if args.debug else "INFO" + logger.add(sys.stderr, level=log_level, + format="{level: <8} | {time:HH:mm:ss} | {message}") + + try: + # Check if input file exists + if not os.path.exists(args.input_file): + logger.error(f"Input file not found: {args.input_file}") + return 1 + + # Load token data + tokens = load_jsonl(args.input_file, args.debug) + + if not tokens: + logger.error("No token data loaded") + return 1 + + # Create plot + if not create_probability_plot(tokens, args.output, args.mode): + logger.error("Failed to create plot") + return 1 + + except Exception as e: + logger.error(f"Failed to process file: {e}") + if args.debug: + import traceback + logger.debug(traceback.format_exc()) + return 1 + + return 0 + +if __name__ == "__main__": + import sys + sys.exit(main()) \ No newline at end of file 
From 0e696e15e36551ff2b6a7ba48a99ac10dddb80bc Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 21:46:33 +0300 Subject: [PATCH 15/22] html viz token probabilities --- .../visualize_tokens/token_html_viz.py | 260 ++++++++++++++++++ 1 file changed, 260 insertions(+) create mode 100644 tools-superlinear/visualize_tokens/token_html_viz.py diff --git a/tools-superlinear/visualize_tokens/token_html_viz.py b/tools-superlinear/visualize_tokens/token_html_viz.py new file mode 100644 index 0000000000000..f4e7853d1ff7e --- /dev/null +++ b/tools-superlinear/visualize_tokens/token_html_viz.py @@ -0,0 +1,260 @@ +#!/usr/bin/env python3 +""" +Generate an HTML visualization of tokens with colors based on their probabilities. + +Two modes available: +- absolute: shows 1/p where p is the absolute probability +- relative: shows 1/(p/max(candidates)) where p is the token's probability +""" + +import argparse +import os +import sys +from pathlib import Path +import matplotlib.pyplot as plt +import matplotlib.colors as mcolors +import numpy as np +from loguru import logger +import html + +# Import the token loading function +from utils import load_jsonl + +def get_color_for_score(score, max_score=10.0, cmap_name='plasma'): + """Get a color for a score using a colormap""" + # Normalize score to 0-1 range + normalized_score = min(score, max_score) / max_score + # Get colormap + cmap = plt.get_cmap(cmap_name) + # Get color + color = cmap(normalized_score) + # Convert to hex + hex_color = mcolors.rgb2hex(color) + return hex_color + +def create_html_visualization(tokens, output_file, mode='absolute'): + """Create an HTML visualization of tokens with colors based on their probabilities""" + if not tokens: + logger.error("No tokens to visualize") + return False + + # Extract token info + token_data = [] + + for token_data_obj in tokens: + if 'selected_probability' in token_data_obj and 'selected_token_id' in token_data_obj: + prob = token_data_obj['selected_probability'] + 
token_id = token_data_obj['selected_token_id'] + + # Calculate score based on mode + if mode == 'relative': + # Get max probability from candidates + candidates = token_data_obj.get('tokens', []) + if candidates: + max_prob = max(t.get('probability', 0) for t in candidates) + score = 1.0 / (prob / max_prob) if max_prob > 0 else float('inf') + else: + score = 1.0 + else: # absolute mode + score = 1.0 / prob if prob > 0 else float('inf') + + # Get color for score + color = get_color_for_score(min(score, 10.0)) + + # Add to token data + token_data.append({ + 'token_id': token_id, + 'probability': prob, + 'score': score, + 'color': color + }) + + if not token_data: + logger.error("No valid token data found") + return False + + # Create HTML + html_content = f""" + + + Token Visualization ({mode} mode) + + + +

Token Visualization

+ +
+ Mode: + + +
+ +
+""" + + # Add tokens + for i, token in enumerate(token_data): + token_id = token['token_id'] + prob = token['probability'] + score = token['score'] + color = token['color'] + + html_content += f""" +
+ T{token_id} + + Token ID: {token_id}
+ Probability: {prob:.6f}
+ Score: {score:.2f} +
+
""" + + # Add legend + html_content += """ +
+

Legend

+

Colors represent token scores (1/probability):

+""" + + # Add legend items + for i in range(11): + score = i + color = get_color_for_score(score) + html_content += f""" +
+ + Score: {score} +
""" + + # Close HTML + html_content += """ +
+ + + + +""" + + # Write HTML to file + with open(output_file, 'w') as f: + f.write(html_content) + + logger.success(f"HTML visualization saved to {output_file}") + return True + +def main(): + parser = argparse.ArgumentParser(description="Token HTML visualizer") + parser.add_argument("input_file", help="Input file with token data") + parser.add_argument("--mode", choices=['absolute', 'relative'], default='absolute', + help="Probability mode: absolute (1/p) or relative (1/(p/max_p))") + parser.add_argument("--output", "-o", help="Output file (default: token_visualization.html)", + default="token_visualization.html") + parser.add_argument("--debug", "-d", action="store_true", help="Enable debug logging") + + args = parser.parse_args() + + # Configure logging + logger.remove() + log_level = "DEBUG" if args.debug else "INFO" + logger.add(sys.stderr, level=log_level, + format="{level: <8} | {time:HH:mm:ss} | {message}") + + try: + # Check if input file exists + if not os.path.exists(args.input_file): + logger.error(f"Input file not found: {args.input_file}") + return 1 + + # Load token data + tokens = load_jsonl(args.input_file, args.debug) + + if not tokens: + logger.error("No token data loaded") + return 1 + + # Create HTML visualization + if not create_html_visualization(tokens, args.output, args.mode): + logger.error("Failed to create HTML visualization") + return 1 + + except Exception as e: + logger.error(f"Failed to process file: {e}") + if args.debug: + import traceback + logger.debug(traceback.format_exc()) + return 1 + + return 0 + +if __name__ == "__main__": + import sys + sys.exit(main()) \ No newline at end of file From 2f8b626c849537788edf116b5c741d6bfc972c14 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 21:57:09 +0300 Subject: [PATCH 16/22] Remove old code --- token_probs.json | 681 ------------------ .../visualize_tokens/process_json_tokens.py | 230 ------ .../token_probability_visualizer.py | 446 ------------ 3 files changed, 
1357 deletions(-) delete mode 100644 token_probs.json delete mode 100755 tools-superlinear/visualize_tokens/process_json_tokens.py delete mode 100755 tools-superlinear/visualize_tokens/token_probability_visualizer.py diff --git a/token_probs.json b/token_probs.json deleted file mode 100644 index bd78ca95db0b1..0000000000000 --- a/token_probs.json +++ /dev/null @@ -1,681 +0,0 @@ -[ - { - "index": 20, - "token_id": 12362, - "probability": 0.00651, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235371, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 571, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235298, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 2997, - "probability": 0.023167, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 73786, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 109, - "probability": 0.087381, - "cumulative": 1.0, - "text": "" - }, - { - "index": 2, - "token_id": 105460, - "probability": 0.022329, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235371, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 571, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235298, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 2997, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 73786, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 105776, - "probability": 0.210727, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 108, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 9, - "token_id": 2299, - "probability": 0.014749, - "cumulative": 1.0, - 
"text": "" - }, - { - "index": 1, - "token_id": 30515, - "probability": 0.044432, - "cumulative": 1.0, - "text": "" - }, - { - "index": 6, - "token_id": 235322, - "probability": 0.012961, - "cumulative": 1.0, - "text": "" - }, - { - "index": 3, - "token_id": 974, - "probability": 0.02161, - "cumulative": 1.0, - "text": "" - }, - { - "index": 13, - "token_id": 12132, - "probability": 0.008043, - "cumulative": 1.0, - "text": "" - }, - { - "index": 2, - "token_id": 604, - "probability": 0.021917, - "cumulative": 1.0, - "text": "" - }, - { - "index": 3, - "token_id": 6875, - "probability": 0.020412, - "cumulative": 1.0, - "text": "" - }, - { - "index": 2, - "token_id": 6403, - "probability": 0.018184, - "cumulative": 1.0, - "text": "" - }, - { - "index": 4, - "token_id": 235269, - "probability": 0.016629, - "cumulative": 1.0, - "text": "" - }, - { - "index": 7, - "token_id": 6815, - "probability": 0.014198, - "cumulative": 1.0, - "text": "" - }, - { - "index": 4, - "token_id": 87491, - "probability": 0.012042, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 15081, - "probability": 0.18399, - "cumulative": 1.0, - "text": "" - }, - { - "index": 6, - "token_id": 6383, - "probability": 0.007913, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 476, - "probability": 0.050319, - "cumulative": 1.0, - "text": "" - }, - { - "index": 3, - "token_id": 17203, - "probability": 0.023984, - "cumulative": 1.0, - "text": "" - }, - { - "index": 7, - "token_id": 17611, - "probability": 0.008355, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 2817, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 17273, - "probability": 0.04496, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 17273, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 4, - "token_id": 108, - "probability": 0.018042, - "cumulative": 1.0, - "text": "" - }, - { - 
"index": 1, - "token_id": 1154, - "probability": 0.02734, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 685, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 9095, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 20908, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235269, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 11360, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 16398, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235269, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 4530, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 3547, - "probability": 0.247556, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 14577, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 578, - "probability": 0.08638, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 578, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 5, - "token_id": 3210, - "probability": 0.019112, - "cumulative": 1.0, - "text": "" - }, - { - "index": 6, - "token_id": 1281, - "probability": 0.021082, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 577, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 12, - "token_id": 2582, - "probability": 0.013377, - "cumulative": 1.0, - "text": "" - }, - { - "index": 4, - "token_id": 2652, - "probability": 0.016881, - "cumulative": 1.0, - "text": "" - }, - { - "index": 2, - "token_id": 35606, - "probability": 0.029259, - "cumulative": 1.0, - "text": "" - }, - { - "index": 9, - "token_id": 3766, - "probability": 0.009102, - "cumulative": 
1.0, - "text": "" - }, - { - "index": 2, - "token_id": 651, - "probability": 0.026992, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235371, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 571, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235298, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 615, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 73786, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 108, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235322, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235371, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 571, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235298, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 2997, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 73786, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 1645, - "probability": 0.236807, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 108, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 9, - "token_id": 4038, - "probability": 0.011994, - "cumulative": 1.0, - "text": "" - }, - { - "index": 3, - "token_id": 3311, - "probability": 0.032169, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 235284, - "probability": 0.053946, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235315, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 235310, - "probability": 
0.070341, - "cumulative": 1.0, - "text": "" - }, - { - "index": 5, - "token_id": 235308, - "probability": 0.031117, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235269, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 5, - "token_id": 3725, - "probability": 0.018343, - "cumulative": 1.0, - "text": "" - }, - { - "index": 11, - "token_id": 2204, - "probability": 0.012064, - "cumulative": 1.0, - "text": "" - }, - { - "index": 5, - "token_id": 777, - "probability": 0.019518, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 30515, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 1, - "token_id": 591, - "probability": 0.113661, - "cumulative": 1.0, - "text": "" - }, - { - "index": 3, - "token_id": 1671, - "probability": 0.024076, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 731, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 7, - "token_id": 7149, - "probability": 0.017957, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 55900, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 5, - "token_id": 577, - "probability": 0.015476, - "cumulative": 1.0, - "text": "" - }, - { - "index": 12, - "token_id": 4560, - "probability": 0.008069, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235322, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235371, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 571, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235298, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 615, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 73786, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 
0, - "token_id": 108, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235322, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - }, - { - "index": 0, - "token_id": 235371, - "probability": 1.0, - "cumulative": 1.0, - "text": "" - } -] \ No newline at end of file diff --git a/tools-superlinear/visualize_tokens/process_json_tokens.py b/tools-superlinear/visualize_tokens/process_json_tokens.py deleted file mode 100755 index 9b3b2bce83777..0000000000000 --- a/tools-superlinear/visualize_tokens/process_json_tokens.py +++ /dev/null @@ -1,230 +0,0 @@ -#!/usr/bin/env python3 -""" -Process JSON token data from llama.cpp - -This script processes the JSON token data generated by llama.cpp when -the LLAMA_TOKEN_DATA_FILE environment variable is set. - -It converts the JSONL (JSON Lines) format into a single JSON array -that can be used for further analysis and visualization. -""" - -import json -import argparse -import sys -from pathlib import Path -import matplotlib.pyplot as plt -import numpy as np - -def convert_jsonl_to_array(input_file, output_file): - """Convert JSONL to a single JSON array""" - tokens = [] - - try: - with open(input_file, 'r', encoding='utf-8') as f: - for line in f: - try: - # Skip empty lines - if line.strip(): - token = json.loads(line) - tokens.append(token) - except json.JSONDecodeError as e: - print(f"Error parsing JSON line: {e}") - print(f"Problematic line: {line}") - except Exception as e: - print(f"Error reading file: {e}") - return False - - try: - with open(output_file, 'w', encoding='utf-8') as f: - json.dump(tokens, f, indent=2) - except Exception as e: - print(f"Error writing file: {e}") - return False - - return len(tokens) - -def create_probability_plot(tokens, output_file): - """Create a plot of token probabilities""" - if not tokens: - print("No tokens to plot") - return False - - # Extract probabilities from the new format - selected_probs = [] - top_probs = [] # For the highest 
probability candidate - - for token_data in tokens: - if 'selected_probability' in token_data: - selected_probs.append(token_data['selected_probability']) - - # Get the highest probability among candidates - if 'tokens' in token_data and token_data['tokens']: - max_prob = max(t['probability'] for t in token_data['tokens']) - top_probs.append(max_prob) - - if not selected_probs: - print("No probability data found in tokens") - return False - - # Create plot - plt.figure(figsize=(12, 8)) - - # Plot both selected and top probabilities - plt.plot(selected_probs, 'b-', marker='o', markersize=4, alpha=0.7, - label='Selected Token Probability') - if top_probs: - plt.plot(top_probs, 'r-', marker='o', markersize=4, alpha=0.4, - label='Highest Candidate Probability') - - plt.title('Token Selection Probabilities') - plt.xlabel('Token Index') - plt.ylabel('Probability') - plt.grid(True, alpha=0.3) - plt.legend() - - # Add rolling average for selected probabilities - window_size = min(10, len(selected_probs)) - if window_size > 1: - rolling_avg = np.convolve(selected_probs, np.ones(window_size)/window_size, mode='valid') - plt.plot(range(window_size-1, len(selected_probs)), rolling_avg, 'g-', - linewidth=2, label=f'{window_size}-token Moving Average') - plt.legend() - - # Add statistics - mean_prob = np.mean(selected_probs) - median_prob = np.median(selected_probs) - min_prob = min(selected_probs) - max_prob = max(selected_probs) - std_dev = np.std(selected_probs) - - stats_text = ( - f"Selected Token Statistics:\n" - f"Count: {len(selected_probs)}\n" - f"Mean: {mean_prob:.6f}\n" - f"Median: {median_prob:.6f}\n" - f"Min: {min_prob:.6f}\n" - f"Max: {max_prob:.6f}\n" - f"Std Dev: {std_dev:.6f}" - ) - - plt.figtext(0.15, 0.02, stats_text, fontsize=10, - bbox=dict(facecolor='white', alpha=0.8)) - - plt.tight_layout() - plt.savefig(output_file) - print(f"Probability plot saved to {output_file}") - return True - -def analyze_token_data(tokens): - """Analyze token data and print 
statistics""" - if not tokens: - print("No tokens to analyze") - return - - # Extract data from the new format - selected_probs = [] - random_values = [] - selected_token_ids = [] - candidate_counts = [] - - for token_data in tokens: - if 'selected_probability' in token_data: - selected_probs.append(token_data['selected_probability']) - - if 'raw_random' in token_data: - random_values.append(token_data['raw_random']) - - if 'selected_token_id' in token_data: - selected_token_ids.append(token_data['selected_token_id']) - - if 'tokens' in token_data: - candidate_counts.append(len(token_data['tokens'])) - - if selected_probs: - print("\nSelected Token Statistics:") - print(f" Total tokens: {len(selected_probs)}") - print(f" Mean probability: {np.mean(selected_probs):.6f}") - print(f" Median probability: {np.median(selected_probs):.6f}") - print(f" Min probability: {min(selected_probs):.6f}") - print(f" Max probability: {max(selected_probs):.6f}") - print(f" Std Dev: {np.std(selected_probs):.6f}") - - if random_values: - print("\nRandom Number Statistics:") - print(f" Total random values: {len(random_values)}") - print(f" Mean value: {np.mean(random_values):.6f}") - print(f" Median value: {np.median(random_values):.6f}") - print(f" Min value: {min(random_values):.6f}") - print(f" Max value: {max(random_values):.6f}") - print(f" Std Dev: {np.std(random_values):.6f}") - - if selected_token_ids: - print("\nToken ID Statistics:") - print(f" Total token IDs: {len(selected_token_ids)}") - print(f" Unique token IDs: {len(set(selected_token_ids))}") - print(f" Min token ID: {min(selected_token_ids)}") - print(f" Max token ID: {max(selected_token_ids)}") - - if candidate_counts: - print("\nCandidate Statistics:") - print(f" Average candidates per token: {np.mean(candidate_counts):.1f}") - print(f" Min candidates: {min(candidate_counts)}") - print(f" Max candidates: {max(candidate_counts)}") - -def main(): - parser = argparse.ArgumentParser(description="Process JSON token data 
from llama.cpp") - parser.add_argument("input_file", help="Input JSONL file with token data") - parser.add_argument("--output", "-o", help="Output JSON file (default: token_data.json)", - default="token_data.json") - parser.add_argument("--plot", "-p", help="Generate probability plot (default: token_probs.png)", - default="token_probs.png") - parser.add_argument("--analyze", "-a", action="store_true", help="Print token statistics") - - args = parser.parse_args() - - input_path = Path(args.input_file) - if not input_path.exists(): - print(f"Error: Input file '{input_path}' does not exist", file=sys.stderr) - return 1 - - # Load and convert the token data - tokens = [] - try: - with open(input_path, 'r', encoding='utf-8') as f: - for line in f: - line = line.strip() - if line: - try: - token = json.loads(line) - tokens.append(token) - except json.JSONDecodeError as e: - print(f"Error parsing JSON line: {e}", file=sys.stderr) - print(f"Problematic line: {line}", file=sys.stderr) - continue - except Exception as e: - print(f"Error reading input file: {e}", file=sys.stderr) - return 1 - - # Write converted data to output file - try: - with open(args.output, 'w', encoding='utf-8') as f: - json.dump(tokens, f, indent=2) - print(f"Processed {len(tokens)} tokens, saved to {args.output}") - except Exception as e: - print(f"Error writing output file: {e}", file=sys.stderr) - return 1 - - # Generate probability plot - if not create_probability_plot(tokens, args.plot): - print("Failed to create probability plot", file=sys.stderr) - return 1 - - # Print analysis - if args.analyze: - analyze_token_data(tokens) - - return 0 - -if __name__ == "__main__": - sys.exit(main()) \ No newline at end of file diff --git a/tools-superlinear/visualize_tokens/token_probability_visualizer.py b/tools-superlinear/visualize_tokens/token_probability_visualizer.py deleted file mode 100755 index b0545402d8cb4..0000000000000 --- a/tools-superlinear/visualize_tokens/token_probability_visualizer.py +++ 
/dev/null @@ -1,446 +0,0 @@ -#!/usr/bin/env python3 -""" -Token Probability Visualizer - -A tool to visualize token probabilities during LLM text generation. -Reads token probability data from stderr output of llama.cpp inference -and visualizes it, coloring tokens based on their absolute or relative probability. - -Usage: - 1. Run llama.cpp inference with stderr redirected to a file: - ./build/bin/llama-run model.gguf "prompt text" 2> inference_log.txt > output.txt - - 2. Run this script to visualize the token probabilities: - python token_probability_visualizer.py inference_log.txt output.txt -""" - -import re -import json -import argparse -import numpy as np -import matplotlib.pyplot as plt -from pathlib import Path -import matplotlib.colors as mcolors -from matplotlib.figure import Figure -from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas -from matplotlib.patches import Rectangle -import sys -import html -from PIL import Image, ImageDraw, ImageFont -from typing import List, Dict, Tuple, Union, Any, Optional - -class TokenProbabilityParser: - """Parse the stderr log to extract token probability information""" - - def __init__(self, log_file: str): - self.log_file = log_file - self.tokens = [] - self.probabilities = [] - self.token_ids = [] - self.selected_indices = [] - - def parse_log(self) -> List[Dict[str, Any]]: - """Parse the log file and extract token probability information""" - tokens = [] - - try: - with open(self.log_file, 'r', encoding='utf-8') as f: - for line in f: - if line.strip(): - try: - token = json.loads(line) - # Extract relevant information - if 'selected_probability' in token: - token['probability'] = token['selected_probability'] - if 'selected_token_id' in token: - token['token_id'] = token['selected_token_id'] - tokens.append(token) - - # Store for later processing - if 'token_id' in token: - self.token_ids.append(token['token_id']) - if 'probability' in token: - self.probabilities.append(token['probability']) 
- if 'selected_index' in token: - self.selected_indices.append(token['selected_index']) - except json.JSONDecodeError as e: - print(f"Error parsing JSON line: {e}", file=sys.stderr) - print(f"Problematic line: {line}", file=sys.stderr) - continue - except Exception as e: - print(f"Error reading file: {e}", file=sys.stderr) - return [] - - return tokens - - def extract_token_text(self, model_vocab_file: Optional[str] = None) -> None: - """ - Extract text representation of tokens. - If model_vocab_file is provided, use it to map token IDs to text. - Otherwise, just use placeholders. - """ - token_map = {} - if model_vocab_file and Path(model_vocab_file).exists(): - # Load token map from model vocab file - # Expected format: one token per line, "token_id token_text" - with open(model_vocab_file, 'r', encoding='utf-8', errors='replace') as f: - for line in f: - parts = line.strip().split(' ', 1) - if len(parts) == 2: - token_map[int(parts[0])] = parts[1] - - # Map token IDs to text - for token_id in self.token_ids: - if token_id in token_map: - self.tokens.append(token_map[token_id]) - else: - # Use placeholder if token ID not found in vocab file - self.tokens.append(f"<{token_id}>") - -def load_output_text(output_file: str) -> str: - """Load the generated text from the output file""" - try: - with open(output_file, 'r', encoding='utf-8', errors='replace') as f: - return f.read() - except Exception as e: - print(f"Error reading output file: {e}") - return "" - -def create_colored_html(output_text: str, probability_blocks: List[Dict[str, Any]], - color_mode: str = "absolute", output_file: str = "visualization.html"): - """Create an HTML visualization with tokens colored based on their probabilities""" - - # Create HTML template - html_content = f""" - - - - Token Probability Visualization - - - -
-
-

Token Probability Visualization

-
- - -
-
- -
- {generate_colored_tokens_html(output_text, probability_blocks, color_mode)} -
- -
-
-
- High Probability -
-
-
- Medium Probability -
-
-
- Low Probability -
-
-
- - - - - """ - - # Write to file - with open(output_file, 'w', encoding='utf-8') as f: - f.write(html_content) - - print(f"HTML visualization saved to {output_file}") - -def checked_attr(value: str, current_mode: str) -> str: - """Helper function to set the checked attribute for radio buttons""" - return "checked" if value == current_mode else "" - -def generate_colored_tokens_html(output_text: str, probability_blocks: List[Dict[str, Any]], - color_mode: str) -> str: - """Generate HTML for colored tokens""" - - # Get all probabilities for normalization (relative mode) - all_probs = [block["selected_probability"] for block in probability_blocks if "selected_probability" in block] - min_prob = min(all_probs) if all_probs else 0 - max_prob = max(all_probs) if all_probs else 1 - prob_range = max_prob - min_prob if max_prob > min_prob else 1.0 - - # Generate HTML for each token - html_output = "" - chars_processed = 0 - - for block in probability_blocks: - if "selected_token_id" not in block or "selected_probability" not in block: - continue - - token_id = block["selected_token_id"] - probability = block["selected_probability"] - - # Get token text from the output - this is an approximation - # In practice, you'd need a proper tokenizer to get the exact token text - # For now, we'll just take the next character as a simple approximation - if chars_processed < len(output_text): - token_text = output_text[chars_processed] - chars_processed += 1 - else: - token_text = "□" # Placeholder for tokens outside the output length - - # Calculate color based on probability - if color_mode == "absolute": - # Absolute mode: green (low) to yellow (medium) to red (high) - # Map probability [0,1] to color - r = min(255, int(probability * 2 * 255)) - g = min(255, int((1 - probability) * 2 * 255)) - b = 50 - else: - # Relative mode: normalize probability within the range of all tokens - normalized_prob = (probability - min_prob) / prob_range if prob_range > 0 else 0.5 - r = min(255, 
int(normalized_prob * 2 * 255)) - g = min(255, int((1 - normalized_prob) * 2 * 255)) - b = 50 - - color = f"rgb({r},{g},{b})" - - # Special handling for whitespace characters (make them visible) - display_text = token_text - if token_text.isspace(): - if token_text == " ": - display_text = " " - elif token_text == "\n": - display_text = "
" - else: - display_text = "⎵" # Unicode symbol for space - else: - display_text = html.escape(display_text) - - # Create tooltip with token info and candidates - tooltip_text = f"Token ID: {token_id}\\nProbability: {probability:.4f}" - if "tokens" in block: - tooltip_text += "\\n\\nTop candidates:" - sorted_candidates = sorted(block["tokens"], key=lambda x: x["probability"], reverse=True) - for i, candidate in enumerate(sorted_candidates[:5]): # Show top 5 candidates - tooltip_text += f"\\n{i+1}. ID {candidate['token_id']}: {candidate['probability']:.4f}" - - # Create token HTML with tooltip - token_html = f""" - - {display_text} - - {tooltip_text} - - - """ - - html_output += token_html - - # For any remaining text not processed as tokens - if chars_processed < len(output_text): - html_output += html.escape(output_text[chars_processed:]) - - return html_output - -def export_json(probability_blocks: List[Dict[str, Any]], output_file: str): - """Export probability data to JSON file""" - with open(output_file, 'w', encoding='utf-8') as f: - json.dump(probability_blocks, f, indent=2) - - print(f"JSON data exported to {output_file}") - -def create_visualization_plot(probability_blocks: List[Dict[str, Any]], output_file: str): - """Create matplotlib visualization of token probabilities""" - probabilities = [block["probability"] for block in probability_blocks] - token_ids = [block["token_id"] for block in probability_blocks] - - # Set up plot - fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10)) - - # Plot probabilities - ax1.plot(probabilities, marker='o', linestyle='-', color='blue') - ax1.set_title('Token Probabilities') - ax1.set_xlabel('Token Sequence') - ax1.set_ylabel('Probability') - ax1.grid(True) - - # Plot histogram of probabilities - ax2.hist(probabilities, bins=20, color='green', alpha=0.7) - ax2.set_title('Probability Distribution') - ax2.set_xlabel('Probability') - ax2.set_ylabel('Frequency') - ax2.grid(True) - - # Add statistics - stats_text = f""" - 
Statistics: - Mean: {np.mean(probabilities):.4f} - Median: {np.median(probabilities):.4f} - Min: {min(probabilities):.4f} - Max: {max(probabilities):.4f} - Std Dev: {np.std(probabilities):.4f} - """ - - fig.text(0.15, 0.02, stats_text, fontsize=10, - bbox=dict(facecolor='white', alpha=0.8)) - - plt.tight_layout() - plt.savefig(output_file) - print(f"Visualization plot saved to {output_file}") - -def main(): - parser = argparse.ArgumentParser(description="Visualize token probabilities from llama.cpp inference") - parser.add_argument("log_file", help="Log file with stderr output from llama.cpp inference") - parser.add_argument("output_file", help="Output file with generated text", nargs="?") - parser.add_argument("--mode", choices=["absolute", "relative"], default="absolute", - help="Probability coloring mode: absolute or relative (default: absolute)") - parser.add_argument("--html", help="Output HTML file (default: visualization.html)", - default="visualization.html") - parser.add_argument("--json", help="Output JSON file (default: token_probs.json)", - default="token_probs.json") - parser.add_argument("--plot", help="Output plot file (default: token_probs_plot.png)", - default="token_probs_plot.png") - parser.add_argument("--vocab", help="Model vocabulary file for mapping token IDs to text") - - args = parser.parse_args() - - log_path = Path(args.log_file) - if not log_path.exists(): - print(f"Error: Log file '{log_path}' does not exist", file=sys.stderr) - return 1 - - # Parse log file - parser = TokenProbabilityParser(args.log_file) - probability_blocks = parser.parse_log() - - if len(probability_blocks) == 0: - print("Error: No token probability data found in log file", file=sys.stderr) - return 1 - - # Extract token text if vocab file provided - if args.vocab: - parser.extract_token_text(args.vocab) - - # Export data to JSON - export_json(probability_blocks, args.json) - - # Create visualization plot - create_visualization_plot(probability_blocks, args.plot) - 
- # If output file is provided, create HTML visualization - if args.output_file: - output_path = Path(args.output_file) - if output_path.exists(): - output_text = load_output_text(args.output_file) - create_colored_html(output_text, probability_blocks, args.mode, args.html) - else: - print(f"Warning: Output file '{output_path}' does not exist. Skipping HTML visualization.") - - return 0 - -if __name__ == "__main__": - sys.exit(main()) \ No newline at end of file From c7e04a3614abadac0776ba72663648029cf517ab Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 22:03:37 +0300 Subject: [PATCH 17/22] token viz fix --- .../visualize_tokens/token_html_viz.py | 369 +++++++++--------- 1 file changed, 181 insertions(+), 188 deletions(-) diff --git a/tools-superlinear/visualize_tokens/token_html_viz.py b/tools-superlinear/visualize_tokens/token_html_viz.py index f4e7853d1ff7e..968b32d50c80c 100644 --- a/tools-superlinear/visualize_tokens/token_html_viz.py +++ b/tools-superlinear/visualize_tokens/token_html_viz.py @@ -1,6 +1,9 @@ #!/usr/bin/env python3 """ -Generate an HTML visualization of tokens with colors based on their probabilities. +Create HTML visualization of token probabilities. + +This script creates an HTML visualization of token probabilities from +the JSON token data generated by llama.cpp. 
Two modes available: - absolute: shows 1/p where p is the absolute probability @@ -10,146 +13,175 @@ import argparse import os import sys -from pathlib import Path -import matplotlib.pyplot as plt + import matplotlib.colors as mcolors -import numpy as np from loguru import logger -import html # Import the token loading function from utils import load_jsonl -def get_color_for_score(score, max_score=10.0, cmap_name='plasma'): - """Get a color for a score using a colormap""" + +def get_color_for_score(score, max_score=10.0): + """Get a color for a score based on its value - simple green with varying intensity.""" + # Clamp score to max_score + score = min(score, max_score) + # Normalize score to 0-1 range - normalized_score = min(score, max_score) / max_score - # Get colormap - cmap = plt.get_cmap(cmap_name) - # Get color - color = cmap(normalized_score) - # Convert to hex - hex_color = mcolors.rgb2hex(color) - return hex_color + intensity = score / max_score + + # Simple green with varying intensity (0-255) + # Higher score = BRIGHTER green (not darker) + green_value = 255 - int(50 + 205 * intensity) # Range from dark green (50) to bright green (255) + + # Return hex color + return f"#00{green_value:02x}00" def create_html_visualization(tokens, output_file, mode='absolute'): - """Create an HTML visualization of tokens with colors based on their probabilities""" - if not tokens: - logger.error("No tokens to visualize") - return False - - # Extract token info + """Create an HTML visualization of token probabilities.""" + # Process tokens token_data = [] - for token_data_obj in tokens: - if 'selected_probability' in token_data_obj and 'selected_token_id' in token_data_obj: - prob = token_data_obj['selected_probability'] - token_id = token_data_obj['selected_token_id'] - - # Calculate score based on mode - if mode == 'relative': - # Get max probability from candidates - candidates = token_data_obj.get('tokens', []) - if candidates: - max_prob = max(t.get('probability', 
0) for t in candidates) - score = 1.0 / (prob / max_prob) if max_prob > 0 else float('inf') - else: - score = 1.0 - else: # absolute mode - score = 1.0 / prob if prob > 0 else float('inf') + for token in tokens: + token_id = token.get('selected_token_id') + prob = token.get('selected_probability') + + if token_id is None or prob is None: + continue - # Get color for score - color = get_color_for_score(min(score, 10.0)) + # Calculate score based on mode + if mode == 'relative': + # Get max probability from candidates + candidates = token.get('tokens', []) + if candidates: + max_prob = max(t.get('probability', 0) for t in candidates) + score = 1.0 / (prob / max_prob) if max_prob > 0 else float('inf') + else: + score = 1.0 + else: # absolute mode + score = 1.0 / prob if prob > 0 else float('inf') - # Add to token data - token_data.append({ - 'token_id': token_id, - 'probability': prob, - 'score': score, - 'color': color - }) - - if not token_data: - logger.error("No valid token data found") - return False + # Get color for score + color = get_color_for_score(score) + + token_data.append({ + 'token_id': token_id, + 'probability': prob, + 'score': score, + 'color': color + }) # Create HTML - html_content = f""" - - - Token Visualization ({mode} mode) - - - -

Token Visualization

+ html_content = """ + + + + Token Probability Visualization + + + +

Token Probability Visualization

+

This visualization shows tokens colored by their probability scores.

+ +
+ Mode: +
+ + + +
+
+ +
+ """ -
- Mode: - - + # Add legend + html_content += """ +
+

Legend

+

Green intensity represents token scores (1/probability):

+
+ + → High score (low probability) + + → Low score (high probability) +
+ +
+ + Score gradient (low to high) +
- -
+ +
""" # Add tokens @@ -159,9 +191,15 @@ def create_html_visualization(tokens, output_file, mode='absolute'): score = token['score'] color = token['color'] + # Determine text color based on background color brightness + # Use white text for dark backgrounds, black text for light backgrounds + r, g, b = mcolors.hex2color(color) + brightness = (r * 299 + g * 587 + b * 114) / 1000 + text_color = "#FFFFFF" if brightness < 0.5 else "#000000" + html_content += f"""
- T{token_id} + T{token_id} Token ID: {token_id}
Probability: {prob:.6f}
@@ -169,37 +207,9 @@ def create_html_visualization(tokens, output_file, mode='absolute'):
""" - # Add legend - html_content += """ -
-

Legend

-

Colors represent token scores (1/probability):

-""" - - # Add legend items - for i in range(11): - score = i - color = get_color_for_score(score) - html_content += f""" -
- - Score: {score} -
""" - # Close HTML html_content += """
- - """ @@ -212,49 +222,32 @@ def create_html_visualization(tokens, output_file, mode='absolute'): return True def main(): - parser = argparse.ArgumentParser(description="Token HTML visualizer") - parser.add_argument("input_file", help="Input file with token data") - parser.add_argument("--mode", choices=['absolute', 'relative'], default='absolute', - help="Probability mode: absolute (1/p) or relative (1/(p/max_p))") - parser.add_argument("--output", "-o", help="Output file (default: token_visualization.html)", - default="token_visualization.html") - parser.add_argument("--debug", "-d", action="store_true", help="Enable debug logging") - + """Main function.""" + parser = argparse.ArgumentParser(description='Create HTML visualization of token probabilities') + parser.add_argument('input_file', help='Input JSONL file with token data') + parser.add_argument('--output', '-o', help='Output HTML file', default='token_viz.html') + parser.add_argument('--mode', '-m', help='Visualization mode (absolute or relative)', default='absolute', choices=['absolute', 'relative']) args = parser.parse_args() - # Configure logging - logger.remove() - log_level = "DEBUG" if args.debug else "INFO" - logger.add(sys.stderr, level=log_level, - format="{level: <8} | {time:HH:mm:ss} | {message}") - + # Load token data try: - # Check if input file exists - if not os.path.exists(args.input_file): - logger.error(f"Input file not found: {args.input_file}") - return 1 - - # Load token data - tokens = load_jsonl(args.input_file, args.debug) - + tokens = load_jsonl(args.input_file) if not tokens: - logger.error("No token data loaded") - return 1 - - # Create HTML visualization - if not create_html_visualization(tokens, args.output, args.mode): - logger.error("Failed to create HTML visualization") + logger.error(f"No token data found in {args.input_file}") return 1 - except Exception as e: - logger.error(f"Failed to process file: {e}") - if args.debug: - import traceback - 
logger.debug(traceback.format_exc()) + logger.error(f"Error loading token data: {e}") return 1 - + + # Create visualization + try: + create_html_visualization(tokens, args.output, args.mode) + logger.success(f"HTML visualization saved to {args.output}") + except Exception as e: + logger.error(f"Error creating visualization: {e}") + return 1 + return 0 if __name__ == "__main__": - import sys sys.exit(main()) \ No newline at end of file From 1be1fe227f3b7e93783a023b02aff0062d01b0ec Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 22:12:58 +0300 Subject: [PATCH 18/22] It's alive!! --- examples/run/run.cpp | 36 +++++++++ pyproject.toml | 2 +- src/llama-sampling.cpp | 1 + tools-superlinear/run_llama/run.py | 22 ++++-- .../visualize_tokens/token_html_viz.py | 76 ++++++++++--------- 5 files changed, 97 insertions(+), 40 deletions(-) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 38407d5190923..8629c0ee4eadb 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include "chat.h" #include "common.h" @@ -945,6 +946,17 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str return 1; } + // Check if we should save token map + static std::unordered_set saved_tokens; + FILE* token_map_file = nullptr; + const char* token_map_file_path = std::getenv("LLAMA_TOKEN_MAP_FILE"); + if (token_map_file_path != nullptr) { + token_map_file = fopen(token_map_file_path, "a"); + if (token_map_file == nullptr) { + printe("failed to open token map file: %s\n", token_map_file_path); + } + } + // prepare a batch for the prompt llama_batch batch = llama_batch_get_one(tokens.data(), tokens.size()); llama_token new_token_id; @@ -952,6 +964,9 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str check_context_size(llama_data.context, batch); if (llama_decode(llama_data.context.get(), batch)) { printe("failed to decode\n"); + if (token_map_file 
!= nullptr) { + fclose(token_map_file); + } return 1; } @@ -963,15 +978,36 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str std::string piece; if (convert_token_to_string(vocab, new_token_id, piece)) { + if (token_map_file != nullptr) { + fclose(token_map_file); + } return 1; } + // Save token to map file if needed + if (token_map_file != nullptr && saved_tokens.find(new_token_id) == saved_tokens.end()) { + // Escape special characters in the token text + std::string escaped_text = ""; + for (char c : piece) { + if (c == '\\' || c == '"') { + escaped_text += '\\'; + } + escaped_text += c; + } + fprintf(token_map_file, "{\"token_id\": %d, \"text\": \"%s\"}\n", new_token_id, escaped_text.c_str()); + saved_tokens.insert(new_token_id); + } + print_word_and_concatenate_to_response(piece, response); // prepare the next batch with the sampled token batch = llama_batch_get_one(&new_token_id, 1); } + if (token_map_file != nullptr) { + fclose(token_map_file); + } + printf(LOG_COL_DEFAULT); return 0; } diff --git a/pyproject.toml b/pyproject.toml index 6ae1399d1730c..2fe8151602e0f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llama-cpp-scripts" -version = "0.0.6" +version = "0.0.7" description = "Scripts that ship with llama.cpp" authors = ["GGML "] readme = "README.md" diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp index 2e104f41dba02..31260bcefd2a7 100644 --- a/src/llama-sampling.cpp +++ b/src/llama-sampling.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include // Helper function to escape whitespace and special characters for JSON diff --git a/tools-superlinear/run_llama/run.py b/tools-superlinear/run_llama/run.py index 6e684cef9557c..1cffd3d3627ce 100755 --- a/tools-superlinear/run_llama/run.py +++ b/tools-superlinear/run_llama/run.py @@ -196,7 +196,10 @@ def run_model(config, config_path): # Set token data file path if visualization is enabled if 
config.get('visualize_tokens') or config.get('visualize_probabilities'): env["LLAMA_TOKEN_DATA_FILE"] = token_data_file + token_map_file = os.path.join(run_dir, "token_map.jsonl") + env["LLAMA_TOKEN_MAP_FILE"] = token_map_file logger.info(f"Token data will be saved to: {token_data_file}") + logger.info(f"Token map will be saved to: {token_map_file}") # Build the command cmd = [llama_run_path] @@ -306,14 +309,24 @@ def visualize_probabilities(token_data_file: str, output_file: str) -> bool: return False def visualize_tokens(token_data_file: str, output_file: str) -> bool: - """Visualize tokens using token_probability_visualizer.py""" + """Visualize tokens using token_html_viz.py""" try: - script_path = SCRIPT_DIR.parent / "visualize_tokens" / "token_probability_visualizer.py" + # Get the token map file path + run_dir = os.path.dirname(token_data_file) + token_map_file = os.path.join(run_dir, "token_map.jsonl") + + script_path = SCRIPT_DIR.parent / "visualize_tokens" / "token_html_viz.py" if not script_path.exists(): logger.warning(f"Token visualization script not found at {script_path}") return False - cmd = ["python", str(script_path), token_data_file, "--html", output_file + ".html", "--plot", output_file] + cmd = ["python", str(script_path), token_data_file, "--output", output_file] + + # Add token map file if it exists + if os.path.exists(token_map_file): + cmd.extend(["--token_map", token_map_file]) + logger.info(f"Using token map file: {token_map_file}") + logger.info(f"Running token visualization: {' '.join(cmd)}") result = subprocess.run(cmd, capture_output=True, text=True) @@ -328,8 +341,7 @@ def visualize_tokens(token_data_file: str, output_file: str) -> bool: for line in result.stdout.splitlines(): logger.info(f"Visualization: {line}") - logger.success(f"Token visualization saved to {output_file}") - logger.success(f"Token HTML visualization saved to {output_file}.html") + logger.success(f"Token visualization saved to {output_file}_absolute.html and 
{output_file}_relative.html") return True except Exception as e: logger.error(f"Failed to generate token visualization: {e}") diff --git a/tools-superlinear/visualize_tokens/token_html_viz.py b/tools-superlinear/visualize_tokens/token_html_viz.py index 968b32d50c80c..adc4570e68d31 100644 --- a/tools-superlinear/visualize_tokens/token_html_viz.py +++ b/tools-superlinear/visualize_tokens/token_html_viz.py @@ -13,6 +13,8 @@ import argparse import os import sys +import json +import html import matplotlib.colors as mcolors from loguru import logger @@ -36,11 +38,26 @@ def get_color_for_score(score, max_score=10.0): # Return hex color return f"#00{green_value:02x}00" -def create_html_visualization(tokens, output_file, mode='absolute'): +def create_html_visualization(tokens, output_file, mode='absolute', token_map_file=None): """Create an HTML visualization of token probabilities.""" # Process tokens token_data = [] + # Load token map if available + token_text_map = {} + if token_map_file and os.path.exists(token_map_file): + try: + with open(token_map_file, 'r') as f: + for line in f: + try: + token_info = json.loads(line.strip()) + token_text_map[token_info['token_id']] = token_info['text'] + except json.JSONDecodeError: + continue + logger.info(f"Loaded {len(token_text_map)} tokens from token map file") + except Exception as e: + logger.error(f"Error loading token map: {e}") + for token in tokens: token_id = token.get('selected_token_id') prob = token.get('selected_probability') @@ -63,11 +80,15 @@ def create_html_visualization(tokens, output_file, mode='absolute'): # Get color for score color = get_color_for_score(score) + # Get token text if available + token_text = token_text_map.get(token_id, f"T{token_id}") + token_data.append({ 'token_id': token_id, 'probability': prob, 'score': score, - 'color': color + 'color': color, + 'text': token_text }) # Create HTML @@ -133,32 +154,12 @@ def create_html_visualization(tokens, output_file, mode='absolute'): margin-right: 
5px; vertical-align: middle; } - .mode-selector { - margin-top: 20px; - } - .mode-selector label { - margin-right: 15px; - } -

Token Probability Visualization

-

This visualization shows tokens colored by their probability scores.

- -
- Mode: -
- - - -
-
+

Token Probability Visualization - """ + mode.capitalize() + """ Mode

+

This visualization shows tokens colored by their probability scores using """ + mode + """ mode.

+

Mode: """ + ("Absolute (1/p)" if mode == 'absolute' else "Relative (1/(p/max_p))") + """

""" @@ -170,9 +171,9 @@ def create_html_visualization(tokens, output_file, mode='absolute'):

Green intensity represents token scores (1/probability):

- → High score (low probability) - → Low score (high probability) + + → High score (low probability)
@@ -180,8 +181,6 @@ def create_html_visualization(tokens, output_file, mode='absolute'): Score gradient (low to high)
- -
""" # Add tokens @@ -190,6 +189,7 @@ def create_html_visualization(tokens, output_file, mode='absolute'): prob = token['probability'] score = token['score'] color = token['color'] + token_text = token.get('text', f"T{token_id}") # Determine text color based on background color brightness # Use white text for dark backgrounds, black text for light backgrounds @@ -202,6 +202,7 @@ def create_html_visualization(tokens, output_file, mode='absolute'): T{token_id} Token ID: {token_id}
+ Text: {html.escape(token_text)}
Probability: {prob:.6f}
Score: {score:.2f}
@@ -225,8 +226,8 @@ def main(): """Main function.""" parser = argparse.ArgumentParser(description='Create HTML visualization of token probabilities') parser.add_argument('input_file', help='Input JSONL file with token data') - parser.add_argument('--output', '-o', help='Output HTML file', default='token_viz.html') - parser.add_argument('--mode', '-m', help='Visualization mode (absolute or relative)', default='absolute', choices=['absolute', 'relative']) + parser.add_argument('--output', '-o', help='Output HTML file base name', default='token_viz') + parser.add_argument('--token_map', '-t', help='Input JSONL file with token map data') args = parser.parse_args() # Load token data @@ -239,10 +240,17 @@ def main(): logger.error(f"Error loading token data: {e}") return 1 - # Create visualization + # Create visualizations for both modes try: - create_html_visualization(tokens, args.output, args.mode) - logger.success(f"HTML visualization saved to {args.output}") + # Absolute mode + abs_output = f"{args.output}_absolute.html" + create_html_visualization(tokens, abs_output, 'absolute', args.token_map) + logger.success(f"Absolute mode visualization saved to {abs_output}") + + # Relative mode + rel_output = f"{args.output}_relative.html" + create_html_visualization(tokens, rel_output, 'relative', args.token_map) + logger.success(f"Relative mode visualization saved to {rel_output}") except Exception as e: logger.error(f"Error creating visualization: {e}") return 1 From 5c0a6594ac28af254e49bb195dae7853e9716576 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Tue, 11 Mar 2025 22:21:47 +0300 Subject: [PATCH 19/22] Polish --- examples/run/run.cpp | 64 ++++++++++--------- pyproject.toml | 2 +- .../visualize_tokens/token_html_viz.py | 60 +++++++++++++++-- 3 files changed, 91 insertions(+), 35 deletions(-) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 8629c0ee4eadb..942fc8b33807b 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -937,6 +937,34 @@ 
static void print_word_and_concatenate_to_response(const std::string & piece, st response += piece; } +// Save all tokens from vocabulary to token map file +static int save_vocab_to_token_map(const llama_vocab * vocab, const char * token_map_file_path) { + FILE* token_map_file = fopen(token_map_file_path, "w"); + if (token_map_file == nullptr) { + printe("failed to open token map file: %s\n", token_map_file_path); + return 1; + } + + const int n_vocab = llama_vocab_size(vocab); + for (int i = 0; i < n_vocab; i++) { + std::string piece; + if (convert_token_to_string(vocab, i, piece) == 0) { + // Escape special characters in the token text + std::string escaped_text = ""; + for (char c : piece) { + if (c == '\\' || c == '"') { + escaped_text += '\\'; + } + escaped_text += c; + } + fprintf(token_map_file, "{\"token_id\": %d, \"text\": \"%s\"}\n", i, escaped_text.c_str()); + } + } + + fclose(token_map_file); + return 0; +} + // helper function to evaluate a prompt and generate a response static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) { const llama_vocab * vocab = llama_model_get_vocab(llama_data.model.get()); @@ -947,13 +975,13 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str } // Check if we should save token map - static std::unordered_set saved_tokens; - FILE* token_map_file = nullptr; + static bool token_map_saved = false; const char* token_map_file_path = std::getenv("LLAMA_TOKEN_MAP_FILE"); - if (token_map_file_path != nullptr) { - token_map_file = fopen(token_map_file_path, "a"); - if (token_map_file == nullptr) { - printe("failed to open token map file: %s\n", token_map_file_path); + + // Save all tokens from vocabulary to token map file if not already done + if (token_map_file_path != nullptr && !token_map_saved) { + if (save_vocab_to_token_map(vocab, token_map_file_path) == 0) { + token_map_saved = true; } } @@ -964,9 +992,6 @@ static int generate(LlamaData & llama_data, const 
std::string & prompt, std::str check_context_size(llama_data.context, batch); if (llama_decode(llama_data.context.get(), batch)) { printe("failed to decode\n"); - if (token_map_file != nullptr) { - fclose(token_map_file); - } return 1; } @@ -978,36 +1003,15 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str std::string piece; if (convert_token_to_string(vocab, new_token_id, piece)) { - if (token_map_file != nullptr) { - fclose(token_map_file); - } return 1; } - // Save token to map file if needed - if (token_map_file != nullptr && saved_tokens.find(new_token_id) == saved_tokens.end()) { - // Escape special characters in the token text - std::string escaped_text = ""; - for (char c : piece) { - if (c == '\\' || c == '"') { - escaped_text += '\\'; - } - escaped_text += c; - } - fprintf(token_map_file, "{\"token_id\": %d, \"text\": \"%s\"}\n", new_token_id, escaped_text.c_str()); - saved_tokens.insert(new_token_id); - } - print_word_and_concatenate_to_response(piece, response); // prepare the next batch with the sampled token batch = llama_batch_get_one(&new_token_id, 1); } - if (token_map_file != nullptr) { - fclose(token_map_file); - } - printf(LOG_COL_DEFAULT); return 0; } diff --git a/pyproject.toml b/pyproject.toml index 2fe8151602e0f..838d2e03dfac6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llama-cpp-scripts" -version = "0.0.7" +version = "0.0.8" description = "Scripts that ship with llama.cpp" authors = ["GGML "] readme = "README.md" diff --git a/tools-superlinear/visualize_tokens/token_html_viz.py b/tools-superlinear/visualize_tokens/token_html_viz.py index adc4570e68d31..cdb5a7516d30e 100644 --- a/tools-superlinear/visualize_tokens/token_html_viz.py +++ b/tools-superlinear/visualize_tokens/token_html_viz.py @@ -57,6 +57,13 @@ def create_html_visualization(tokens, output_file, mode='absolute', token_map_fi logger.info(f"Loaded {len(token_text_map)} tokens from token map file") except 
Exception as e: logger.error(f"Error loading token map: {e}") + raise RuntimeError(f"Failed to load token map file: {e}") + else: + logger.warning(f"Token map file not found or not specified: {token_map_file}") + logger.warning("Will use token IDs as fallback") + + # Track missing tokens + missing_tokens = set() for token in tokens: token_id = token.get('selected_token_id') @@ -80,8 +87,12 @@ def create_html_visualization(tokens, output_file, mode='absolute', token_map_fi # Get color for score color = get_color_for_score(score) - # Get token text if available - token_text = token_text_map.get(token_id, f"T{token_id}") + # Get token text - use fallback if not in map + if token_id in token_text_map: + token_text = token_text_map[token_id] + else: + token_text = f"" + missing_tokens.add(token_id) token_data.append({ 'token_id': token_id, @@ -114,6 +125,23 @@ def create_html_visualization(tokens, output_file, mode='absolute', token_map_fi border-radius: 3px; font-family: monospace; font-weight: bold; + white-space: pre; + min-width: 20px; + text-align: center; + } + + /* Style for whitespace tokens to make them visible */ + .whitespace-token { + border: 1px dashed #999; + } + + /* Style for newline tokens */ + .newline-token { + display: block; + margin: 5px 0; + width: 100%; + height: 1px; + background-color: #ccc; } .tooltip { position: relative; @@ -197,12 +225,32 @@ def create_html_visualization(tokens, output_file, mode='absolute', token_map_fi brightness = (r * 299 + g * 587 + b * 114) / 1000 text_color = "#FFFFFF" if brightness < 0.5 else "#000000" + # Process token text for display + display_text = token_text + is_whitespace = False + is_newline = False + + # Handle special whitespace characters + if token_text == ' ' or token_text == '\t': + display_text = '␣' if token_text == ' ' else '→' + is_whitespace = True + elif token_text == '\n': + display_text = '⏎' + is_newline = True + + # Add CSS classes based on token type + token_classes = "token" + if 
is_whitespace: + token_classes += " whitespace-token" + if is_newline: + token_classes += " newline-token" + html_content += f"""
- T{token_id} + {html.escape(display_text)} Token ID: {token_id}
- Text: {html.escape(token_text)}
+ Text: {html.escape(repr(token_text)[1:-1])}
Probability: {prob:.6f}
Score: {score:.2f}
@@ -215,6 +263,10 @@ def create_html_visualization(tokens, output_file, mode='absolute', token_map_fi """ + # Report missing tokens + if missing_tokens: + logger.warning(f"Missing {len(missing_tokens)} tokens in the token map: {sorted(list(missing_tokens))}") + # Write HTML to file with open(output_file, 'w') as f: f.write(html_content) From ed9232298335a06141ecc573bc2ca90f4910bf15 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Wed, 19 Mar 2025 19:32:25 +0300 Subject: [PATCH 20/22] Fix html formatting --- .../rng_provider/token_probabilities.csv | 95 ++ .../rng_provider/token_probabilities.png | Bin 0 -> 142822 bytes .../visualize_tokens/token_html_viz.py | 7 +- .../visualize_tokens/tokens.png_absolute.html | 981 ++++++++++++++++++ .../visualize_tokens/tokens.png_relative.html | 981 ++++++++++++++++++ 5 files changed, 2061 insertions(+), 3 deletions(-) create mode 100644 tools-superlinear/rng_provider/token_probabilities.csv create mode 100644 tools-superlinear/rng_provider/token_probabilities.png create mode 100644 tools-superlinear/visualize_tokens/tokens.png_absolute.html create mode 100644 tools-superlinear/visualize_tokens/tokens.png_relative.html diff --git a/tools-superlinear/rng_provider/token_probabilities.csv b/tools-superlinear/rng_provider/token_probabilities.csv new file mode 100644 index 0000000000000..d05ddccab6274 --- /dev/null +++ b/tools-superlinear/rng_provider/token_probabilities.csv @@ -0,0 +1,95 @@ +position,token_id,probability,score +0,235315,0.95631,1.0 +1,235308,0.798478,1.0 +2,235276,0.221302,3.1134241895690056 +3,235269,0.972755,1.0 +4,573,0.478959,1.0 +5,5168,0.114121,1.747811533372473 +6,664,0.600047,1.0 +7,112819,0.963701,1.0 +8,17273,1.0,1.0 +9,235281,1.0,1.0 +10,729,0.888408,1.0 +11,124133,0.41749,1.00986131404345 +12,731,0.669952,1.0 +13,3350,0.551506,1.0 +14,55900,1.0,1.0 +15,235269,0.598911,1.0 +16,476,0.539878,1.0 +17,45865,0.026829,10.0 +18,696,0.816916,1.0 +19,26732,0.224004,2.2242995660791767 +20,235265,0.719722,1.0 
+21,1165,0.087404,2.835751224200265 +22,729,0.527904,1.0 +23,12080,0.052347,5.099356219076547 +24,6908,0.111883,5.3571498797851325 +25,685,1.0,1.0 +26,573,0.620567,1.0 +27,2725,0.023423,10.0 +28,576,0.938086,1.0 +29,3320,0.429848,1.0 +30,674,0.620329,1.0 +31,16722,0.323329,1.0 +32,675,1.0,1.0 +33,25309,0.064616,7.9315958895629555 +34,8409,0.727023,1.0 +35,575,0.644556,1.0 +36,15051,0.831335,1.0 +37,235265,0.805563,1.0 +38,714,0.269837,1.0 +39,6789,0.236485,1.186663001881726 +40,576,0.521041,1.0 +41,16481,0.350678,1.6288675080843398 +42,729,0.101811,4.92639302236497 +43,577,1.0,1.0 +44,3104,0.843335,1.0 +45,15051,0.796326,1.0 +46,674,1.0,1.0 +47,1538,0.952508,1.0 +48,3114,0.314035,1.3752225070453927 +49,5766,0.031732,10.0 +50,13333,1.0,1.0 +51,685,0.110638,3.539462029320848 +52,17611,0.138693,4.930883317831469 +53,749,0.799386,1.0 +54,235269,0.420136,1.380181655463945 +55,1582,1.0,1.0 +56,685,1.0,1.0 +57,32346,0.139289,3.0736956974348297 +58,235269,0.960908,1.0 +59,3210,0.442822,1.0 +60,235290,0.577728,1.0 +61,60495,1.0,1.0 +62,235269,1.0,1.0 +63,6044,0.149887,5.671692675148612 +64,235269,0.966118,1.0 +65,578,1.0,1.0 +66,85985,0.01527,10.0 +67,577,0.923404,1.0 +68,888,0.757807,1.0 +69,15212,0.90816,1.0 +70,235265,1.0,1.0 +71,108,0.577071,1.0 +72,651,0.161045,3.6144059113912257 +73,2725,0.117356,5.327218037424588 +74,576,1.0,1.0 +75,16481,0.908622,1.0 +76,919,0.951168,1.0 +77,36186,0.168852,1.4188579347594343 +78,12879,0.800832,1.0 +79,2754,0.586208,1.0 +80,1277,0.566269,1.0 +81,72173,0.927932,1.0 +82,235265,0.691068,1.0 +83,878,0.404777,1.0 +84,573,0.948484,1.0 +85,4061,0.476463,1.0 +86,2705,0.926444,1.0 +87,235269,0.79809,1.0 +88,16481,0.880237,1.0 +89,3679,0.307637,1.3223701960427388 +90,14779,0.790084,1.0 +91,611,0.928275,1.0 +92,11400,0.358562,1.0178379192440916 +93,28514,0.463127,1.0 diff --git a/tools-superlinear/rng_provider/token_probabilities.png b/tools-superlinear/rng_provider/token_probabilities.png new file mode 100644 index 
0000000000000000000000000000000000000000..c3f1efb34913a93e7b367ffc99f0d5615e17b0a8 GIT binary patch literal 142822 zcmdq|Ra9JG@GXqu8Z0EZCjo*J++7kpK|^qNcW7vwpa~ExxH}|BfW}=L4M7`sZ=f4# z_?qAO-*e8m_vyZV4~!n{y~o;Xty)zzYtGs+n(9gfcvN^OC@2K)mE}L8pkUOYprGmF zU?H!#@6b~re?&bM^gTbhSbO@Ixm%&AnR&W8x_CO;nKO7>xqH~TIP-Dw@^JBTFxYx} zx_XFmb36U-C%9bPZMZ#L9=4IA;JPXsc%YyVnLYiXmPwV`p`fCmyqA~J_WgO(jthFN zXCU~ndJ@of3b1L|NpxyLmb}q{QrK6f%sD3|CN`_O)V|M zjp04y(>&|kj~CoJ?VacGE=AsB^mq02g(}?b{FebfD_&`$LL*&psmI;NGAGdqkzE@9 zzrS;=f}HOU1$FiG>;QnAhuhjm(YZ})E&5tPI;_B(Q4`&(0dN_-3tLhe)&@b?IykT= z|H8#UlOc|D15CTXXGCrq0E_C^)1cD;eo@g6 z5X6bkt7F*T=8jaZ);*?*`NJSyY+SU27{8S|#*_O$AIm3!=-7M6r%jl*pZ7%(>xbr} zIxh=8P6JFW36@i?E-a=mUrAlOy38jEVx7Z2U_c{%E?#vNqgi`C7<@m-&&Ma%d8yJV zj|jQC@|sf&6Fj-|+vsd>R|~!^4PHDfz`+Pbl`-CBNe>IWETk`165k-PcMi3IM35UR z&wueC2s(B!DHJczt(-bEdGz<6iZz~9L_>5w!XEeZg0J-ERNBnz%Z&d-tbb*%$s-4? zVN#|E)5Ft(Z_?zq#?a~E(ya#-m3v;b0ohlQqobpjQNi%2%Mrbf?|2ciT#GVm=8leW z#t#>1Gg+&b%!|v*M_z6iREk$p7@^{uwGRiiiw^@u2eBsi=7ZO<+#Tsy1U9)rg6-R! 
zE?S-RQh+aW1f<1F&Ta7zYeWwrShUzgbVc#;1Xnz`J+RJ4MALf=oGKyEI>Fa1K9<;Y zRc~tFEOfL(EAD`d%&u#EV1yvrn2>fYa*g{swf;JZ9|Y4W=;=!xYTJLTo+tQ}VUF>z z{X-g+f6EJSu|eLMzIwL`o1V$J=Jy}q%gKMi<*Oci;*nc;2YGzB98!|}ei8Y27yMXT zu?fw6yl*<`WFMj8hZ>=e9tOwSy>b1l=iy!^#b0&qMJh^IRX!JZ6Q zh0@(3Adf*B8`v(2`7gRd+aRdVyGnk_kkXMBe~f934}RDU*2UQ5e%RoS1o${P#g~+n zl%;jaD=~{r0Us3fyVE)@-`t+lKb{+tD%3kt%MhY>LpKs4_!*lw6 zlM?L;=U|naEuUd++xQ>8;!;HOF*7eVr!$c>*od=QB z&Uv}_!@1hWF&2_1Zi$7D$bfnPr(2}i?e}_0O1;8TH|s>fWg}Sc1~GEj|3D6egKisI zA2%ptZe{!aB#4CiR7^O)`)L05@}h6%?F}7h1n42Gz-(cSm>&5UaiQ{ z`J=Q+K@{+>cQ5y0KUZ{i+LzhbwHxz5Ed6vXBhe<9LMzvS`zA=<3~T6GU~FTT zTO6jgLaWJl@+jJD^#Wn+CO03-uz%oss7JevK+z?($gKK(L(wKH zYtJ|aLE*p$8L=htHr|d$ptev3`2K@Ir{a} z;73^SsW3SQ@mYrYuK~{HRXSGclr)M&hKQ4vUAj3Xefl;3LAZCAn*b9_y9Rrf0*#;nDH@DeSYRmAQS|0!eBrsv69d|M1R9Rhd#hMP1HiA zDfSti(s4Ed&SJXM9!wsuO!gBW_7Z2+;6i{+A@ezGQX)`4+HeoNIrvV?W>wQK&5z8O z5SjBJMUp^PZst?{E%5`b#E;2-0B9nvK#j;xYElOd_sP!mhWr{7W&neOOv-vd`iu3} zeqRBg6Dtw(a{8qyZ*+4l!!(afkNkV=+XZV?B%+g{+hy@>=?cdsTfJ9>qK!@P618mq zFr4+EhiMS&=`(D;LEmBjV`5<(y33h^`3JK3Prt!}@$P#XsOUKTk5pJu7nBBU#{oLO zxAuC`6M81z{S$p_8S;pLh=H-hcTGpnmUkufR+i4i70|L=0MIOZ-b3sp6<@zC*w1E6 z9R3ErTh5a}+#-~wFvq?EnocMmrDS;-eNJkfK-X_PL=BjH>eSaPEKSN6D=fYDH<8Cn zqwH`5ui&|=;)=}A{LXw~?6(!@9v{NBHvD2GQQrbROx4YkCkKhrT}%nhBUjAT@j?l= zv2Q|n^P>C|e<(xCrol)-e%RoiCYHO3@q_|D1HboVJE!4vV(HY|p(Q`2y!^kIv z(sMWOeJM-PgD`$Y-@Sg{LYdxc7TEO}H3%r#Ej&PjcZdy-USCwq=CLNp8Sc8mk)^a` zJODc+)&?UEouKc>+@`S7AKNdBxpfX1;?x=>a4O26K|!i(ba9e{_a>9rwm2AaUm^G7 zkUodkyQfIt6|5cngf$)SR>4Sb6H-)Er1DvIi~3Cf>3!&W%1Hu90JQl8O0&Uwy1G=z z^kpl4>IZfZ`#V*rMD5|>;j~oSe*M`C1i5R6INY6VrGF8(GUwBQ+$z5& z>IYsVgnRhV2!7g;hM-dCe;Z>S}=rDI}0{`&2tmP5#AIW^-5r@ar}k zc)OAQG?>`I+5N7RNva+)g++P++{Xpx%Md;%%M0*@T*Sh_66C>KK#BLfb-rCa=yWFc z!E?|i5WIbNg){;0F5egl-Bp=V`7Ze4!hI>w%-g%EzOHW4yDAqRlWQx!CqrbP6}1M} zGxjTP9pt0CNQ&aGL4s*T?LZ_d)ZTrH&7^P%8fxfEsV~SP=0LbiW%lNIjv;&%flL`q zEfkW2Fs}B?>4AZP$lnNL9?zYFO<`MicJ3)~+dHtNP(u}h9xNtcu5>>)pGt+LrcGb? 
z6Ef;u@ZOuuAArOEP*LxEnW<`%>^#pT&76-Daq)Y+gFu=PHh>+Q*+A*w67(m8I|p<5 zvmU8PY<#ykUvawbKmBb0z9~7oTVo78Xa;R_9^5L4MK+?FPNV{E(t-`*amUChl@e&Q zV9yYl&795TesBw>i(Qg?h;?ZQY$R0}^w))m1N`g$@j2F7qSpmiWl|MS&n`yrO}u%Mx1 zFH`DOw2kALr;Wc7Oa?a7`@V`6vh#>)-r{{R}gB0d<1en_Vno+!$9@30W4Gl#DQS;*HAMD1^DR83XhA3vk-8lIicQ`5L zHBt60Q0Jd%TUZyoJQ0)doU5&!qW3QuZWzcVzw{nhM!%D_3wNMBXb}#nptFz#JAb{Q z+2Mn8#Jlt5jr4+&x~FJ0yY`JM|0VDa4EE{wLNvAa>6|r_Wf=!Ie+&wp=(NsLyvV%u zD;$FrCgA?Bqa}ln6DEg40%GuV9io&J`i)^WjX_zN0Ssr60PJLya8M@WB4h@IRH-`| zP?BlV!+zDj38YpH#to6HqBtAuL8E>hf;&r1#M@UU_hGE7-fL~0;JGx6y{nJN4BvMz z1A~HDNOW?i#6X0Z1|?yknnPiaFPu9a(bW}3(j`Zm@5i9WP&^<{lkdnNmL1$xg8@d4^IF(WQulL1dNINZ|Q9aThxS&|i7!+V;w86p6DkFz-R z)788UP`&n9K+a^`fpXgSKKv2rCl2*rJ2WuC$dnGcQox7(wq$&PMCPJW-Fzg*0`MqI z?w%mxYk~_D#@!705H0aO~6(Me{e;_S($SpYT!As1BlPCb% zn?i(va)Ng5za_@_vF;SM3dm%JjH3(*HK58G%9xs}hf4~4`})_sO9 z^6kzwnQCZ&Qu@A-b)EWZ4F_d6EQ=bHhrNFz(VzjD@VcaKW+>;8yzzLjOW;jf-Vtuf zKIOBNoDU{OQ(=h4?F-uk#y7|M^5qZy{Y{0=yJt*f$@kk(e;}2|RCd#fis}-Jg?(+D zE5$e+!{Rwk?laYpb3sDh6D@6(6^xOqJZy>RC(ghZFhOL{!1zMQS1#;5b0Fhc*PuCy zy@rQ1h~Z(L@D7ME4WT(W3xK2f9wrEBXwG-DU=GSGtOWr3Wxs@!`CnC< zR5OS8zQUr%P!;AO`B1q0tke5)DaW&$7=!NjnM-fq1pbbp{w3e+mz6&|NX9SEWS8X` z{JP5r>+zpOGHwKi>1sgBswPlIWCIg4iL%xnz56=luW!v}@ymNR>5lyo(^}CB1SF8r z3LmbpQA|2C_PH#WxWThe1pDR$5ph8fCsww`;cF{owj-9jI9!qsz`~d?CklVsoa*{yVREqurxHgPpqQ zX2u`>n8djHmc7JN7zYHh{h)7~;}JPAgf4!GG$I9Y3aG5^XC+t2)Y_0ayO;cqyUX>t zyDg%cN%aNlCrSF3TB|?uz#WKVm&#^Mv%7qwGl~0NbYh|airBE(i$fOJ=k{#6B*wib?{k}Yj`6N zSJy1Ww7y(OS&AZOlYU1e9o(fV8r_9f-vtZiNZLf<9yt4`Oa#=h=;%8R+s!KP=oS2p zn4G{NwR)2<-$HyFL!N?zI=hR)PzPFj`8~$MoX#F<&WY5^1ea6L{(LP8o#mt~kiZ`52Hf3`RhMLnn`m-jOfleP;D>9g#{Xlo&@LH>;c6J?- zwnii3^nJNjLewHu`n%r{kHcHWaWCKSnR4+RsxK6%-Q+u3rqXjxhUEh00lw?SO)Ai> zC0%{K!TF+@s8ekUlCvfg)+}b+)iIP%6igm$@vbo7O&_Yp1?b~(W|yHoXk3=H1ox7i zIX*;>%1C$Hw-_RTpeHVM!B4@pc)k!0TI4J7Z3ow`}GW(k^`xE&HsWNX47mC&z%v;R)_e@!ZZJ;+YC?Gc&xRiwScBDcT-Bag z`X6wz_a3d!K(l>e^eb?)u*wYVE6z^5_+nec1uZDjFQYtMMbre%E?O|;t7Ue{nWv}3CJ`OpBts%6)B&Lye5`janNDV6koiH30TEbB9*h|Chx8;Gv#%zZ%=<8G 
zo`3$zgXgXaWiL7#`BA~Bf!N9HaEA5n_+iFqT5+Cl=Hx^lmi0+`6Pg_~OVUZFj8RuH zePhvo;qBz4^RLT9;1aH`Wp;(3CXJmuIo+v2&CuE-b6YbfowBefC=YLBQD5UNb4NONwPNDDV7BO7X ziF=}kW1XdI(8~e&388PfM!pIu`__elrruMq7S$0PC%iLbLI)@2rTpCs7#cn&f(SuI z-XNVKL}=lurYlpR9bg!V7K*b)2l4JS!+22py`qIO)AJbaQ4tm;0fR6S+0ach^H`ya zvw$uP91F~Ov5)S2t{lYKdoD;VhGO7mxE;1yN!IJTV?Gh$svyqo=7MDq;qIYKsEi3x z4w-^_fdDQe= zi=d5unL@secBa}_K)c-qMSqPsQJj?-fNK9*{50=J?-v>DS&K5FJ3KS>&>QlQTi)Sl z8@z*`*MYdX7AE4IXK|52R0j#Wyo=8ny{(N*zlF5Wh`*8XGz(3%G*b8%5cIc9Q<1p zqcLAr;>(wykK;_VC!@yKzXl!+11&f`627c#URWy}2+V0o7PU6E$bX}{{;tA>i^$Q-poHA- z2!VSp<%128L&8$Q*sb7+X0Vh=xko`HngQ@ zXgtl42%zdjP6pDn;___IE<}s4VRvjS?t@L7pn|lF{oNdT(-2Huk?A#T566)L30u`4#fcJ@RcoO>V{u=tfWpGv`gzut>k#H)&`5abo1fe7345G z?yBtR(LkSw3yDz}vcms3j(zrQsnH=Vvv(!nwA~4^4p1^gN(&kv!GnAg!g5z)iSz^i z5rOs5oB7$A8nx%$n->=Jcj?fel-|Gzcd>kW#Ey`$M~{8uqVJPr_OH#S)?Tjm{3etf zB<+N(nWat^1d;o_JGdARW0#O@OHR5$Mr+t@etdeqFxo7 zfu{kGJF4BBfEp^3Ct{q+1ol08>6Yl`1x;Z4)*J49q&kN*7g1X{*RdzVL#~4-P1RoM@^uJgX}-ZBI=Y!{_xU&5)0^K5ssxl@t+MMEE_W^gm#M z9CI7@g)E$5o@|SVfrZg3ir?rzT>ul5j=b7a0}?0gbtIa`DJAtZ_we{+MIR2yB=^Il zuY1y}668gDSPqe%7BQl^Tua%6c@9ka7(Uc--B|nbq5{l-$ML@vafZE5(P->+YonOC zY`*E;=!DD{KYfuJ)-&e5zW?6%KVo?c+B+giq)Tg-91mHtY^6%`z@Jj&)13adoSs+9 zh{QzyXK3WT(2hvr|KCf#rX=NKBvC~4B^eo%sggSXYm_c5S95&>gM6ee_$i-9{1nsb z`BW>W*T9whNMSHB&)C#C%O-(%#{LI{HW+!Kp`J$sfX{GgA{QrNfQ8B;>MJ71U*k#fQpS_{MIKO3>x78J(=p@wq z67Q9fh!5HB{+rwC1cYu5pwfcuc&1wLbguD8uM=<><(zFpQNxRjo_V&jF+ZG{7bO+6 z7vJ;y;7$#Nbj(bAGK!>}PF#>()JuAGNejG6tqqJoX20gCc%fBgB+ZupM2zK?BFSLj z-BaU2Nm}RqwkLF4Xy&Q00Ev=0B@PY12cI*rX@%%M7bSiPjy}=XFFv-9|4^u~D4^4` zl0Y`kHqiv#E!|&aCz^hV?PKMC!Z9(Ua%o6=S1pdv9pqRnHFR37bXcebB>s8iYF(e` z@7VvJdCLU9a^#w`<=?xi4VFM4mh%B-2NR4&#z*XNA8x_Q27qeJ+heF-Jw_rwoOW3IY2)n! 
zlFF9*ugl;(F<5*BNp4+Er*)*{AfxLvdB<7A*k~rUY5r9ScvTkC?h<1?sI-5Soy;#w zaC>VMdwJESJ~f0>?gT&D=~X#s`^Qr0VdIu#TmU)v=d!)uENVR^xnd7BHX0S86WunD z;vxcE%Sm}2iubz4k5^PhFB%*@OJ7O9)zhL)Cvt6zAn zpd=*SPs#~MwspjwniUz(E$x_FP5@(L{@?40?s!!PI4A{tU4;(_O@kkfz@LNYlWRS| z5@|>|?ID`018(lWj&GtY^$3;HI1SttzE!Qi zNBI!O`SLmxOn8!6R2+{Nba^Xh4`~1!jjf5d%XiMzLq=v?yv7QGTIY7!TYntJV+CDS zW?yA-ye$9hpjoV|_sATuOw{Bg$co@3ex0Er6LeMM)2>oJmwVk%I)#J!D6N&HtnuwkWYnj8 z7{j&BkxXxD&RIB1gHx}eJZt@ZU1MYM1MiQtv^lVUd(A^J0SzhX07pVLMl+pJz8IOV z?Wt-^T~6!itZ229j-6YOA*XMQqOF@#)``KFQuH?FGnV5FuBl^ucF)5pt5#cxD%98R^E}InPgV5JgAbsXI+34D-&Nw6kiE z1r^S_RemnDV9(;0;#1$Wh?o>TxZ7wO{2?^=Pu!x3rY-2w;x83}XrO(zxB3rT(QQGY z!yrq$WI^`E1P4DezvLa#Stab-Eo>G+?R1*v1;t`TrA80*(!cBnG}zwitV}HSQa`^e z6~rG?WjDTR3m{I26=WxizMd$Wfu#C}C2&;Fgm~P$sNnaqABf^d!+(-mHAIe8^W~sC(+hiO3UrbN^V(TLj!yW+^e!1Lzv2X3xC`C~c6|f~4y^IXX`E^~n!Py7-M6?h&~7$_-v` z4kp?#s(idJnhag`KP=omR-i`Q(K>5Rem*swUc~dk*@M$n7%u#qV{hN1;o? zPhmA`!=;8oaalt{uh04m&SF9n;m}$~?Y%wau!&U?W0!U=>MViQyDd=xDtW$Q61GIL zcXq!Uuc&Bcf)k0yd)~!utQ=lico96qEk~}QZ@LIt_k>TfI$o% zrQHMn%Ej!w1G4QSRT9FjIt#-L=)pW`l z&^^_~zqB_?po$n)_8O-4UlDSf_h1qA&3=+2Rtk>>Bc3d`{l9@N|6?A8-DZ^+p1=XZsB= z!ikFVt!7J=Zen>h(&J`~l?r6@KSY?{542U!m-n*x zG@Pg9{dBT@(9B{F|0K@-yu-n%`W%)CBOdiQ^g<HB?AEY)JbAHY&li@S8Y}cjLDiwe4`dv}ULH-I&3)7>GDZc4ML)+jDo7(gQ z--$+pMMvkIV(sW2#*}D|ASPRUJW2cyrnxsRdw!6I_M^u6^Q%0?>z!+E#y69x&@;?R>!(y+; zr*EKqBaXgSzYEB+Y>p{PKQyYd8|rM&ywj!8qT`zL5Bm^bQaVw5w>#VM{$mfc#mq6T zPDa;0uFJc1O8Hp4{fNFj;e$v4NXcJMsa>2Wb%*b|Lfqgo-dHKlNFk5)jixW{RS;}E zLG1WkE6z3u%+L}c%_3=fGRRgN>>!eA$oI8*NDO=krEP-Q2 z+pa;NBWB0-#NH@^L_PKE0p0Xbge@ZEv^6)OEK8ouslwiL?`+CG_O)&ipnI(BA%vUQ&(mpMcvc5rUUQ~Il>|j?EK<6@c7gy;vy~(c zv^-b5<{O>vo(iKT{7@j~i|JN&t2u8qCu(|X9Z638Go~$^SdHZ^UBOCETQ{R`YV%+G zc+NLs!SS2jeApP2IDD)xro}b5^XEzM^=3&VmD?){jdLtU4aWsf&HR4Vyz0T$Qc4mQ z^qPgW@6NU^k{9*vUmgE@3MXdBaPp^V+8Ns9Un?+3)iwX_f$b$;d7@NVTaBMK6eP@i z-#lrZY9FQ;`-)$eW74*M5|lezc-1@hM@-#q?CSVv@yPX%E*x6rtoL#MbhEPhp=QpG z(kx7*WdI|L=yeBbOffh+uS8Z*HbZQiLO;(KE?Y3IUQoR$e`P-dqGXF}q~~uofoFx) 
zC(Rv({_2Hb<6j&suFYAc=fhr!FYquQHg?k_BUU@I@WX<}@iC}wo%?EPYNpkMR*D|# z7gzzEcPDMMxOW2cUp50D=aK6C{)E_RksfH{rRQOF^E9&OQ4*<#ODb>&ER6c=M#=Ob zG2m39ab24ANTGc`?kgW#Iy%4v*X=hCZ??Gs#l{wtF!wb9J)^4Y_lL{EtKBP~wEH)_ z+dhnD&CP;mR9tDniz4|4e?VIMcC;&kx!T(XD`PHMrx4y%M<|ZhR?umu&FACTdLqof z=-1bB{P-8}W@_7oB~x5_w(Pl6?(2#i zUAAY9w7Eo|jl7N3wQGszgrY0h!wk~Quwa{GnB%;-d=E0T$%y5SPcR1~XzX-HC{PI! zK@8ZWhU5)OJ)BQKj+$MM8|SA@9=1)Uzr9kTc^3ZDs}nx=M3BrZ*yO7H*LlP& zDX zlmo0kTo&a1VMrDn^vk(ULU^ul;$NoY!zM}TeIrG|LZ>1ME_cf?KI&lM(`{_;>BGl; z>z9(SPCEZ4DyZ-vCUoXf$>dM`L^^dTx%h+lp@&>`#NG6wuG&>ElsgC931f4y6F|r7KQ2h4=j~nq9 zStua+Im!P@4ruz|KfZbC9{w5)CP{tgd| zB1p*;@h=gP&ZKi65|cJB3Az2-{Eo{&(haszrcH(EgAu_I7 z+VTJHCvOd)qT_$1MwIcTKxa^bZ@Z)X)yQujYmm!GvZSImEf5M*SEA}%nv49+?Yev3 z_#%aG{9+7jy|lxlHt5H-Ds=eMra@D4X=3PX*%p$WY|-HHOMdqx+kLU^pQuoZS<`N$ z%WU(hM0E<&?_>)$5Jg3I)rrj6!N!B+pFz$5riNS8bV~5a!&~CM43+i8+jhvE#FYk5 z5k=OUOp!F(!7$rApaT5gGp&4ievcfE5$qt`byG}rNoGXw^%#L-yboy1_`jLV#}4hZKizjZwi{p_5CXSlaug_UpK3(7gO!oD zufu*&Y*HtG-n<^m(33hFjFSseMZ3_=Wt2Y|u*_w2DQYw=dn$y`$p5G6RxX!nsuW<0 zCz}hUGN_s6K{NicRMMtVQ#sj=O`i_EpGX)kLBr4Z_X{m9VFhv*hMcGBX9oUidd<`4 z8Ph6za&Osy{dgP{we*Io+BNQiCHe;{3H2ykGuvMDJ!co&PMsAl?rF(@qPFgA( zTji>xj@Hy$kNZ})MAy6KOmcC?&Ntlp&FQ1T)s7;E1zL5yH};;n)V#*`TCJ@ zIry~U^WFx-=^aILsH-V`ZXeunkYaTCH!d(Nal(Bs@$|0x19ThlIg5HT=a=02>($Hc@ ze+rll|7~lPp5ypEz#If{Yx4T z>1?OpUj34;m7_c0YakAN^9e_Wb%-Rq>p+g9debQ=eqeCKp0{-sFLrfZ#k! 
zi`?+6_)_$)he%0r@1)TwqriM`Y&SLqOK+kz-D|bAX`R?q;)}sX-($ z^SD3N&xkq_Dz0{&YIb)Yn5O}PU>fAtX>Up9k86%=+QD5Nkd)yztL^_3&%jCSos34n zm&aDAd5H`dDow661a>CfYjQ92gQD?wZBqPrpDlrZQoF#)i?C=-Rti*L;PTT2Srb;e zWr_C?v0uy2lzzUQ(JohxBAZXG{fo@KqB(kTwAFRpRWI~uE>dhreS*#j`whwmmw9a} zMg@SyIs^yhzRV{ISJ2QK%GN7oz6RJJ9Z#%FV(7~AJk{DBWz?O|lch3-3QoRb6QSNjMLTQ;nY?T&Ft8@GZIEC z|B3iAf~lw6z7gwXteS=IjNYgQ5R)j4BafY~$>W15uNj7RMf}5g*NBaWPm+h0+Om)# zW7KA$8rtFgi@4<$;T1f_*r<=9BTwHM&an_NM^8JLG_q&oO~b?#HBaM}mZNG%_#rDSLGIt0r%) zBbWyhCy~{g?pLh9hs;W@5-9{sZ2y-2XWlW^)P0|d>EP$;HL1#0j838T5PoQy~$*c*KF7Hd_lla>Zn}x2<0Qa{H3v6q?`x= zT%hlL+dWBWu)Y4w>hHIOabtg+KK2h?1uXjydrO^cGSvqmJ>6n}^R{{NkW2v>(XnXn zXWHFI3t#>ArWM*JN^7!EG#|9n#xJDxmBx6UlZb;Glg#AMc=zvI2K+LLg@F*Z_yRdwu#&zu2`Ad~_35_Kk&$JISnticj+IYuO53L=WTPkLiE~+k zUg%S}*cyVwyAVQbyllwT%2f{_&z|JuUu3)1hzF0()_bqF6#mF%qN*yO-+Ssd+4J*> ze8Bcm`^-)SCzD+Hi1oA0hV8mX(S$W2QII_C-aU-`E1=`E7<21BaBfHE+_I-Ux?v#mUNQPmMt$khOVTke@1-ts9MDksU0VU|Ii8}`rcQp z@E!)wt&U<&1(b*zHLK#~si4M$T4ynPT#gsLfsOyr@$umtWZ&i(hh)pRSMPvb)1%3H z&sAzS2fW7@#c3his{Z}oNTT8->pWyh_~10J$%bsd%rF!>B_O{Xu&M5{r|pm+HI0?8 zh_aNJ>*0p`ELD~Id#lwF%XYdUlM2y4p+aRI%aAXtj*%)BcR-`$YQ$SH%)g#712mYr zt&HKCKIh4JJj=*Y)e;<8tWeJtU)AnYk#l z+hYRQ@$*Z%HXvR)ndKAV8|B&mJm)gV$26j>CAQr#F1|`sE&e9H)uVys2;Gmp@f^eEmHqdwvi_Yir1Qi;7>j5MxbISi_)vO%P&4KBL5|PofS76Yh66|F9aY7L|_d3P9yRTj6WcG#r z=OF1M0r0%G>3$#UJINYn-KFoA@{EdYFY!7-2KfORPRRaC{HOVwGigLlYyeY9kQ9%dENOljg2a3N(%=e$}>dV#0V2GDF5`bsRHJC$vhw za7{3tVfkON0Y^>Uf6g*23&es$SL=X1?C)v}#C;eLSC8}i`Gcg`CofTE-ygi}c4Bj4 zrwByK`*ItnAQ7~l$IXktHy#-Cspc^!{01_v4^hDpj@H}~) z6HrxjiO=XrCk5*C?5S)C_H9YFX25v=1At=&Wl`>0`Lr!a>cgyPGcdT#Zgd6j8qp*PL`o0*t+QF8d#P9-+_=5#KtZr zRm^Ek786hIw+J6Z&U4jtC9&q*sMH4^24H`3!S5?$cmgbv1tOHFv~viFyjHfanKfT>N9rZ2LZ zv>BQ)Z>o`6^3cpRQ=qScGPFb>G?g zjcmSY&o1q_)JF2+XU=y=w?igk9QX7HOB619*w{{W4?m>U22IM*Oj=Ju&owmtlnh7{DL z>P_ik3;Z&DU({#0KN{X?WLTdZZ-^hUZ)?mG!?pY>Ni9;abu1a9Vky8HoNskpx+{{BpdWk8rE ztnO-QW9k24>aD|~`n&F7C5KQ(NtGHvawtJMhY|#&9eR-NkWPW2yBj2=W9X1ZIwhnV 
zq@+_q-oy8Pe%JN>&oFb&=bRmDt-ZDexH9E!@%y=tk#p{I_oyE&;(InCIYCH%5YQ^69E+YEME$d5|&-#*?RqCU(#rC zPV7Zk)dPam+I#(S_ArvomYzq+<|9J6d_G+mr9tS9c;!Ace8pKSz4Gy zn#KQ~K|@g~!mq#E)!1xIrfrwr->hbiuvoEK&G&_1tT?qK#~;1U{D0n~*`@ujxhgT` zDs{^y=$>m!mWGLOob@HVln9Q}5ex1Mk10SIbf6@vz|J_TtZ##?>A3@2Gi5D*YCnOS zVHxoF%Omq&GF51T&ReTr@63q(^}R98fEt&iC8$-!JJHhWEgM5>oA{yu zi}%1mOK(#m(8Br|+3qtFr%60)7u}6C?==0*p_F3OG+T0s5%kbCg66K3Xz90+VDiiYMidpZr3KPKXS`?LJc~qlE}w zYJ}VLBi1#SpZX!L?oAwxjW1ANmB~PEgXiWpIU8V`KZ^;!yb^m~ zE}LlX1dK@;RS7lK6oI&PrZxY1LKZwqv_4wE3bV&Z1Y*MWmC9!6{8x@wrbGl99o=)~ zf)q=CR&FYsYie!8rp@Hcq_}-NB(BdOmR5n*0UUfeFPtLh8m@m{o%`){W4o@$M=f(vyL;Id_m+zb_+V-QermM7*E5a>l zUTtHg6(iu;yEKk*5I_m2ib(hgO_QB-hg0ZIRS>hu*$gU(*Z2^}>Wf-iQ;|Wn(Ueef z@W%$9QY#IZEa64=C-rz{m%Sh6#vWxiNbuV0p85lkGVivlp%;ca-vrqX+C0)n<^{`Q zMJBb+*BtEM1mpcKkcboCzJylXBq_4J6*|~7kA9y%gJvu}+ejV}>;wq410g zIivO&y{x5g8nTd#r~v~u5!UdTV+yBRz(5 zpBCp4$RyQIUOlQra=?-^=Yi;hXMEYOpeClvCJu>`(nkHSe~Yv-H`R&ojQtPv?1MK zmsK?;H;FcwTYTB;z7ID;YgyxRy(#3GN#tJA^FVTy%Uf%*l0 z@-WNO12tKygL=!V6#N{Of^Wh7)E1C+7(>!mn=UWh>|G_Hd|5XB5Z00OZ~Iz6SQXkE ze`f^&;}^W7#m?5sS?I^6BjVQXbE4^cmRcep{u?2&4V;oS-T+ZwhFk(^NQNBtJh)Sh z7;XYGP@IjYql9a^GVJuq8&)nwtqAyuB{P$<;*>V!CUSAtVsb$6R4WV6KLs8t~8Zff9`>p+6^J=XEg8OOR zvxx7CMeC@Sa#P=aeI4I^CxD@`cguJA#D#6VrC?fJcct>8_=As3hG0m2>Jza)LVG{A z#`OPf=`t(#So&Q1I?m1tmg0O%m2SF;z@txEI{L7jEBK-GxdQ}h%N_FOOhRkpdF1DV zOeU?X5?1vjsul!$%LW4*cr!sPnl8O>J%y7GY%DACfTc$7Nk&rvr*4d?F@q)$-7aTf z-KF{O>)4UjSM0}lFoYYR!U$EqyhJOBv4d_}#;Ov!=$er%qKB=$YS#_`s~mida{yx~ zBx>Q7f-&h{HFyLN>~6?4Ly-||CXhV1^sYeVi`vN2O!A1GHsinr&MT31shT(DXezsb zRGDg_wTP{TU^3;Ow#T zdF<1$(u6YR;kv5tuOG7qB9bGNqHFNuZ=PIt3O8XVN_*x_3c2yxPm|8NgH|nqv45F} zVZoeA=>SM>wqyxt!=5 zsuNZA z5(YYt%6SeKDsMSpEx*^`9%Z~c{JQpq&iA?LmXWNRW=dW^pa2F49_HcE_zb1qPxg{b zyGnH}6UOI%-AnE#l>FXkUUm^$jx-LI-7LH_kc{9Ixg3|4DhjPSkw#y6Vf?(d&Wy{R zpb>kAU5k&_Xilt0w&KBf3@=%;4=WQ2UZbS~_?Z7*d4h+c0#&#^mdYpszupjv2oVSZ zN4ha`=UgESIufvwL5OZV(D!DyHz`PEHO$0T+n0EjpUF3s&+_BAS48un!rRfaP(~z2 z2-+a`%TY96>D|c3UOYJH>EO;!&0yGc+u+Q!pR1+!(ru%9kxQX^(l3@8-LfKYr|Q>D 
zn?IaA4<=s+g!%1N*J(P9M36pO|CU2j-5`ph(C=>HfB%LbV8%Mp2lq~EG70H5>X{qq zS1|po4D`6rMR35IiXzl^$*g%^|5qR^)l|Ei3)6)&4GtMMgIB|~l{-Y>1-x}kPmVsR zd~DuO2rzcmo>Urhrm7(VLD$LLbRO||62X-k;Y_HiqxNE#jvtG?AYsO|EKvi!DATnA zVwjn|jY#bazioE}W|qDv4@(glh!`3{l5o;DJh`2TJ%9Z7%8c`EPV%0=2YuG7EzkBB zT+H!GqPz{<&1!uoTBUSK2r0LxJJtm(-^P*N3(fTZz@Rt&jjj}D-KEf>)5kl#RzQUZ z@C2Yz$}5DFGU6MB$nS3=!G08RhD9bcjzTEz59Q5&fs%c^5U;#O-PPpucMQ<23mg%p z2iPsn{aC!YUKyk>3h~fT7%?0S`&tJ<%}sj8^qMbDqg&GXT@RjNJ0=)HaH0F3PhWQg zoN_t-yX6LWAP11qViMjwdvcbBH{3i3tY;VKVWX-$uqC}s+hdlvYA4`ZO93#O9!~>m zA{0HH{o7-~Fkfn#6L~~`r>7kRL!&PGf_MW2^it0vap{bnl}V89<*oT{g5k9Npwb!` zNIML25on&ssgL(Oj^iB>3_c3;m1^`~`c2JzHHY^EQd&dg>kUDY&_(;CG!G`xTfDWU zD*9bF;^6ovixCeJXPOa3OdsY)3>N|ZXp={j_plOl*L*SHFZaaB(e1sij2VqlKtPB< z9jzsVp?*bakGnH)wYGb!r_oNarp)^QAi9{=*t9VdJ^l;_V`z9d4>Tn_kK+1e@)&}` z@T23XSQXbF8t4?F0@J+VO8)%t%A-GppqfAzs~w%)B-WZ8D%8$8xtp%KQA7R!VS9bh zQn2$YcQAxYj6XD{GdTC9EbaU!(GHzMgKVnqtrB5R2Dka-u#IN7$emG>Xd-H?d^vJh z^3acrkYqjyHQTLH3DEgb1LHqkSy-aK{DQR*>J|pFmhGwo!=jXwjaWQ~)k!B>8|}ZH z=qweEpO~n}$jn5Jrg*-W3r&{AxLN>&J*r#Hmy_=EjhBb%+cWIe+m};i!4RZ?eL10; z-q<`F3!%#*`g;%*IH#Ajo~$NPnz3#6On1p_C(aLa_po}#R~FRy>aq4&es=4slsq}w zUXMiB+3I~$n2ku4B;S56U= zU6)bc036Vz0*b`E`J9GzR!IbBr5VTDn|Dg3r1};_xgblrpJq*a!{=eBk~1^p*Wcb~ zHzKp(<1*ivvGE9?dMzTT)P2821#o<-UPNcf1;h4Vq#CE;U^GfDfuJ~$A#7yu5ZLYe zZ_KWT=Q6lR#gU+tE)>Lm)EVBQS300%J0?>unioD#aFD4M<{EEYCGNmT zU*evX27^0)t#O*#PPP;ib{jHS6YCYAvFPtVu_I2oWsD#p|Ma>KJFL3ec-{9K6Y9;& zbx&d9mUna|fNeqV%MwB1E>haUHUVLjRW-P&4SUhB@jhj8vZJ^!Z#pFflM7$o$k>fZ zqxhH@K9x)fH`}Y}6*vgW0cQ4N+nY_WJULkgGSbDaIk|$d6RAxBbqxaq`3zKwVZJ{x z01J*z0yI75-uZtHrp*$uS4BJu1|4rYi?8*b6F~KZ@|8!u(Fm2`c@D1XL;*o;@80`c zy_xpl->6vNneOwK9g!phcYc2cLo`ytfp}A@M*Gy4*lf*)6h?L~;MjNk=mc80x_Ak@g zO9kx|C>WpkB)|RnLynO&3XBHB#9M*k_7)HXA7sm!&3>iQ58Fomoe2VWRv3TR$~~^E z(lUdTka#Mg5CPGafRf^Wf`(27bA7u$-)1)if2$-d^)9YjN!L~hr6p&&^t_`9Way*E zp3|foIgo6_%gwOHT5(HNAj$2ax$WayCNJJLDuQkTk1_FuGbDpc+drK^+tFDHmuzMV zc6_^Uu##l%`Q90m=CwP(K!i({=f|s4lo?wQ6@S8Biaf;#`04&Q|8nHefD$e}&uXit 
zc0xb_;twh&WtTp(Vu~OsyCZTsC#J?o6yw;yNMv+50penn!~xKdq5$!L?hiE_JKQZ*}2)lV^!OEzeWhe;P2E~Fcs9jQxtu}blBSN|wFf?AS^8MyKX?S!t zVAhM2iWVK0P7e7l#h9)xY9gt2Rg}Dj1WT9E#*vqP3U#-k_Y#l*2@d|oY-#qUKOoy% z=*@&61L9$z2xzha>TU6%IteV}4F4m7(`})2GE|(eGHcD{P+LVL6hL9ZHwp@G^xqEB z659JB3a7|mo*U=@d-N2LZ8%^-Q0`ybwITqlAcUq9FM*8ypf5h4e!cf&z1E8bEn`dW z$>kaEj@z1RJ+(=4%tYyf4J+}Spz6xek3_H4X|1Wa*U9*a5RHZjRQlh`v$3B_iZ2Y5X`cz8UJjIrtqWN5k44^ke@R8prT}JE9;C{w zCaB=qnc||h%DYq+0{3E(Kcowo*2l^0(gD+-;JH1=e=Sb4WDROHsQkdN^4Jk7q^^&d zHodEtFzUHc*o4GNm4yQX{^+`v(VN$WZ9sJzj+Zeh7Hm%*=diR)=Vf)5eP(!5q{DxU zkMZrJ85NQ~7xMu7u#}g}eY*Zwn4MJphcRUVukWuw$gCd?kxxa381-K!mSOKMr z79ZDv_5F5o0|TAZ5bzAz^>w?ufJ*ytq5fm1w1hupZWncdp5n7{_1bE|48Cmda_`5w z^wCO+EbPBbC&%#BgzDY^v1nn3`sAsQD6@@bK#l_hj=mQU*m-b>2PvnBh7umJ{7Gdr zb3`KS@+YZrsY7tCKMvQ@HFr_b{bigv z(c(2#s?jA%_yq)MGEu#ymj^|PP{PT*d3K(O%s@SE!O9g?GfS_mxmbGw6rtV9h?M=G ze-1ybTskW_1%h|ILp`%>`D{X+y+i-_Vs0LK6#%*QHCQsxes~f){p=YK9KXgzjYqvG zVZ*+CcuF5K@0aGa$$fDVA$tj4n!l5hY6wmgby^nd#7O`1j226pXKqsm{pxo88R;!5 z(_OP`$ql?J8mlQ`KRJvCsr~jQZPu0s&~`uXALlH6@cPD*mh+r^5qG{+(+CZp4prkW z1=%^Xe^L0a3IyRkWQs(4Pb(mFIW4kOJSlugYsq>}@kYhn0D2CFy(cKem}Rv_Eb-=_aH*8zh;zmVOJBF5PHk2NV z62?E+pWcF?^s%vN4ZJ>cCpVfw;8Y|(Y$i9oMAdf+bhk88MBhnqhnr~g2$e>8mlU6t-L6mKqZ=+m+Oi#(}_TND$+8|B%2dY=f@vQDV%Iq^pa8q_2ZKay+ z?_RyTq07Y`4#G?1bjnAPAw72lfA(DWv#Cf%sveK%Ii(=C4;LQ&?ejmEtJeWV-b6G7 z-IT;(y}z?!e4$Fk#9rpLnER2YU@2@o1?i};@Yxle5cEcBgrxcbQbV>6Jf+C|2p}mP zz_7P&kK+%3oD)XrbgMdGY7kKsa2S6%X_r%;o%*n{i+~(ld(KWPQ=ysHDAae(y5q3F zk-Th2D>8fjS`oBr#_4oYH}Rw8CpzKf5l3xZB}w-WVLI+vdgK@mLnNTla_XHvwgf=Z z7ErZHl7PjV_gI4v(rR4Rsb&JsX5M$?l8iZ{tw9W=jA2&Fwl|RNUWom;W zIboivoc5zaNYt$8w@g6wu zIJ{xDOM31gjFcS1W3AB`2brppCv*?0Uk4d2yg&+|WqlEe`(qrvB+;Fua|43haYr<6 zhQwvtj&;s9L@LiI>MwjVuv^}FXjhX$ct{T9#oU;vt*gLug~{%+FR|R%XOTrtfdbO% zh`&hi99f^>Ki&QLMrH$ro?erLJ$JCL`iKB>>^qzH%As*WfD!>@G6$h0(sAGKdg5u~u4J2C0 zU^27R6X5;{7Kz4FR!vh;!kbr^_>2d%Nnpb1!$pS3BML8tH0faFAZ(4Nq&|_m3Wlh$ zf8vymy%~c4j4j%b8Yz-oYMNFt&4LPA+Y|Gq5Y)c5eZ?_~FOzfPK?V)n~VdU$|u=xmp 
z%tD+0PiEz1vt|=`L^nLP#fZ`hOXjmv!!-{CGZS8VnnETO7IG(*_0xfAD3iY@7&4Cy z!JyJFMz4cRNjkp*&y)N@wS@z(&fC5p!s(K4qLS2nH=~uD(x6BnhZ$J+WO12)Vn%V$ zmntj}F8%LEayE_z2mh4l1xRyw37QC*%qilQZDLaE@1Kic4^q(_xn-qbL&*ZLS!)A_c; zE1Ygh_$KGQr3AyroI;T%sh1~jcN7o-ug20sKWN9YLGiNHLD^C6rQo;7v0n7BsgGu_ z=nrSt>IyDMQ6KRDYkJJ2j~M=ecxgcl%@xQ z-!OgkKuBp)@8!XmQ>X+xXHV*{PE}49T+L$sEni0d1iE;j=*7-V&P24bmFW11JY8@eEaw>r)laBz;E?cRU`+;dmd#d#_Lo_&6>pgAUsvE{23#8{sZg#qoAtuaX>n0 zz7^4e3z26C@DdHGPLS3%h<)}>poKptuzZ#Y#pp5?ahlTXK4EH`9k8h&RW+m#SuAxl z9F#Qa?AdA%14I#=Fc?p={%|Naz6Rp04;JgiI*_pjMo{2RY67S}W}-33z|!ZUUW>Q0 zCgm1E#mx>JSLm+qW*}Ilj{X3X7jIPqgP_zx4Y&zNyBb;~SdQ-^ZJtw8Mb!Gx9PmHT zm}6H3kZ|#(K0g4G;=HGPw-Kqf&-Gxa05nxV8DwqWZ1b$M120>}(!e;-LDHAfI~mz7 z_x{^g_U8#99&)|=yxSJ|ym$tl_jxrW)80N?j|E=%bsqyLg=E1r3%97M#yY+_R*I+H zG##t8+IH?VRnB?YHg0L*W=ORy(IgOIwXT$;0JbQ(VLpAHbBSFN5a!`@q?{zzqm>Y= z@GV|1;MoM*YGEW&_;0!+jq}=zoB9RUm{5Ou0(Js^e`{aEM*`19*wl;NAAL{UN)K)NBFY$S7)!ib`Y4wS5CEghr4D^WNZfcPzn%nIt}&y#BG zTVg`y>Y&sLc1F=css3soc zLh1E#80pZ}lM*jA)1VQuUkTs;IVKrKFY(g(8-WKzXB?=muTzdw#=g3dLyc+wu12@2 zZvyXBxmYyQNq!F){fM0Ue=pbnC5llmlq*FyhQ{-gP8?d<6`zFs5RV`(QA-y@zY|E0D~L?Q37R`r51j)9-&jRfFKh$ zH#m1>!nIQxZh(*YT-T=Q*|fIN3}%-9c9a0!l~F19ak1%0xPL#KH!_z;8*vZa>T@0Z zAsmUu0*znfJ_QB{7*P#}B#F9cEEX41GQPl%>zRFp>8C?u8#dcKWl9FoJb5EM(i!+) zx$XW)Pi&SE<@z`!kT=X`BMsxIM8){M9Kt*l1&aiTRG?H2#SbLnukP_!k> zPh2wp^c_s^fja@m6ks?iVGTnQ^!Q5mxHe!P^pk>u2^k1RGcVk(!X-8|mXAhbDYO)j zpxDzTfN&FJ6KZ+i?CtksB)-TWzSO?^On4MLzDG#vZIn(Gs%Z*w6dJ&dM8m_#I{#OO z%h|Zcd!fM2O`mEfx)MFI&Wi2=hA&LlN$!NcfF+MtzoRB&+fb-9ir4Rp>X2CSoW0K6 zpg02#<`0loh1-dOz%L>i+@|ez6dTRnQi(=NDk2ZIlHW2XQ>=#-S$sV@d3IKi(xUe=+q>ayD$z?P<$Q zAw#E_0a2vLZ;m=f+)~DAQ}}P?SK&sAh{j!Q`fMKBfa~#9l9UD#MPXGaGGaXvLbYSX1SRFA1WBB7q@PQU@)!9KiLpsc*KSChbpt*f4l+n6S%{qYVX=w`CJ>D7X9^ZY_%43YmRwdL}3qJ*({ zG***AJ9bX_3dx8Vc%z7=KGvvyD4T?&VF$Bu8OQWUf3hU-f&T;|`sl`h`0XVhnQP4}v zgqpd>g+$-N7@j^Lpu#7KF;Exe0vLA9q5VG`3YdSwUV*|5Ajs=Hp(mIc-*y8dUyEzm zwzPCAsPR{8b)SRW`CbvmD8Cb^PKmoYwc}H+hT4Bu9zDR$+1{Arq0pTB`uS4+r<(a> 
z|3T29ZjEhX6Ao`~#`h<553@((A8kYxeH<3|xi#6w=Y3Wo%z5BY&R6#=PD5ntZ9q6w zk@3(Nr6wrF|1VK<0GKcJArhQ=qtbx({<|>(s_3?0_z}Q!u3Mft*&e?=u5f#g71oxN z(AZY)Y#hrIgT^xSL-!-17RI-#AK-!UG+=A?&kr-Y!wO@Du)KL6&rCXYXAHkpiIGV* zim>Cy6{8&gjecQd1Nj0|gP?u)3kPPR2*Zchcm{i4JWIWg+doaAvwOz{9-h5A8)Gat zj0eM1b)(g+5zm9{yAsd-c(a}WO{>_nj3wzClld~d@;Q+(|S5Nc`GFp%Ii)GtJknBf@`tpm(%a;lMKfIKtkdEoQ_VZ-^Oi9|~H_1e#2y(CHB zZUZ`6CMMK#K$Lp1wO-3kd0I{LlXSYdeXBKiD=WTq(XFy~o%&$y!{|Z5z!WnB%nPMp zeEEvC_|WN^I~ed1Ie&55nMXSIH%nLg9T+Z$)>|b-)JpM z-l{hNz&icBccUBxP?456#$2Z;DSghAw~I9OLW7!Q_C4}B;7<1%C^8aO7KU3oEVpyBRe#`Y)c-CgvcY6!ND;BBuBK+I_mqyK6QL2{Md~2yK(`t%j@^vHMW)?u=I)J{Gw_D&n zwwm!kR2C#!!%M}eIb!O&OQWgK{|m{{%|J*NLIFt=_qcsr*dPa~b>%xn6e0oQpJeBZ z884q3Tb}DJW2jo{fRiuDgY^B;Ky-LjV;$&4q+_Jh-v$Ju&<^ zvUm0_+8R82TSp{2MvWiVRluwMUH|i@xFywB?q~U59#{6@LgQy4C>kXA*9|LjOs3<< ziVcWn^s0K?tVTZ9h7Wbw62OaH+=h0IPnVdPFoh9^YOW}9*i6C%`JN8OCz~Wr0V*UO zN`oCE2)VsyDPQ>5Z?_dBI1te21;kdNS-MrP)!-4rm2WpzvZsVHgV5f80%7`Lgv~x~ z^A;Fj4?Fes-*J15@xE!>Z1i#41D2-@9J%T*KxFQsasovf7>Lxpv-}DC%er79s7gk+ zA-)6t2612fgV@va|cYbyCFNktXN;K;R&>V6?m()r_N=QO4|iYN8^h{NxJb^v=zznh<%A6pHsCnSK21Nyoj~T0l!9aIq>m{^aA0}w-^226fI z)Ay|?*V2VB3mQw`e;h9&c#6!eWE2d7KTq!2j^cRR?1Yz}v;#oZMSrTYoauvJvkHqm**|ztbo99?4XQPR0d98d~9_y;r55 zxyDP8pkuO!^Qt|UoyVY?67)HSoV9bA?mP%e{d%)LB&GAe2ZqPbtqjal-P799DCtP- zZYtHMe+a`s2HZ52ci%PXm{tZk)7z^cyt&50Q^JXUBNUS+`Jv2mvkLc6Z`m(n;=;G# z_1KUI4{I{tUEcgEIla||8|~@lJ%3`GjY!MnuuWIQUuKkmglq;FmfTQb)2)H9r*d~k z;5~E4$W&Mcm?4!N{`lqFU>7UU~mj7bNiNmcPtxM664tm38LQ;u(yC zVZB?7!uMonqBCr?ghDL-Sqp8Ivz)fe{O~f_1Q0WqWoDH0cB9 zv2r=>18hhRKq4&x$_1{VVw2Gz$=&tqG^F8=HwHmcHG&cKOS_nY0X_SP?}bBV>N_BaqkcZ< zp${!vR$AKS3V+b^Y$yT~iohV|6gngA;x8C=U*FC25;W4MX6yci@Tu+hq7cOw2H#K!LoXiSI3RL9_Wm`bT!_J%ZRu2o=sDV2P=L z3K2XF_FhO)6m#`J)DCRz`A*FxDJ*C+PuKox2nP`7o|d|2oXD})SB!)W*dE!{edtV30ynD+SHer z+&%wA!KFCJK+E7&SHS#2l*FuhT^}8>5=0^Z&-wmEMN;&Oa zaFO1bGoc!&CX~=CaRlPosVgb(zqFi%+_$Fv``2@`9k#5kWId)J;q{6j=k!96<&%+t z+XUp@A0bqZepr4en<{2vMTiHVY-cYbA)j>>j-!kSV0w2xtcRhK>^{Ve!Zyz_g&3g? 
zG9x)K{g&v;y4syanA0c;!wp%{iiJp2fx|!b6Gk~!h%OL_2^TY62Tm8+x0w9re~ma0!$3BPP#?_1a%Q{Ei|5c{*O(%9wzc-*yyH*C0$?cM8al-YIZ{sLAw{Zq@=xW?XL4fQup9c*5D$D<|r zILKzv$syut){;_~;$-!F;B}LIE0vf@daWVs&fCgd_09-WA8Z9X78smpT^Z@z>l^f8 zv}kP9F$ZD{=%v^9k4eY1>mRZo$XICT0_UP{0~ zpC9dX$wM#z3Bk5D+Fr|{7kdVWeU_TT2hA3rcC`0Ty_ zUe8HCf17+(o{LI`62~*`PakTX>4rD7vK=YNQHp4ow^)B5hZV9aj)Tl6Nd@gg1fo5a zE`}7se`a?(eS@}t@~l=-_c6SA`;oHdB2SjkU9?9zB}HK<`;&b~m=`5>5hJQ@QAvSk zQbzkjIEh-_uyh#5Zen)pPRK^k>8Ke4cNq)>l-ff>QOd-$JR6vVao)vQD0%NDf`mO^ z^(blS8)nqi!qUy2@d>xROK=-vE#k&Rg_g!(qjhfJ$^w`Hjm1gmxFgi{-Mr%mEhl-O z(7XR-Xj7>88+2Hhbv2&|GO0?h_N<4h87qBb9#5nf9t(w>(M~|iSeAC@e;#*aIK^G{QJ>6Am2ekatRe>2fB>X^)d_pDG7`@S2XT^lFIblABn3`E>kU1DG17{ zejoEuGpH<}#23JJ1%yy4xQp_fR_3~@h{|k{NrlUdC8dFo?po_!gg)s5E`X{OR)zbCE-? zKWEnsm#^er<3U(fcX=R4Jxcf_4T{m#M{FZSknIKzGTONOELw}-~pYZ07->ErQdv?rdJ^N0f$y@z&QzMRYUkX zx<72i90kmsb^wpiuQXNk3=G8Ln*n4_gA8-Op9E0ns^aJZu;Bze9_%8VCic1Fo0Qm%?QJZ2>wh0^w;}%@g{N7i-wUXj$KN0oDPaqs(4z0 zVO?>^aRRmavupi=2u7B_GX6nyldSP01wrYHVNoU_JcH*1r56rC9Xqdb{Yp(R+?)V8 z#5ZaQZE0n$-Rx+2`6fy@3H?E86?-^Gu^TeLFx>k>P$u2-&xiXv?2)%7CGA>E8~Dim zyRAfqnwMz4?czXLkg`d7V)#gzm;TXsP!S9S6CW9K!T5D?!L#VP9&v_b5bBuKU?iGc z*1y~RD17O=i+jUi;Ix(DG7jubZ5va~JR#a;*baMEsDzs7Y8tfV%n8EyW{`e>jKTP3 zQvGP@3+XuZq>uz?odre4IA6Z=d_Nuf^F{S$Ci2Og5v1ldwyV$6J-Om`hpt&2_Uf7{ zFzh_%)vkwe)?Q$Cs{#l@GIsc{eqMc6oSA099#|+;_<=O)C!@g^li;*rC&O6MfoXHP z74K&jYl^;`zxrSEBFBEAm&Ao%2-N$Kh)-7!4c0^BJx@Vd=~hK$U)voN7WO4131d!~ zQW~mLW2V9ic$v=tZE&%^{M%frGbQ7o|HMQsWOqdH5#g3G>2^xpCCBp04As|LFK{hd$ zKfVO^Xzn%=#DAPQ)%CeX!$Tq=@IXk5V_EBIsQ+T)E6U$FQhvI=64|soM5XPt>4_QaU9MZOFs2R{t~i zG;GSG?d@d=z1inS`M(wpc3o>Xob;EAiwzaRiMMZsRUMa>n68&rjZ+zkC;2}9le;1a zyk^mM?ANT`_0T+znEjp6YYmJPOzW1RN}p#e*;P;aZZzC%V0mp^f8U0f4m*Lr($Wq$ zeWkD->|T$#0eiiyW1nR9Y-?9oDJQfH4en@pyvu*;tz5)Y!9X;~4r4Or2J6z2o*$Q1Hr~7jPrbQQ zb&huyISaDxyXqyzCG~-A+wruo^y$sY{`p~+-n<5L(Ft7E%iGgfRao%rJ3se&A&swP zQ>oXPZj=XE`*Npq7%9J?AN&Wpf18PI+rs~*w=#tn{(EAP7CI3%d_$yfcw;i?cvz(1 zgpVXttS_%*_qtEU73{e>rZRm3+@#l9I?I3$Esm2;D}Bb+C!{4O-r30lnz+%?n))I< 
zX2+zx*Y=?RT@hgke?G+mymDO)(#Q>Be7@_*_E3LzpTC1=zj@nImH@MQrqNQtg9d-S zlh~xO!~1`S6Nmo}4xK&kS#90$T2`OfZqIn%ULDJaFLO5PYH8(s*9iMRYuG8v;Q+t{ zQ(Jp}*Ms&v@cMRa6?0B~<|`q?^pxq!T%ux@N?S!aAuAO6gSmHnpje=1U>q4q~)!d2;D=;&F{CnbTY^^|HYThmE48KQI zV%O@=!L@8^Z%*E@LW#^gK1hX7Y?ATtj;G#R?(z8q z{Q9W9uV2_psTyv`@S(&|oo9OcN67lwWSGfL5Z<7pm76N7b3HSyqlK|Q$u!Hr*xv#w zj*?c=zy`1Cs+@Ya0^Hw`gs;x-Xba5Cb@IwA;*8o?vmara4AMI8txlZPY9HpGUUG90 zzxkzgXTQr`PW$~$hCoZ+#Vnhz*m8bEjFwp%l|b{_0()KEZcS5T*(7(#t2AbziDvzD z9Ub!}b{<}#sFsFskA9vq-saJkI^E9)QVsM|=hc6F?(CKe|5@+5>~7%9o%L={295JHI12rM6Ywx^tys!3WmjWf=?gLr$c} z`)`T`q@+6sv~RawTGoJy9V*4N=iWLWuDt|ouzpsJ^4(;0m7LY{vW~e#mX42`L>%bq z6x=96C)jm(J`gO4DwLFYz{mLZu{1PpiS5wD@7YXb>~n`6Yev^!<3q@w$7Uss2c^_j z06;eR*F#XLu*Zc`C0s4emEy`{xz#K4=1ZPCu%lUa)a&5$)G~qln7 z!7aouB8`1HI&pi8PqC<%g&Am-;(3VLE&1htIztGYC< zBG#9q z;*~-C=ZO>ggh3fI&zkMOe>XPlnLqNF^(ux}jCh>Iluf9yW#ttW$(r8()qSNV(x{*v zU_VtcMnxfHnPnRM$@Jdlr;*>Dl>_c{7yW+n=R<4X<>g`l>jsy@=3I`s>H=|st1MNy z;RZwZaNKTd=E-1ODIp19xr|%_8^0yC@JY^%6C-A$53?cgMO3Pp2*vqyO65C2jZ6^h zvWj8Az$RvDCV2tZ#ewo25-eVEC;k3Xxlt*X>-Xij0E|L=nru@4JjPFjFHu_)!#;vS zyKMYj^Or#4`K%mWT8i0@`&9Nf{`T`k5g52vSV1?{<(_VS*!$yD+NphdIqX71^ z1ye7t7DIcyPkp1y1v+dcKJ<3sP!Rqjyb$a^Y2Qw_Oeg&)nTephN5r0m&Us7zoU~v#$ry^ac z^REpZwoh%!^_7($Xe`VXQbGOC;qAW(Zf9(uN=k(6<357yhY8KC+#iOEzb1xlm-tZ{IhhU*?#XHiwo2Q1*yHhkI3r~E-BYMB3 z>(v6O^Q9IgsSfE9wrJ-0_UCy0$;Gf&3byUi62>Q8%SSQaF9tqOo_16dQAcVSi)WqK z_x7e*SL@>WWFGzNra5>qe77>Sx%n}_xpA^Flwx{jDaAP!dzx%OjhSfb_{@RY>Zk&+ zH>Igqxp#grE=KT;WERC<-x%tDPs?Roh=>a;7gcNH7xA!s1)b?NV+L7e*6^$of(+>0o^>|a| zNIDZkzJDmbVSLx6mU%g)S}yaJ?-Ko$c93!XOFBXq9zDf3SZ8-Kssk1?i2PIiy)5@rxfC)h46+^b^j7!e3KlkK97`4PIzGTf;fJ!s_Y8{iO+7 zypa-*fj`_25aVui=z-ZtQYottLU6e4o}cT}+F zz-8HsmxABn$KQXJ`ZrqFWy{yB=choENPP$FO=cDbmdyiRaucH8ywTaE587PhSouEk z6xTFsp6i73g;4`=5Y+PyrMpSA)&G65e=-5F+8*|F#Oj;3ZGysRIx+Twye!wnx5xB9 zlg-G&;4*qQ{CImcpi&L~aWkOs%fElxoFdepN%XFY(GC>hN5PNr{Kfv741L;-^lO<6 z5vzU6d2#_a-($2BG-LcGeUAQ>EF9D`J~lgRnH}C%Y>a2?fUQs68SQd&m8Sc1gq9Dm zQHihd7aa;gX@g!a^ZIc}d7EdjJi;xV>nf!a+!-6r zvg?M4tHJ 
zJOWz*Nj0_37`qWTe_udaMrH?COVV=93u@0w1pI z_txs`Wi5H@s{GhPC=D8;K0}Bs3V?`@&!yTwh=j#zc<%d>@MM^~m*;Ah(=0GON+r9v2=UCH=1?uAl?_jM~k{$*=8@*qOd2-L>TW_EU zsC1y#kbSxY;L?&hOTqTtS5RfHdJ$MdYqW%C+GDPGFTE}IO)tmgL;q(OU(Vh(R3uHn z?%%>BG66MjVbjfSSX)GZeONa8=Omv-pg1eg$jf@JP)nU=lM$Yv1T_p^!y#H0$`%NZ zw(l@7S8;mz*0xfn_$}wTXS83+kdFE3+sxzLr^pqaPh6KaVIRAsYfCUW-fO~fS2lQ5 z?I~C~O}NY?`wt*-oov2o6|&wtmO*@Ct>6dEX@Y1PK-R7L9hoxY$GQ>2;zwZIk$xx= z+c_V;6Qm^H_U~b`B9S~Uf#>+jmhWIQ~w#S+SDH#c3M6!N@^iSeMO#&B3@jLGd$rLhF=x&e3ofcS|GkK&% zmc~8TfYXE?)87uled8kS=eZL2lW9ZUS*jc-iM#BWszK9ZSSs!t&w%?H8c_hk_cEOO zFK~>1>sOh<>1Tdg(C---;V-Y@PV$BhF)pU;;-iTtM^5*vk9)s)juA1whkX(H+i<49 z`06Q*SzUMIRwQx97TO;Fq7|lM7B=eSnsar(3k26ZPtZ`4{>^hUX zVY6mxrHu%rOM%F;gX`q+gIKS|cCYNupZO0T5B(m9^xP42`kglDm(5r6L?!Q+7JF`6 zHQkTGu|}&@{2Z6HpZZ1=qiauNo@I~U7^(CS9XaB@Cf?U;MyUF(e;btyAjdn@C}GM7 zh!CEn`M7uN_wUD4xz9!B*wNhv2U~@Ns+dsh8YzQ{-ms+#$?6Cd?l^mK1R=S<>BS0K z=h+I{=-EmRx6M~JllYyjOAN-Xhp4+lHXRbB$D98vSF|2qf1k6&F;QZ~22awT@HTwI zQ{FYB6Yl$k=KK{v5?9C`l%F5AIflZRhA@GuQs#&l3g zy!omgf~k~nd#r@AF1qmpwVg{6@~FS|_Oq8YJA}K>@u=jKf=*&+%NssyQ=pa>QIsD$ zMCUm;S1+_kwobcjdUPXe+QgwMFjQIm$nai0m&nY@b?h zJt#C-0rZ>KyWb1!{T(uj>5awST!DzoSSb#Cyrz}x-eh(j`!WAlPNDiQC!5+6$k3gYU=VI5>MTuH5q9n+Snh(;=T&qBRA@R-_vb#r^qmxROi+{nq0hk=A}w)PHL zqp4LxY8i7LNJp&&D+>*O=Q3*%|1o6DBSCR#Xau0P zB%K=(?ypv+ngoah+ou1XjZoeT#K?Q8L35aVmX}aJbFgbRRERe+SI7y96D_;L-A}+? zac9XX>c$u)@_nT1s2#5h#8H{GIin-BHdg{-Po21#*iMqYpK4aHG)UOAK|np4H#E1n zF~%Ee%eYhYIN;6IS^2+oxvSkcUJ^dGexFrMGmZ1(?E;7Lre80ao(HL>*xiC+aPaR8 z&o=qld;T)*q_Y;zseH6G!y6ST=H?%)Dxsa0so_K)=g zH169(vi#x{dGzctt5;KfQDge_1Ksz`#ZJR3XUBFIWs*-9_i9Nu>l1xZ&8rz)<7uA~ zp0e{i-exX-t6S4}T6&Iou%iC&{riszher;9A_bfGJ;uJhw)FRx8My-D2&jZ=YXb{Q zkIdMuGhBH9V(CqOvPFOXjSsD^1oa^uXX4n}jB+R|=U4B3_&y9$@>lpyI%DB(+Y{8Q zZwg2rPlk4&8FgG>q?N(2vIQenX`ymACqHU9VCs2<(rZ|lnoh1HO2U)-N0Tt?8$KXV`<@u8#j*&1r-lYJwD`wQYRa6-AGgVR(M! z9xJAAsq)MgLVj7waXuLTV_Y?jrrPGh9yp#5vo&=>Hlkx|ZpUNsKlY8|d(g@s>>lB%RRBA}O zikbC5rQA7-{=wjr3gmOonU0x$9A0)jDnXC)`N2l}l>v^k8n(OS<~mJZ8v7TvN(99! 
zntEJ>SuLAX%pXR_beVpzSw7v8e|AbM@SMf>4IF^A7aw7o^h zjUL&mhcvV{iEaW3S;6>JOeGfY6vx<}u|z*ryq^VfOn8P$&S$m*dC)t?b7v_cu0h{k zW_@4JB@lTKOZ5f7mOnHWJCVjb7W-FLMk;R3(4wOIo{cV(dGg+`i!5fA)=}TpAGe8n zY{(>|L2!J$DA>t6;H88{o~woXI7b6jK>^3z5fi#7nb|Q>B8w+@`X!r*@v`H1qfM+fb+#Cyx zF+FY=J8ZBEE}idqwrn9>VsaiU>DYCQ^PHY9C3%@L=F!*XMbFR9V)EU`xG6$g}r7+3AZ?T{*gNG7K(Tgbxk25Df+$Fbaes|}-`T-kZsX0Obu-D?>OIayJ*B@@5FYmg4rwMm6O&pIFy$rN1{@SJ{kCL~su~{+M_DwzG zq>=a|gx25f&J`igwl6u?jK3hL6T(Ars9q$cN}CZ zmkaj=jrv#)wsJtZXR-4(Q>~u)yIbdhAD<#=?EE49{AR#gr18cCWiHhl4C?wgu31E84{b?$t%HX2ok~XQ2vdi_=(dG z4ug)Ghao}tVRVM!i1Ub3OWY%jYeMb-(?QO{gVJ%DG5pYl)$?`6fWTwUS{cuYWysKH z_R@$37r%*&ppcLMSt}NKjn}dyLD=7y#XOJwdG>1Ah>ka8a_@jh9%)yso>sXhTy|C? zGudcbi--^sMGtSQXq3VbF=+?%wi1M;f!dRVxHvc*-VDG`WIC`$$k#0Ikt5e5&wT)zlkC#{T*=R{ zwvUX+!B96_V@!cM*+1HVn-gXnTNE_Gi!DKUgBP*{+{72%8mV(uud|vdPoSE)87f=k)6SNSV*t$wXw$c65eoueKfR7 zBd%!&z8z_fhxF87U9P@$xoCw~vMBY3MLyhLw-Pu?Js_H!W-i_wv=;dS-(1K>zr8FZ zX}p6Mj8e=JX_kIbf;c9Sj@R{LRQH&9EcS3lFpzcx^^d);L>V`#4BLlPm(=YU5f_9- zKuWxZsUrf>ZNR$Ufz*4;59=3&FqCrH6J-65+E zg;$eu%iK=#?!o2895ip*q{Xc>rJ^9bVbqyrus3-c--nhSY=yn6lN{PE-y2B|=BO~w z-_n91R!%xj$&js1pGx#xjFH#Dk=GoG=>l(tg?&I-Y6!5<3m77L6yZXs?3NBkAEp5@ z3&}rNn;juvV6S<~_sguyDufGDi!Z*lqzANA&RYlI)ST<|XA#h^hS>?968wB5M-F@t zrk$Z^m<`eHwV~C$V3X9b(RDy_j-iG+9P8TV52ghdRKhjG#}1-yf*fWt65j`(GI4`$ zAtU$9U-)L%m9=^s?<+k0FIue@nj@7kDl2~NFRwN2J_c=Y-C8z%BgBbP(t`BM6IuA3 z=8`Gi8!0wRvKWgezn8*>7DDg1wFv<6jN%=E&jxm1jh<#XGH`T1Cw_gcg4z#Gk`}#r zdiTLi8db;iMfL&zOdWflXECSgb;iEtpuX;BkK z%>Y$M<~|u<8$VSsCzF2Q6nq=XM_U3+u4vd1RDuv+^nj1RzJn*S>IPTA~d)6(ZaHAq0?6u6@#IgDZ%df6Qd%{CB$vheFGtr( z@MRS{b@g>J)R}yjC}p{LH1N|5q7eY7=Eyty@wRTqlzPthl~ZNlgSe0 z+{-G*Pmx!gtO-)Pe$1Y{9MROKDB$T^QN}9NTD8FVI{(u19?HZYX_0U!ashg@BRH)O zhL;zbU9yPvAmv1s*6`=;2X6^V0UEE2I_VR3^de`WS?8F@X_fL>$P4|^iF5zeSM$v{ zw24(tY2y~++WLmjjArrAFTI4{e`gE&jw_g+7tJf%!G8x3JG?qY{z$!$Xz|SR;p*IY zt5$R6tGMipwuUoQ=$6Dr5C1=g(-ZtGfw+54!JZvQfYSK$xDm?+eUz+gK^J-fWC0;c zs{`xbCOb@?=G-iO^yLsMLW$%^pnHJ&dE4p^qwUt0++j)FdIh2J-}q!nCpi9_QX~c- 
zX<~#GqN&TjJF>1l{Jy>Z(KwF7>vA8I0j=g#8XUc(z?-;@5~o9)c({mICM5_8 zq!mOWSk`bg)8O^g2kQfmGgn~Hn`2V7;bWF_j-pc^No(eN6_+JMU#Sib7w~TFY=c-~ zo|07+TC0gtJuq_IMQRSxBJ-7|$^UkO<=EWc?=Wm6c88+P}fNwQM zRMcn{SNPU9ef@n24zS82+qC01pDtA#a2r{f00a zqCO-jE^d2<{6>p*>Ha)Peqi9H`%=kV=PN|Bgq3&k;`k>JD;jrxPEvoD}>nq}IdoNMm4H&@&@Kd;2?{hL_&82=M4T8qQS)Mdivr3|r!pneR zTIK(nG&AtK=&yn#+O_5D@J7Hz3m4l~)JcR&5l|xhn{#d89$M4JjJ}{K;G;+wa-dtt za9ZSQVrga@2`FvEE}9JK_g`m^#|u5v{O1R_4u=4eO@Wl&6`@bzi<5brS_8mHp!^7T zMR*_v9g@gz23oMWS(JR+2}yFFLB3syVtPBl%g2A)Cc(D}v-`BhmBvE)H_wl#GC_qL z8=lVJ*(Q10e}|-AFyBRbRFXZf+<0I<|NL;)+uWKILGyu)P%w@jL6a}-k@a2BFP~U_ z@E9h@(!++Dc9{P5=YBLp?R_u6t0w8Zt5)1kvY|V1%VPWv@GcRDo#u+Sr)OM}k444x zrg2>qIV@~l`taL8;bS?#^KPCzI1G3+T9XluD-aC4mdzknBk}dmZl~nE zjeHwA%r3?tx=(|fmJH#2Ee{n@lKhgjH-yogmOu~h-Yb4Ua_Z4tbX+lW`%PD1C@IJo z=2OAzk(#zm3WKQ<6cG6-juc^wRT__}Ex9oRe{M7zLQnIRJ8Yw?$tG;gOOz>MRENxT zp|~L-458#c++Mym3j%0O9yr=Z`_|Dl3_e~Ij0;KT(YR;t#w0cxA0#%0ZYSfXwxB9{H8r9i1q+49@`Q?Kc2rgJk3nUk2$rfAr` zoJ3j}QsyXc$ITS@zbrDG7?Gbhyqk}s(hQ-OZ-Xty;BoW7@nUjfc@W?vQ1rMvg9PCZ*T`z>02HLjIBpJ)(vv%G-9$kFxbRwgG5x>wq@@(P;6g?Vyp4W8n*`Lq4en&6utOhi_h25x;; zi1rD^wU`q5*~Fhn#;q8Y_;D@VsD&FN^MjqCk5>PIq*!rLv-=Nux2a6xcXuHrP0;{J zC_^qs3w^b}*(f0I|Phh0a!WF|gUFMiC@?NSrM$|1X4@;;i0aJagf|n$_@sEP?Q~KbovgNcm zGWGq_wsHyJ62bWwqiq`@=x6R^kO40WbTT;X4Re?cain;S8hcuZpE5L#Ie0-1#SMHB zJ=^?8^VUP>8So41E<}bQ}{-waN;7$k#O>z<7;riM56O=)_uS>RQdI4oN;EQl?({Z25_ttf^5aoG!e_}O_>93= zg@Z^@ufX~EX8aVdHj*$d;8^|`D0O`u7Z(IV;S9e$H+ad9e>Is-imLos0k02m;MGEn z$IsvKbnDF3S+D^Gdt!oCEN5zWZU;em6hO*Q6s5w^o6p~TZThhmE);h+>8)A=Qk3}J zr5awmfy>zj{{dKD*m?TD{m+h}Z)V+M8L_teD{!Tb_6*$4W;OI$Vr*M6xDM_xk z-$*j&jWX{)&@uP1fs3h88z)=Gr;nabE8?bPVq|Y(swIjOq zDP?Z!8+|3;*D3GW9Ey(j2B`%M0g}pLUD5GdF)gAq@#j+{4f1;L@oGjtqPq!V zof7R&GW3v&^AWG|f&gpFG7J_rL*(-{+gHjSS*;-CKdhFmXkc|PiBlqgtAB6!To`|` zGCR`rWrl*#>+5#0%>?=*(X?X66K z@0&y=Ykm@j(65`Q{4sV7sDTWgJ}X(#O?U2VnnjmnoZBL7_9j~_a3U(a-o3gI)kRs` z&u;#>f9sTr4q?aExRM{~_Z2@Q02SP$QawWe1`*;km{iEts=(>uXrk)7jbcYD07e>!&p8z@f9-Vj(hKz=8%|i-%#7KH 
zj!Z&pU<;{fqG)F4!$0OYD!Q!KNAF(R-o{IL;I|5Q7X>~1Uq|&zF{MA1PjIAF4}nQw z)lfVg2@oZ%5v1u$f%svYJB(>)6ta;n7y)GbNEUG9yixV{~X8+E2efWH& zhRGvTCoR3_SI5lueZ`NN51~03LRjZg>5foqXwI0Ft8Ci^1#;8+fY;9moxgcVtD91Y zOZD=!Svhtek@uUYzzEm<*|yR*<({#cOtSIwPR(J>NM;<%b03Q_WT%h3rSevf`GrH( z)Oh2~7w-e38|g&!qRMx2+?YKY5q}a9^62I|#xIWYoFO^Fw-Upg-3=Sx^7lwU+da{g ztMl4g!IsyUK1T}VjH!9oF4G%G6O)s=w23Qc(1IJWFwmY?vsa}cd&LClI zNqAuPRp9T6UlE|B)7|1jK$BpdP9I;v0q5ZKpNy+C`5;L03yjVA;kzI6gY%vcs3W$@%C|sSkra;w+wt^)y7UQaD zD<0^XwiD2p-LJL?=mPGoCh@ zjlam>$;!*T7!I4{#hjRK(oAP?Y^9jmxyfR#@n_G0Cmb6!Van?T;1~GQo=q$);k)V7 zp&90{0T!2plq#&?{4X0j#Jnw21!^%Jsx%xdu6`10;4{#22$SZD`2DZ4arKc5c*c)H zF%^S+=&7H-YhFGbf68XHiP@6^*$Xe29YaXQr#on7n(A`8=(`ryY?ANrmz8XSKyvK> zmv$DS9DdS7s{Q5?&MPU)JQCoDZ$Oy9gge*T~H-Q#cc zy)o75MWM+XYi8g*h@Z}eSdluj&2S3TUF}<_zKdAEWNT^&Qw~d&u)_>J>M<{y`4@&7!L;I zUQ7{a$R8HQ;2Xb3Gg}x6s9bvrQJ$F|H1|lnJ*!Q_VRv589|5ox8s^#EVFJ>DvDXL( z<+L~LVjPcBLgNSh&y`S}hCdpq^dg;6-X|~907H6pOod794p!su6eXp9`t8#a zefb}(-sTA)D040)Eb^;qtT{>PuX@=>&avY3ods8hX?tBd3qCxU$(HYiTjrJWTCJ3n z>RXDwsvTT%j*sR}d==|2``>gA-dN}jyKswLR6_gsl$@UK)X%@!ML;jNNG?jDg9s@M z#}9}ib&jCXOKQY;dN_cz|IQgRwNd)n%=)?aEeLIP1)pL@7Do#go4*(&O9^qCGIQzNo;cw?vJX#L?sG0#GPBla0_ro9NFUFJDw>y2=_%?rbq-?OX)2u_m$`LmT{u!$` zDJiJN6;t*uIm}Dp1i^WHMD{z(!CiVmr^#QNqVHNumL+|}6!}epQG7z)SOR67;@@?N zcQ@N9t{>0>pmG?IvE^p0S&@FVvPZaJ?gp~EoXBpy67FAOTm{BKY>h>BDC`ZseuJG~ z&C$v|Qs|TG(H!g!?L4)JC7;Hk4gAMXI(r}U-3m-ZWZn~~(FR6iK&<#t5*DKHME1Di z7-&BCND7AwN7u8CV4#0>d?9yFF>0bBWv^9&$O6A89uxtqFQ3N_f?0Z1P`C1fTUEWU z!5rHup$@d7*op-}{5ti?lKO*o9_#`n!?&*wOQ>?Vo{8}HYlx(j+xNUCu#C;m5gFPIxR4QN^E&PffCsmR0lRpkA7Zf9&~hLJS{9wRP{^ri z#(2xL!~CBZLAucX3^Q(%o)-mDSQay|hwDSUo6B9YBOxyi3wIAI8e`aI6ZPl(H2L~? 
zaS>f42PAJLnYjA#|B_9Q(q?FB?dg3BD(rp{q%gB|%Mn>L-gb4S$qs_TU*5H_%A2XX zf1yK0f#_M@S@d@lJ;?7goFp4iv?d^07MnjCG4wVMg+0ElUvUPaHrjx6Xnaoi_}IJk z^*N{4L)g2!46*A4Si&-;#O3(w8VEUXgguebJCO3r3@k%AwaY1#NA^5xKWHKk*8VSk zJskSCSeM3@N3JbSno7*qp&-0jjc`iESiE>P)-aXe6E^2Ke z<)EVaW>aIG0V%2WzO&b|77B@4C;O>&k1&_6LQ-uXODOc@I7EHe1JF`k)nN2>L#V72wTDiaO+6k8)5 z$~BF93K-=_FpEBl@0#poC`sE3O^H0C-rD8cN^r!P+>zd`tN z-VscF{Rn63% zR0;c9&}QoGeHm&}h{Nw;2NsdBYfSbP2!0RLH@A;6HTg_2fm5QAs7$B(GK;B9#HHi}XdjX9jzL#|H>MGOh$IeeKmZ7-t=;fu+|60+nx3*saMpYPQC z1OOxphP90|A_L)Qx>hMUy7#g|8*)T@YY(%VLbqfpK0*H$Jwg3H5d86e_2{;>zkfiq zvR-Swpv_uER~r2rK>#^~aZk|;?L1eEYf74gmVz2cPGM;9Rflpn#L^kD5dNBP`vfAlT0beo;t!!8Y6>izVvw3VMiIdNnlM z8*>7>`?(5&OAkt<1TNV6=9p!NCImiX1-xHZqlO}>&nb(v|y_Xb5?t}Y|H#qs}k*383GvAoF% zz(ui;l6=BFrBO_GA(T~VQ~^f2_=jHV{Mv?9>fM3Uh$ukYFb7_bZn#r`iZ-Dli)RqSg;83QD?;9;L1x zh3Xy3g5>DHC7Zvlp38p1rip0Md;m&UZ=}HsowN?To__If zH~X_$^>e~oG}jNT2vX5wmY;w4KJapp8#8(b{v1!=KRI|Tn6t<>aY9al$dx(jFb@3M z#7VcE@9T#O?_xWij@HZzTbDSt`W%0I`*WM9-&hqW9iE!MLSYv)dp|w${hq6(L;Sl5 z0ks0$u^$5SJY4jYWN}LLl^k5k0;Ly^!kS@0g@y0_RSLODmKz%59>z4^J=QWN;X77A z8K>kQ5;_?;tkIZmzWHy*M~G5w8mMR$q`y%6g!K)Fz4}_! 
zUn;{X5)--0dO%}>W ziEITp`)lxD)C40m2kYH2}W{>qQ>3JvO zKA{O8QEWu`W1{)-4Uv8Y`1&W1p$l356Kl3FCd^(m4xG@b+k|jOaE*dgX8HTGDUq(3 zlt_?K=l2QQ7vJh$u9C55cfM)}T_@0*zLWuQgivtbH()C5nwIS*U{8e z>a^_aw8zT*EJxc5M4M11+NaljebHksLi-BWqGyhT1=DD1YJPDI>`c#~$@6>UR4CUW z{-r@LM=5Ik#WiX{cW$3GPAeNRX4e#-3F)_ueszAu0=q5t{yZwDYp1^Uj{b^a#rdld z_1B8*H_t7!))t>YQ{Im35WpFR9ZZ?QQ`cTUwS`11e#(O*PI@Vqk?-FS<3^<&1hzy* zZ-JOsnE+nu3A7~08!3ExHlMN|jn=I8IIhCz0+)`V0M_B~wK(vRQ8>(V*~w@!Y_|0U zOU7PR3HKrzwl}WzVuA5bi*@k_&;v0tYVQ6pYyJ2*4ODgap75CGO2rcDN^ZnK!heJG zntC$^rqUNDmxKO3Zk??r2k9jOpEQq#iuM7_zN%3tAEeV1ZJs<6)C3peJSrI+%YpO$ zLa**CVWSZznea5D6Jr zn_G^4q-R8GzhsW0jD9_zAM8pIplCphd^=9}e##d9G0Pf$@DwM;=+}rC>M>ocEsqJH zt_wQpzFi9eYB36=#KguZ`ZFCjK`tzE6U52TlFOpw)rp?cK4(IV8-9AU5DI|1Gq9&o zu(sE(dIr^hs5!c<0fbaV{LxTbhinN9A*$u>U#j3>-1-j6d($-Je>D5RLsm0`u>;L) zSGero3t6XQ`xdt)jvaG6{R^O6!K?^{c-y}3(#;2q3Os#+rOx5{v5ECbwUV1o?OE+$ zTxLQ!z70o{<#PAjydE0`b8Izm3k&vPFxYY=lL`U6NL>qq&00q zQu=#4ue_iy?@ei2@M*^yK(9$jOHA4lmU_dV>A*tOesnV(5$y%<$%cXDUDs1{yiiy= z8(9(~SYHSRvpoU|;#E=0cmLIqZE55_<|8BgqGr>G0xWOMZwB6p`(q_MMFpVVYc?ieWoCk>4_ImT zc@~Mkn(7|m277?GC|PA7AahZqwyoTH4%ae9Eh%DA|0Bl&bd~b8vgzoX`r-cn0&Yl* z-=@uT>&(+pe@RUVGG6_tK}E#;-5^5v6F*l7ATD>lEs=;AlZc(Dq(#syd~Ki0$aagF zHarzJC1f;c^xn$Cr^+wx+C@-M>P<~+5y4A4h23l+ykHN_HQlFsN7@jFUk1~dD69;( z^T-ujYc@p3aKJ~BcrX`}Q@T`@bS@=y3g$k5Bgs|W^`qtiS8le*H(gm3;r%%Gpy4SN zJ|c@+s5{F1!#qrmf9U|5gm+1SkScT@x2Gc;9%Ln@-!fIFsghPdB z@RC#fUPOr(T2;D~C;SrC1?&pInA9hk=Ub7XL~a5Aq}&L&Q>caQz0F(q2`xfEQB#e4 z5HSIW$W?luOIj=-f#+>J0MMawKMY9i^p~aNkd1|&N*^FH{$ec5M5y>|y27SdMZ!`~ z2%lYrtwGk23MTIix=UplN=%LfNU!^Iv@ZOwFR|yI{dpYDRCQbtiR$q!^9MOfEGmFw zULz4nX*?>RCfybOr@}}}8XX_?m$uoCi}>>B@P$aU{qjA{RjPud6`k^G-+^7~Pi5lg zX#}MTB&9~UkP>|&V(FuOth*BNqAeUmyWshmM@D{CFvpRXe?R#<{J~+{dn5U=4>cAo z@9Lruh4(*JM;o->0O_cpC}~}&Wqu_SpZgqMYDTR90{ls;Xb0#MNI z<%?RLr?0_}yRoa}?QKETfm-bzV(AzxXHq1qv(NzC&uy)@$sY~Dtwy+OWf>g&HIkt8 zD|H60zE3IaUfDwQ1Y1M~o`60i3_x`laV(mxP@tx9aJXNCuMdm-Vfu(Z&(|P_JkZ50 zGVI$>8?=tOO?x{8BtMwWr&|b3jZj(i$w)p+A}F=VaNz)Al-e@0M!TIU`2n1$DX!R| z1v`bgz1!fui^Y 
zv*eUzJ4=+sSA6~HO81u{%YO<{2$AevpPbx97~*wrA9DxomrEvu+7WI_o~8XJJu6X; z{X*G!&*~DCb<=} z8_24MMf%MgpMPtTS77fw&H-pnU=i3HDl-b^Lo3dj`SyItE-)D#D223Yw;IGGutG{Q za%^)!^npr2r9MGx;yr&g2Z9lb|E&hVH~wOD?pb`p#Ck565#B|t2)Jf+x3~tXTGPuk zH{?@2e#Xz2yCYd+CM5q5|5!_WoeVRszHr#Fxyl3Hi{BP={RrZYLlVLyF#;iaJuR5` z^!S?Vu|dJ2rV_VZGIWN$+$wP#w4=qmL}jDzwCf~A9eCiFZhVi z@8${^5umC!^7Bh5x2fs3Vt|4{fJT-wh=nL>xr;Ey46P1s-lc}j%ajxUOK9li`Z(?E%T za2kJkQUn3Eay%c+Y3A$QyfN;>=|R^la5#=6Jxy`niio(4^}Yq!wYmS7d>eY(8PrBP zMc@A?fci#M@}V=9-jte?2-(gY(e}mTT}weIvo(Pr1Y|OS9bnQWF9*sRjT@T4JzeVy zUCdfd+&@)TyM@sED**CSEK&`z>}2iTw&A4m@7QddE6UjyNr zhO0jepOnFfcu$dkyv0*JIiz_C`-`IVs`zEGz_`%;N9E%dQ17XKT^~MBev>=9_XMeE2nDHt_XovJVl`NNNS$nGOrn-|_)>vZBYQ4g?%;wF=?O>M z7jHQt{J(?X=m;8l@sDnTv`oaHsa@jV3Q-(;1}5^F=6G>I-F$M95MFGDY@ylp!sK=Z zU0%!u&8l3&56?1CwKWsFB^?c@zUm!|{Qq|cb_?H<+(3}Wh}QICxEF3KcT_mt?I__B z)@y3gb9-Ixirhj8#~!WbA)~*ruTdhCl zb&u4MQbZhmabs8v7`ZN8Uw|%F$ z3WK|fn)x5Pzb&sWw2}}&(WN{bT3!5g%%RcBvE{rM`@6gGA=Dxq((mn~qZz*{v6A(d zQ4)l6RT(#PEE*dRrh+8sh83Q zJbN!}H8IB&U|$%p<%bH1$6u)i`Q$BJm{x;gyfDeJ?X&J_a3$=8Q#M{bSykFAYSadzSPW+T+hgqHl(1UxQ}skDi!w(ytiP<*ioeo z1p%Mab}-QdeAUT=j7^`Zp_yir7P){gpjRKO|GU5HDV5}|Xo$!=o5e$A2=Tp7B~U%O z-tawNa&nbUTRE0+I}V)qE%$ZJ$%$f_U0oYlk__puANsDa_%i#P!NhN~+5?#Vv%9*w z?A+ZGZES5<-QxThfSb@Iw>_NXxFI4~Cfs4E4@Wa#cxFGNg(%ER>nPw{BjrKs@BeP> zH%t!;ASK6!&~~k!+A6$}#7E}RQJJq{2%=YsOx}w9OJ4>NG@VI3REo38rO{9$<_vhm zncD)znAo|eR#Sjzws_E^Z&(;T^-RMk7~;MBu#GN)3ZNLcU5oLpy7MK zZ>5NM1%{k1Bq6glJm~oD7*7&&%kC^Jp&%zTN2qOD&wK(6^JVJqLYtJQs9RdAASYo1 zro+8mT&_*;K?>)TI?Eli!Mr`D-}OGj{8x z$Q8-I9TJcEQ0mc_S^EqCL6UFKyqA2i`Zy@P89*|I@#RkxRm?BfTdqo2Y636Ef-+;7 z=T0#Bbh*{q&134!?-_5ON_2-1#>ciz9}+&W`vjl){9Yf-RZ z4RJrd4;8X=i&B}cAwgMx8IM%@AzUW~cB~3&vCTB!7Kj7Edl{qC1kl#53R4tB^KgH~ ziCG5rgzPmXR#k;3|0P}m3P<&a4%c)Z%KrskW*lS;pHGDCs1tHhl<{(G%^wQ)<})+# z2(HZWhH+yMYa5SQ=8fzE9;^$#Cz?C_ddEj*V%m86+~fd;xI9xFIsE?4qQ)NXT<1Iu zawlFfOqR~(U|*0Te}WAqrkP+5>rTKHf)x>IR3%^+&xtc+DELF_&Tvqwmj14K%zNE< zPGj4D`TlA_;95c9Z^a*%3xZdvkkHC~t8qqm~ejP7uxvLk^tr1gN3G+?v^v`Ia07y}X 
z1ZN`C8+&aaU|hKWTF=pPuj5m!v(yuJd2UgPK^~BuG{kcDf9Ikzvg(pdrutKy{drX{ zEu+D%{X0P7ejGS|SbdAvhl2D;Kd7cNCZCq9_yz2L%}7$#Uj)&WVV_~-Grd+U5>A@V071O2Qt zzoK12d@iM2Oz$C}et`dWm6ET=_CuWM%U{7kCUBs%%qjQXz^y}u7=M79B7AxTVCNj` zq!F(*0FhZR?@$2*Dhsl6DOo)y;mfYsbJ%w-LOqMi1a;fdWzvbd4}WkyGBD7{Kh{dq z7(`GPi?}}U<_y35ErpcY{-0#XW2#MKi2B_(e+kgHQ@H1eqJ`4L#B#Fp>=)$7vax#v z0b=bGPNKUL&-yKvYlJPv%CjGld(WmmQJGdIb0g4|G`3wnTv6GpfDoA-68`%_eAf%A zGi2q>d#**om(jrx2yG~sC=8FfxhY|HGMd`_W1R)M7uyK()XPE04BoE~HCBQNqED)~ z<7@vn@+pAs-ExnJf^C!1udMz6YI;kaEYQK#KG_84yO~4c&!ch3=q`5ep{7go<`BT@ zQBGPbjY_}a4tJ$7h!6x zvqYm!Nwvrsw8y#Hxc@CrvUxYgV@US+pg}4)hXwOOTKnzcI7q~HQS=CnN7q@GG%y?t zQ;C<1x39UI3dLHE^r}*_HiEK_u;BBX7D$wOkpiARgxt$Ce79}jpusY&?+qUq ze28l=+gg1d&SXyf29_%?m>FxJo;^bH!MU%lR(EwkltyOgUhB+L5W{HLeS;449MgZ& zmfhaFO%apuJ*ZBZkxuikf@2t5xI?g&O<{+8ekYQDbKZb1=0c6_^6@2&1=}67Y>7W7 z5s#&EV=Q}b!*7389aWeXvMDqB5wDv0TC_W?sBQ6xFYc7Se(LBUA4%yQa<4n^=ve~A zUWnsc6Ge@`ia7pdrYCLGqt{kM?(avO@~6EA*WT)y+FCyoF-7O!i{YY$fl7_gZ7_&x z)lMo7m}>?@BO3o}{UC0U{RP!4oxwrjoaIAPzLg(&~1bAlDDS3`TS>wak^N16f^qY`L@erKcDfKET1^?SJ=}zldITTR=D4 zjQq7ng{Q>0OOxe_p&U0Z5l|>FSJSekK}HE z@!Dp#&%2?5o19f;v%voK&B_}@sEiFNBlk(*x4G4%Ut`_RxZ+xLYMYr)(eQU0dw7EY%t@+&>vzTFr%B%0 z3Q=Oi1698AqhhaioPOblamy8p)&K0Y#FMT9Oge9#+HiPdF-D*DV*;E?iYF=L zcR@!6;O}N+Z~{0jbV>m zX!p#hfixD@w$Z~A2anE>&$8&@LaV{M-_P78dIDHqU(cTFmfV>3p6b7&rE}`eW_t3} zoVX4@B{p2{Am?Dspv8#0Qs2N}Rf*{gX5!XKd5|Z625fzUlGpSRMU`2=ynome2~;h| z^BB|pkm3f4Yw@}41;w3amuA+p%9Umgw> z5Wu&;uSjx2!(}AA(nHpF|7Cx$E4^0td^(ofCYd&D34T|m{ghY!F_2O|Qu z{V!S<60#3ZbI6(zcX6GpoyiM=(&cHL1XUC&#cFCuYp*S)ysoE|UTdfZ**7ga0^fiR zzUGZ6zR7xn+dzX|XEp>lVDtc8&u2G_IescSniT_hb4%BF(QJU^)TI9);j7-j!O1Dc z)t((MVP#>4iek0_YG0b5<87)h4@piKLODtXH)ZO+3D?(00>Gg>RPpo1{RcxWTF2n! 
zWe+T6@01n*cy3aXqRZaDv>gO;KQRe%@y&b96#~nR|9qLVhRO4xH_;PWR&haAR4B+( z0M&=pf{^sr<#d$(lYQxrvvGUY_!=G6v!l-3 z#97`+n`zwat(?JNmdT1^g45w{s58J$nPP%e0rC!Kw;|=w^_}doX~mFz>Dy;#Cp=&O zFoM=yiAye@3$8EnLKi6`c@2NB%D?U9C-G(}DJYCBtUQ0mQ)K{kiY`yGw9*R!mI-W8 zLNd{+Z&S2BqyHO+GWqyTW#?vl!7lgsHrw(`-fvJIqKO0yE^IB%udqYNauFl!0DY~R zZdT#HEnU2WJ~SZ4L79CcmIKf$S00vcc7Xsq*VE$26KTSLnL$<=tJj{K9%~vU zAj6$nS+a6icqj9;KH>TDSj;P+uH$Yy>rIlky7>AI zd*MtNQvcQg-?@A&tT6GAz1PB`N<2@WuqPQ^LT_b>#EqYqe#1tW=(Z}C_BC3}ac2|SOtM}p|w zw)L(R5j3W$0>%WY9@UeKglGK1_fP$dse8C-nX2jm!=P9%#Uze|M785erj4Xma76fb z?1UasU4%q~#sVBfBFRC?mV$e{N}s2Ly?&<3ys@$CVy!yu{Qv5kA7*<>M#ygfn@3rM zpNJIDQOvV-obaZRp=un{apU31aAo8nk@B0g{%evl(ADtD8Yu_o_*0y8U%L^l2syQ5 zPFn&OGB*a5cn}kzCme>a*;#!i!AHQ#w*<qbw~kbVixRx{OTIE!dWJVoojJMXiIsV+vw?Sk5DE)!P0|}3D7pCsvct9EIFa@?hxs9%!uONL zC1q86q?_KA7$5*iHs9VIyV`~~D@SVJCxQG__W_T?^-yBk{#)0ypHh3P6*5zj6s^xO zVx(@xa^DaVy;}hAQprZPK}*z5fQEYz2r)@Wnt8?^r7VwV%myU+xaS}(ZDMc$679eo zk;~Y%IA2n+qnC~tV7&CkS*5a&)4nCT`x8*Bhm>P`YogjHZUn43o#RH7$iC9fHWAXJNIsu0~p_L~{MB&UVsxVSgu!XP4t0%!-|3HsvRXbkhbD)>Ygh|m z6rKZ=YQQjM@55y}cW`f2Yyv)E_U`8(-!P(b@iyBLnm9gbL;>=zHbx`K2mnWnl{A94V6%=vJG&xf}tODRq(y zXFp5^(!PDM)$y_z`POvgnx7)tQ%%|xti-{$!}^WYdPNxc(JDhuJW-X$+RLYuMTgoA zi4oCVDeAjeVNR;X`aFc_PZ96+tc(GNkEkf9R@Km%HDHvrbAqZSk~b<^o1qGfBpD3Y zvKu^CK0YL$jF{uMH;xbq0}L2pQYiDL!>NvvF{U;o#s5TjZ;TlHc`8jbCKm zw2&t%$57nfp-)}pFdP8)2jG=5Ul)8`4arz<5M#m0{kP-qIn|-$4d-p+T1ga#VFd$J z?9D&~{2CE*NG&j2ZVe%kLy&7g=eeji``M$|7qh?)^mh$RF9wpovmRSc)_+;)!s{DI z`oUlEKLv8bXCThrtIg)!o(omS03dVvm=qUDF*5;vcx$dfW#*{xOTmv`_BLm}ZF%`b zKgYHng-`%4SCRTIkP^|1cEw!W=jQNGrq>8JoFcAeE%*d!+0y+>iJ%+VfsLx_;=F3?l$;5G|=fP z$B>;To((Cfn7e;A`~E!b7DkslUk`#5CpTi=CLcb46o8%hsu(cqp|v4US1Rxk{FK+9 zU2?52ZRs5}XFHJs&dDPFX{+^PVnqT~1O8ORTM<+ep(&lz4@0-C048JZO?0K5GcDww zC}@E-IZI<@;>Mn)>f^YGP-aCy2j+#VK4a7eF-Hqh{F*cO0a))8iV1j3y|nve#6_bN z3NNuM?H$00l|cnq)v2%k&M2Ko-2;^@xn`LYZ3PWF0***VoB?zzS-Ek$-fP~RNT_Fr z%7$8iY!?vy006eJr0>7VQII%_AuTy>>%CnVofV<0x&#M^>La_GBL!gz|Zor zsamKm1TsOHx)*?X1?>ZXS~uagUwj&$cL6iJ*9Kk~6-gPeazfF2z6dW&^i_#E@ETpz 
zpPEw&1W+zxa)ohRC^jBH8ezjRjMM=>3*e7tv9HJf;jFz-!dQcB3*-Ra^7WTT!)1J& zjCKJ^MC7Fh23M}Wdu5NjLcy}Xl%F0BBgIn3(dhWUmW_mD!}om zY3RtEtpG6~^xZ0i@4hN)P}0S5nOPW9{-G6@C(?&_PlgGgC-z>8Xfya+#5vOrsP6{2)E291Yh% zyyFY_@HRWbyfztt&bNpnRYOxVEHP>$k5aWM=kg?@)7}iKnZjeCFCkG~vg5V^I9c~0 zb%F_x=Oc{!d%f`A^;6kfLyWIbzF37@eoLRvcpW`rob5hsM~(Xz)knMn>x+7}JsWKK ztTF9qU1SJasz7Lux|t89%to|--CX@6o{`EER348a&8Zd^)g(YB{)3pa5O$yX&Ya%L zJ*UpfFNdq7rRQf;p?s8g3nqi$03hSg5-r2PXEY-SKOK|=%Oq+p9N-S8BdJ#kSm-%n znYP2RdkBW5eDUxL_6hvBkzD5|GPjfKfy!Q`6K6+wHz}#!r5_fa7pT$uhy@VnMh*|=9B*Y5PM0Y>=Y`om^}2qjwi|mD8Kx66#Sx7+(iD};a%%~A&VVfW>Rz% z-?L}GJyAOj;_<#+UXx1)d96v+HzE2Zx&Ho60cu|}fMtb@ap_1>0S&_!CEWDw+G$W< zx(xzp2f?s&8OQOZq%{H&Avyp+fU@&d9%#{7a#?i1pQSu?0d#_gJhzObJ^{~ReVzWV z4zxL<6FqQDhuYVuBBVd7L@HMRu-peQIb^9QT2TW}cIj%lALm>Wn1($ouz+G+L_#ka zLoWcTl9$x>xOE9Mw*DYL3z1@=-UrYt>|TJpUU+wR7jX<1}Q z1Y?lplJR{Xg#yv)fQZKMl2wusAgY=`y_4pWl?%{NcsV9XRyX;&IDvKTOHn z;kWx~FNy^^J2~sz0fx3K)W2pvBT2{EY0=+3pEFu2B0+{{gnQuVI5o{ zfNw}iUC<+y@b6yY!Cc_xbuUnX^Vjd11yAm`Ny((6VkH}10b>nH@TZP zN>$VooTwfGtZ!6{8SO!iyOdBcfXfo&Pi>admP5*+e7h?Ldq8&L8Z1i$^bZQ@JfTw` z)Mh<_|0C9Dw(hHwt@L~^s;Fptb|)qw`C4;_2K$H~0(5F|&Ls-N{KMJ1beBkZR0J>( z3`?&uSgA%WZQAMN%D4Dwja(~v(C|L0D%hVSy4N_R3t_9OT4+5-0~Nuab8P)_X)F31 zed|5(_hhhe$+yhU&xIfK;|rmJ5nS994n|@7c5E- zg7<%K!SYbk>o@PXSLZ+nufFO4uGoF#R9TH;^VoLOppIwEaeXw@Ol@i5#-F<$wd%=sb0jLZ*dO*OtOxi%u809&3SPN8Y1Zkp*+Y zq_d8BLF!A1b-v9tIdN~fV~BGDOjelyE>0fs5c!ejo*;pXitSp?B;Bt{4UTM>a8X|2 zD87I1rz+i#FO*&`l`e~<1j4*8xP0OTfsf?ZlEOVr?=8*Ugi233Je|FN4|vCECVwQf zAm(83x9k?!|L`rmH7&*TJ(M-W=_HzxE3e}>3YQ#B`sfN=>GXDat_3x)XW26o^624S zs#jNMoha!4^-+saJ3-H2VW0k3A09m^W$LXKAhNTyw2}fTL&2r^5T&{ttTsBkMGi}p9-1>!7yo5l>FbASkG>dfnS!sv@$Ex&&e)UREaayMn(4A^w1ck!(Fpe%hBtpgG_uuRSr zwIXCdr8Y7Z?j|Nsq@MBr0j7)q1;jEh*Y+=xBd*;Xy8y<=U^j~IIQz9?O|4jsUx2Q@ zzJ5}u`~1A-UaN9us_nP031@;xMo)Lk9})DDX<&0GmLG6LGx^s*#MZWe<%V~;TuO5{ zDiCj;@3ZO!HkhB}(T0L*o=|@xn)Do1=Yi<2ylYH>=1&&gQd2jp zU4bK~9*%jf818c-W!a06wjyFkV#b0(j*8}IqN%^ycZ8>Q|11<(*ibKd zZo)P7Mw2L~ryH2cAMT_NuU)#e>g?1{e%~`P)AH=p&FH6n(E@O=swT-OWybabRRPej 
zsKv-WL4OW<^8LKKs< zlAdCKt_2?L{eIzbLuQaS_32SRf1*%vOw?ql_HfGmD-{B)_1CYZ#miL;eE3tI$MNqJ zN4LA1?;kqX-)kq^WkJ%lwNZJZqAX z=I@T4!HSx#^OrMP=E<4V1&u4ViPVo+W3AysoEkm)T1VX5>&0eQcir917jxt0wP*=g z=irX}?E+Q*bbdF#66-U2g3{8VuUWjUq{Wf`6=W-4%efXjkvFUd=}IQx`#WsC4rgJ@ zqaPa3qigG?%(H&jI9%&-AKwz*9sBJ6zJI>g{_4q6uAVyMRPKh!UAO#5!ibwzUv6X1 zWH!)#hVy%=ALQ;-%~Rmey4wTMvASLuKbP&PR$|Fiof+D0escaix14*VeX&?~RbB6B zmATLf@ZvnPbaibGH0`n?H6HQ;x9D1SXA7ocwevR4KeYu|d*`|-{qEWh(vUjxu68Zq z3F4d`94tnxA=bNa$Sbers)Qm)u!xI7FW33@m14gvyd-LN9~w+ zT}}D@`RrNzkcV_N`Jbnmg-m}IG}m^5rhWrg zC{^S+!#EYb@lm5frAZ|lz+U9sW#D9*WidvoT78pa{!J37Ld=R+-j9LsW*9x4O-QMh zqf|}5Wc-k{zoFsiPrOSG^Nf|9c^tk*gO#Uo`|G{x`3RpdfIeUAK`=BjQsQqwtFstw)-Oro}Y7$$9>QqxoI7EX4ru@eS{0=F%#S~ikv{pQ#gz!IS(s$7|P+s3oZ}R_l-X0QaPBBl!H*kzPHH3*unefF6cpm-zhEh0{StPoKFtpKs_hGgx@CCG$>1 z8@EUaUdPDv-90DjwDZGyf2>aA<4=?INb;TBVhaij=A){uV<($>-Yqo1O$0N2peuNt(9)q&-Xi>+Q)u=W(@lyq@ENYDxcLev`~?dVl1y*22*dCN<)$Z&3!2CDj{5 z2DR3Gp-+y+QHy_(v=3QRq z<~}iI%@*ttJya(H*CEY>r{e8&ERB>}Q><$@(_(tOfbggHAX+gRA)=Mk6y=tVk;Y6t z8pHtH5Og-85&Yp#)Bqq-XqOKb)f5L;ZAQ<9OaS;`alx;?eF$Xr%-Io)_P+eJw?XmZ z%F$9#*V_uTkK1w>l8yYM~IH?()?>T$k_7sg$eDNN(M1+qVwXB}4_p-r_ z;-QgE*;Lmst>yFamQ2Z1N&wZ*pmC_guOq(ydilESImurG#7Uck!YOYU>hP*^ehmj{ z32=6mtyyV_^!vxC=3Qq?riTg@fH($Jk;;-J@B%2Z^8V%QW?s$)yaxN$vE{!o^U4Wd zam_C6CuoKLtde$#dgwKI>S}lnlq$L&f4p(&QS7rogN6r1zg@{3zUS=0WrsbtxXrjR zil#;k!GA{Ekss$=#H_*zb0=-H?!g>lTX8$eL%+o z$90n=$kLi!v-s)6eEFvT=gEqvS8m2H(xek6qpLI6_BUxc*_#*q4$*xn<}VMy;lH)Y zCn&tvIPa5A$Uo0%Q+&e@0ePZ62d~ZiyPYlK{p-E*p(n#ai;3p9x@{tcxaQ)u%gMyi zm1IaR@Aaj$GO2; zDf{N0wOeo`MYD2^y~z(_+tDXE`U~xnGhVmL z9B=u)z&@0iY6TOs>sMR%_c4@!_aRD>&2i?KWV?~$Uyrl=V!SEUrHuUABwr`Pu(d8a z%IhY>?U@B+>Q5Sr;GU8p;f9F>PM=VzLH11`_63mJl6hA!r7Q@3-t_;0O}3iOnrB)S z1K;R>pEJ9>8}V6Y``-3&{AXd=mHP1n)!-Jk_0Wv<7hs+G7q$qS5(o}>!r($pcQG%I zy4)%c<)6l%?Fk=j`d>|^wBf?w>7UGe(X`bs)z5Pe!Z2$WzgIyx#A)qBr`gbR+N$(H z)qiF$*R6%>b5_4(qj`Ly)#u#U$^zcg&=AjU0Aei%oq6>wa(_tS4Re1k9qz>S$)II- zn;6wrQXt5@*Slw&BdpGBf@8n(tHK@@RAH?aZ>j| 
zlq1B2iW#|Wx8ZHsI(AJGPfyUdg37HeCJ8|0AJTBHv*~G{Up+!&DZO3pvYAlz8+up| z41Z``{a_;HfBq1WHf#t?JHnGFA|OSuy4ZM3{vww!c$lBD99=Dhp6g83vFekkou|zD z@4hj*YWBHj@j5wWe`@c*7@fl8i%C6aEP5=1jwetbW&ylr3m|dIn)JlfKv5{vbo?E_ zFpcwhpHVT=zWajWQDI++3z1iL{q$$K!5awv^NExsbHbv|{fUrhC`FtI{W<)V$87vL zD!1kg_QXEUPKy%R7ld$}%OmeAK~8J>S;T^4vr*8qyjYnxUlCq~2q54Vse`3ySp(@# zJWh(y%`Tr_YH!{d#*uENa6eBi?Oa}xmVYHg{^my;1CQ8gMuLw<2=15%cP9>>S6#0l zMIZ=~tic6nZhxrfA@KNT7+^$K8H));MYM?#sreT->LnLZk+PSv_fYci;u(8oCMhW) zE~&AlMOK-q;$pFdMMRXMem4E^cjd7^F-~5yw5>S@Pwn@Eca4EpG`fFS_P5eRF>DAN zQk)t*eg+3{s_|=B>-jhB5&vnEN$)(b`oL;ef{013<)5*E2ecz_2*4ebIpq*lOK%_@ zg+H#SEX#?|Y73>HHFaOj7+_YqPn8;)n+Lrg#i9WAhO0m!7KP^)lYuV*;4ie|8lygO zu(kgsjK2%?5t)X*DTNY~(wJEf{S(Nt7@u#?4c_da25(XoS&s!y<|KDYM*CHd+?m%4@o^Ra}F2vq#KqndYX)Tx{ z=%C_H)RV_L?U?vmd@- zDzFzD)S;9uH#|%ZbNAvcnI>W?XevaRZ3ySj(=cKBW<`{++lKKXXjJ+@POoFW(5wFH z6u~Z0w|8W!GDJd1d~SXlJ6FLTW@Z>jQ8e6X5`|E^G#70qHpTo>FNuw9e6E>-~DrzKdg zNY*XwWw7^9E^+C+wsXK2b!0E0et&D7J-8J5gOHL>0~7Y z_wmV9pG0rwY*DPh8=-_7py}uJnv*L#*U5p|m|{O%`SkDnsN(P1kHnue)bR#V`Om5i zeB2ARgdiE0oeic4ZX#9!qY`t)adZRjJwZ>V|s{>Vr_?Z2?IQ8-E{qM7;$S6+=CTe+YKhc$m7^mxV$C37lFH z#F7m5y;6qx!rr&w0G{KX!KgmFmj)jEH6vtjIF@)G5d;Ez|7wHHUEJywBU-Ph&RpyK zS1f35CqzAt`*Iauk&cChBA|`&0(&&skVf>B_i8PqJ(+(G4Gw^kAjDv`MgG8 zYl7~Oz#Sgx74IrdRp&r%;kC)FlSc_33jL6iTfe6lZMYCFz$kE(p$LQ{kpZ8n z*XUwmw0qP0%=I=cszE7wc=$0b7lI+J25o zh_8l0FDcKwNZ+Y4OFjbdX5QF>r@okZv$El_z8=w$?WT!;R`%Qu;wX{%QOjVNmeIh# zvIbcj4Y5lgOKK`VP=XYGdgO6%1lsp5YmjkU*L^R)j5j-w{Bp?2K)S{pJAmRkw)6uP zT<7_LikM`9@-?=>>t$AJVx-Re%}44>XRcU`BG$T-% z^m9p=tBN@)!uz3f!u8{f&lcGrm(J{)5AUt3gj#2Y=8xWU``fy<)C~;im)wn%N`Z~8 z$H(e;b(9c@9CyqO5!9*FE>ofoF;QdlF3#IyCf^gsesff#!d3s`S*Kycs-G09O~kl` zQLA@yBEZE|fZx8%0UE)1AjldK*(hO!vqnPAkwbg{N05E}(_|zP2!<^MPjDokF0?%@ zcpW^Avp!)D^wd-lVxWAIB!5@yH8ll#uY+_k8?B{|#I|ak2o3AvOqT;*71W5Z2WLGL z!K|KZsHebCM*fOsy4}lqwsM_FTZShv@MKdfonpZJ#Ll2n449GdvZ0~@s)j_^l16I^ zc}<|3Kllr+5pbw1-sqB3sCQb2aPCR;3Fw(Q$-g zmLZ$s=Ph=S;8ixgu2`au=+W?Ner)1O0t&@~Y zo``V)dn2s3zI7HOy(&pA|(SNA%qFUC=U^ueor$N$Noak_pPT>J(%t8zq)#NT%K2S 
z)_ylxne&N{%OTc$+o%)X+7_Rpge$#4E8H1WCW6HIAsL~>^nyd&{yjfEOxXE#efN{A zLQ4Fjt7vY90(RG@brjUSb**?{vjtlG!MWw<$(sVkMzOUZ7c`ny07SY9&=CFW>OS29 z3m#MgeOD?#Drfz{chbU9(Xq|lsh3>EK>zdctvfiL96lA^P@;sJvequfIr67M0GUY( z?0{!Yt$k8cq_W!A5cNq{P)d6P$3E++zcgirXhc@g#aHQ+Bhb4{Q!3irQQFQWtF)IZ zZqdb7M_3vYs*giUv2{`j&b0oAytkr@V*2k=oa7Vh=rd`|Sl4db59#{lM7uDaBL`TU zEZY)8#!! zIsF{tTkSFT?Vab_h{8SM>dor?x1bx1Dxh7^A@yCAvM(O|7=2-FMJN%ZYNWSVKqM`2f#9|mwYqo3wpl3oKA{~F+NU4t2tN= zY;<{x;dm1-eJ+TD{H0%?_u|&doT{whPnC*2#}*!o=6lb3aKO?VZ~zK|#?M5R*u0L5 z?kD?69bm$-Vrad*MKPWbp9&m_DYfT^W?Ddaux7e^ifB2CvHRXOr7gbvCY71Yu1D|< z@qE6MGwGD}rK_gg#=;c11br^f0=Rsq{hEK{x>#4+UKw;2FS(t-lAHg;ZNy(TANJ+s z+S-$}a7>UPjTpbV9kt7v7jC7`&3 z&=F{y4w6fRM+dXKK1yN$7hP3Lw$qm^N#7}&AFjDW;#fF{BG>dT>-dILo9}|zr^pj( zr05W~0>(Pygguj_Rg)2=FyT}!I?JI>?Ynf0mxS4$Z_Z+{K31Ug8#eZ3BIsif8w7%= zN1*2+QucbZ#fr&9!3~wmu;m&!&p@!AJh__oSV`jzo27@KvKPoX?)~kL%M$^I0Il2^ zVq5N$JR(VbgxGH#5c^iPEd#~Q$|%}gnEQB_sE)@k8CV)0hviGe;}TNC9FiHU7Tbb* zp}99$6uwg)hAELuy|#$>m}SDQ`^AOr4Q)@h*B8Lo{u@%h?`X?sQ7%;@iYn{=6pxfl zzx`D(;rR=?io_CvC&-y$nUF=J?#nDoL6X*vweT9$d39u^_&kz%@c7>TH)qj4qyTYy zM!0-CV71?==K0Ft_#5;^8UzF;7P^PXF1iOLM+CX`nVWWK7^~E5 zRS^0-I$_Yz?fr;Og0a-?Af%(}jc6!Wd$6Gj@VO10n2>Sujvg|HQY)EYA?D;)?W9Avpadl)xFOG z04p~5rtRhFw7#Jg9YFXd_C^IJb|wMRiagc}VC5xHsCJEO zh^~OIyjS02ELMBl#@;f1kq#6-~5tS;Fsl*@n%*dJN;8QW;BuxKE zp}QRL1aveUjCV%=e0vy{)26VKUPjF9SJi=Y7?E)|;ZloK|9{D%yl&HAaS! 
z{oUXG`UZ1OjQWZi9SIGIvH3NO;q0RWviXLcGbAHdw`3+|u=}BW?;>QHbAUrdb&_^R z9xYo6yHwx4vGOg#XgE-g2OG|4gdOCjxNB2h!Wi;=^t7;vcl4d4pfJPp6K!>->pC(w zuvF2yY;V0;CTY?;1K`NGy54tNr+{;Y#QU~`f;|q6NzwoqJjZSKuXrD%AX-{RLVOt* z4+NhavTnQNNFSCf7(Pr%C>2LM_2)i+*sO$G0zIXNT2I*V*!H~qIfBkt>+OEmHn*=5 zBWs)E^TukHA?L!Jp-1DjEj&4$kgpt4L|Dc6wy<|&v0SN?Gx299!~f3>(zQpjhc_Rk z%Z*|bn+=7Otl`t7(i1Kzzbj~ad)sp$DJf~r`gU!wUD-GPeOE=PZcj%O@~;6l_W#Zv zEWYl32M4QgE!|_D-|g)&{^eo_vklgy$2^Vwev1l7U|s_~&M-a^Mtpt=`2ZYB9Cit= zGwi;`zoX7}FSx5w>Lg5lcp;-GgV3;%R66Ugq{>@CU2i62YL`X1ftnmTwgtUm7V8q| zeqAY!4b`R$GO5tiGD@jVRG38xeiSrLUHDsq@0*zc8lJ}iD@_%Mo_fzml|Nzl(<1Y~ z&`M??{m8}%(n)iNbxcLFd2)tC+DPhnOuraIKn2YO$KgNt-_Q`)wO(nMZ+TqTW(^q= z#wg#fygc}H#AiKj_6qU8niv9rgou!_PnOT~okI_q3-tBSXF>+6`RTcBC3p=)lk&jo z=;*}ON6&|3Q8+&45)T=>9?n97ja%PXhs%wA85Xnh!YVjG-~BdYWacfXIaBc4?Pp!< zf)&)M@@T`_TDg?V(Ao{TUNn7#vWSJ;KhzygJ56U)SIhs*9getkpbG24@xsBj-O#CM z{pu9v4GfHoxQLo90^=2;4Y*SH*Wu-o;^TS#LeLgXaE=aHd6UWbjD@=ine1VvL6C+A zxoq07z{-(L^G&3L;7JC*iL>)3K6t=g?^qKFMSDBgRx=8MPyw3*9bIT%SQc6*V9&-0 zRy4#DE&FkGg2@WSkxK&yZ#{Qd+qCC1oeVD^#X{Y`8GZhfY3VUx_AACOvOOxpOa&XV zd{WZa#Db(dc(MPPqmB**474cW;zCMfs``iVIdx>-YNc)b2;FC9Ojb-4@GEGum4r|1 z6qGE%CGGgVI7xS?VY4yCf`T0?M87g!cW(#`dOoX43zH@Lwxz0u5W$nt!&EK1UBhQB;HT^>ny3={<%E(K|yL1Ow{;Oi8D`vij!`ih0gXbD2 zk}_~)7_+`_Adj}k&2bT#QMWX7!@uWw;LwtH1Eoq~ z>WCwd4FoNo=gU~ca)NBLeh}ArnIWQ-suw9L{fh$aaDbZmvK<;x>iWn=mv~SEO@Mz; zgsLs}40Y5Y``;lm*r?|;ep8@NRr|2wL*l{P87iC}Qfn;T!IM#B-?5 zcn@zs$9)U>A}#pi+4oL;cJ@nqCnWHFr626>19Je`kw$srJkVZxhV-sZ*#qGO4!Ks1=`PcF*soMNKAtec)A%n0A`lf4jk;vdoK zL(dM)b}N`|+;2Icn&{7MoQH|f`lOcn4Y#$sHS6kzfjv1iM^;>>){XBt+;$ih3WSDP zMk{NOyz3g}$HPmDzm=F*_Fa2&U|y(Q=IPDjcj1cT3{sQX=g0tPYD$Wu&^HtBj@#SD z2jc#fEI^K!HB?3j1Lk{)+Qs)ow|&C8M35RrpQ}vb5O4tZT|4sBxFpWCqUPEzV;n@$ z&EVb9}~-(Fz_7Lfjadh7UjBWmf9O*gSD)WGeFCb=$m;g3&WJzxo$Mu{HtVyVu96_l#uqKkhsR1+ZP4XJH*`&<`m|UzJr(odQZTapeTwMG4Zo z58mMlGXe#WrRe@YBMLLRr8;clZZ4h3940n#ej=U8TkKrSN#tR8Lw@HOvpj%yR4rxL zWlE(ME6huhn&eAo*8!w;S^k7p8-VI$i$_p4kCpJ&E0o~^t54zoRc?wb=x}dEy}$h$ 
zBDa-KWAALcej=iTfaeEY$Ah#!V93bMh(btpc?wi_RCFBE&o&3MQNSsI%1JjCc~JI6 zZ%|B0h-Pz_6%|@Dfx=Cw33{p6F{533y2x<0AY7@XxS>RbFVF*=YDbap$BAzEk7O|U zHPw!2ZEbBcH#v#ha#e0`CgH@0=LYja={jATiQseN;?rZ}YpL0}SRU=UNplJ?nyFqV zG+!{oc9N?jGC1>Ax~iR6DI0689k!O4`cG7%Y3N^{rN4D1Q`Bv*A~ph=Wib|0w-tMp z`$7l`3Dz<^r65sWGSFf?y-VWtw}mg!sYj-NIJCsk&E-2rK6QHISF~eqW4yoKJyf@$ zpIv0&R=c3q2}^OBYO8`Vn3grN)-gqa$`)b=gp5@nQ>yf$pOflHc${o_eFXV6T~f_m z+kho)U0P!*HFZEbDn^HMM#LvHH$;CHq!}_p6`79=gGCK3c~$BI8-#9}M$86Y z4T`QO9xDSi;?-Xae?lXeA1$gQv(pE+V;N2vG2NxbeAQ(Wxk-5Pc*a?f>3w8)0%eIWXI`o2m-sd_;}@-X;<~P`On*%iUAL zC3B_jV1NTWuU78lB3n!E#SVlp!R^t}6&=w8(`%^|lX-7WjS)UH+-J1MiS0n?WtKvi z1Jc>Woef6oVzv5AJKruy#>vs5I%>YJ>O{`r)nOn<&`5{ZNOzO|BBv7rTiUkL5!iTR z@Rrr10`7=K3yI1JHK*2K(NU@dYCLr;D$F@8A39DOq7fg_W`xQWP(DsrM?B6CC#Mt< zHt>pQ;EUcY|zH>c~S8dpR72C^tcu#jwuezCYl*KRc7>4+8Q;y2noE*X~_%})A&7!K=*C^7)_dRV&HKhlh5w< z=VI|xT5c|bEussQTUvYle=gpfD$Rewz$DYMpiNx@?f^vyNZE}$9tc5Tu;CvILoIP}L&vt#UQ}@q7N`L2F596r z=PF791zaM`z4A^t%_v#)%~)R$a+O|hZI{&rn@Xz$!i22fCHqPqu;kfo05x}_ zR6^PLEVQO%^kWGxTd1z^%<0d>>yS-N6q0Us?75`hF^>g$QG}5wfYnq-wlJ=cq8l?`&E9ROuFZa$@gwpyP1f_pW0i>-pRDO?ksIN~9#5 z{^L49;iPD|ZBBP@9V}BhW5DBCdEf_CI=##UU#%kp#>~$6>04IckpX=C6q>3J5B-;E z+kt_BuZKo9=JN2@&b#fDtVJ2IMZNQOaHm&tsKl)O@O!v$zfQBQrC6JG==Z^vIE3np zRk+nNNHS4iJTC2Ef0wEO48L=zRS2KLa5}QZOo;Lagt{&OB=Q+!R9w&cV4p0Y1azJv zZrtmmQ!SKI&V<3vfSjfjhcEh>T5Fn~8bJZ+vs;Kqj*_og|4QQ*u`HDSmsEQ%U$9;| z-}kA#>V8n3!R@No6*s!evk~R8g=4f3-Ws_@zJzEw7sH-h2qo-$V6(?YgBiVtZHd^= zqKTd&hSvws6i!ZN{$AJpZDV`sZ>Q#sP$pNLCxWzG4KY>){W#>Ee~w@^DO>f4eD{MH zT#xD-XqLo`7);n3i8GN+X~=aeu-Zi4?Z?Joxx#G^?f5i5OyXXyI{DLO#i+%H4JK3g z0vEVHXsy(!j#`0sAzQJl{yt}uRsQM zkCsq+&AYOtj~D@TU*nVw``A+vt3mf-i%n={cHE^^!=eneYwGaW!LW;kJ^iteP5E$y zA~oyGc4k4;oN8EWX;kZ&ey}26BX8%@peVQe!mR}@S&~f}eF;YcA)OWMRF=3)32-y7m^m?^CvDTT--pN77xPSL>{)V?05v{fvv zm*bY&H46WfFi!pXnm7Gqt8w;HHF0UuqWgz3H# zwtP$K-s!o%6(9I)utQ$0z1m^ew9@@VIbHpoL zj99f{^Miv0Zq`Eu+8yRWZiFlvo^EVuKiA(McH*)mg!=T>K6TRVJ655nzOs51n4!vY zT{xZsxHuqX7=FZ76GJ|4rf8g#NXhUgW<+Rs{91O+<(~u2*7O}$nVl&Ue{!EZ8%E&Sj(3M 
zT>rYeCFh4~36VqMwVt2*=UO)u|Dx^zdp4V{#ba?(^4?ONW#lRc4q>x*(9QS5BV9V_ zTet2-4*>@j3vKv35^P~;Wp{UO*AK%Pd~+-7LfUp%HdG&xM>x?*UEWc$d6O{LXO(~? zB@EAi7Z0Cf5SdBsly9AT`1?u+o=QLLck3m8vD=mkM;rvLaWvnY{anJafo)y3WMkTR z*2Af4nKQ)$spo9!hjBg~B^92#)z=qT3-lm4CGjVqcXd@(6r{+%tdSH~d zGe?)#WYMpkIBrvgen2-ayx8iqREIR!dH95Q_~40teiI>oRrpyXc0E3-mJ`jFj><+s zh9Y5zOL2T$y_qO#&WO1TxJlVGv}p|)+Ezr7a@ebays{kCLtDSvVkifGu=C@$`%fNw z)gghttpCT?Tfas1M(_SOgfIg*)X*_>3{nyb5(6mRF(@&Vv~=gdP$DfL9U_u4fFL3< zbO~5UN(<7Vl+@Y0zn||p*ZJlA0bE?yGkfo6t$V%hd)4B0Pda zSekPdi|&(eCOdoZoxmoWG71;C5#bnnhUp=3M8!9PA;Zyt?!t6k&n@h6sKLB02Cx_> zxH-blv%*j5CmV<1(Es$`XHsrLDMMgm;yX@c^~DUP@KTLnvC-AKZJ9v4Iw9Lw6_UZt zLyj1KwJP}@zpR`Y!#wA6U|7b%W~oc<+mE88Q$M3yTa8 zN5+aQcH&hK&n$bs_?g8(y-VX71Dowqb!lTxmQ8z!as;+guB@DI#3nP1vcS!oPO2$K zRLl>mlbb7rb3<-5nWcl%>ZSIN8#J`J=i9ID1qf;-YbvUq9mR@!fM6=m#1!odNHDY zAYWQ>iVx#I3B998DqIK|n35y?>;}@38$A8!Cx;)h7!8sgGu~1u|B<~JJlDm^3EET@ z2}=Fz5Aj}zb${hY0i0eaPg?AA{D$zGedsij zp$=HD*TFf%D>x4{)YfH>B6R9jrlnNJ^J_<9=(fL-daixcmzuM~i*2TIdfDrx*nH#W zpXGYYu-2KtlI~ zcsm7g40_hl*`~J3qw~4Rcinw#`3Pu*2!44+{IhmwQ>ubKLajhkC72Tu67~Q?^GkcF zg${+&bOlT_v9C^wszN*!?c}g2$2YhZ=~E-6{8|eOZz6!~& zkKU()PJMkFz9eFzNDC(!+T?Yscc~cW;p8Dnd-?Ka3_d0Jo~2n@)N6y5@*1-?Te|cQ ze&RE{)Wywyjx$dHdf^-59+Px0FgrV!+>dQO7Q(4$W`QjuTFsJPl;e?Sw?PdHUC1(- zJ0EmUCR9g7bjI^d_s2gYWmZnoV`qaq`cDzH85%nhgtcU$N+nDtp~j@KuDK^63?b%g zS@@tG??MPLDGNrsxC~n~efU6TY8l8AJkn*uzn4Cq+6bjieNh?>Ri3S}ar6MbyVNK* zDB;XZ?&$oE1L40ihi`$b6B12On9Sfmsa>mtSx$q@#ebW%v4%Q#iRF*#lEpY7SBtxE z3W5ZKN05|t29_y@pX3OI!e-)SQj>=PfohowUKtbcHG(f*#Y-1Vr_Qm5Hb+y7w26z=lQ!dTmUL22=@ERI*{qf@R_;r8A&otH@z7LY(`au%qJc$F?ESM! 
zC}kRkN7nsYwWP893zzWci)nxWhS?sU&r|{)Z5N48VcL2E!DBZpsj3aoUl>X^?^PQZ z8ac)YBdIf8KXJ+*q$cBmp-rgF=}P)yuSl5K*?aeS`R2tK=7m@67mmOcGawschbKjm zz9uq>r17o*2|0wmB-SfDT)NGG!2Ck5I_Wfa)+mShvQJ?)V+s7^#?9QJ39EjDVFW(O3xOC!CEW`|HW*uRS{WN$01 z4$A4`1Bpxo%4{(Ocxv91^Oe{_(w6kFtluRUN2ZD@Gt*pWe9qV(p5Xl^hy}u#=MIcnx{&2H0aLQK6M5>P_2dX$ejB3f>9Kduiav!6>iKb?eYEAMGANHT@(z~tyF?v+V=loXj^LDHu{Kk`IhI{&zH{`CT}^IuNpC% z2Sba#dG$SVp({Fl`KtBavT)aZJSJRW>!a>;Ccb_^n3D&`7O-#wrl_$(2MX^Wx@T$? z>iZ&=$q6*vP)y4Syt?x*`fu#!?&@rTt2xd}9fQ-216RV|sHayV*^Cv>KXu$Ezw*1H za`fq}i3m_vENZ@O8mG7+!m(yvhlBFUNX3b>y64S_( zhCg_bB&tYaD9McSSmwYdt? z<6_}9M%9KXY{Te1$h9%FD|h~U^0LgeN`%$`?eVnc=);G02Nw%5&3}kYk~_J4lWvpVtAP5; z2Urr&Y%s^$aC={0?cs;zG(t7B z8S#$=9PGU!hkE0(R>Yq6W|YUK@-aK zM8XX(>fAVaNi;}$gxqIBe5}7Y`M)1Fa_|4ajALWlDG>H)?Xj=pA`fdlgSuw}}f7i(ZKec3p< zO7x@qmI8~mW|LS?d}s7o4W?_D{1}68ymfjYY4uFdD<5Q*K>8Zg=Hz#iRIKfaD(l;qdP*3hQq?qhR9QXKcYb$xjvA;jQ&x9~+ zCo=T9>b{E;zV0`Q0)27ODbw6jQgP>BQ|L2nIIZfoi+jbvdp-WLikRLif>(oh&=vh%c)aX4 ztrmps+GzgX^hCSV_7l`|!C7;{eFn}%`o!qRc0DwIF>eAZ3Zu`qaG%=LDOeMn9N#yHlhy>TOzf7As*qid36SvHCowX+)!# zuz|){Vx}zSZvvB&ZJ90|$s!%<9^G>_tz1jK>F0c@zx}6fpZqarG{Eq1Icgbe)}JHR zb}eOX$vxiyETr`YI%kdUcH#3K~t%(jwQ{x&Wj( zn@?-wCZC$rn5Pp9(6E0cuTJ_?_5w>DVpK%2{Qj8~Jqn|DbOx+l*!P7bu)s4@qCf7m zK-dCTs2uMwwg0LOJFHrkG2emfV58atDIWv)7qf%jUS?mA1n_FGcydJ{PRyU&->5>C z!_)GXeY=Ekw5CB_H*>clvS(_x0LUGn3vle5O~|5)gp%u3uD`9 zJGMNMh4p6V=4yUahjHCd{~cH`&R~Fin<58{YtUcnLIagyPcHZkNq=1hvN4LbhVnzK z;rVAhU7VXlKa_9XCsMfjqj2l}eAq`3`UGCsc9Z?rZUJ0O)$?d@CbtAmk4$Aku1OF} z99mxgI91C4o-kJDDP;bE$i)3aq+U_G&(XKU=UQKY@55A!6hEP637%6muc@S%lSNjZ z*u;SVG`uxc3~5hh+VR$wU^|(xV?-&XyXAwE`AoZ@sCwJY$nQTlf9&9$b-&POL_-Vd zTfASm=K=|i$Lho_k4T0zYjc*-iK%9qui9_<7Oj!|+8}NVZ-#C0i{pq~gzkH5cSMlY zW7cj62?+t1X^zWVPX&<-4W3N)XT2Uzo;>;bv_!R_Qz#~6AOZwQZVTa4w52M#2NC^q z=MUJ$NeK6k6k-0948eul$g*>5gfFkcy8H&jDhJvcE?iO#RffdT#9fBE_cKC+WyHj8 z_|FCMnTKB-?tOBQr)mF_zpD~ho*L;GN?QN@^V;cxAU0ox6p_TG&`7Buy(JC+X zxbWa(PaT~@+!4pxt6Y&kwWs1Z+R)pPynpJ0QSaOsACvN6U*A^CHzhNm>#CSefg!W- 
z2bPeQCxTx^MrS3s1+){M$ha{kxgNYIsz-F6^xsJ>vC~|g;U>J#ToRB>MQ4=nR*nz) zlRHm-wa5Nmz}Z#)ACwC{R&ZYr z0h{VrQ@A?ayFBh-pF3)L-zoflmipJ?0~8j!?dq5=G~ ztam-Z6sLJbYi_j8qGoi+7KDqwr0QM(B5EDL^02UPKt`q%y6iAe2Jb%(B zJ-No)HEek#egc>yC=~ukD5N{jARaVMKORaq31AR^uQRf+P#EV}N9zFP@ zK#n`+?CopW@ZEkvhk|w6-!Y3+1(2U)u?>a&&^vD`H6aOrKj}rQuC8vwd}>b+z*M5b z2ua!u3jmu5=5@oLy>t@uTYT=7@f1|Vp1>P^sm^1#{6bN``@Vl~&pel?3`i4s=d;~+ zK({Eur=?gaiB_ns3gYg-koh2pIvdzIiw5Op-|;uLd6=yFHv%i_LGq);nk2J=RX9;E zs3RIm?!A>)SRZW8U8Y@|{#@=`;;p%Q0JoJTX@r6+CFwdHoZ&0Nah{<&#SU2YeUkUW zgpWzbCwQOo)C?iZxl)#l&>!GCGisQ|;Nzzi+R&_#(b2N6yb1b90?fD*p8}?BmxeM> z(l_VW%_0ztJ`f-PFxuH;^@#gpsR|2?i}UJ2I4n5=B60FhbL_&|{FQv`Zs=~*b+ocRn=`P` z=Ee~HC1Kl6cMF3N8@(DC5{N+o&4xM)_SzYgJ@Vokk$;?YH4|z~NvmYiDN3p|@x@<0 zEp+3%ijTHB71txucpnbtg14d(04mh-}9SQx>NkEi&ZuwjnLkB=(9wa81WMZ*~7Z1tm+0vJ&YTJ^Y zzY=4JUTOk#aw~C$oyTRC!^~m3$6>c_cbt34`20@!NFIK5b=h%sE9~kxTw%P)?70UW z@^}|J2r{x-0O$kLfUQGz!goLNCicSKvfjdZWi=Fdp7=+OkB5K}w`~Ui?U+LmV}E9l zilL*!WRz@pEk21D(uo)}%quKyA+M)=j8dt=cipnu+}zyq=dKAmatMc6?nTn$-05oD zj-~;_#5A}%f`u-OGb+8Z9=S&=d7~J#T}X@iD;&l`I#iii}E32lqD2zOMlu zslgt@hk5I+7Ahn)hor>hBWtx17KB51@p`CWq4^mx=-PEm`6yN|6w}l>!bi}wn(tb;{w;rE zc$wg^do-wQE#2|(9)<;9s3wjY|BNnuK7xPN4lWL6k2{vrX8E{YdAs=6yIvz-r)v{M zv>Cd7g>y?w>#9Ni)*+GXi5NmMd^r*0akGOV zxuN^!J%2u7LBpbi!=?le^f)VuK9Vejx&>vcK&YqJH5ZI{o zS?14o^;R!L7%{9dp;m<{D>_5rW?iy|0E#rqYy9^i|MY{{u0Owa1->1NNLP)daEx)@ zZa-Oimx~q~;z&T7h~q?$Noth0DI)%T2}8bWTyIUw|$LJz@xYZbPgjnXVfS;w$Q)&N8a=8p`NTZcAwv9d`68u zy@w+R_w7zz9E}=wjr{X=^J%O^a&C;K$n-7xoq2l}0#+V#NvIjA5HK`(0pFqSeWrD~ zoEog!nZw`;e=go*kvYcmRK!q8FW59VbadwC1sD+?_i+}<|Gpt`Mv-$u=OH>3EEI?T zN<9BCkz=L;MVHRU+5L$0wUoh5*TSV%2RBq0tE9By^D|RQ0N?g0Ns=5GX89>RCcEHL zR`ROFR@lh!Yb89{b7P)G4$z?5Hq!x88wbr1#BmPdH&~g+vcexQ0M#R+&8!!wxO!}0 z*XVKSPuhgk2YsM?v3HE#?w`fKKm9w`Y~duTcmc(45swD-8I!Kh8gSs!$W!K};5(dCNPT2UOtOXXi$g4k1ZWf!C107W-w=kFF=* za4Tz4*lWK65j@a$d}1d?mfgxaePZ8>s?3?E`A@uk6N}XvJnhcDDSXQeNVQgUED1`b zlH4gZOUnlqz@E<}ENmwy!&SkO+hgldQ|_@yBdsNrGRQ?QE`w;j;0~j$ zc!}h5VoyCigYLKP&(#7G+kA`PrwdhpXoQR^RP)s28 
zyzdpXC>8WNz-ul0F7reINJKmsk;;SsZgM7mers^TKDr^&(zFC}#3re&(*43|~aqj;X7w zdzb4d1||jj=sZfqyp$(I+jRVSfW5IZVjuA%?NuxCK*Kau%LT3#p+cSPuAcRUA)V_r zeh3vOTX9~SJznm{9i1kMh#rBn&e!S2ue z6~g^JNm5OfPiozLpod{wV$q;d@oKqa$(m@k? zlP1y7vG4yewE={_sT+xc+e);4jW%FD2>|G3*mm{q6Hi)bj8JzT<4D;U;I@iE|d4 zCvz85t}O^cR(T!Wx5X|WyPwbze%>FBoj=W=w=T@LJV?tD6^B^OfAn{s9%&p9jy&)% z(4%Dim-(t`(pygPf>hB>8XstSk7lw6x>jujM?4}1LRrLw85t-r0p&D_YF^Mrga_3#r~VF0ztJN|WA;Mx`~Hb7HQEwT9?HsI`cDk3>k zy*C*fphH`hl}B+SWJBgO$3lkld+BGQ#=HNF_5N`j@(W{apusOUc^X=Hi<`as-DJZc z#Xp{81Elz^z>o~~(px;;kAUvmO z?fLrYGk<4bE`Z{^lA$&e) zv*xqoodsZ=v5$*oGHa&W>rrQw4wSKd$^?v9o5XtStdNyQZr{uL>Z`u6C3h8dmM~`B}l&%k;n&R*OOBqXOj5#hZX9{wqXD1k-F>mJ%+l) zKjj6gon`@}aMSTELEZ^C;y`6r@|%ZqQ#~)C5XPH|>`oQ_nI)a$nK&nL=1@uY(isoQ zhgTc*O~d+tL62Jwz4)_jMmj&+zn9hhyFC3dWE?jESm9=M*W)1y|TpIH%6?XfAVGNd_A-ab#`t5{Pu3Rp>fxRB@`!o`I{AiVOW zeO5>!&g5_4d~V|Zb&u9nNf0de$5y}1Bqq+RLCV~DK?DgEZ6wMx$It=NOAe8>OWOo? z&nmzSMXrqoT+M|EWjr$sKw;?c;e?Sn%2SMkUP7(?7*93n?2E4X67xqV$9g4$A#eER zVjtO;(U{J5PbEq9FEP!U?b*k)sA#>j@fwMg=oCJdGvxOXq3&WqT~$t-Cs|3P`evjS zGu8kt2GV!Y0r-Ctam%;La!H$sFG;W2%={{kdv}y@Ic;M zr!k)dapsUkQ81!T!>-4~lC?*KU4_`BgL={>U4u+ckFIM@HPiTr{7*=+bG&1)$}fgA zm&r@(N}fJUMh4yXW>wXqa5ez^H9$E7qZda2*u;joNQP3c5>!2R%cI|z;C0TM9&zLY zahwXIm4p}QXspp#w)d z8a1=0CWWAc5(q6AA>W)2h((%Z3q;UbR!i^wJfBnfCCS$w;UI;5Wubz%!b{j}-dAj= zpwx(eIqz+ms8Gu%FDsd&oB5pJK1ttnEBExM?%43)l>i1Hls*Q(&ZTBPQq6(}wz-sIm4=MbP8U87{#G>6N@Lz!Lck{L8@TF*YMhR(*cV&AaZpV)vQ>hh%14 zwuNJGk00e@VKK_B*$;+isyF46)A_Zm#g^0z6EXb{e$l0w8os%KnBQro(#a#z`w77X ziQrdB@ROd4M}vAV`2Ev3QhUe|xihqE;K*JR_;V8sF{4p)qjcB*7!H3-Of1&8hYFd8 z9*%!5PT4YuADmOT+)wWAQF-ZZ(sXM!#Zw!wlbVOkCW*;3{fMCPOVXTBMq;-lfzIaq ztTsb4{xCz`v59_12!0%-qN_(BsvDN=fWE5`)xw_q6Z5nXFo-Tt^9_`MAn%o;DJvhO zkba#~w_ltLQBbjn{;YzDw#}jy(Zbj>EU`WKLPKnHY8?ub&WIKTnxB-Ht&e!g)rBjI zu1VZicDbGpQrl~(fZpkOrDhCu*JWE+hW;hxhUES{8%bndWAj;Tvee-xe zT;Y7W_Ttgfc0Ge6(XYiGwSVZD4{BF&vXb~1!7Lt1Vr`f>&RkL@0+g^TEB`op z!T5uqAYn=1o=R~bNhV`<2{}6(Ct(6<^lb|fD@g%iR+*+Z3+Rs| 
zV8qw~V}9k>1(FNn&bL2#GKRg=?ZU`&$_4Sfm9gCyO4VbkX+G_Gls2bgZuc^u9RNIFe@*jIf@wB^`9l*APpD`j3g<3y6x_+8S2Tip%I- z>wA)n&_N#ZoO#(m$Ft8=RX=W4WvWdijuEqzJ+fg507eJZp{xp~W}sotFo^Z@^r`GO zH@0M$(fhEcB^v$PAXiore%%`{-UM$@18XkxL;kFBK~bKV`S(>UgAu|){sSHvty)63 zn0L^!e6a^dO7kDazd9wT-lkeNrMZq7nToBhsS);P466$Gm0?;B92D;nFZ-MYq`2z` zk{c3@?T^=p1d0$zEFxvP@#fJ#ET3MFAXbnwlCP%A0f;iP%7|5=F`Zg1aWiy=ZzWQ3 zNBxEGTrS6l($dizu|?p!ycu={{%wrta0I^ep+jchA1}TCeN8E}n5*Gb3<@YF6_Ezj zW81+m3H&b^Y$96K|D5`^P*FcnDq`VJKa;116uXEJ&r1mGh;Z5|?WnVUvw~vA@L+d% z&t-{oJOd(G195pjy^^OuURqMGQ3aBa+UU}2woj4l*=L_bT@}`&pR&+U^MM3+$)V^= zkN++(8Z{?V)Kc{pIRNFu^wN;F**+TA3*cAotBKZ=UA+g$XRlq216|2VV9YB|si;%M zDcFIPD*uVxc_9abd)L{H7c=kO*=KnG<*c38m2&A%wger?@$Y7tc5UhBdZ_zYxR)B-jm-xza05+yDlxm6l&Op0dP0 zQq#vouZi|0zM&w8eB&TfO&V;3I49Rd>%G!ry3=DpSNfLo6PGdi@aWgdrr*s<rIkJ81^!eJe$7RiXqF_g&3A0ET_(t~O3xC%E*bkz|H1sqQc+ zfw=jWp|F^IU9~gbZhbZ$s zBFPmlvRP%DhlwVFJ(V`OEynru)m#iK6P?~881nV&abpHR~6PfLs_L#U+Vy z9!tp+L~eO9IyduA&eZvz=0(4Qw$S|}2V0Uuj?G-q@H;hfl~FQMvkrks+v$hTdG{`h zfzO(Eho$mQPB@ zt*#EOx`A*|?UUh-^WpE)&Z*vZ#hi?3DBXX=5mEC5GW&D{d~IEI^ft^podW7YK-h^8 z3<)GP3?KvktGO4AVtUTSKJh6h@KvttG4T+a@um90{-FJPJ-;sbl z0fZTMm0ARHN+Yjl@0dVJ7!|fTtIfUYn+cl7EILJjJ@5R0kb#cXlB2Qp9^F-Xn+XE9 z9iT~1tnz1JoL(^FN0A+PgkJTGQycvnhAhAv7)4iQY7|84F)zI}MXN@q6}5k%;w{Bj z9^P|ko#w)Uyl{5FE!@S$ez;9_ma1c6f1mb!7L^>K13Fqy>8B6+hJ1MUjpjcK;Ey*c zYg{{DL2|WzdR?~X_x1ZEkFbK0H=}0(tI`Fzey_)GSKg?zxrPf$TeuMvMV9Dfsg!F+ z0@Fo*xf@##1vp$#n$cRpJ3>m4iG{4fkL)@MqjeNdlmb=pLnBuw$ycQ8S10|8&xGDa z3=XPi21-gvkpQir$M4VZ$p(j8=Nq+`g0r6etY7o=ArJ|ol|}AM>F+nnw+LQ_lHfr5KY+_}&(a z;>+!#MmfXVBg-#jGSueDb$q(73~orz7iE#Xovo|l*e*Him*PY zt!y1IeS_TNnf5FhBt<+8HP_AyKd?UK1#`CFSDH}d7VQ?q+0l`n#`70qcw7?BX7E_7&@&DZ~ysGiFNV;HdQCKPUF6X<&@MCzEAiw`N8^Aj6u-Z<-&npV$Vjns zCR+SgFz(b*eXRwkh=5M2HN*0a9Qka)N3D~~W=r?(?FIlUy4DchTZ#%yaC?c#@bIG%s7}G9HQK%mVYforfWQdGz#oG+ z5M(CNnTI>1Oy|!(2;pD8!ljkepZ`rM<7*>dO|m&XNbh|4Y5GGC#brAwP=vY|v#Ani zj^OJ_xx~V1c#+jIb3E0 zAdH_F0pYE*sQc7I7k-6YdMEKrJM?^;awmHpdAd21)l`i%A2pgI8{e$fho5>5^{5ZyEtb(MVRRFr;AY^*SXq0_a$b8Bv#qK=P{Z2D2rG 
zE(yOhGjV97H4B+cE(avr4ZvUFvDOLW>P;n>ylZKF7io2JOt|;9P_FZ&d9?1-7R%*u zV%&QE_Hx1o;wi09q*;L%JY*z zJfL%OK!4c&t=*G=WFfe?|E{IWj9ROqOC2)u?xIyN6tim}w zCj4?O{7E2+KYGm*(CbwLn^Q5S`6H(@BSJuM)z1*FP!f~7H|EK7wR)V`SE>Y; zfYllY`*sm`2sLplCqi^X$FDet?YJjC17Bm1W}`(i9;}Ck4_0ugt{3za( zQq}1|ciHA&*=HI4dlJUN-u30XZb7!t1xgmohS55M&ag>#8I4^Jxb@ali#M2>!PaSv zD08|_rMV8f47)5TrmeLt6-hDUvS`&y1lGr$`!#+;+XPYqs**z~HkNzpMB=^9>R+%L z;E57d`S`#!euuV{i%}WB%gXX5hMUiE4VF2KDBjS)LdeDCUX!j)T_TF3d&<$E?{is6JxZpm`lK>so-+~ga zdfI7(bG|#Y-b*Z09OkVo`3Ir{jYn`6l%<|8{hiMTTOgT`*`G=y&PdvUY>%c_@-`R!>=LpcC005a2m@d5ehEcd& zR)_(@o^LTFlAQn5y}Hv1^q7qqH!2;2U0{&VYw~ItA!tj#5M9yK4qrw9 zWhda|50MZ#^DCrhitBl)C1zLF=MhFSa^oip&wj|AasGYDYV!Ah8hvG#`2c9>Uy-~T zQ4{Vdw0Z$1pF3j6#OG`gkKc@L4u19q3!tWedz~nu0#L2z)!q_Xx*KrCfIaDr_0n1! zvg5%`oosB3nG{R?t6wN;F6?>{C@k!?!zyQ%1UT`ULgLoi(9Z7@VGFVpsjl! zR)3C#uc_v|@@?C+QXmoy#xz)RGDI5v8Y7mdo%Q^tzHVL*Tr;H6kJG@2T#P+XyN0v{ zjT@VGew@E}`Z6acGF|5$*|jgCP7a_xXrphT>RuI#r!nylCSJ!bPP=y;J@oj)rRgzNY@~%(SOpzYo;~9W`y2dSB}J2jNG+LZyJ;SYlKg}p$^&W2lgL397vMYR7p#37 z6(&l7!*vETPTq!E_R+TNyL3ht@I80)q*{f*V+Jg7q5xfB*?i{o_7$VUBB?=3G?^!R zjBCkdaepU=-KZ$XuP15!v~)&MMqgh0o{BKTChkLuqoaujTa4|LWbGl=_P7XVd;ZYp zSaVAW+}@)?&+Jzk)Av^XN?t0@u*MWDA4trlng0&-Cw(jwe%s|1!8_(r$$qfE6A*9- zL%!PA-Xt&bNT4yn*o$#6fA(>pZ!{_6vHOb)bNy+>Jhsn*kKX10{q=hP)M;LVM3vb zfqkbDxG>;^Zp|iQ?o#(S)bi*b5!{B7c`^49qZOvcp`oKDr0SnJ+6H5Pe)hG z&bOfN4!mPSc62BJLG2Z@KZt%10_#G8O@c&v?rV``Zb3-LkS|4uVS-b;0P*P9xD^|e zKqS=8^SZBSuu=`J`Wmm%gXEX>8!C+uII<-2xQxBQdvrJSr#Z+`_^vY#@WZXW#u4O0 zDy#R*?*aLE~p0QEeuvD$>ha= zGB-*q?v_Z8g)W*$KRSi=%|t#4BQzfN7s0_PyXCu7#aHQ7dV9755_X*7(EUv(50g;?lWS`33@ft^f7QmKELLmf(C(LtNwu2k+ zbbl+ip`=9-ar+g7km0u^2$F)u@ZPl~&A%Z6PX}YSjvUqxP%j5e;*hUqAGa zdzv#+wF#uP-Ye303cU_#1+3fWnI5j6+<7XD1V6hzdLX$aJytfp87Yh%40F+?Nvz{W z&SpI0S$o5OZ_~8-^mzF#pFU^2v(P&=PB^U!9y@5eG)Tm@v|?UO{%}~++`jpL4~PMW z`^WNcT=%%$idD}SfnD=uEq)rrCWGut?QGDc0w2qiEUP!1IK@;v#h(S*_O|3Dm!UFBBC& z!Tq$ixk=HY_QWuG>6?Qde+47`ymB$Vy>zD6FdBINbku*rA#Bwl#0qTl_V-6%zu`U=PG5w2a2H<2}~M%74HH@D8(tADlKD=UCk64))7u;jsY 
zQ|x26T^EulQ@rQCH8G5eW)MaCOG=(D5dP!|)hTz2Axh7$!HTs#nzaUKlk6krwC9R{5V0u(jS>`|^R7&fNYlaBRx*Q0 z<9y%mXUA`I?dJ1uSO3U$O$&2$(qvkxb(NNFf7SkVO0!~HE$#tv0T#^MqWgyaLCrmc zxxKCkM0er`KWzjXReFf()EzR!Wn1N(bF93>ege4Fq}*i0kPi7A-F!6dvUqnaFv&fy zm}#@~Cc9Rn8|+oY^KvoA`spl;WDyARwHi4PzN#&w>5B4UTAz7eQjg)ps4^Ift8n{i zk4gxYHr8sNY}@N%jQhqiAJzb>6)R}-tER~IcfhqdNH25U`>U3NyP+4TMg1y zjN-+V*vBcR2RoOc7jq-Sw@<8H@h$7zeBiL5-4`iwmh5{hN8M01|^XuW5YRk5f$xd}?Jp zJ%CK7ff?6%fKjDcXFm_XXy2o({k{l@7LrcT^v5M>rAM=W`=_7GZV38p6^#SdwbFHE znh>Y|wNv0{k01CfZ(O8{c?}0H| zX1mU0-O3=a);SJM7D>{+@!Q17N8+q?ZOl&}O@(1y&8w)9x%2X$^Uv%at>g^l=*~Cs z4nV@*USn-&qP^w5?V?2CDY+bw;iLV*7xp2xib+t=i-@`j`T(fSgXefF@PexoNHuWv zopF&NF~%Rr=6lnag)KtEU;^ON9L$K?_T6n4)=idLcT9By4co}5x8`Cv4Xwv4nL_#5uB|d;A&Au)W1WONNq{R zvLTPj_b4(-2_VWOZb*o`7AHQ-L%;1mR%nX;U~&f-De0C1*HVogxN@3L2knB4la=zU zS{JCV2jYqx(1Ys6wvgf?JayDLyIc$9h)LFA|4zIhGd;sf@(1I42C=3;KV*B59h$*G z5V)@y^$K%sv)4|7K{XhmPqlu(mV@f^{Vbn2*(*6-Vv}-mgp$C+Q`5k@Tb!4_lP^gv zFAuW1E&PA>)~hP~`;uHg?+&DFo?P!?_4|W3NaASJq)s<;jbgUs7sPdu+`t+*AEZLPa(uX~u74iV*@&#Adzj<4%UgWw|)$oy=(33M!gV92@mA}|PiP#=ZRq?P=(zEOBkc5|J3q7O{ zzT%qy8OVp@IHfXY9OWYaNe;vTIs(q`w;o$KB3P=2Op?>g;R7KF_>PUGet59rG@Bu$ zxV+DZ_>{g#VZiEU0aUXjt*~;VtkSC-98dr_9za;0O$X&)uSY9gz^R{greV36Ajny^ zjwb^IewaA)XHpHfFuYqW|k&GrO4GTd8ih?sWly0ja@0bk5I-NZCOO z`o@B|oNg+wom17T({FM3p+u07Ca^-_9+LsxB#SRG%zvKN1r8YWgXOhBRd4Wu-b6q6 z(~@9yU4l4qg8iuXgIIC?ES{Yq<0X|GTG{{^Gu2iUE$Ufi=#m{u5^nSaT>)%iFOe-4 z|4#$1QiIaPmsXMU85#icthmAV2K!!xuusC+OF^tmndq^LeFGWu%P2{n`P=bY+<<47 zqsPLC;fq~IH>lPg|`kll>{26jQM(J147z= z$XYm(@QZXm;?zc9%gllT$M7Y~aP{A2q{>j#ECYUxp165TXO-&;TqSh{j0c&{FL$rx zL;ig$)cJT9_A0LE^B->BhE`)o6tPLpseHZd+HFE12&_xzPU%MZW75E-_eO+{?u^v0 z83krJcyPSkGcm%5ARM@wJc2J`4>*$2=Ld;KcTt|EZx49Cj^>g-5nrU|rR7f=-ajmg z|H}dN^hT!-ZTiwR(E`;m8W6Z4I$I#_DeD?Db)v~NR0d1y?J5#!IBVj{v0%OR5 zihta)f5hXP$*W&RwrQUw(=xO&*+qKc?qra*iKTq%d8}HNV^s2jDmHWmGLd-efX{bu z-TT|^bqp&u8|h7FMw0|WrUK|SJ_~l+yPf-9w#~-B>uht zLb5)C5i}*Yg`%15h+qxSm_~}nC>b1)$pie1vz9A=2}l!bdRL@C6QNV8JA0RME$_}U 
z^`kQ2J6&X;^rh6q9?tz0+eD*InGyY&tM!@rVgEvGJ3|+n{ENA(AQW^kK0U;u?D^SL zBYoJX>ABqUetS6i5R?WmmSbL5sk{uKL$;M@z3HMM9JGgKc+jpSXFHVmzKlHAEzI?b zI**zjnK_|;4?UnWO;nzRdqRqLvuV`O`oSDsv&KJA|n%0m<0G#33Lz`6xyOBN;jIiPHoHv3{WVgX|&Zm+-ex)Wb+n{EH{M z-SW3;y^Q7l7gOiqPxT+aeU6ctW1NGK$acsmB=gAL<8WkjY#AAui6Sd|WJKAUILOZ4 zBa*#$vPW4F_uKdPyC3)6U(osZjMw{mUDxx9zg^CHyE^1OUPYLfcH5^{Apw-gpG$V&2%O9R}_oFa@!myk7UgCi23&D26vtpStVVGW9(F09N`pcK^HrYP4@u}2y zxVN;BGpIP3tD?9SR(J*uKD_p!1C1yHrQbrD{3u=>RlhCQPgs+UUX|q=5R(#LoZL@x zP4;*HyOH01K7S&03i38XLS_F&C$EFGs-GR(5cBn9dqG|6-WIL+ztr;%RSwcf| zHUsNQC@bx{z{iqOn|a+l&@;kBRYVkM)L!;sFKf9aFBHhBHh17? z&f;M69enk#zaL;YR|snPyUHm|d~Nb11D)3a`Jlag|C;OVx6J9s>fB&oT1RMl^>?Cu z=-=$nno5{v`iKjwLkj8J&G+iI_FxNtF)kJM&h7m}mB;u1a}?%RST;$vdMy#TGDr zN(Mr36r^A?fTF_$pH=V$)FZz>&y&5B-SI&5H(IbcG_2ySwpY)fF!a-IA@ZHy#TkR< zqHE9`Fui;F2_|z6^*Mr1_V-muN_fI8_%&G7^aGID#Am0<`D`tz{hDD6{6zm8PJ0xkAoqNx`WNSY_N>@*QK#TilI^`( zf3!q!&kJ{z{C>Gj5q`VV0ZN4}P{oSoLGULfiU17QJNrHfIc)84IyX5FL(hFjReIB- zGilJl9;KI{=VMDw%{X@TF(jM@DGOS6jzATH&-uh3=BXRyhF`zEh(pdsA_98>>}_36 zb&X{gs)QL%jxnt zw5Y=CmCp0!doJLv8$MCH={@-8l~lE5jEq1zSRn|NTn4-K0R88V4Mhi-)HW1{JSo)Ui#+bC;at# zuXoMEiRej5PJT1wcU;-$IK3lsjf8l^TlSSI?P8TvZeE7khfP7Pxieom0d#%*2oNMv z<5}+1r5t!Y&+{Ybis4*7S3`GAxDG|6O0WVw6d>VAc@>_M%rD(bqxUK5PJ>8RP|Uvw z?#Y?Q{wSxKxn53b?`*OXa`n&#?q_tc$5m=ieIkmai%ytny=A{Gy*;PyZ&~gUJ~6a$ zV9^Vzl`9b_Z#vw$^|N}ZIPcKn?w}x_euDfKrb;K!ZtvgS!J>+n1o|!+xnoTVI;kaGPu{ z@IQA`=_)>nq^{+6%N(-NQL)o`CwKf4c#YO0>lSn@As-`D0^{P0kP=W-S( zhc}lWW}^yg7|)Z&+v{$cy!a|5_N{g8TXnOv%W`nP3xCMfr1@-~hPuh~*R6Nej* zn4e!9EKSR<$lPF?`!!p5@9%NviDsE*VKh3!Cvc^EM|x@cpg{EFrNR;2>%9nrFo&}z zjfJz-JI7~>zHq|(*86_(OcV(*r{YnD2kv>UW-GN+&Q; z`o*(-#vP++XvXn0KhgROnla zw58#MGco^nsxA39>!W#>hW#Yc&nLf8(V%8>=+nwUbrN(;aKFD{;p*Q)NI!oAS=!0h z^>V8P@I6|bc>Ql;>DO@ff56>X#%}WlRy;ehilPnVY`dO;#Y}F@UUn7q=b~_p5;_-5 z6M95d?Yvk8LCtqc52ZRJM}6b6Hgybwu#FGuCkD)YDAA`ZG?&s_xkGmgN|wk=v>9v7kdf=Nyx+7`-3`1OhkO9=|aHEM%<8SMhj`m z?x@>ApVJ&wf~_q^rhfhsoYXz8@$q(#Y^<)YAp8AW(2q0$A~OE*g^{tTKR}#$p0%~{ 
z7&TSZEg41Gil#cadfQ$SA^6I$N8|b~=W#y1BBGxzjg5^DkEY5y*=?E`ws5{>RSr@e z7hfJv%}xg>G>~>jG2u<>tYb?`#dZu$OkT`HK91NB`<^)`Q!yU@?%qBAmRI>FJBk!y z{Lem}o!!N1^R_M@R*Wdx7yJ4?Bpdgccy2~~j`q{ILHo(atYXxxJI^P-si`%Q*+i#? z_?%JrOBB;1lDTiEl}`FEEuOi51N5szp9v4gs^)Lx9z!*YJ;N3P8Xs^N_dkkOGiLcN z{nECLl0T`P-;6x6=!RpwJI{O*<|q=B((Ml|9!paY*8p^UqoQZNy@wJM}Wsx4*6_urtWw`;-9l z$)My|AYKUVq+j1Bg_4y3Y<(XA5e4qR-;2?FDS{+p*LMxu*P)ebCG#y{r#Aw-72QqQ z58Cf6G0zZzUl0)w`lh~LrJP@=ylLTC9Yt~#z1Pv@BLLt6J-3_|q!kqv%d^7X<-kYt zfU&2izlq3E#60i`w0PEbx3mO^9DDfVcbFMpuNn9eTpQQh(kZ08HH`c=L<7c^( zLFKsz1!HiWz6Wd7W1ALQmcC829_(7a4nsUqA?$RiznEWy_Hr}YrwZKN=ze$5j-4-U z-$u-5>*^4sKO&4ueOqW6>PwTH^5o65!VPPW!M2WI;XE4~Yov*N{Cdit$#_*QBggiE z0gYrdZo$|sftd!{w=<3SRaGLzqrpQ}+~du|(YlnvBfMHhT{d{f!=`I5$EH`l^HnX? zf5RZ{|7gz;z4G@WI=!fk`QM17v)5;0pSMI9p#1F z1EOv2f?3MO6qRQCRVojgUZ{{J=)C*Hkd}Ql>&*RA?T9$DWLC z|87nvEmyVLsL{%eJ|n|0Ab&m#s*@TLcNe{wA9{(hivIlIV^IZXrx8luLz;5`qg>=? z(*-6YCv18Hhy6Q=8(HFA&+lUh}4&H~;fks8~2}T(IaXxvuY4GfcRlC3DZZ zB(z_zz?R2C8`hS?6V6V2PsyYJUQmdmz%p$6&0bf&2_#%hYwuHh$RO!^Sgt5 z|7LAeMN!x*>fpLyUzXi4lO;g`M@2Fh+H+fUvYty0PhfIgyG2%B88TF#bF(G~(%Wd~ zw^Yn}ICg5e_;xot?d?eU0iCkJ^VOk84&^wk&Rkye#iNAfNP`2EDY5%L;Q@@b@b&)J zc%r+S*+mEn!2xN9&_|L_v4=e79zuQ$F7I1x-%d2{x;y_nngF#H3s;vXQ|ruZ)d++R zCu#8Ww*YeWFbA*{$x1W^VQ3_CKEUnt*^=l6#pq(n9jo6#c=cdyG|Ki*ed^OTi|%hw zPGu{JKXJGRA@x6QQoYwmlXf;>)24aRq1mD+bx4VR`qTzROqopK@ZwfD~OT! zRrI57?57@#tZpq2^;fMmzDhfW->fCQ#^~Yb{v_*$&65pb;IeF=6@8v7&o%=aDq<_@ z%fmfTO~tPBJ&*SIC9wo#WIk{zytr{OmHvc+ZBBOtwf<3DE>bLh&y?Gbu#v#K&RI6J?nX--_Vi3GD{;wnP_x=< zC=}uNG0<-a+3kmNT)BSw8=S}@DCc`u!@~aYz25nVBI-tNsh1QPsGi%{JxBOn1>XjQwb|Lk4 zG})~6=^mF-lB}{?+3SJoTtBzsNRFJ_a;h0BJGZJJF$PC<_;a?6bKI@bFTUoOooj#9 zw0@z0VbSuxyq+1a)h-&!0*_~$#cY(?MA==7#3eMy4aZO)WKqk!cvgT?UQ1B~7SjJ2 ziCzk0`tsXlsDJyd*HB_tTP&bN7;<8vwzTgJ2`R6)a$jaH$DaO!t&&@gu|7z6XVp;! 
zRnf!cNf=f#BqE@CoZqzw;{@owSgrl>d-XFtN^-Z&h`8vvyRJ+_wfs>!(3GrOJ(W=S zk4h-XD#JVl9U^jd3={`$t4nyH40PI)r4N9#J@dz3zP04KwXJP!?Z>BjAMv2A$av3qb6r?uP|UWw zHZ{}`N8RBu!!31dD__M9^8OZ`Kao5};d-kytw;Wjhl+ivV)R(Z#{X1#-f!qm~ zEi1f!;8S~tBa0ghy?HLhLvTl$|5rZ%i2hJ%jh2N?J+J>4b)d34e+9Otp0W*dSCL{( z%!~GLczJGx85Ew`1`_wHo$Y9bPNVJ46GoS;O$I|aEWp6Q zyGXQBz{aoHKj&tRy54G#5~L8}f4gwep!#uX3oEPk?{8d`6+#eI+SRnMAOQiq@M+!e zF~Khnd2ccZXySXzF2xYd`V8j_2+dS$awkL2eoj|`@?=|@H`l_$rK>^*@bbbQg}zLF z-Ly6=<#qM%r2RUW@~k}N#%-@V*G0QfV{`US3C5j|o*;l~-6F-z~<3P6$c8i!a6LchfUTCWNd12~8;uXopB4i63`(tBM+9EWd z0HNPp=31ECAJt9(hXM8^ZmFXtXlLR(@)xN~G}qS; zU7>}589nf)@}mM_qK1ENIW)N3Q9+}JAmRqS>`IhqXP%4x_1LnKQcZ0%zGzP5+}8(kCzW0i=%UHW~d40L27>* zSUC`cZ`rXz0a?(S&kv_Q>EQyv*}UIjBCa{4#x7lEcH=Pkw9lrfDHet2b_w8PwLF31 z?7`?d5n#BXe&7e0eaAJEKZGoxqsaOcNdb+fk}b+!Btt|7eRq7}j2l>xaZA`d^$Q%? zS9>G1-J7 zs$mH2xO4RGox7?iNNFW$zfH|%Lvy;bZ|9OA%p?<7gDG4*KR*rzq=a`i+F84yDuzgl zj%VTZlB3f6g3x$(Wvi=IZ71@g7FR!xhpUY$0kS}np0c@gT)B(2W!xtz$#JlgFx(8n zras?lR*bKZP66(^W(qm6GvPJ`17(JANWUTm_2L=J*K2$&0EKeQg3Pjjl+?}jPoB5G z0$EC};TbR_APNNCNDw*#^o2GBpDWfMtB{mP7!t|%W9&YFXEVqbv*U}M;Kqiwq#P5a z9gh?7V*4oPIJr>+ZhV1pWF^|gjL#^6&nDp^0UY&7s0JEFg7~O5(hNwGivk+I`AL^G?Qcn9b{Q;a{BGUo8A$L1HNIr)M6p#Que?O3Y9b*ZcHu?wXz+1@rOvUUFoFL4z`M*? 
z=KRR};fq7@2GJ=_PX}@#TU`uJf!9HVCM$rreNYJ<{PwN&>Xi8kZ}aG--A*MP41bM$ zLwe%zZ3Vu7^sj_eJag}f5r(ifMsDnMq(expFrBd?$eYp#CwPJC>h~#VN7M=NiMk<3 zyrGR0wo?o{NK`Ra`1dI}9%b~_oa*JO>k;g)ws85q(2S=U7T<_fu&W|vj+jmIADbyt z;@S^Pav{_YCF7;{hgFNGl1LEe=uUdoJsMna;g)XW!mh%RMaY6mD2M1zt7FaAsiP_q z zvVPWL4P)V`WKt$Xfi?~P@)$gbDqD$8qfE|Le5&$G;r^xVeLST+1wyOxe&!M)z!8OYf|lTD{}N{yJeaB-)tHcVrngQKaiYikaKp zX7${^FfMkqCkpufhA{$C- zNi?J7Z#5sQq;nm_SbCSM?(x~Ws}E~YLgpz|K9FehspE;Uao|c6Xs0^|hF$)z7i;%C zul%kHzCd@Ht;*+0IwngNJXP9d)BOtX03z74(3jv2AwIUy3nZc>E(EAc74f$p^Txw| zSAuOF7Z#n{DFAOHE8qvbWU?)aOs-1oFSFEFh(VCdcRB>u!5xN%&}Zn-MUv>Tg@R@j zs=$kiOXEA*{_^x_dCyd^7$-U+YreTNfqFsa9RfO(S!mFvZ@j)(D9KZ)5F>g^e81|L z!!s4(B8zEq0Q(hJN@8Wac&_N7GOq;!dJ&b7Pm1{79Y#9%@W>5W&{+b^H2QO98Q?}C zQtcurUTK591)F39V|#k@5)Q)gzS)pof};Un641Vb|21%6yeo+T+~>}S2NDEv{Z7q} zeX@t&0DKsI8tBriEh{T6Mdrp0X}t_5lP&a67HH@tcQFWjkji7LhMWb|p;K`!xHe#^ z$yN@e*bff%z5hK@H4*6wYXg_%&JXdW@@en}IkmS2+pue(En`S6^)=*0BP=tNw+DFG z#-93u;(AI%%7x$G6J9Q?81nl!=O1d8?bhgn9q4CmIU9@%TZ~5MuqIhY_Qn2tL@Ueg zD*}1^BCgB*Vo$;F?~c)Y?Q0YBo0UYl+U1eymio~S=$TcUOz z;!N{r*JH7CECZbAPM$S>^>xS_|+= z7W9m7?$AYpQU-BAU_EpH^)ahL0kAd0mV~ja6fP84466%AC3M~m6Km+dEBU3bbjGWW zD84Fgi1mGN(DyQc%qKdZ*s5{dbridmQF5w;UeuSjQM7}m1dZ%uMsM^zj#K) zNS8vT`p#S8_4D?g{@*kMck5s;$-oZX+qwv!cHE>kRX^2D7VgR5rN)XW|W{GZVD-epq$#|#`b}& zTqh#X>bD@gpk6HoaQu|Py6!=cQa5FTHM|^|xm2ZeE35OHCDrN^qE0ddYpa6WxSSnP zUqse#HddLuCp<^hnQxo}CncYYNhoqHO1&#-!|@a>QyJ?9=?!mKw3f+dnoNkQJ&W5= z<3v={$JKDem69y}9a)(!q=a{HCJX6<0v&F8FU`~V9ZjGvvhzUK3foIlS{CU=E z!16vOI&Y<=vZSl{7jcnWQf<(8+B?i5c%-j<6ZM>>pW?H6kg|yK&#t$G7Cn>S&q*lU zbg<59L%cQmCQ?bBCa@A~K=i+t3@Go5*L4bOYgX6k0iE9Tt>3Rfqq$W-sC8x`{nexj zN`Nb%_4EpPz(cMAN7DOu{!2SzM2O9AfEvtp+;~d67T_`}p|Kx5&UE zd8XnN>&wdAu5Y93FRj@KA$_$H?b@96lOU8xixT}68P!u066M(XR*^9rnntP_Z~)I_ zW;W4equ>sGb)V^mtT!etNZ@bxea*idpAQd;o7`breT6X$V01ndj z)EPLHNRF?db3$8W)}IF$O5p;Wy&lUSCg~88l2PylV8cF z`54oC*gCs!`8@qADrTsK>M6A>wWh96P}#uGkE-|26umC&>M)GUyG+d!rJ;u8;zRm4 zAP!3!xiIz_-tgR(z;$}H`Ph>3$&{NEmaTWj{OZSp#GuDpdJoNalS4HkRtYrGUGAR~ 
zx_|b(6j#)(DzI$;7;`JC<=wR|@@FioUGY)xndQlJf}q`lxwJbWAFf+sODBv<7WO$W zBm3{w*tSZWi!Z!NlEVyHU@y`DLk8-(;ZDZI zFLP#ada0E71@xY{25#Iz5uN9q&z{@sh_oFmye2@7)6}mkM!0MM>NLCW?JGzl}l?{(~2YO=m-c7xNLgVFDR0$+pmp{0C79iNB23_vR zY{wAPhV(X8Vi*5=lGyax^jK&<^X1Iap$Ondv_;uqvZwbSBXw~qB1SkZ|7tX(lI^ll zHft6Ur`s)M0rw0M0W3OchnfZG82+#qV%5p?>{4%fmV0@zSn?zZPwB5$S99$MS+Vih z+DY3lX5N(>PXb9Ao!-a6i^VL8?~+?gz~LEoFT%`LsfJyi5DD6FXR7$^md};XyC{o4 z!)1NzqT);Ax%)x$sIV5J4Ks~di)a6^m8eJnU+}bHS|I&hMbz+0kxUCgM&ZUrmnL;IzUZ_ZWm;tOjos!Vrz$de% zo;VYf8TTxPc`Y+O#=p;o4_|KkS2;lP=ZxWIOS7~anTQcnqRWfccx!s^pv`b2W)s&z z_N87aa&|+G6pe)J{z2Dc+Vr>|q%qNZPi`=!a=uZI-kIXhHsIN8d5VX+nW)-Du8L6( z(~CGvqHMPY@0(ynW?vdVu(G0VI{W6-HksV)noV+Taaii(E!#Ho1325s>FKaFJbr!a zj`K#bwsf1gw!zQpc|B+;53+jL-a~!c&#h{6bI>A|@EVqfPwDm#PY|78c}T zM}#GO2-)H#CJP(9-bBI@#t$JH4dluQME$vRQbMgB(*yt98IFd2nYQZY`t?b z7zMtEU-?R98lJfCP9PZ)A%iZ~_VKY@yxs7O$}SjW<31+eXNVcX_3&n}n6~7x7=!({ z&o@h6?4(UE?w&mdW|5|Kwve&0v;f!t^g<{nS01&{mqg6{n5X^^{>I-Pb`AM%byf>U zk`O1Aj%cbqV29t{G%yMEIo-5_rUqfnq056I7LTr$%gfH>_8-ur;~@B!lz+dA?)o!J ze8suV2m4_ZQ6M6eM`T@mA_n~)MV#Wkq zNttlnryE*c1)E>+&RwQoU$kY%JCx{o?%*dUeEF;!jQmW3wYb?;xU*HMg>JQ%zB$7E zF`AgL&XYvC%ZIjXf)QY~G@8rL-OKa^4GU2Ftv7v~ANfgS1(bKq{m%M)@8)RncJkPV z6--=%$h@R9lk-qF#d%shqgu0=ZYve~!)K&zH4_h3By&f@ty+#Skw?rZ|7F&@Sfe1V z$|%jTu~f_R5+9`B_E zo>P>y?)Ii=7AEU2R-t9%LQ2)Mm^7*!8L7R~un|SeR+E=)2=0K5^}r^mCx15o^F`n2 zBFzN%$*u0CsP<^s%TlK#Mozzy=)-dZ_%*DB?tj+6Ah8+gJZ@;yx_2?S_b1jM^vUP0 z%Xcjle_ebhJ?Dj&XRfOK&oA}fK>CN5{>EGVb%DPcQFfoSZ|6zNBx%A4(oy$gObmH@ zoEO!^R8p7x5EU3ud;9L6{0B`Iiq4M7?4NOh`reO3&*{IG z=74k)KH(y^h+|0VYjUv2)4|OUU#Vj1hBX}T=$%Xd^$FQc<-xfWZh8v)*B>y_zVKTv zvvMQyA)q5sSp7I~Wc14{8Yu zyr_SB?5+3RP{kcT+<;gMHOAlM?4CxHrKqSgr(ltCF94(Q=NJ*#xE;(^=ADav!?$GE zf0t*Btywbiz)ZRP>zn4NKeH8~H%3NAoBaE2H~qPIC+jb~p-G;TBh|$)6mm_ps-RYK~o~L)o6P8j%?w*TzYh4 zgbJFSAlR)NQ-ssRe;7E-p+Paww7zF)*Joj)QvOHpXp51NcwN$D zVdEt<`1A%V_5Q2n?R)Hc_~Ej!w(%4o*~sY(Kkpzy)HCm2{JKpn|0nFReVKV)f5WMDh;`;v2X%#7zLTZ6aovjf};EBix;-z8$wy(M=)X9wqd zkZTQ)Pto}K^v&E_Qmmr@^Qj523@<1Hm$FN%BE*&k?E;A7g&8`S4;t20`1(}TFM=S5 
zP`6&l>6Q!$;>qClGh(C<6W)8bQiy%ZOrB7RCD9RXl&IeTx%?k<`F0Oii>k(9>7VFF z3VIK0D0!>9C+Z)*DX0*r>geG>J>I0LF(gckYM~+^108arg`5enGHc=97LwwwjJcZ5 zcW2NEXm4?GlZPndlX-Le`GLpu9uWykgPe&|uCGi0wdzylZvyo{76m0JjQcAQS$aEI zU66pXP&WRd6*pSj5s2NUeXzkbf<$l^dQFxWARWqUDXJ6jaG1xh<7X}^T;U~a6p z()MFX5TxN;nU&B(HZ=%7+~*fEP)~Z3-pJuoE6h?nsm@FqAoh+0Yt!S(?iR|prFo}Mb3Jm zN7|rznw8$tnV3k37m*=UF^0+DYyF`Ln&PN9F47zn6YyHm6S7$3RomMf%P2aL9hESs zi*X0!hWxNN@YbV|fk1m(0Qy|wmD}f`o3{EvT4u`Ou`$%)gdOgTXXXEZVA-9E0y7pl zsaXt!H0v(*N%j?Dy)}K^qxWA0R%s5IJZd?P!aO4?QM&d%5)kO85h1?blzN~E#7@~R zHXs=3W`ht)>=65v_hzW6$r9m0MIdoW9N^c`-3AZJT__(b=)&IIV znv~jjYBhA>=7|xvi|lk9Phiozl95DbJ2FvPmE>d!aTk+$TV(cFaJfok;{&rt9L4iH zQu^Gj!&Tx&WkX21N`kQlkAG#cMMXT^OSi!>FB8&*Z`=AfVdjKcj3o7&$g5SOllJD> z7ad9W^6sKf7hiqK;=Fm-R(0UQX08)WD!9itaQ4pIqI8oR7t)XNg7IT98`5yg@3DH! zs^~~rYQ3hg=?r?bLf6c8RtOwGeg&Td;M_D#fHQs38(5^{^s2UKctsYv&-}R$E z%CzrZC4$`>FtlLVZe&i5)O@`drTgPN>`$A}4Rgc62Mr!#onp5{ZhTwy9*KB}U09tbm(%-R zG+B0Ws?63g2VRA&+_MgwQ2ouLsw>#Nv(MXI2_KBSvf1nCiAzpkNFcQ z^uklkpXKg+B)I-A31l;)5P{Frbe>cH9yZkH7sPOwp!ci?lb1ip2Ejo@hs-Xlt1>S@I)X zCq?Gv+o);^Dh^w#RQj?O{~&U7S+HX?`~-{uQ7t52 zpj{GE;3|9+lq(67e!4y1F6n*#2HqyNXJL1f@{4k;K}8dhXX9y!h#xcbk@4~qmxYmR zuCT2&sfCv(?Of9R#05`YO}N*zf4Y&hlqOfapINqmcTGQOvMU7F0uL!)`$yfnp;u2w zw-um;Joh)+mMDf8?yegsu7L)i>Lz^f)YHzN^GYRYJ}rZ zIP#@Il)C@eFkradu%Lic%!4X36_)8P5|&7S0hSRf@?R9%asMY z%(d2s!?2t-#S;ijGL7#v!YN#s|2geg@!U2Et(i>caiutqpuDRW6w@Q43_~U)^gIi{ zsNMX15Kj-z?UXS&pezxgOU`l-@WH^AVSC8<_cW6y3@M5=MPjtAWF12vb^ppBvCpE_ zNY|4WmwM_@q3^L%_=o<@VW%@h<2#VgnL|dkT|QD&J&d%GZ-($*RBI3>zD!r9E*s4& zm`2LYlDjaQk+L}T&Ncf$^hKDKX@%FUIT;DKpST`8tTt)e=0mWC6q^%QK=X{jgF(@q zLt^Hzp2cE%=)j_8TxCfo#IH+Khr0{$f6xaAX#Tn1TI^ES=GkQd;YUe9+yf`^`Hf`n z6wiwlTL~(kf?5z>6VQ${#kGjL7rY7mX!%=( zr;k=HVMh1(rEEe)X^x;$pCF|h)AF4piSQ?Gb?SM)%7|_$_^(AWHA6__0Qs;0NhkmG zW;}JG=zgNHhc9UwQ&P?9@=MtvK~mE2J8g;wYg!;@E{eyC1a6eEE%PcUM>6kyu24oA z82X(N(U}xUaZ8-u9mXlB=R4P7cHe1m+Hol+xNbh{KF5K-Uj4GxrHbS6k=(h<3diRj ziMIDBu+RMxVtn|vfJo?5SuMbJWgK}V|IvD_3`5^)NUUviT?P<C=0-{Wxva=J 
zK`34wTO7g#_3^#zcDO)bbg>6DB2ZakYZiKfLzy6q9kWeT=h#t#id1qdb^egVT4pJLB|d5?KI1pr*o~KI*3Siu}}vYuGMtPEUzhsTz|Y_Ka@; zJ0XgdY-YXv%@C~|m1E_Yf26_;_~=p4Gj|{hZVwHwYsvc&9hIN+1l+U+Y6#yu``eF= zL)bsdnWR(t<^SZX3(epD6fm-$H@A*3bajyge_&ZNB}^y1)UNfJ-ZH3SGE)B;=TBv? zKMR8@_96@VsVwNOgT8R&#LIqOEYDApl9o1d7s^JaYh=9EQyjWSgDgc_MYtGYP1 z!f{S~siXQhW_3(dJ!YFr}`pRS8r+p9=k8-weofQg ziN?l5ZEfZ<`0CbkYurXe>SJ`JJo36Gd?FCuPK$1xAOSuCGEEF@fEd0#li-89+0E#R zf3@4MyLpfUSEZ-{b_k@(pk_-3+D2nMcl&e>*;Og%}CcSbdZe9<% z7Wli{!?@DQf;V0Ed>;%Mbj1}0E0Cv=6fGb*Qnx->M|kT?MFwf+P^vSsINEehhV6P<(bq)7{Ti{7hbT_hn;eDc|KPU!6$HTpKGcFf??8BO>a{ zn1a2<40~F@|0v9x#HGwy&L3v2mbJDW{FKn^)b7pN4DRmL``@DcHZ#s_Lj_zfbKpqo zT=N)6G0mQD07+VHbVmuT5te`}P_igLy_6Zh6^biuh?R0Ue zKz^HeIhHx6Cq~`vv5*4H`QG%5F@gRhSxL;Q+*4pah8_JKlX=N*M5Bdr$-(g~|B*pv zN(j$y39Fm)x4dOzW4S}DMRZUkO25oTMM_@*4X!i&QWWnG^E)?_Igu>$Sfgb3=P~`w zCU-o7w<#DkExbEAE!YjEkQQ z(}JK>=Pwbq2Yis|};%Cwv{4mQY zUTjZw(mOEn(eAC1fX-k-anTlJ6vV?!aRhvS+5+S6W3YQ6^S{b?F8(Y5#WH)79?d%Z zxP#&<y@jB*Xc&Z zRs}ugKqolHUYA?fD(5jSr>sKor)qmAA=X~u>c z#82pn6lk7n)@hsqO&of42*3o_=HuYO2GAhQTi>wB&5nG5hIQD|9Y$2IRZfQ~htzVK zvCJxfvbMMfu>kvo;kkS0giJ)=12OZX1u96JWuE#wB z`ED5KRuhi#sX1YN^ILiw8a+AwSNp#XskXJ#dO^@oywnQlB@D|qm%wUv>{Nlk{5JUz zZV!jgvJk38^7Bt_?B8N2A-CYiV=`+Nwz0?tUu05DFv)Fcjja@fO-UIf)8DgPzc(!? 
zW@yEA8gKd>@XNx*HXb7aiKTcuhIGtIiqh=(K*MYnh?63oeL{uj1C4--wWl-z=fK>Y z#cju7hs*#n3caTc^)ELh5>E8*t=mB^t0@CW80r0^A^{F}9go^I5mZ6StBR5@7*mM zxM2!O`VRqoc+yxRu!RV8NQN~r=0zjGKcQ!wt_f@jL~^sbTC((o&4-pOYTO-?z;1sR z&80~U)3>J(TfvjYyD$0?$Fonj?fQ3^b29Q$Wf_PL3Sk&LQgu$^^CWZ_Uc+~{>T?c~Au_|`w<|9%J`3RMXF7f4 z81tAYWlHvk1qFS-s<~ES(PSYRy$&pOJc0F4PtlqDhmTR6wP0+gL3@Ll9JYL0Bq{Rn zz4f3=Pt>6P>oh9O&A8Hq&vSf_-*No@^_P3@>%Ok@I?wm}^`5^m{J1>s?~*B#NfPt{oawl} z_soWiv8qzX@Ftcq43dyNs~RWVKxP+`vBS-J0g5 zy#v>x)^yR+QfKL4x+3y!5R6jGn{s(uVKH{_p=e+rbwETnay9-H6prj&pwFP!K!e_gXVIF52 zG~fy^?J{=6(k(Y8AI=?fi8J!xVe%_dE>y+yCpi%~uZYfAJRUz#Vl6T$96`>CcK-{% zt(lU|P$4MNqs{&L6zH^TPz=H+K1}8M{mOD#k@^wwzUD7`2U4?1CN$JYI*Gsryg_V$r39F@eN{U-*$W3?GXpAn z#Lra5l`Y-}CE-IvN`;PT4~_5ASNe7EH_H}`87BuVgsLexA?}i!X;2kq{ZRX+ZggOl z$?_eES~>L%k#byh9e~lk?wPA27KF0#O}Q0=t!=6oqU|>N%*Jk z!vN$BbcY?Zi0|{UQs%pMTJib*Wu;X9zsGKjYiEij=|CNy0`#GGYXv2zdqU#&8|J$} znfpk^BslgZO^T~)$hH_CAmqU6nGil>b+PiGaFfLMY$i>BR(NmxXT>t{USA$3#AhKq zP?x6v%^gGc$`~n>!MjSP`V3!#1|GPpTQtGg^uoMKR!sY6m7qv{A-**H)UVu{l%kQ6 z9tMYsdP-1_%BWY}Gt6)=NUbJ{73%r>zcGvkTkVFAUyHZ-RgquHY{9fIL|O`xW%Qu3 zy71*P&njaCBij#dhxHkI^#UN`KRv+DRyqE;eaPsSL@`7>Gz|6+8?GwA7__yuP0j^b ziK=b29At?sY>#|KuE(wfH?pDw3@}1fuosR1NG2sj0k|2%;-3|q<6LquJ_gmUEcLntWRTUC5nM>S5ND`eCBZa^RxkvpWYdO zU>%Eqw?RDn{lqyH246> ziCxr(&%K>g=07+qn?^(2(k9F$*bB{@^jqSwkXWk8H72v*$t-Odp{~5Ya6%Ubq}r@A z@U_MGF9fK&Smpc|D`=N(x-voEK)HP5mHQ1(v(_u8SG)IhDUeh3QyZ-z`HkG_6IUVL zS6m_5`PkmX_SP-?@T#H`CEp6+h;(;~1Vj)~DcN-&N` z7^i6D+F@KCj;ikm|4Oz56Bzj$Rj4dO5Fs4(7Rc}HYKIx-w_3I2N~EZ&w3EF|LX_%( z8aD}a3Cx(#2!3UNRe*@l?jvH0V7`WRCK6zo3*n&)p#iw`dsww^*(b%E&8Qr0Ckn6! 
zeCB&}g?CKnv%9T#-@H_{jK9Onk*xgul@a#6{X5}w2CczI4mA93e9vKiChiD)Wtu{I z<95oWrkTt^rrQ;Ug~Oi}+k{14iHqH*HT~RA`!MvZ>d`;G$B92fc#ZEx^1akeOph!X z1NY`0jAHgSIK>84gBJ(gWz~SY?1{(<6zTp}BdXL9aYc^A%`I05&l~Y$Ojs>6Anr)f zjhqf06H`(n3Xl>Mmw=^%qAi{O3f6Rs%9|4)`1GZ+&5BaV>VS=+1fM z;IkqE=D9dVr9Lp+**4U4ydYk9uS02|Hje4PK`M?Z1``#e6;nUW^bi2qNX9=-qgW(o zW*>ZKdUzHhDGUf`;2I&KBIupE58~7`Y%WgXj>h1f%pc!AB1x*h)1hxl(>&lGCfN0x z)1eXDx{afajA*G}XYj83iT@wLA&(V%{xxG{zLdg9#%{K+l1{cDKI@ACg`WPbuMk!G zkrl=7?c`+fov%t{@E%A3@XPju5aI+nu3J*x^Hef7fPd4AE#dl1-h{S9qtOGl_ouW# z3s%3UOjq2%{=Q}@koDaFVns@bkxki)P|B(}Rty3t*W+VwV;r6-u%qHC7#s|-jI3)7 z+rZw(vuNGJt@$%1Q|uvp$jO@#{bl=tp#MwPL`4-Ud`RqmnTULMpg;E^6|2vKmKQ5FF05O!wp_I>BKQ%Oe+;~I0uNX!#j8y>^fQxmA zCan}VVfs2w?JYY=3PI^%w6>uUvF znnmBNooEDnNo-akKMyOA-xa*MV?m){Gtnki*6u?ge=G0%N>B1~#iLZ)9L9~$9>I@Q zHnfd`a7GwhBhedHlS(TJLo8Fv%u&KVp1n#PC4^B(;8Q@)S1Tgr?5!Z>XQ_ihki1Bi7cBaPR_sapfn;@$ z^6UVrNU~BS3>jg&W$2QZDARLO03`h-3ce)TR<5;8DsgsDnIEQ`b(c21`uvW;gvMbG zA4qe!xbBgLmasd~oBZ%tH$iM2XT%F1N8i*t{6geO;-i)Rq_=j(0W)~elf>KYhWnOX zMPF==5CtLIjWB*y@q?iTIS{wzKeQk;eZvq}7EeC^r>CC%sa{W7rAO_;uV=00ni#t5 zzF7v0_sX2m+FCP1>-jm?V+}?noMF_%M~p6dPxv^NM{ZF?&98xG%tp|4mOba!xL$&s z4+$ZpkO=*HE}M5r_Do@fTy90;X*+Y-mp5_mq{eROQQ2ycWn_>_$Z_To`cVTe3HhoS zXb^6APwj0QBl-m#Wf0MN3GACZH3SJmB#q^&xGvdbMG^pXf(R9fi2bL{bVHCXO(sf` zJ0asdW~4wJP@*)Ea`%;Vvhh0&r|BJ4vtokJwrZpw`W7}l0WzG4Wjd%!j`Lp=YS=o; zrA5B^rr*T-$IKYSahz%tNiBbMMP1Dtl>XLVTtV>JU16yu{udWD{DMXO9R5R zQ8gM2)X^YJQSP&%i=dL@jQ5V*aD|q5BgE}O3NUTUgk}xjPL+%A`E{;vvZ8mQbsv zA;g#ssizQ;Rj5G$Br6z7I0?UKDfeWY0oj>t-$R{vwf5 z=#Ed>stJXQibOwm%jWW;S&8hrD@7CpA5EoFrnqC z5J5MUA+`o#WvM8TbsD4p5~3|Ta(}Db9RW%$xHzWKA|bC>>OMJmjF8VvXt_V@rUv;i zQ;J>#-x+z0W!Me1zr*1asB|#eVR@lw*I9!606Y4|i1z2WK7VWY#Lc6cES8<-c(3m9 zQ6T7Id;viX`%;mirXabFUuu3MvFq6uFJ$Oh-JV*&7XEG!@tI7AgCzbD0O%#+ym|^( z;qc;*=q(|xzI;tfw@-#vNy{aQJ8cP@7Fjb*^Q+!McsU7#2A0V&DyRlzVg;su;Red{ z9*varQ;1{^2sy6u7pomg9w7|;>-AkR>v{;zRg zSP_v;@|qQ$uUGO-W%f3AOxI-l*e1qji1&NDeFcI31Gb9{gS<}9a&M+Cu3?*fXpKz}_Ek+ZqySFYrIM-=0L_Xk`^ 
zqz8O5BS1&mnmohjp1379@XXgdYt^Pe+R$YRVlh=E3`}UNK}(8L#zMavrSG>sOfCwt)HHJ5PB$QDNsEQH|{E)|eZ})PpY+oN;4RCa_;mhI*MK$psTt-=O@XYhvjks0 zVoQC68p|(gEAYtSo~sYE#t>Lf1XB59m~y^v@Nc%I>pA!Ts!AT@JhR<)w=0tMC}gZ7 z6#5zU-$_}!xWBE&^XD}_jQj0UXA)6q$$Ge(^K|tCfAG@R%fBj9K}(ov{QQegiV8Dfc%+OU{iIpuy@_CURmlsjwL*LrR?Q`dO>bd12M2`=`)!&41yo6eJ- zv%Nd5eZ4{2dXeQ}M>5|~+E*0qVsOsXIU<(&&0WmY z2w_zs5vhStMN_*583s=G1DMB^k--QgDgx9)$!h|v1iUO~P6B=O@7 zqx8`_u3{ZG->kI?qml^{1CNFqAZ}S_qHpxusF51fOu`FXuXmrD5S^fQTHb-%DYq1U zW9kp5ZyH+e#}9XilsS>pb+3$m3npZuhE@C$y#VnabN750soB`KJoE2rWMAqeJrV8$ zl*rU7-;c79v=5Z{_d+ILmS(}QtH!}Tf5WHbD0|C+A|iTQQiL=k88Qrn?hzO01JaxU zT>e(J3KZoG+5AUn7G3s?S$uM7}zmdVvh*u;}lr-m- z{P_%w>?zXC|L3ZgRg+=+b%)Y_{1xSe>n>t!!wdvEiNseus%pj`vIuthFDG51Jk_$D z*@7)z_Z+WkvfXb{q_VwEQk_LZW+wXz=2uOq$0w0!RvnzfFo&!}lIsw~-z~(~PQv(V zBP)+5KLB-ZgbpP&5eQBd#q!xalM_B7vxSA>W^0XqslMc2VJdWMTYf0Y+FPyzj<*F* zHLF!|S0FI}yA)!F6(UsGrT4UIwLrM&LuI7hd*Bij8Y4PF}@yMD5WyrogitM-{+E2 z{Flk-?T`eXoa<%$s}C~A5};VE@H`Sx$opzBQ2*D)7chpz^_`;QmIW4sSsYo&k~zJd z3|&qngp9bZQQ5*J@y8wR4`YJ&t}L6RCNCenx=eKC{Mzl7B|#;>e-SD+y_z|oBuYEd zG$;;73gbcc3XXM4A*Amqw754u(g*=!QKDiHM&#eahSbR(@DYesSyRkeD9(?U`8S!V zDZh%FWsu{HJ8fmhdDHn{*4w9QY4>o!ZyQ-I_9(gl$BLf$485ed7q~_8&;b?FsYKEL z3+~AR;o6+bHUoE}f_BT5NUB?2c~!T3fd+w4jMOy^|8b!$Ay9EMLe0KMZ7OSQ7Ol85 zE3|?#5c66p<*O*^qYsSQ0Q#YWjNnhTSfNHXjD9R08*G47PnGk53}qUK2E*|Yn;e6nSlUoKn_roAFoh!7PH|TpFhFJy>%|h1!6R@xWp(9i2Pfb; zQ{M`FM0u_A(9Hj|>6YDa`0Y|E9{gSIOLwlGIgA^>Wh_-*(q`A4Rxou;RX&rgq<{)6 zT&5)D5V|vYEgdqf8uDDBuyjK8%sVPA)(VRoqBkLYh%;X#bdlFgS#8blT^Qk>tll_H z4;cf!UqHWJpm~;|5vCdBKPQ;T8}`e%2}Vn2q*r~Z@|r!UuVQ3XE!sahUxWtYaBOYd z;s3taiFFC=P$PM$Y$I6FbtZ_<=!ShGTvAm>nI;#totp=HMZhxeipHMB92h7EpT+3I z#}_iHnMrn2)2L)yKY*q+{G&bl*p-<}LJ?Qx>qiDE23<7`C`4K2Ebr4ZIs59F5zHYb zrXDNdT~uAM^+jHps(=d8u37aSr2*G9C7h(}%TSwQnTUl(nFkTMh%=Np;;XAy`{7ZwO~K&WmqgC-qqGlLCTnNf|u z@{;tF>z%)9%1if6pBH(V(|z(Dh$@2l)vySL&lM1hMnse@D}u2_Cz~iGJypMKO1yu& z7(3NAPHsVgzDzqqG{S-BibzJQK>!U_t?(j09+J-W$2R{_=AVNW)Pp&zLXFazh*tGV zBAwK`jrf7<*rsGB?Rpyf;V(p~Gd!|H^GB3V)8BI7sP#!>B15GQ}03F4h2Feg~H7#J~H2t 
zLh#_m{vNoww>Kp0lBElOzJ33%t1};V!T4? zS+5n*gMe~|;CSAQ>a&ZsDA2*nrGcDVoIxAy>q=CX510RVjD7)uzTaw5dN}JsWfLRP z^4`ut{6wiCuG^R*9aCZ%`I^ICCt8iLdMy8nSt_7UGu;a3#jX=5csk4(Xk$Q*TM<#{ z6W(#KhBJ;h(@TTA6iY1MaM!!PZ(_c}{8+rXK4i$=(9_cGVPN@u$lG0aiI@(pfszj? zXybk+P@*xz9BJ@;3a9|&qQ6UdxORW@r2C;h`i6H-Zz{Y)FiI3;xIJ+AtJH|sU;^2G zk#e6|zR)u6oR+HC=;o6f6!`GZ96#I_B8pcd3eKM?;#K~vqU!`=&nt|dID$}BSafzX zObf6k$in=dcb;o9$1iH$-CHQGy8|M)TkXX_ep$u51UM7Lz#(l!UJL7Tfu@*i)Wf`P zbR=w&%~uy{%l$~*B&EStP`{9ver5~5Iw1n>&ELC4Nu8^lUCFkvs z`vVK}(9gnrM{GZ}S{o(8L4yH-j91l*0(yMk0bsgRNpe<0kcbR6AC%s>yZ~XtZD;PL zZvR(r0VOT?T^D0KNh-vLHjm_+l}&7xB;8hs2;&|jJ*@v;aa_jxFcbFkm#SbnI!=;w z%QjjdoqKcF!WB z=X=8y)=RZ8`iEW`h({;$j8^(&`ZSI0jA$i@cB(8RzhGVqxltc2ne0K4V;XaBxIk0a zU2=ID`s9VfL2LE8-Te{*%Kb2Gp3a3Pgc~oII$TQ6_xrWrw}%f0>=c;`abn=uD#SAF zTSv+KJ`q$V%i!+G?_7WWUE78(`!V-S7q=_~I-;U_NqeN;g=?8p~LUhBnYXl>PF!(mGgYv~bq%Z*e1 zs9%F9ayVVWi~l}OYQUN@A{UCl&vFTei5);a!Y{HxpzozI~!oh zupm>GE?bH-uEU%lCk&LHCyWPll$8 z(zoZhl4X81H2u)AtZpERUnQ=B2@f!K#JhYu{a$0y_L%PLRKuZKkrp-`iWMVdNevN% z`2aOyF+gQX#rJEL8G^b-k&S0YD}tw4*>Cq!CFoX>PAB%U-o~TuXZi*WEht0Wf`sw% zHs2qM%L$(SPn-w;cEQa-qMkx zZu>grvx}3(S8cliwpyo#6EeML@-({~*8n5eb9%k*Srn*lb6?;Pc2qM} z>~kXHdaf-8-%&N9^(WKD7LR3!bE=6NnB|6#Y`)dVYLI7)k2qlv5iHw=Z#qCE zghW>OgsCawy;sy*B#E{&!eNfTh_F0ZBPAN4Rnn4Q@IfYSHMObtVgpm*FRyrVQvL+$ z8G4Yr6z>_#qxid0A9w5D!L4V}(Uiwq&bLIY{{i9=_gSCk8M)#L>*&)9P%TKk8pYpz z?)l_&4$on*G$5QaVBAz=NUpo(CGkVo=~?cUVat*;Mz+VAo9-isV3oO1exNLJf42Er z$4v|J%7SC+n#LSaOA1{IIGg3N7&pC?o`_eDOp`4I+ib>KrKWJ2i7CY;zLy^uU1vHk ziz1CkYTtC7CLxXBgQmPHDH~z8h%eHw-@hi(50E6N-TOHp(tIx`N*&;+q7kd#@`g*a zKA^IcAtI>&oD){YUI2^M{@8uXQ{HNiVNEL1p{G`%FFa}?N^?{0KTaSwuSCC(6dO(J zj~BUFG)V?X_{`UeR}$~~`pBhnb*Gy|rbA_+1WMWu#4g;O9Q-$>UONMsl&2sY6{2Lp z=!lP#>w4H{jBR+GHI*%NVj^!PdVBDkgQlci9 z5OUx^BJAhI{S|kOk!xYaky!3u-$Re)Ipjdd*uh$@esOG&_W(%^X>{jGp6Psf(3aWT z-6pf#SJ=+3;=MnOdE0xtytRmt)F1P8+tUIiO`UhkX$H<+$ zG4PbP4{PE$m0Votf9!R(zsaTC)n|a{IAw96d$XYI5qJK2?TfSRu0?s~AMfn>2HHY0 ze-OL4_|_(W;U@cjfqBUsyF&YV7uTuIa!7v{c6%l5DTA!{ejk69q)U@vH^opltB=Io 
zLejqn^mo2YrE!s&MF~e}ir|mq>U0>;6V#y3l%To(knuFh)m>BjDZ%daD@2eogq82n zkeWGmB4MP2Rl18l0c=gl<`6eDfTwUi1?;cLRXbavHtPQ$e|`7%{=P)6PEih$wlSBc zLRR5++x*Zz_xsE{r?~^an@lUF*zt{RQ1%Zpry-7xtZVC3B=-tw8K*;GB5u=#qG$`t zh(9sv{@jyhX48Sg)ovj(?){UwTTKrti{5*r+CH?;_h`X?+j=`K6geA;mSU-!^I>w* zyc#qXRNmC|?quU>=ra8J z+9t=>#}?CGJAW#6*k?KI9y{U>v@d@5cN-h7v%1p-ivp?XE&@S=X54Ar`K9E#PL~?n zn`FLqGDO`6>G&;r!EH&~ zv<6ryByWK^#`fpw|E2D8s_M)b3*!IwODAU)C{pC>?jmGboG-uM&W&WF{tiTnNb49&xbpE^7a|AX#_)$K)}ue@|u ze(4MA8!9vKm=SLu87Y<#CvKYMk24=G9(82ViQ|m?u#ih5oSUL}SadYOLyK zo!+&H2EI{X_SSDyX8JLEZkC-#R^Huk%a^nb4U$7Ly816~dsLE1O16&J`2Fem#y>Z; zrT4qJ+s;WnpM#{%SvFy>vZugJq3}NHW%EdA;oRFxYrdqxEKH|0Yd1IZfrCG>S89pW?K&<}sMO%XXk41y^%%0N(9LYqcBV0V3Keghip#E=)nvdtp4=_D1 z`|gto;Za~H<%A(KF?mGGTf>fkbk`-j4{%|7oqJXXQQu$1_7u6iFrtPcZT6y9Se_*2 zicK-y^4evXkfW+n)G9^i!Mhgxsf;RTF7_Vqb-6Nu;5rnX5W3gP+~CMK5IWR4gQ^R?|W$ZE!zsx^<&=4-cZ^9&ko z#v|Od&H55|7Feq@EOP7i@3DpSJvZ~79p93RVeVJ0;!J(?!P`2NV^(Hl(_p!PHTVi<^EplfioxZy97Jz#F?P4dT zU!qvlU(o+^ue=CuW&3x~$Ke)kf{efA(BepAR zQzZ4eOqE9p<_~mcL?T?`tEMua%@JGwydvhtL%W`aErD4L<-KkcZTRy_C<517KJ%as zTC8bH92i||*Ux+@>LBY#<-BQpb`hPqCs-&A0G?`(e6vh#tZWMA`Cj@nE2T3>+wNf)1-N=-ENm*;*KcZSyG5LGhlzenQBj1HAD#E96n1H>`@<|Om*&%zrD3T zz|-NWcnef3bsM;UzdO&(zlJUx6dVzPzMk4l)50|2+$wiigZT)80!ebV->SO?t6^}G zYFH%Sr>LZ16jN6ltmqncqPip7si9J|f_$YPyCX3!4G%DYjwT++mj<-|S;29v+Zm^} z=?iz=)cPr`cjw`iCrn-U4_$1<-vAjAp&nXeBAl}K_Zl!n-KijOOXz_Mf3hl6K9qh2 z)jj>%LDdcpa>Xwwgg3p>6rcPTzNf`vfyXdNNiTPsfY`~mS;#h(w)&PtKowaVII{LU z^}TxQd@s@V?Q`y6?LuaV9gDwk#eGVoH>ZR|k_nyS$BFV;;9Hlcu2e|rh;R}1At!UP zG3*@xvpPpGybrz(X99ghbK40`Tf&j)_?JFEO>|kYe>@*#a2hxYQc^V}Qe2YDO98wHa|qwfTMuIh+M_IO>m zwku`<_5JTvfoK=oWVf+zA2YFzq7gnUYcy(xfGxm`#_7w@F~4-3QZ>SMgHtdF?yFf{bv$>UN=6H00oI zhtphsR3vDKn}cY?^IJcbM2L-?3>X;(l*lqvA;rT4ZbLIPS$g?~)>jmIBpWh3%rQ)l zWvc1nsW-WD$K;EI-tP~<8S<&dUx@y_=7PZVgFPiRf6_pyer5XL|CuNIf18ec`-F;$ zidHm%P!G8Aq{(D?z_f%&QfbJB2l#)49+Hhp?(@5|g*;u#&`@Ea7~EfmkTS(Znti=ge7?-}a!sGXMG z#g>f_*rrj_ZVHZqd?7S{WAcLI3hRJ-2l2sHAKn#Mz1@=T!p0Z*H1>w;g^c~{ClH`H zO4#>I`emT@*7xnF!A#!kV# 
zm-W%WxP3M3jx*v)5|WuxBzX*Kk8s#b=jb(%4QE9wPkc$C=ua3%Wq~m9BHx7PrVX9REkmL6AxZ8oJs!0_uiR27EuC28-P^!~Un+-Fpn)ka%ij%P$~# zt}>+%imd@aQYmvx-S0;+MM*>XjS~3!7X+VLY$Wxd5qZXe-L616fqP%$nVdv>aLlMT zc$6UW43LN@-SmWW7qA0U+{eB;T_%dXA` zRWM(p7TDUeiyKjF1B^Ss(F`~CM+o{Sg~I#Dxj?z(XKIgai!R7woqHAn@1m$GT9#H{ zod9@=3x~P;(lK>*MF!^ID@yc&dlzW8<9$(7RFGs|0FNhn!8Ayo^?vyvk^iXa3o#U_ z0^}0eG($ypwY{pCrPoGsL3f!rRv^hY%Hq%z@r>xZdd9Q9Qh+7d#e-iq!~CdeQBLPs zxi$|I?zu_W&(4XT;I74!PDVYgO-N_nw?((!Fxh?p#2#l#4onmQxrkFF&uk~TyLMe7 zAxw2?Sak^G=u)www;B|AOy4~vq=*2114B73>FeWV--(62pMkh`CvZU3C`KQh1YXbG zSm+knY1dag6kFN-UyZI_=dX+v{j;E56PlgEd-fM5P(gq<(HA_3AuJvsG?)vdYJd+7 z!)i(Km{he2j@oIL_k-3%p0JhLUPP=#BX$s5_q{=LqJ&PlN05VTVLS9X<6Y*~fyf<4yCVSNEgE@t_qD>Ocxht3$jPrgGhPTnOl7cFcME}eh9 z`7_-r-(A?qf%aj0hHTE*{U+O*<=`=u)d{Btn)IvU^8c460sN0)qou$eA_B@X=Hs2UIB7W^Rqzn?YPJBKET=ofd zK~o6jLbpRI8oLc;ZA4$sm~?tA4YoH^hsOOcF#^zq1$Oi8R-TSrzKGT*B49h$|km~#=WEmr6o4dy`SD2W=RuJj z_f?Hp(Gn1mqVbZt4GAom+fC#FA4-;ekQm1t5t>!R^P7qTyz7;DlW5E!QeQ86f`hVc z995&L@lEBvTbkIZ?i4C3QJv(F_157hYVV^}OcMwBLOs@6nBKFtkB{dh;2`cttf$|C-wrC(?UKk-oN zx#KeF+-Fq9hi=VJfYd`6@{f8Or}~XaDcYhH1xpT&3bJ&)>OyWd53TX_BA+C{yILth zU^B8Ah^M1~z@3h`7FFqwwxDNNLhc=|8@I$q zKJjMPs%H5%Xku}!n*3%)Z`Mk1VnZdQ!at|v0+uY9C&j2k+G&rj?01dnay3iu)NME# zrw2#;xYPXy6Fpq_6V>pEHSo;3^$GLYj>u?adA=Nph?S_nyLb3s#q}Q6<}3GF@c1M& zg$ru%$)+bADqi{W^^cBE+=00>3|ce!c8>^)7i$li#1F7KzO(Rf#$jIe-_>!Y(rSBBu&yXkHS# zJziIVU+X_>(5hn@JnmMKyTCJqA_5}&)#oq;8je)4)7VjS8#*ilZ`;57H+02BsVA<{ zcdZZ508t?Xv--eUB1Sld!EZRK=QlM6e~uUrE}BK(;8X0dq1ERiao%<(;34Bv9*Dn}5%i+YhZd6Qh0GSZ z{>2UW0z|{=Ze}HTJax^!eD&*9298jlL@0%~MIK^jt7#$d(N=|MMD#kn_cM`oGi)up?eKB?Fcdm4nT=M};-%n11vOjY|Ji1AG~*b=~;%yUWns zkh`JdBi_E(4C%1P)bg9h96z@NcAajC1~}Rxhy-S&l)DFcbDtjZH_Hs%JaaH>Y)1q< z9#$ZRKOhHefGow{Kd=2`8_vGdtg)hB#gt;M(!K)aa}Jhic{)Fi4s8vf8lsme$P(Lb z+C~ldf{YUH$X0uD=0m0?(GjhP(mN^1+R8CyV$d47&tbN&nB5XD_9$Rk;i)9dUQOjF za6r%t1p=k$-)fFjoY)V|>P}&6L?Q6M0caOcKozR}mWi+QXF`Xs8_=*vhAxkY`+6?@ zd0rLWHCEfPbh{|uU@N6{?z)q)bPmTMj@UAjQ?Y&|qw{#{h5++*^`m%Pug14~2n~&sLXSjH|3>Y~=wLaK6uj8$-@pAsO;-`#S_e~#FI*ci 
zCX*%yEpiEbTzW~ z(TF4jWOyDmZ5Y`qa2xmlT!LBTIdT(pY`-6k)`I#AHaNkXbMd5e*7qOFliw^oL#+aiGe9-l;a9KW#JVS(-c~q7?Sw;|t@Jt0{D!vYC3&u2o zFMgUi1}^+`9jo}rio&J?;Q?w-pv&xn5T%WYr#P;8v0Xy2F@kYL}m1hIPCLt3YGqYW$f_Y_ab<5aL@vq_q%~n zq0gB%NbA5|(|2Wx))^x*GXfug%C$VEk+q!UdQ(n@u@$PQ3B z9Q6G^5ERy4e`};sV^}f>S|7xMWqLB9adGKY%>jBq;y2`RojFT2t>pYw-@YY_MCrzQDWqmm&8k6#0f0SWzzL zI>PUKT&Wtgq`T!Q*VUG$N^W++WqD(Rg`;YqT1O#pMl{_$$=GDziM~Uc?vaIMGJn8W zo3}iHwylQI7GlYwyV6>ReWQZRB=B9h*5@ZtkdXk!pPew`b=xqo@fc;Q1zhcAkeUKDslIY={5dm`KJ~Ot$!Z%9Y{x+ zi7%w1yRQeV3i*GQ>Un!aY`&AJ077_6`pP65ax?Mgtmp?(LDE);oja`P?IdVTl*;^< z^?2mN>m$Muy1)G&j?zse=Pyb2m4LpiMGplaID~2OYGY_Fg>o{HkeM}ni7tf}Pal@n z@;qs6j4pZyItB>*7;2gwF0V!>?FT%7BqQQ&`OVL#bV)|KNh?=61Y8}nt{yCqm&hl$ zu*hG%Bw5=y<%I?3)w{D{y*5wEZk{8N&5y}5UZN_VM&t0GJC#z<`X^*h z`#YDD*vkL?Bhm{|xjq#$R}TKAB|3`_Oo`JzAcDLKwjrESmW^e4;bUV)S(6vpnrb=< z@B}<7KPCU*cR$Ro>&8U{l)vn7Y7?f~WiYu#@4srh7&1O$_Ft7m4Ja0L%DCb7uDiM0 zVdwJ|OGkqY>cCJw#I|XU`MoQ!<^2f{gCDpeY8?7ef>hp9$!w*8)frR|2!HB8$+I>R z9rO033n5^d>+$=%Wx5G(o$j$qvogIfzpKVg5_yUW#Gv*ie7R&E=%HCA1bRr9<<%lS z5cb0bj2`)N`-MD^n6o6S_*F1rAM|bRfE5jFTy*GgLA+Fej_ph5N-&Sk#fd4CnF)sB zJYet8^r+e^_gaAfHsH zUmjsV&%*qwP=YXYJ_x$dyo5SSsMPXdSvlyR;8_Lm!mj>PNF%s26kJ8eI9 zb7~@K<@AL$ZtDJW#*|IaKW#|xJNdEK`@y3;v}r(g0c4VPHx&FvW)&1fzJ7h9^>{gU zX(4DAH%~BC9guLT!HG5E45YI??8;`Zijh=%e$_xyz)>ffQwTxDyR#X#q7iq0zbRFj zUx6kiYGKQay6E0zQDCKs-qBY<5q=4V(-fcTk+~*_w0P;%E}D)hi6Vkee<4_rq!*P8 zBv0;R&jSVB5`S!3syd4})>MMR+Z_B<7Ub|+hrv%Q%c2pQ)sv4{(KgqNyrL9d#SpI_ zuit6tu7YukGs`S0R&xC|tX7m49Fc!l%dH)BW`WLH{x6DWRn!yvEqUjrk{h8OenN-ptq#syf=^WRic(tHyY%<*fizMczSO8y>E+Ik@HsOof_wjACY?y z6B1hbCl{PU15_O#!u0g~yt?y5EZjx{-xt3m3k;!H5Rj z*|`+y52hFo3gMI}2|UVzVot~e5ob7&0`mCiAhtwdeAI|AUheiA>Lmm%Arg@aBFKV( zf2cf}*(@BDi*=+UL1Lv*6=@t05&ey9(7JUqNmcK}8T?%o@Ts0K#I;_|1sf%IC%9}w zZDl0z%{dEMWAP-31c?9l@A6Vs*8Y7R`9C*r{b6)cp=S3gFi8r`SA?~nA0nh@yF zs-OFQx4ly;d1&@U!T$r=AjC~$!cigQzbo{ARSc}cZozkmuskYVwH0X78*X-OHIPp& z>TNDIJyfk%Oz<0E|LHMx<)~s!#Y`~PPrlL}o;K)xAzQ!YBD3>?mZ`LW|JXa{P^=yF 
zLFdl~xQ*+VJf7SSh^0%;T)RE!WD9h=^8Y0y796#$zq_%X_xx_{|6~ZHGx-IbF{{5F zixK_e=SBh@!(G#J`tchTFWOD%C>weB4W8fp>H6^#-A=}==yodWn&8OI3Fe@=-(zUs z7Qb5EhaW7;<#x8;%B|haUCU`G^zHt`9?kJ-(@5~fTx0IvxnCpYvD>EGm#IdV{=Lb+ z-;XMH8Oxkby?cwF`bWs8ZB(&3=CSf)j*8LEy_UCMeEQ)l_rp%=n|59%8k>h7GW}GX z`6Cs1t8mh3H^6wcK-RQe_hr0+z0T>~m5*;dVP9Su{4rMS&+tmvxmmmW6~c4bo~`=j z%Y~48;`4rWM`sUS90K!`3|z&2TB#Yd1c@|Ln*yoevTR;n0jCv2%fR~&q3kvTxv2j_ zb+}#?JQCOB^v*N~)JepSl_&`Ot^L9NTjp$WiR|!{YIK_2Px2v6hwLX_WOCb^4L85; zj}zp^uG~{M%|zej>mwUh?|*9K%}~4g)B^1{?|mtFH|Q%Bb!g}QkjQBU=M5jy=H_O* zKTl(a#PSRnak4#?6VDA^$znnr8FG39nrj+6h(G-%e}o}6Tg!KZ6ZQ5PP8)g>F}}r@ z{>aP@WPon}hxQ*qvubT#t~=*_r0~r4b<0ibT%$el=3U=9qU!dkzDX-FGtb-hE>~sN z){C1TQTOfb{uVuKp{yEq6EpO@{m_GT66evEk(u|m_NB;?xs64Hkh{6d2M=?`3M2<^ zFp$JD*9!k)+S9}F%Z;z;d@Bd>e>(uz-+9;a8iy4`FhEtxHaO%W7{FchdY9Sz?Q4x@ zwvn?Bzh&*`b`1WeslepIX&c-Ude+7b*BgvrPHY~6jcj;{CjgAmt?MU{nTYpwuaPPG zxS6{@OU`be51b9_@jI{ARD!zSI;kgWrazR7hco8Tpl=4U%x)HpSB_cko%o$c)jx4= z-e})Wyd7juU()k=0}^z{v&J3uoN@kE`l|H$f~A$&f|Ymo@hqA;eqv&xxVZ&ngA)lD zWjP;ob6&sQK7oDicDY+L5$Qq2^i&_qWJ;bCf(9Mc=@2(JhN$BDKVeGA=s_MQqVCf@$=#V-n-3UnQ zkvJfo2auK$0i|2IQyK)M>(JdH9nv7(-3?OT_I>ZYzk9#$pKmxEdu%;pueIiyYp!R` zHPB!7Pv zHK#$LuPHwoF)+{6^TRa_Rpw3Y`ez=`!(6{^h$=Z}VseRXRQJ$Y z&`+C)Fr@Ym%zZ-P@VnBIvF-fxmg*}t0>}@fK|Vp5q75Rnw^Gw7Yr9XAk?# z2~4=NGr^C#r<1z84|a$Iw>grkdmU$I_HC&;R}4B$ zwgc1ezCeu2#LhrfttC#j#sbYzifPL=fps*mJ`W&;+_Z<)o*#)tb+LNqhkSlp5Sr7{ z{)JB^hfaY0Hqj5I!sM@eUV9Y*wZv8&uQT8(N2Pw_lsyFGJ8-5~mv5eX#on^3zu(i* zUaB}>5=e|*_q!%7^QgE|$USa%tX;FI6=n$iv*$aV82)0*Q<|U#-xgcaR!mn7LHtAM zO-E3-AWJ+ZTRQ7LnX1^d`?j-3XE(2TiB`|oh>6yQFa0&7hc`^4R_AG>Yo@n{saTs| za&GSU8~I6o8vTB#)@1BiR)3PWpJqVl(sa^Y^x}7`C33t#iX2*-Vy-lfNITKw`}n0u zU~?bE`N9Y}0i0Mvgo@uu1#iI%2v}bLDZM@p2ziL>rLkzO@%(4a z0H?>!J1$Yt`$wB2`5zlh19y*kHcm?(^46Drey^ckepKGuV6$UJp+Rym-p_04;I&r~ z$e63!Tp?T>pjWG-hRUOMPt5Ft2EoejrHBmG;-Aocl7V?ep;p8+9$h)!4__5~-fCPG z@0I|63J)JVf7S2UJE%{eYN)p!)uwIT_5#_j6HC1?f3hEQ8fcbj7T0p~DRo;K!{h20 zlns(xAT&Y`T!zE=^Q?Iwk>J}8QbH%)@{t-yQp)|)Ld*;-!mA%s2k+3J7$>q{b2n4N 
z76l8!#}+iIRL}mv)xX$w=9>GJxC#*r-sdWzbu^)&G?7>by(aFkB>~eCSs^t*`Rm=;_3S{LFg$mLe}Nd%F-ddQF1QioL?zP(WeWf?~tpgpq; z3Q#^itRO72O&Mho;oSc;2>M*9v%ArI7@v%v#(FOzIobV3LL`P|c43OPhjTaT7enRG zu$}8?XJ7)!ze|CsG}5VQjjF<+-Dhy=&&6I2WCTUKk;QMe!~(;@C^kJ1A_pj zfv0rVpoL{rO}E651k&@hH;HwjjYJ*i+vIwh8gyXd4KH0GnnE-jEOn*`jz>?$TW z`|Q!olF|knC#=nuRcNzu^J$+qvIbfVCEAEedX#^vd=gWdb`Ubih|BU`q3CsdC`oNV zcV2~GXZh2!z$ZP6MNx_D5E-rVT{c5$o_e$bN6nI@=af#DOs(aOG;Z?Ec&uo6$ZBH{ zqnW4P8Y_ZHUL7VHrIuE5Q(?#Nce>C&mwMOgN~@}n*WiWA7aHL@ce%;6emGE}s3Rmm zB}44QQti^TL1MU0W^F(|@H{lk znWVG~ggEK2Qu+mA(=14gcdUB1!s?gq^If-}r$+wf6PkVN7rTWI66p|C5O0q-aViL5 zn_c!TB=Fx+8{}wYK0$z01lu_!jfrg#3@5x>yy9=y{YePG9{%REBe5h^O`u?989D?i z(!j28z=Y_AaQyk9Fv$ms3|&fXe={{|zrHJFJ8k*lAqsHXJu5%{qGOvmjU-Qm{%^Yb zte8{R;WAg<=CZ8aP&0f2mMQbV`Mg_KuL8MrnJ?6EDaQud&LUSlX1_m=vy?C_NQ;XB zIkiYx7``ejH`Gh<~yhmaenB}ShA?Aso9*d0^Z9K?9-%LXHQ!dlueV)s@nKy zCLD}J1K&#<#Etugs|ihBvg+DRxDt8s?e>?pBQD(LoekP;nPU{rH_PO5v0oTrlSV6i zNX?lyw%cpSqV|r{X?tzM`G7Mg-A~~ixAzq-(Q|X@!otaY0B828vb`Bwda>S&wI6Dj zu25w6JIs}KX-Vq)AHBv2L7%ScZbGgS_2ix9r<3Un+B=&?-IDb_*qWPS(q+0=NfV)# z$f3pu=C3x5l5uoKw#m(lB7}pg*u)F*rI`J>V0dh;sP%&lguIf!Gmz z29QGud6X$l=uhQxA&1rlg5m0~*`<--s&{t;We8A{yqR=tfn_-7_@h5BRHP z#W2cq*Om#$j73d`H74D2?0xLGW?UvZ716U{h;blLo-ybpf6~(2S(xDcx=qla7Hf49 zG?6DVT>+^=gKnY)UT-ln4WVF*%y#7n?ItcDt&LIh3E`oY4FYzZ-9IF!`rW@A7u z`hYYLZXiaWKn%_Mm};0(x91i}aE}>m?y9rxEjj-dsYrH2`AKzv`lr(Yl%iiIm>DFD zxhk9CG?_o=H0Y;=6VP02v5hdJ;~tdtQs(M$44srN%%P*h5=zc)=1wYTBhQVD z<$J-UKR#V?{NAz)9YiPgja+G#OsMfByWPNQt~l3uvSD71rqk(#wxo6h0lBN>hdC?A zf|-VlP{y1+d+2sna|tc-@^7Nkwu*Pf$z)vgW!R*n;(BAPnM(d;tEgY%sWUz~Kv zRXcdU)D~4G%}|Ffr7YE*MA$VbG)dLnc$B#EebzkFp2gbf0n-FVeJ%*lH&ywGN_V}i zgD%aOW9Ix`PxNK}UHqx!J9#;B^2f{tQTnmj#HdH7`Mb=q;0^@Td_lu9_T392F0YZG z3E}T3hNN6fA6BjXcgz;csk8F2iv6N!>B}fWLxa4w)E`NBPe1N^^)44d=-mc~KPy@! 
za*nmmd@rT=ESgvyB$kBfN43e_=WK8xgnh^khS$e|cuglHB@AXT;fA^R4d7wh%m9{R zOhYY|x+W_f??Q`3c5A&hUKnxM4N;D=c)f-^MPpML{qR&w4EsU3 z{?M{@7Q%UW5L0}MzK(U(ObcuWA9O|)^6)!aLFeIeOes zele9PBIqvb17WV>GrH%p$Qlvmt5YagU`^PUb}xILd)+_8K;7h5roS|Ch+-5zT(nlS zK!9==Y^n?gvz^UdbUbKaxGSxm9|noUk9kaslFT1Irx}=b3nsF3VX+t*zrNtU@aIZ5 zpw7nltA%;|tPrbLQA8b(S*#I^P$JsZpK8!kWBC(S^&)3iv&h2jI;+PYAtfJWCuF2m zq3WSO&)z9pp0hn44AX2Vl0&mH}Mr{VV=RVAS z`TRlNT$`iO$Qt8@L0SNuQAZQQga=`cX2x-gu}i>3%SS6xL}WWghl8=8N6_NxV76ON8%3kOWz1UaOHGdGB>F`oRRX&gICnHkXeH)&2z*G{ zxhOPLU2Bdo+ly}la=;7zLg>Va9KVsA87<8G`urywh}qZcVmBr0w7>mL(%cl+y(srM z9#(<7oi^o%Tkkh7V%MdZfgDplxR7a({>F8-gC2rZ7xO?ouK$H5wIXs;8QwiEORrD%4-|H`Eri1HE1eB;noGXLufG2CXz>*R z@Bh}H8h)c#zlLM>Aa!TFt)i8zFDm@{h=nb zr0S{ifGh$yaJN?BKA5d5AtSsZxMJMsKp9B(*(Xif>y}5=;8qSn(qojAG4D=>V6gwE1imRS2Komlj=+Kh<=YNleu?RQw_&u4dU!rp`sLrN7M zx(ByoWxFk32EbK74j1igG>9rd>I^(x+WsT&P&HD!ust$^f-2x#%1R}ft;jsxmp;+o z$XF~2WmBY6!1mV%5{*B`7&^7xtrxm&lk42%zl1_jHCloc*==uvQ+g%y@&%`RrXkva zRNguKd11<4*_nozbOp&Ewxa8`xSDRim(O=U*c-`eQxMqh`_!MBXFa7w z#!9Ezrnz|2hB+)A+~Fn8p zA*DRda??KBF)2{cu5cw9LheI|e7bZ>1L+>n0Y*9=K5O1rPO49pLSZm0zPqApAg$dm zI3ppK)yin87-DLjGxUpZ(`;&+R6H_?GxH;}I*1X4jbcbOCcTu+ z^t)Fr&CCTX%QuAH>ZVAnoer9W00FE>#Dg($ybCb0H+Mm8$nW2+|FHi@oG8P(e8XFpOv4Y>pOWrV2wt)-mtCR5rMU=c)(d8GEx^MJ--L zN_v70xp^6ek12#4ijf$V{Ie8kW4mRa=oLtN62=|ys|Hy^x&}j|rI!c@h09@PNFx;~ zxvDLK*aplzaeplYnN7iJqPV`LLL4r}r{9!pTWVVHmL4T8ITOL5;cd1n-o$=hai>wx zSH?7NlItQf2MXRbu48RcvG#E8M?H+nxHOx=n@jY5XXIwB z>P|I`kLo+atIvk2ye3V~P&Da$0cba%i&UQc14E;1q%$>&qG!@;OcHYirrowHLU-#i zsqE*)XT9c~^O&x{Iha^CCuN`|QZ}Cry|GrJ%Jd>e!b9}M8xP8+;9ICiBd3l{nmcMM zTU6fH?aQWU#N7WtLmiIeetX{5uk*4)a&r7z*8Tfz{LPRYJ<)ETkhW96tP?3q!Ek{g zsK-FJd*CFH$-Z|DYSnMZYJQ$!TKGEd1Z!lSQ3VFbmb^~9P2HgWZkA^n=P;?xYM>0l z)^5XePzBK$F87PfzeOc+FT)izY{8qoWyEd@K|8J)LpepT_|-EUy7+aK6f1sdSv#aJI}!Q7ueL$kVUCAG(ecUP*^AA3uF<-??@_p2{ZU&3T$E(QQl^G8Gfmr~|In#3&o+v!! 
zHsl4%p^#+ucjIyVJrWFVXPUF;tRD+#P0rg+hdP`6Zslcwe0dJk^aAr7d=j8mBOco- zu9QJ^2*Y5|iy{N`tg8=A3Y*me@z<#YvVUgAh4BUszB)_l&CfB86pNk3Y^`tC7gtyx zjFA__biN~o0)zy{qPZeXeiO7V^kw##D~ZhZuW**Tag%(XV?R+YC^n4m{cXq_K0@K3 zhEo^O4O>#GE*FUKyqu3AnXNAV2&0X~;O2(w zSE#qUGqWE#tqS|?qnx+##ax0?-~EIEaYV)lWisz)BARvCSMId zTYuf@02ijS0pBoDq=>YEU{YOUun#7aKNp`mm>RAX zw$I(w>++R(te$~_`F+XYv59F&7Ag2;4D=Pbd%tbQt-KPmUk;IwQhHCOx_=p*@X9wb zd&m4wUIBM6XWH`sPKW|hHAemT|dWtTo$V=%tXZ|OmOEV!kh;vN*MAuf)W}nx1Ykr_53E}8tg>)hFd@21w1RXlI zKC9jc>NtyM^&hEjGqNylj9qCO5DKD$I~|=tFdl~8+3h{o2?r|=1?V?Qq1%bJTPN4( z#NHcx{$*sN;MGaqM>bpMEb&J=)j3Y8_q=&u@YxKQ}ka zbss&pj*+QyH{%XPmR*f+0~4+e4<0GXIJ;g+A_i3(kd=}qQD|EDjZ?!NQEme)-hRGV zki-`F-buuH>+3SWVX#xhMt_(2#GLiuVydargShU8XwdOJjFW2CBm}0gqc>$C8A?ak zCA~{T@OO}6w+1GVvUxQ+K+IeKelt=9Eu?+@tysmFX{HmI)-pv<3}KfJ#%_jG9PmdU z@+K+3P0ILWBnfjdNHV_#bjyv~0XJeV1(A!MHTueiuEnU1C59DE)Xo&+@GPya`Tr9L zbQ(cnh0`K-fA+7vgM}Ls=X^((Cf82}11JYZSEW$OB7zB@U1JeI9uUGo)?lbEoG~=8 zNjj`X&iI3yj$P;x&hdrt-sFaDp9v8?Ee~dx+59!f9`lXFARP}f7M~*F${*ZxVi(=R z0d_g}y^QA3V)lCxLf0j+`#PwPF;PbqOBoe{Pk1H4<&Qha*_$T54`M5&h95WikOkig z917;63pNcjDV($zw!j`u?Zi~n0XR-9uhFSO{!@3E(qZF|{OS2G>X`%evGqEfnJ*o8BRcKNOZae>t}GDk3$;D_50L zRZQ9-2lx@D`_-6xt~r#dQz_tS|WuV$y~0E{MYaEi-zXQ z59>Q;DN+z{TL2!DoyYr{!#&OMci{5=RA`;8bxI+>GXRU!RjPch5?hR{i;^RquiAi} zw9HEqEi`HtqA$r6PNEhfL-)*WT&}T@VLrQg#>1Ml+&H|g-)t4P0#X)&4;W3d8LK1g z-MdJi{$~Sq1DHW{(grvOj_G3XFt25_q9DIL`I}5){Ak(Wwu(Q@TJ#PdWIt?IfBMqT ztkRP#w#}5d2Vw)#@z3kmx&i(;D27%##tVZV@57Xc6!w`(suUtL+(Y581W99QT2bP?IV(y*L9hZ7E zLC}*ufb%ONH+x@11fc>^RtJ|lNA4sbbY(Nz)&vV8N%3^JwpX(dr!ZJ}^l)Ui73x&y zj~yU;^8} zpPf#_Pea+jGPN>ntwCxb81d>)s-<>cH&Kgnd3-vR++FAWT+2ZTL!I)wW${yBI3#cc zpx2s5UM!4pITam`SK^lfw){~~r`OFRw_2R&ukWR^ui$f=7er++Nn`o?(72iN`SaCa zlcr2t`QgxZ7NWR!FNGX-*ls#pdiqbWgtNcetLPn(Ym^p|I_B?OeN=&czq8N2kuYGLn`!~QU{2jt@JugnRi}e%zoCOvCah+$PS3SsMmrOazo_p@7)|R z;qB5yB6wcELR65IRd~8+<8#_JrmQA&B=#j~^(kE*KKg;T|3M&<_$zddl9FUw2_&jYJB{tjkrjL zl1ZRyFf3Ryu~Tn+H+F=4qXEOs125H0vwyAR^1}o}Ym=jebn^zoCS-2#Pqb`_ 
zJDk0gL*86>l8;;be&c3(hQD~jx=!)dD+LzBiC~Hg}qS5LzDI~ufLMwcn_#&cE|0$5V{>p zO)O;}3M^|{p z!+KbvTZ$oAi|VPXaeT?;+#ZKGM{-zg;ShPOp+F4d+oIW~mVTpMSTWM5WTfE$m_63g zbulqNMvMJoS1_F>cgs*+{rQ`xa6=mzhs;RU3s&ey?9|+tV>*8&P1=d=t4R{?4M7U} zR7iq(AH4xp@6p$dB8({sze4|62_=ju4xurxorS%XAsW#grF|w*) zVK3OrJt9?!jvfDxUNOxs63>hBcSrG`9h43mQYi03%QnZeS zq6&Iip!^{U#TP)@#)b*`92Tt z?WlXT3~gF&eXJasi{O@ASR|b%<;s6>FC;Grl!EjE!foJaN$QQ3 zPHTV7z1*dG5HO=cTo*x6m|{4k2XE`2#uWo3W2Hpp7ewR@VhhqFtII?t&qQDG_1(+d zBDeb^qfZ*0^+E#(*ON#fhqUdwOc4dnMGz&OO?8|lta|6f_qYgHAL60nJo$pM`f*w& z*K6|j6EzYmCK&$IY0Ei&`>J7dwOr}t@9}1xxU*;~ zD^EBiSR*<gyx)9nF7`k}EGCWMSNO7_MLza9-j>E*m`?VJ0kBP$U}P?k2N=d=|J zvAL7YHj6m}V$N_?|18qt^o${URNegPUS~kt@GXLR#j;AsZx{sEd8P=egf9YlLPygXV#yDoRq6S|S zH_zoa=lZWbQKC%2KlfcyRYZV_U*wR*GJYi z1)H`@<&lc^P%96X+*JyxFyYxo-ejNug64J0K$E=|q?<$fQa}uB1WIj%?`hLINQ<4E zp{BLZ)jt^MA{2noVTHX$C2+3Li$}Adbpo*@>Q$rOcmYt^aqk$tPU?+;CZ4p)-s)~- zj$>Z9t9z5^V$O3J3o;xkz{LmVZ~>IC`E6e^{u_+Pd z$f^d1s&$CeI(X0*xG#?hpW1tF_p1jgM|TeQF8d8)V>i&k85m_~uqxuiw6t{=DlEH2 z>`%Xql&Svfvja)HST^S}9wH3#!9o@Vk(%z=Sbq|Z6`&F`HTU;K!YrWcjP{AI);hkO zdsV@=_MIc&r_g6S4U~qY?@rq?eI#M>85qG`x#zT^BidiM68`$Pe5p+7!EV;}@pB+V zrrbQ18r-f~{0#_heP8QNQfA;BF~RnIZgx!;tK^jezVqJh3x2-+g+~?vBj)_`H);=d zsd^e8Q8oTNb3H3KZOLdCAsv%tm)IDuWA3zo!CiHjXfKd#TU znP}_>k?K~@JZD7|hC*}0cN@pip1JZXUG66!_9esG*nE@{UQ6;`o-p-4J`z|zgbD~k{g>7ca^y&3 zsVZ2gyEc$C6e87ThEn1o=7$l=8uf5VDXOPyXg!#g)S}IIMA1zYtGB7ppf`erIJgJ+ z*Rjk@{`OG99mO**%#XQjs=25SkOCa>$|Cta)bf z=T#=YjF?R=ix*(gw_&6p%_;6rIZ_SVK|G%Xnqrc_Ih-pLaw!E980~I%#@h40Fd53k z-T9b?o6&9W9&rP+*zRSu>}RLTMn5(x{FE>j6Wo@pFU;(hCYp7sMmKoRrjM#o@UNeM zfF^=}LPOd*#7gTXXmIcXiSn+pf$!-*4Qsr&%fr0hMmp$` z?+p__Ck|ZVQne`xp785kVXVw5d2*@QT6+2t3M}MIdfnGN)@j{+&Iky-E&^;4DonYP zs+NKHK9^gFL;dU-;ZLp#k-=l?am_(1^|R&p>C14En*bEQq(gE6Vz z-Mc)o!ZfV8V2b`TlMC}|cM-90jTsVC&2}s9?74Irk(46l|jkXsZdZ2pmu zlxl#}_wQmlGZrVOZxt{-jcaH~MCdfCNe&(We&Z~T5z8;>AWbTHnR*}*hL47s8dTr> zbFH88@FLCdBCRJjiiLimK;g29MWyw9wk5FD5Dyk%=o4CKnQ|H#ETbaHispFAez@G( zJok-Ew$LD};eB%B1hrvpPm|>p%B~t*v57_oVWHk 
zsF2pFtcV%P+|jFJjm$7wS>Sz-{M!GeaFEPU*u%TV{}9G0t9F17qobdJ2IumeK~H& zIL*dwUSaNr@XT!sap8yyAMR_X%B5X#baB8vPds>H_~CHo(jOZ??qX9ay7|%aPos&h zu$yGJn`D4v(&mx%p@0x_cR};Pp=JFU)O~tsrdIvKl+3R!iV{wVC_0L#x%aPAZr7?u z90D(vfB%_Wyywg-ZGO90>e4xxe=xIS?RI(p>pRVrg0K>sfk9yJ;!-lQeukCPIKOos zv7JX_YRy{f;Jx1G!$CWW9?0lNskG+Hx6xX8f;M=EW^*=9SHkNBRK9_L0r(9g&7N*I z27%#1nD3+;5PH9rxuZG8!6vX^B8$REv&?}f4`0ncBa9&ZACpuQetqNo?t}=*_hkxp z^i2#o1FsT`F?RW?X+N72Z*_-BG32h_ex0XNY?;dPdrPeYEkp&$&soR8mrN~aXySiI zm_`Q(jw{B2Cr{7gHdU|U5>kAP_9{3RSjHOr_7UPIqxNd*a;Uzp*xO1}dnb6q?=?8_ zDlI@9DJA^EM;G{wMvyg(JmDKaS)P-j=W&JGeBz-O#eJB0WC za7gdXVElgNwl}oH6dT(c@J>=FqFTH~r?sH4=7p*%f!oRe#Ld%CoCZ}0ds}r4lI1qE z_OaGF&u;mKGilD3DfFRu=bk&a_M|T|1w}2-;pny*G&oouE_|5g;?AYyoBw4wdLQZg zT6tB;E#c;mUtbnldFRgSEhY6Hyz%xF6yFkygGg{H<-s+)qP^&DUtd(*dInS7RX(Tl z3VRtOa)P%QakH5+)^jI$Msd0~ANa(tiVNGPe5s^%Yj3>gZijX7-l0;wf$H+Hov4W} zY&p5ZwtfKmyRF`3TM=?g!$V(qUPH485c-|4+&b)CFyR$5N8>!c^l1HJew1{@m$H^y z!`!^2=-w$8Xs0cp()BsqkTQiNX=%Du>q91|$AXo8%cj6mqV>_xL-kdghU&E!NbI_> z_Uy>NG4rD{4y?Fs!b-HcW%%J2B&^ITn$#{R{D zDTRA$fF56$?se&q&Wnm6<+2%tviFZgm%K#BUQ(#^<24J zCwADi<@bjN&z5z>_aEFOiw{30Rm+0{U6TG1>Rq==}qN5+#~B$mDGB zP3XAi+xZkS9*pCl{dT9{R7=qwMqyVn{XsIix1$6p`*zlWecpF_znpOMw-Y>hi9tR% zmp!hy_6QyfJB7V&18fWaph9nF3sJFaY+uV~PS0qtm!nP-kKnG_S7p8H`Cs=m#v{1t z%c3&lK5>t)Aoe_WRUs&x!Qdi+x!eqSBiXQ%&x7D9m~Y={XXOJISz|l33zrB?MGG2x->C651Ahs4+cTTOTU*$A^^|pP!s=Upi|;f;JHdrqP$%medMQ7mxaY3 zk-g&`&v7fv=dY%IgB7RmTFylqy55T|xhBUaj(!Qsm;6|`AO=0}ymJWO6eYGw+!7A2 zF*ZSzuzsB*O4c~PIIyJ%o0Y~(dhEIIYn?wsj_$^9I1!$9JRdx0KWRG}ktl`f#=ved z&%U?c3tc&ZEN<(!^ilud?N5FSiLe*+Gz=tgijwuylxR?!ea$~D+=tm5!$KIOj!!uZ z7t?9hNO30i+!z&#)0>0SBPtI+v+3`j2ld z1pA-8vThuD_H56@a61J?IzqM(qentqgqt{ERA#J8vR7R~Y4OVWG|W4d$5DYZG6)&F zBqTM!n?c1A-^CxvZ0T&%0Lj9X?6vVSVG(K4Gad|E&tl2~hTk^V>Kk)M-RGXH;yF_y zJ)|fuoj%rUTIzfb9IKQkBWc#ns^Jaymq9iaK-2R2T6uh0F;Fpx-F8SCeK?QOr)OUZrsI3hPK(lkm`%_UB7{uwxsBJPwB zIDnfbLSZZ%KHuFyGXGweriwgzZ}PsTR3b>;C)JXl?lpKQiiAC zuoAc5wNf+<3XGfRwSINglI!t8^QD!**fAM(QDn%KByL7t=_E*FQ15(F 
zecL6ks{CPgn;az&Y*!YjJ7%HKUB|4l9PCoQ*O$uAXMt-w`K7ONY1UmUB~O@2U({gA zbn|BR4W%6Y5Tnz=)VV;T-Rq?tu%w9D*iTt#h$|4zKSs z_|j__yhqkF-OPYnfbY*{?e&7$(P}W1p>?foF`P?c5jm|IzcmZ55(9_8j{jVEzH*cP-B}5n&Pl z22giFJS^d32owGD?f$FJgb#5T$q$Pny8`s-_;AyWx=;VysCo7iOC20D{m5);lRbi9K8qaZ2 zMbp4ae?CDfkd?&`$-_t&E1@iS4tHv~mx_Ig&I;4&b8C zt4Q{en_CX{E(^7lOZ>4p6tt*qa?qN@wHve~Uy8m;PB)?E zY`mYcXz7}_c{V$D*dNbtplp&GgmTRMM^JS`*w{DrD9QFJ7IwaL4XRbO=mFV{Z1-gOs7iwe|h$ zEG6N1aw!8a0l4r*W?0k@)=0;f43p`(W58Vj=qDwV0(4#qWM&_#p1z5Z6aqyne&HqO zS?nVwkQjx;mI1l_6z@Q$-4gp)yh2YU$6SrOkPnQr2t#{nBih=pDzZ<0W}ueZLZh11 zR9jdje0HowM0ArD$=6sUHINi%ldpU5z(Q1(Jm@%#Fm zw!=a!h+X@0xsKDK-IF=HSP$Wsc}2w=x=wkpJp6Y1dEff3n6hoS*aa zmj{QCP9OLcgmmxln@pDEs`mSH_rb`^$9Ws!{%6lgD@9?9HNuPs@x!_r`U46Brn+aW z`&V-;+d|Z5GZ3r3q$|qCOKTlE)wMO=e@xS4^Do1Nk|PIJWb-4DJr3swcG9G6$9mBL zwrClb%W{vnMh`i8SfNGmM0k_Nx@b%1-rPl%3GbEgRZ~!H;^tn`ea$C4&p`YI1f=q~ z*}^lNO<>0NMLp64bvDUkMG#ILxfBB zWpEV@W5sndZ@l_hy%w-_Xw@E^1_`k^CM!57qu3%)e538FkkPM>A<@}8w6uxoqofaG_zN|?Jo^%p!%-apC6UXg4pN~p!pwL`<-Tf55 z^A$N8qR%? 
z$_{BGT=2Kj=MKI!itEMW9K5%yD@FTK;_p$4_2=%)gJvr?uil(8?|^yZ568y!l;AwZ zehtRBG8gKOFMbt%F{bg={1P4KRWuJFuPu$4^b?8qsh)p!Z1;+5P5Zq$wJIdUZu%7a zCpGm&jUPfe8ejnd0qfov)V*@TfJ`>BV-&s}d0sVUB(#|do|*r=4CeThvP~d0C(?XY zR`@?B4AGkSrQ3uFIHjqlU;w~wUQ*_?)h@6|Etw`gqluUBsO+3*L-u6G7#E|#*&H{p z&o4G<9v@)I(iYXff95pcs?`!};kvM3mK;!bOw_*oWHPsANiWTzapp**N1({+SYlCL z_*Jp+u%MEfu&b8i;hZF{h{`;7 zVe$0KQ8MA>W-Fh%mircrYT-|Kxuw^B%B{`;#XapNTVh*wa_Pr5D}aY+_Ii+^T@JsF0@Za=jgbyD2mZPHYiM^1MSnaLq$h*p^%1SVKC%6fIUt3hee36*IQ#=7odQ*bQfOAQGLn&JMp~_`r zti^`Vqxh8E7bZb!c&=E)n>!gT24pBieTe%vUERbq8dee!#N4k8u=>2+gLm` zaR$G?-{E62prbmTP*sdu0;n>`@!0G;PaaMOIBRv$3j(g^cfBu`eeq0lXtgcO&6U2^ z|A?Vclcqrlrfp4+MgOP!nZRIX%RKr~B01@RFT%nx;ZH`Dxvazws>avZZCpa#_jHz2 z)X^UwjvnEG?gWKL6-?xZ${Qu4%>sx39A2#2dJr~4zn-Wk_# zCR?tbr9U4>pqX6baR=5U&40HA1r>0Z z&%gf9uOT>SZ2##Bfd!KR_@!;Kvd`b5XWeDMnJo zqCECW@2~b}=&h`*2tvD%mB~lj=bGJWt7>a+AMUeX{=d{X9Lo47O>U9={}Wh&@@uYk-Yo5ECS|NjUp z8}fezf`f+O85tR&oz`_qb$v1Fd+<-AK-+*7$sSt~eD=2w|GokbRY4%@_J2#X`WAs_K@ z<^t3p`vU%WxAJi)xALRD_qpaBo~jP7kys7>d-pEw#Vq^qeyeM(%8yQhDO;h~(XZ#c>2m{c>Ug_~Yrir?(`(SZ^92zad8PnZW zTCSRp`kpd=kK#+UBBTav-YIaiT>V>L_8q|69zT>KFfb&r#HOpOo8`54F79} zE9R(l-`$*r|K-;imV2AR_wps#`4hJ67^(#w=W6e$C=-s|f+k^A#;mUAUl zP0hST@yp-F8P+<*BSE9`rC(E4v#P6y^uUT9E=TKowi=L3Muvrzc7=63#u;^938rZL6tE1*=f~LNQ zy1x#6&emVM`!P$=V^_6M9{5Uv1xn-XvpGh1Nz0#)J4Z*FC^60=C7w2a`6%lDN^a{D zrHTb0HF-d4)c;EDZwDulfCIc3MTDO73R+HG%<&9hkKO~!75&C20nN`{QAkB(?q;W3 zau?TRASE=;C_m_>I@TD)Z-Vc(rNzI|JYM(M-ktOeJue2$OdSJ9cYIC zzb7XbA06i8RIB1-SLC^T4@~&qF>eBUUjhcT^XrG>K=Izyu2A0bQEHg(_QAnWa=I0< z2*@E&@Q69@`~TD1l?FAHW#QHq*+xKUx^Yk9CwL{vmXWN%~<2!sHM z3W_v~APS;Flqd*9mX>`m_)_jB{7*EKgQXj(0HJ31A=a(Y0MfN@N_ zy1`MdgSuvRsbsbese@tbI$8H9^(*$+5Cd+7bPCJdUF7JXw~l^0LRODa%9U|Om4@oY zva63d2U_d`C&JM3yIpI~NMWDUvE8p`VmVjO1iDMs&^eq^uKaH|n-Ex^??5*BJJ=cd zZys_RgdWHy57a6XqbouoW4FiwDUewf$@3oZfJq*9_VxKqGpgXzr;2u1&nFyK)wS}ml`1Cp`9CmWR^)@RW!oRSB7=Fxn6-4VUC zy^uS2fOaI*yVwPNLQPGd5Sq#L!P0a9O{gvTijzsH;1+!9LGIa~V#&4h@bui$9Z(Vu zJj`eJh4Odk!5i{Pn=Hb$5Yh=`^6YHjNGM)%(Oai-U4b#M_}Xbxryu`Z8*nb7Q83-G 
z>Ln~qEXFwLi05fA)u{AeRD3JKaS;QNKHfTC9`0#aI87)jIw9?-pNpAu%-*k#J(>ja zH(B@=p43xxuIM1E(5Wa1m|TA<^!}#h;h)I9A0C?`TSME-#Q9dI`z^a4Svi15Z16;V-SC*GVTVKUPWZ(e^w7ycN ztwJX_)mKyhneD(Pr=+MXhS`(x77@kKyvNM@Zxeqyap+JiCYKM$C3U_oFE4Ktr20F= z3l+!1XZl~{L9Q6mvLpYc0hXQ#&xCq*7@3)6Ij^0PG^t`YgRV=G=QTCXXNl)^5i(J` za-iwhTa#gn`SSMKjlI$V0qGn{kHKKDf@J(4h#_Qo;bZvDqC~P#+0WnK5s$}A7H~nF zv?|=9$;uiYx*=*P#&r(DaXjwQG8Zm8jT-iD&oP8aKGQsdgZJ`8Q0E3S*%FVX%@p3= zeZKTSYi!#Hg%X#~haABUUCN|jHFPODkaZkH{3^1cDKq-@q@|_cOK8Y!&3FrQsg?PT zKg00(U}0ji4N~x2ln})2_M;oG#bFvo@0}R^bysmJ1ndHmz^W&$W?4M-I^(d9eD+r2 zNXhbK$>?w*Jfk^YHLCw?*mWw61`d1*?6op*qiIJ+o8)2lSFoYlpCe^ov<5ARxC>oK zI*`nU?>$4P5m&O>qsR6^$-2m(Soj)y*W1_EZj-jAnR`M}m zQMs4qNRmL*?l4-)idol5P62;8-K7<$cKXO{J8b121AG zUQEg?!l(BJ7m2~@e4l_Qr7{NiCWNXg(|z!~m>GXJ`h{g^E?%;n@{gVa9wIOVY#R?7 z9PDsPvRHzfsm=y)S_v@d+)EkK-o9{-VpKJNDVk?6mwWHfMySQEbw)fgz_dx;I&G7e z0>GxJAZA#TT2KjvnqZLgvxjvwGe+<=e;G&H6rlw;(l3nt zP&ANd7=c#y=t956w~ZR_(Uv)pcT7|cyTZK@93IUCU8v3&sO4y&P_u!LIaoB9D77LB z<8?50sBWOWtrZ_#;ip43?*Ykj_A)skwdE!dRa=ZX#zDZ`+;N1=%-?)~Fmgo>$>G?l zSgUf79gT56AU~#9i`g4+pbw+{V8T|CPiO97XC`X8jMg&ywXO%h)>^%xp~0Ea5W+gX zxUe3HfL9N$uHm&=1blLIb1OM{mUja+T(HADu6HDE#(eCd$MJq7q$g}a?mh`EH5^*1 z;)8Z9|8)r9wFO3A`d&e|qBY-oamU2el%uBCpxJeUYujs*KGMT4f^yBya_wjH+qILG_NiKOu5&{V z#H&0T+^)F}cI)W@wApb7g8l+XC9YPjQGzKwqbx&VeZ{& z%iRPCAr=_BS+En9oS%jszVorI=0yQu)v|M-FoBSUE$Yy9E%A^|D^~p;$8^fzs1E-K z^c8f^;B9yfc;NPr4>tdLegend

Green intensity represents token scores (1/probability):

- - → Low score (high probability) + → Low score (high probability) + → High score (low probability)
- + Score gradient (low to high)
+
""" # Add tokens diff --git a/tools-superlinear/visualize_tokens/tokens.png_absolute.html b/tools-superlinear/visualize_tokens/tokens.png_absolute.html new file mode 100644 index 0000000000000..0295d5946b4d4 --- /dev/null +++ b/tools-superlinear/visualize_tokens/tokens.png_absolute.html @@ -0,0 +1,981 @@ + + + + + Token Probability Visualization + + + +

Token Probability Visualization - Absolute Mode

+

This visualization shows tokens colored by their probability scores using absolute mode.

+

Mode: Absolute (1/p)

+ +
+ +
+

Legend

+

Green intensity represents token scores (1/probability):

+
+ + → Low score (high probability) + + → High score (low probability) +
+ +
+ + Score gradient (low to high) +
+
+
+ +
+ Artificial + + Token ID: 105460
+ Text: Artificial
+ Probability: 0.173214
+ Score: 5.77 +
+
+
+ Intelligence + + Token ID: 30515
+ Text: Intelligence
+ Probability: 0.046023
+ Score: 21.73 +
+
+
+ ( + + Token ID: 591
+ Text: (
+ Probability: 0.293526
+ Score: 3.41 +
+
+
+ AI + + Token ID: 11716
+ Text: AI
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ ) + + Token ID: 235275
+ Text: )
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ is + + Token ID: 603
+ Text: is
+ Probability: 0.934620
+ Score: 1.07 +
+
+
+ the + + Token ID: 573
+ Text: the
+ Probability: 0.342451
+ Score: 2.92 +
+
+
+ simulation + + Token ID: 20095
+ Text: simulation
+ Probability: 0.224367
+ Score: 4.46 +
+
+
+ of + + Token ID: 576
+ Text: of
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ human + + Token ID: 3515
+ Text: human
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ intelligence + + Token ID: 17273
+ Text: intelligence
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ processes + + Token ID: 9756
+ Text: processes
+ Probability: 0.568624
+ Score: 1.76 +
+
+
+ by + + Token ID: 731
+ Text: by
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ computers + + Token ID: 25175
+ Text: computers
+ Probability: 0.054977
+ Score: 18.19 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 0.575647
+ Score: 1.74 +
+
+
+ especially + + Token ID: 5199
+ Text: especially
+ Probability: 0.574271
+ Score: 1.74 +
+
+
+ computer + + Token ID: 6875
+ Text: computer
+ Probability: 0.150116
+ Score: 6.66 +
+
+
+ systems + + Token ID: 5188
+ Text: systems
+ Probability: 0.363668
+ Score: 2.75 +
+
+
+ able + + Token ID: 3326
+ Text: able
+ Probability: 0.502875
+ Score: 1.99 +
+
+
+ to + + Token ID: 577
+ Text: to
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ perform + + Token ID: 3114
+ Text: perform
+ Probability: 0.720249
+ Score: 1.39 +
+
+
+ tasks + + Token ID: 13333
+ Text: tasks
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ that + + Token ID: 674
+ Text: that
+ Probability: 0.887141
+ Score: 1.13 +
+
+
+ normally + + Token ID: 16342
+ Text: normally
+ Probability: 0.937017
+ Score: 1.07 +
+
+
+ require + + Token ID: 2817
+ Text: require
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ human + + Token ID: 3515
+ Text: human
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ intelligence + + Token ID: 17273
+ Text: intelligence
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 0.823856
+ Score: 1.21 +
+
+
+ such + + Token ID: 1582
+ Text: such
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ as + + Token ID: 685
+ Text: as
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ visual + + Token ID: 9095
+ Text: visual
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ perception + + Token ID: 20908
+ Text: perception
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ speech + + Token ID: 11360
+ Text: speech
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ recognition + + Token ID: 16398
+ Text: recognition
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ decision + + Token ID: 4530
+ Text: decision
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ - + + Token ID: 235290
+ Text: -
+ Probability: 0.904792
+ Score: 1.11 +
+
+
+ making + + Token ID: 14577
+ Text: making
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 0.973670
+ Score: 1.03 +
+
+
+ and + + Token ID: 578
+ Text: and
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ translation + + Token ID: 17183
+ Text: translation
+ Probability: 0.954026
+ Score: 1.05 +
+
+
+ between + + Token ID: 1865
+ Text: between
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ languages + + Token ID: 17044
+ Text: languages
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ . + + Token ID: 235265
+ Text: .
+ Probability: 0.821057
+ Score: 1.22 +
+
+
+ <T109> + + Token ID: 109
+ Text: <T109>
+ Probability: 0.105776
+ Score: 9.45 +
+
+
+ The + + Token ID: 651
+ Text: The
+ Probability: 0.412099
+ Score: 2.43 +
+
+
+ term + + Token ID: 5168
+ Text: term
+ Probability: 0.501435
+ Score: 1.99 +
+
+
+ was + + Token ID: 729
+ Text: was
+ Probability: 0.430098
+ Score: 2.33 +
+
+
+ coined + + Token ID: 124133
+ Text: coined
+ Probability: 0.923018
+ Score: 1.08 +
+
+
+ in + + Token ID: 575
+ Text: in
+ Probability: 0.658250
+ Score: 1.52 +
+
+
+ + + Token ID: 235248
+ Text:
+ Probability: 0.930672
+ Score: 1.07 +
+
+
+ 1 + + Token ID: 235274
+ Text: 1
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ 9 + + Token ID: 235315
+ Text: 9
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ 5 + + Token ID: 235308
+ Text: 5
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ 6 + + Token ID: 235318
+ Text: 6
+ Probability: 0.954344
+ Score: 1.05 +
+
+
+ by + + Token ID: 731
+ Text: by
+ Probability: 0.968712
+ Score: 1.03 +
+
+
+ John + + Token ID: 3350
+ Text: John
+ Probability: 0.238503
+ Score: 4.19 +
+
+
+ McCarthy + + Token ID: 55900
+ Text: McCarthy
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ . + + Token ID: 235265
+ Text: .
+ Probability: 0.070430
+ Score: 14.20 +
+
+
+ AI + + Token ID: 16481
+ Text: AI
+ Probability: 0.045761
+ Score: 21.85 +
+
+
+ is + + Token ID: 603
+ Text: is
+ Probability: 0.398631
+ Score: 2.51 +
+
+
+ an + + Token ID: 671
+ Text: an
+ Probability: 0.074210
+ Score: 13.48 +
+
+
+ inter + + Token ID: 1061
+ Text: inter
+ Probability: 0.918106
+ Score: 1.09 +
+
+
+ disciplinary + + Token ID: 44894
+ Text: disciplinary
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ field + + Token ID: 2725
+ Text: field
+ Probability: 0.652847
+ Score: 1.53 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 0.054459
+ Score: 18.36 +
+
+
+ and + + Token ID: 578
+ Text: and
+ Probability: 0.088508
+ Score: 11.30 +
+
+
+ it + + Token ID: 665
+ Text: it
+ Probability: 0.099797
+ Score: 10.02 +
+
+
+ has + + Token ID: 919
+ Text: has
+ Probability: 0.237671
+ Score: 4.21 +
+
+
+ been + + Token ID: 1125
+ Text: been
+ Probability: 0.337246
+ Score: 2.97 +
+
+
+ developed + + Token ID: 6990
+ Text: developed
+ Probability: 0.089251
+ Score: 11.20 +
+
+
+ in + + Token ID: 575
+ Text: in
+ Probability: 0.257349
+ Score: 3.89 +
+
+
+ collaboration + + Token ID: 18872
+ Text: collaboration
+ Probability: 0.012660
+ Score: 78.99 +
+
+
+ with + + Token ID: 675
+ Text: with
+ Probability: 0.966162
+ Score: 1.04 +
+
+
+ disciplines + + Token ID: 50416
+ Text: disciplines
+ Probability: 0.020325
+ Score: 49.20 +
+
+
+ such + + Token ID: 1582
+ Text: such
+ Probability: 0.929477
+ Score: 1.08 +
+
+
+ as + + Token ID: 685
+ Text: as
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ computer + + Token ID: 6875
+ Text: computer
+ Probability: 0.753354
+ Score: 1.33 +
+
+
+ science + + Token ID: 8042
+ Text: science
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ cognitive + + Token ID: 31572
+ Text: cognitive
+ Probability: 0.230400
+ Score: 4.34 +
+
+
+ science + + Token ID: 8042
+ Text: science
+ Probability: 0.950245
+ Score: 1.05 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ linguistics + + Token ID: 146721
+ Text: linguistics
+ Probability: 0.456733
+ Score: 2.19 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ and + + Token ID: 578
+ Text: and
+ Probability: 0.136408
+ Score: 7.33 +
+
+
+ philosophy + + Token ID: 19754
+ Text: philosophy
+ Probability: 0.639822
+ Score: 1.56 +
+
+
+ . + + Token ID: 235265
+ Text: .
+ Probability: 0.900454
+ Score: 1.11 +
+
+
+ <T109> + + Token ID: 109
+ Text: <T109>
+ Probability: 0.671588
+ Score: 1.49 +
+
+
+ Since + + Token ID: 12496
+ Text: Since
+ Probability: 0.011366
+ Score: 87.98 +
+
+
+ the + + Token ID: 573
+ Text: the
+ Probability: 0.787236
+ Score: 1.27 +
+
+
+ + + Token ID: 235248
+ Text:
+ Probability: 0.839815
+ Score: 1.19 +
+
+
+ 1 + + Token ID: 235274
+ Text: 1
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ 9 + + Token ID: 235315
+ Text: 9
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ 6 + + Token ID: 235318
+ Text: 6
+ Probability: 0.153813
+ Score: 6.50 +
+
+
+ 0 + + Token ID: 235276
+ Text: 0
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ + diff --git a/tools-superlinear/visualize_tokens/tokens.png_relative.html b/tools-superlinear/visualize_tokens/tokens.png_relative.html new file mode 100644 index 0000000000000..90134b925d3c9 --- /dev/null +++ b/tools-superlinear/visualize_tokens/tokens.png_relative.html @@ -0,0 +1,981 @@ + + + + + Token Probability Visualization + + + +

Token Probability Visualization - Relative Mode

+

This visualization shows tokens colored by their probability scores using relative mode.

+

Mode: Relative (1/(p/max_p))

+ +
+ +
+

Legend

+

Green intensity represents token scores (1/probability):

+
+ + → Low score (high probability) + + → High score (low probability) +
+ +
+ + Score gradient (low to high) +
+
+
+ +
+ Artificial + + Token ID: 105460
+ Text: Artificial
+ Probability: 0.173214
+ Score: 1.58 +
+
+
+ Intelligence + + Token ID: 30515
+ Text: Intelligence
+ Probability: 0.046023
+ Score: 20.73 +
+
+
+ ( + + Token ID: 591
+ Text: (
+ Probability: 0.293526
+ Score: 1.84 +
+
+
+ AI + + Token ID: 11716
+ Text: AI
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ ) + + Token ID: 235275
+ Text: )
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ is + + Token ID: 603
+ Text: is
+ Probability: 0.934620
+ Score: 1.00 +
+
+
+ the + + Token ID: 573
+ Text: the
+ Probability: 0.342451
+ Score: 1.78 +
+
+
+ simulation + + Token ID: 20095
+ Text: simulation
+ Probability: 0.224367
+ Score: 1.02 +
+
+
+ of + + Token ID: 576
+ Text: of
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ human + + Token ID: 3515
+ Text: human
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ intelligence + + Token ID: 17273
+ Text: intelligence
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ processes + + Token ID: 9756
+ Text: processes
+ Probability: 0.568624
+ Score: 1.00 +
+
+
+ by + + Token ID: 731
+ Text: by
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ computers + + Token ID: 25175
+ Text: computers
+ Probability: 0.054977
+ Score: 16.65 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 0.575647
+ Score: 1.00 +
+
+
+ especially + + Token ID: 5199
+ Text: especially
+ Probability: 0.574271
+ Score: 1.00 +
+
+
+ computer + + Token ID: 6875
+ Text: computer
+ Probability: 0.150116
+ Score: 2.44 +
+
+
+ systems + + Token ID: 5188
+ Text: systems
+ Probability: 0.363668
+ Score: 1.14 +
+
+
+ able + + Token ID: 3326
+ Text: able
+ Probability: 0.502875
+ Score: 1.00 +
+
+
+ to + + Token ID: 577
+ Text: to
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ perform + + Token ID: 3114
+ Text: perform
+ Probability: 0.720249
+ Score: 1.00 +
+
+
+ tasks + + Token ID: 13333
+ Text: tasks
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ that + + Token ID: 674
+ Text: that
+ Probability: 0.887141
+ Score: 1.00 +
+
+
+ normally + + Token ID: 16342
+ Text: normally
+ Probability: 0.937017
+ Score: 1.00 +
+
+
+ require + + Token ID: 2817
+ Text: require
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ human + + Token ID: 3515
+ Text: human
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ intelligence + + Token ID: 17273
+ Text: intelligence
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 0.823856
+ Score: 1.00 +
+
+
+ such + + Token ID: 1582
+ Text: such
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ as + + Token ID: 685
+ Text: as
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ visual + + Token ID: 9095
+ Text: visual
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ perception + + Token ID: 20908
+ Text: perception
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ speech + + Token ID: 11360
+ Text: speech
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ recognition + + Token ID: 16398
+ Text: recognition
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ decision + + Token ID: 4530
+ Text: decision
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ - + + Token ID: 235290
+ Text: -
+ Probability: 0.904792
+ Score: 1.00 +
+
+
+ making + + Token ID: 14577
+ Text: making
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 0.973670
+ Score: 1.00 +
+
+
+ and + + Token ID: 578
+ Text: and
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ translation + + Token ID: 17183
+ Text: translation
+ Probability: 0.954026
+ Score: 1.00 +
+
+
+ between + + Token ID: 1865
+ Text: between
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ languages + + Token ID: 17044
+ Text: languages
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ . + + Token ID: 235265
+ Text: .
+ Probability: 0.821057
+ Score: 1.00 +
+
+
+ <T109> + + Token ID: 109
+ Text: <T109>
+ Probability: 0.105776
+ Score: 3.51 +
+
+
+ The + + Token ID: 651
+ Text: The
+ Probability: 0.412099
+ Score: 1.00 +
+
+
+ term + + Token ID: 5168
+ Text: term
+ Probability: 0.501435
+ Score: 1.00 +
+
+
+ was + + Token ID: 729
+ Text: was
+ Probability: 0.430098
+ Score: 1.00 +
+
+
+ coined + + Token ID: 124133
+ Text: coined
+ Probability: 0.923018
+ Score: 1.00 +
+
+
+ in + + Token ID: 575
+ Text: in
+ Probability: 0.658250
+ Score: 1.00 +
+
+
+ + + Token ID: 235248
+ Text:
+ Probability: 0.930672
+ Score: 1.00 +
+
+
+ 1 + + Token ID: 235274
+ Text: 1
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ 9 + + Token ID: 235315
+ Text: 9
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ 5 + + Token ID: 235308
+ Text: 5
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ 6 + + Token ID: 235318
+ Text: 6
+ Probability: 0.954344
+ Score: 1.00 +
+
+
+ by + + Token ID: 731
+ Text: by
+ Probability: 0.968712
+ Score: 1.00 +
+
+
+ John + + Token ID: 3350
+ Text: John
+ Probability: 0.238503
+ Score: 1.47 +
+
+
+ McCarthy + + Token ID: 55900
+ Text: McCarthy
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ . + + Token ID: 235265
+ Text: .
+ Probability: 0.070430
+ Score: 9.20 +
+
+
+ AI + + Token ID: 16481
+ Text: AI
+ Probability: 0.045761
+ Score: 6.80 +
+
+
+ is + + Token ID: 603
+ Text: is
+ Probability: 0.398631
+ Score: 1.00 +
+
+
+ an + + Token ID: 671
+ Text: an
+ Probability: 0.074210
+ Score: 5.88 +
+
+
+ inter + + Token ID: 1061
+ Text: inter
+ Probability: 0.918106
+ Score: 1.00 +
+
+
+ disciplinary + + Token ID: 44894
+ Text: disciplinary
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ field + + Token ID: 2725
+ Text: field
+ Probability: 0.652847
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 0.054459
+ Score: 8.84 +
+
+
+ and + + Token ID: 578
+ Text: and
+ Probability: 0.088508
+ Score: 2.14 +
+
+
+ it + + Token ID: 665
+ Text: it
+ Probability: 0.099797
+ Score: 4.41 +
+
+
+ has + + Token ID: 919
+ Text: has
+ Probability: 0.237671
+ Score: 1.48 +
+
+
+ been + + Token ID: 1125
+ Text: been
+ Probability: 0.337246
+ Score: 1.00 +
+
+
+ developed + + Token ID: 6990
+ Text: developed
+ Probability: 0.089251
+ Score: 3.57 +
+
+
+ in + + Token ID: 575
+ Text: in
+ Probability: 0.257349
+ Score: 1.71 +
+
+
+ collaboration + + Token ID: 18872
+ Text: collaboration
+ Probability: 0.012660
+ Score: 22.60 +
+
+
+ with + + Token ID: 675
+ Text: with
+ Probability: 0.966162
+ Score: 1.00 +
+
+
+ disciplines + + Token ID: 50416
+ Text: disciplines
+ Probability: 0.020325
+ Score: 14.24 +
+
+
+ such + + Token ID: 1582
+ Text: such
+ Probability: 0.929477
+ Score: 1.00 +
+
+
+ as + + Token ID: 685
+ Text: as
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ computer + + Token ID: 6875
+ Text: computer
+ Probability: 0.753354
+ Score: 1.00 +
+
+
+ science + + Token ID: 8042
+ Text: science
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ cognitive + + Token ID: 31572
+ Text: cognitive
+ Probability: 0.230400
+ Score: 1.29 +
+
+
+ science + + Token ID: 8042
+ Text: science
+ Probability: 0.950245
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ linguistics + + Token ID: 146721
+ Text: linguistics
+ Probability: 0.456733
+ Score: 1.00 +
+
+
+ , + + Token ID: 235269
+ Text: ,
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ and + + Token ID: 578
+ Text: and
+ Probability: 0.136408
+ Score: 2.54 +
+
+
+ philosophy + + Token ID: 19754
+ Text: philosophy
+ Probability: 0.639822
+ Score: 1.00 +
+
+
+ . + + Token ID: 235265
+ Text: .
+ Probability: 0.900454
+ Score: 1.00 +
+
+
+ <T109> + + Token ID: 109
+ Text: <T109>
+ Probability: 0.671588
+ Score: 1.00 +
+
+
+ Since + + Token ID: 12496
+ Text: Since
+ Probability: 0.011366
+ Score: 38.43 +
+
+
+ the + + Token ID: 573
+ Text: the
+ Probability: 0.787236
+ Score: 1.00 +
+
+
+ + + Token ID: 235248
+ Text:
+ Probability: 0.839815
+ Score: 1.00 +
+
+
+ 1 + + Token ID: 235274
+ Text: 1
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ 9 + + Token ID: 235315
+ Text: 9
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ 6 + + Token ID: 235318
+ Text: 6
+ Probability: 0.153813
+ Score: 4.10 +
+
+
+ 0 + + Token ID: 235276
+ Text: 0
+ Probability: 1.000000
+ Score: 1.00 +
+
+
+ + From 03b61c9c3bae790abadf7679b65dc59c9d737574 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Wed, 2 Apr 2025 18:04:02 +0300 Subject: [PATCH 21/22] Accept external list of prompts in parallel.cpp --- examples/parallel/parallel.cpp | 124 +++++++++++++++++++++++++++++++-- 1 file changed, 118 insertions(+), 6 deletions(-) diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index 7ef43d5e12876..671c65b451e96 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -12,6 +12,8 @@ #include #include #include +#include +#include // trim whitespace from the beginning and end of a string static std::string trim(const std::string & str) { @@ -78,6 +80,15 @@ struct client { struct common_sampler * smpl = nullptr; }; +// Structure to track prompt-response pairs +struct prompt_response { + std::string prompt; + std::string response; + int64_t processing_time_us; + int32_t prompt_tokens; + int32_t response_tokens; +}; + static void print_date_time() { std::time_t current_time = std::time(nullptr); std::tm* local_time = std::localtime(¤t_time); @@ -100,10 +111,55 @@ static std::vector split_string(const std::string& input, char deli return tokens; } +// Print custom usage information +static void print_custom_usage(const char* program_name) { + fprintf(stderr, "\nAdditional parameters for parallel processing:\n"); + fprintf(stderr, " -o, --output-file FNAME save results to specified file\n"); + fprintf(stderr, "\nUsage example:\n"); + fprintf(stderr, " %s -m models/7B/ggml-model-q4_0.bin -f prompts.txt -o results.txt --n-parallel 4\n\n", program_name); +} + +// Process custom command line arguments +bool process_custom_arguments(int argc, char ** argv, std::string & output_file) { + // Check if help is requested + for (int i = 1; i < argc; i++) { + std::string arg = argv[i]; + if (arg == "-h" || arg == "--help") { + // Print our custom usage first + print_custom_usage(argv[0]); + // Don't modify these, let 
common_params_parse handle them + continue; + } + } + + for (int i = 1; i < argc - 1; i++) { + std::string arg = argv[i]; + + if (arg == "--output-file" || arg == "-o") { + output_file = argv[++i]; + // Skip this argument so it won't be processed by common_params_parse + argv[i-1] = argv[i] = (char *)""; + } + } + + return true; +} + int main(int argc, char ** argv) { srand(1234); common_params params; + + // custom parameters not handled by common_params + std::string output_file_path; + + // Container to store all prompt-response pairs + std::vector results; + + // Process our custom arguments first + if (!process_custom_arguments(argc, argv, output_file_path)) { + return 1; + } if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_PARALLEL)) { return 1; @@ -118,7 +174,7 @@ int main(int argc, char ** argv) { params.n_parallel += 1; // requests to simulate - const int32_t n_seq = params.n_sequences; + const int32_t n_seq_param = params.n_sequences; // insert new requests as soon as the previous one is done const bool cont_batching = params.cont_batching; @@ -142,17 +198,30 @@ int main(int argc, char ** argv) { LOG_INF("\033[32mNo new questions so proceed with build-in defaults.\033[0m\n"); } else { // Output each line of the input params.prompts vector and copy to k_prompts + k_prompts.clear(); // Clear default prompts int index = 0; LOG_INF("\033[32mNow printing the external prompt file %s\033[0m\n\n", params.prompt_file.c_str()); std::vector prompts = split_string(params.prompt, '\n'); for (const auto& prompt : prompts) { - k_prompts.resize(index + 1); - k_prompts[index] = prompt; - index++; - LOG_INF("%3d prompt: %s\n", index, prompt.c_str()); + if (!prompt.empty()) { + k_prompts.resize(index + 1); + k_prompts[index] = prompt; + index++; + LOG_INF("%3d prompt: %s\n", index, prompt.c_str()); + } } } + + // Adjust number of sequences to match number of prompts + // We want to process each prompt exactly once + int32_t n_seq = k_prompts.size(); + if 
(n_seq_param < n_seq) { + // If user specified fewer sequences than prompts, respect their choice + n_seq = n_seq_param; + } + + LOG_INF("\n\nProcessing %d prompts with %d parallel clients\n\n", n_seq, params.n_parallel); LOG_INF("\n\n"); @@ -250,7 +319,9 @@ int main(int argc, char ** argv) { client.t_start_prompt = ggml_time_us(); client.t_start_gen = 0; - client.input = k_prompts[rand() % k_prompts.size()]; + // Get the next prompt in sequence instead of random + const size_t prompt_idx = g_seq_id % k_prompts.size(); + client.input = k_prompts[prompt_idx]; client.prompt = client.input + "\nAssistant:"; client.response = ""; @@ -384,6 +455,15 @@ int main(int argc, char ** argv) { ::trim(client.input).c_str(), ::trim(client.response).c_str()); + // Store the result + prompt_response result; + result.prompt = ::trim(client.input); + result.response = ::trim(client.response); + result.processing_time_us = t_main_end - client.t_start_prompt; + result.prompt_tokens = client.n_prompt; + result.response_tokens = client.n_decoded; + results.push_back(result); + n_total_prompt += client.n_prompt; n_total_gen += client.n_decoded; @@ -420,6 +500,38 @@ int main(int argc, char ** argv) { llama_backend_free(); + // Save results to file if output file path was provided + if (!output_file_path.empty()) { + std::ofstream outfile(output_file_path); + if (outfile.is_open()) { + LOG_INF("Saving results to file: %s\n", output_file_path.c_str()); + + outfile << "# Results from llama.cpp parallel processing\n"; + outfile << "# Total prompts: " << results.size() << "\n"; + outfile << "# Date: " << std::time(nullptr) << "\n\n"; + + for (size_t i = 0; i < results.size(); ++i) { + const auto& result = results[i]; + outfile << "### Prompt " << (i + 1) << ":\n"; + outfile << result.prompt << "\n\n"; + outfile << "### Response " << (i + 1) << ":\n"; + outfile << result.response << "\n\n"; + outfile << "### Stats " << (i + 1) << ":\n"; + outfile << "Processing time: " << 
(result.processing_time_us / 1e6) << " seconds\n"; + outfile << "Prompt tokens: " << result.prompt_tokens << "\n"; + outfile << "Response tokens: " << result.response_tokens << "\n"; + outfile << "Total tokens: " << (result.prompt_tokens + result.response_tokens) << "\n"; + outfile << "Token generation speed: " << ((result.prompt_tokens + result.response_tokens) / (result.processing_time_us / 1e6)) << " tokens/second\n"; + outfile << "\n---\n\n"; + } + + outfile.close(); + LOG_INF("Results saved successfully\n"); + } else { + LOG_ERR("Failed to open output file: %s\n", output_file_path.c_str()); + } + } + LOG("\n\n"); return 0; From 62f8111e8f65f0a0d4cb63a1cbb473405f963ba7 Mon Sep 17 00:00:00 2001 From: Reliable Magician Date: Wed, 2 Apr 2025 18:31:07 +0300 Subject: [PATCH 22/22] v2 - Accept external list of prompts in parallel.cpp --- examples/parallel/parallel.cpp | 71 ++++++++++++++++++++-------------- 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index 671c65b451e96..fe62bc5a429a0 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -41,17 +41,8 @@ User: Who is Richard Feynman? Assistant: Richard Feynman was an American physicist who is best known for his work in quantum mechanics and particle physics. He was awarded the Nobel Prize in Physics in 1965 for his contributions to the development of quantum electrodynamics. He was a popular lecturer and author, and he wrote several books, including "Surely You're Joking, Mr. Feynman!" and "What Do You Care What Other People Think?". 
User:)"; -static std::vector k_prompts = { - "What is the meaning of life?", - "Tell me an interesting fact about llamas.", - "What is the best way to cook a steak?", - "Are you familiar with the Special Theory of Relativity and can you explain it to me?", - "Recommend some interesting books to read.", - "What is the best way to learn a new language?", - "How to get a job at Google?", - "If you could have any superpower, what would it be?", - "I want to learn how to play the piano.", -}; +// No more default prompts - prompts will be loaded from file +std::vector k_prompts; struct client { ~client() { @@ -114,13 +105,16 @@ static std::vector split_string(const std::string& input, char deli // Print custom usage information static void print_custom_usage(const char* program_name) { fprintf(stderr, "\nAdditional parameters for parallel processing:\n"); - fprintf(stderr, " -o, --output-file FNAME save results to specified file\n"); + fprintf(stderr, " -f, --file FNAME input file with prompts (REQUIRED, one prompt per line)\n"); + fprintf(stderr, " -o, --output-file FNAME save results to specified file\n"); fprintf(stderr, "\nUsage example:\n"); fprintf(stderr, " %s -m models/7B/ggml-model-q4_0.bin -f prompts.txt -o results.txt --n-parallel 4\n\n", program_name); } // Process custom command line arguments bool process_custom_arguments(int argc, char ** argv, std::string & output_file) { + bool file_arg_present = false; + // Check if help is requested for (int i = 1; i < argc; i++) { std::string arg = argv[i]; @@ -130,6 +124,18 @@ bool process_custom_arguments(int argc, char ** argv, std::string & output_file) // Don't modify these, let common_params_parse handle them continue; } + + // Check if file argument is present + if (arg == "-f" || arg == "--file") { + file_arg_present = true; + } + } + + if (!file_arg_present) { + fprintf(stderr, "\033[31mError: No prompt file specified. 
A file with prompts is required.\033[0m\n"); + fprintf(stderr, "Please provide a file with prompts using the -f/--file option.\n\n"); + print_custom_usage(argv[0]); + return false; } for (int i = 1; i < argc - 1; i++) { @@ -193,26 +199,33 @@ int main(int argc, char ** argv) { const llama_vocab * vocab = llama_model_get_vocab(model); - // load the prompts from an external file if there are any + // load the prompts from file - required if (params.prompt.empty()) { - LOG_INF("\033[32mNo new questions so proceed with build-in defaults.\033[0m\n"); - } else { - // Output each line of the input params.prompts vector and copy to k_prompts - k_prompts.clear(); // Clear default prompts - int index = 0; - LOG_INF("\033[32mNow printing the external prompt file %s\033[0m\n\n", params.prompt_file.c_str()); - - std::vector prompts = split_string(params.prompt, '\n'); - for (const auto& prompt : prompts) { - if (!prompt.empty()) { - k_prompts.resize(index + 1); - k_prompts[index] = prompt; - index++; - LOG_INF("%3d prompt: %s\n", index, prompt.c_str()); - } + LOG_ERR("\033[31mError: No prompt file provided. 
A file with prompts is required.\033[0m\n"); + LOG_ERR("Please provide a file with prompts using the -f/--file option.\n"); + return 1; + } + + // Load prompts from the file + LOG_INF("\033[32mLoading prompts from file: %s\033[0m\n\n", params.prompt_file.c_str()); + + std::vector prompts = split_string(params.prompt, '\n'); + int index = 0; + for (const auto& prompt : prompts) { + if (!prompt.empty()) { + k_prompts.resize(index + 1); + k_prompts[index] = prompt; + index++; + LOG_INF("%3d prompt: %s\n", index, prompt.c_str()); } } + // Check if we have any valid prompts + if (k_prompts.empty()) { + LOG_ERR("\033[31mError: No valid prompts found in the file.\033[0m\n"); + return 1; + } + // Adjust number of sequences to match number of prompts // We want to process each prompt exactly once int32_t n_seq = k_prompts.size(); @@ -221,7 +234,7 @@ int main(int argc, char ** argv) { n_seq = n_seq_param; } - LOG_INF("\n\nProcessing %d prompts with %d parallel clients\n\n", n_seq, params.n_parallel); + LOG_INF("\n\nProcessing %d prompts sequentially (not randomly) with %d parallel clients\n\n", n_seq, params.n_parallel); LOG_INF("\n\n");